| code (string, 6–947k chars) | repo_name (string, 5–100 chars) | path (string, 4–226 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6–947k) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from serial_grabber import constants
from serial_grabber.transform import Transform
api_key = "GT14ED5QGNLE2E8N"
field_map = {
"1": "field8",
"2": "field7",
"3": "field6",
"4": "field5",
"5": "field4",
"6": "field3",
"7": "field2",
"8": "field1",
}
# A Transform is used by a TransformProcessor to transform the data received
# from the reader into a form that can be used for the configured processor.
class ThingSpeakTransformer(Transform):
def transform(self, process_entry):
# Retrieve the transaction's data from the process_entry
payload = process_entry[constants.data][constants.payload]
transformed_data = {"api_key": api_key}
        # Parse each "sensor_id,value" line of the payload into ThingSpeak field updates
lines = payload.split("\n")
for line in lines:
# Strip out the start and end delimiters
if "SENSORS" in line:
continue
# Extract the sensor ID and value from the line
sensor_id, sensor_value = line.split(",")
            transformed_data[field_map[sensor_id]] = sensor_value
process_entry[constants.data][constants.payload] = transformed_data
return process_entry
| nigelb/SerialGrabber | examples/ThingSpeak/ThingSpeakTransformer.py | Python | gpl-2.0 | 2,101 |
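A minimal standalone sketch of the parsing done by ThingSpeakTransformer above, with a made-up payload (the "SENSORS" framing and "sensor_id,value" line format are inferred from the loop above; api_key and field_map are the module-level values defined there):

# Hypothetical payload, as the serial reader might deliver it:
payload = "SENSORS\n1,23.5\n8,0.72\nSENSORS"
transformed = {"api_key": api_key}
for line in payload.split("\n"):
    if "SENSORS" in line:
        continue
    sensor_id, sensor_value = line.split(",")
    transformed[field_map[sensor_id]] = sensor_value
# transformed is now {'api_key': api_key, 'field8': '23.5', 'field1': '0.72'}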
# Copyright 2004-2006 Daniel Henninger <[email protected]>
# Licensed for distribution under the GPL version 2, check COPYING for details
from twisted.python import log
import sys, time
import config
def observer(eventDict):
try:
observer2(eventDict)
except Exception, e:
printf("CRITICAL: Traceback in debug.observer2 - " + str(e))
def observer2(eventDict):
edm = eventDict['message']
if isinstance(edm, tuple): # LogEvent can be in tuple
edm = edm[0]
if isinstance(edm, LogEvent):
if edm.category == INFO and config.debugLevel < 3:
return
elif edm.category == WARN and config.debugLevel < 2:
return
elif edm.category == ERROR and config.debugLevel < 1:
return
text = str(edm)
elif edm:
if not eventDict['isError'] and config.debugLevel < 3: return # not error
text = str(edm)
else:
if eventDict['isError'] and eventDict.has_key('failure'):
if config.debugLevel < 1: return
text = eventDict['failure'].getTraceback()
elif eventDict.has_key('format'):
if config.debugLevel < 3: return
text = eventDict['format'] % eventDict
else:
return
# Now log it!
timeStr = time.strftime("[%Y-%m-%d %H:%M:%S]", time.localtime(eventDict['time']))
text = text.replace("\n", "\n\t")
global debugFile
debugFile.write("%s %s\n" % (timeStr, text))
debugFile.flush()
def printf(text):
sys.__stdout__.write(text + "\n")
sys.__stdout__.flush()
debugFile = None
def reloadConfig():
global debugFile
if debugFile:
debugFile.close()
if config.debugLevel > 0:
if len(config.debugFile) > 0:
try:
debugFile = open(config.debugFile, "a")
log.msg("Reopened log file.")
except IOError:
log.discardLogs() # Give up
debugFile = sys.__stdout__
return
else:
debugFile = sys.__stdout__
log.startLoggingWithObserver(observer)
else:
log.discardLogs()
class INFO : pass # debugLevel >= 3
class WARN : pass # debugLevel >= 2
class ERROR: pass # debugLevel >= 1
class LogEvent:
def __init__(self, category=INFO, ident="", msg="", log=True, skipargs=False):
self.category, self.ident, self.msg = category, ident, msg
frame = sys._getframe(1)
# Get the class name
s = str(frame.f_locals.get("self", frame.f_code.co_filename))
self.klass = s[s.find(".")+1:s.find(" ")]
if self.klass == "p": self.klass = ""
self.method = frame.f_code.co_name
if self.method == "?": self.method = ""
self.args = frame.f_locals
self.skipargs = skipargs
if log:
self.log()
def __str__(self):
args = {}
if not self.skipargs:
for key in self.args.keys():
if key == "self":
#args["self"] = "instance"
continue
val = self.args[key]
args[key] = val
try:
if len(val) > 128:
args[key] = "Oversize arg"
except:
                    # If it's not an object with a length, assume it can't be too big. Hope that's a good assumption.
pass
category = str(self.category).split(".")[1]
return "%s :: %s :: %s :: %s :: %s :: %s" % (category, str(self.ident), str(self.klass), self.method, str(args), self.msg)
def log(self):
log.msg(self)
| shizeeg/pyicqt | src/debug.py | Python | gpl-2.0 | 3,058 |
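A hypothetical usage sketch for the debug module above (Python 2, matching its idioms): constructing a LogEvent logs it immediately via log.msg, and observer2 then filters it by config.debugLevel, so the WARN event below only reaches the log file when debugLevel >= 2.

from debug import LogEvent, WARN

def handle_disconnect(jid):
    # Logged on construction (log=True by default); observer2 drops it
    # unless config.debugLevel >= 2.
    LogEvent(category=WARN, ident=jid, msg="connection dropped")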
00000 0 output/setDirected.py.err
13678 1 output/setDirected.py.out
| Conedy/Conedy | testing/network/expected/sum_setDirected.py | Python | gpl-2.0 | 76 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###################################################################
#
# Company: Squeeze Studio Animation
#
# Author: Danilo Pinheiro
# Date: 2014-02-10
# Updated: 2014-02-24
#
# sqStickyLipsSetup.py
#
# This script will create a Sticky Lips setup.
#
#######################################
# importing libraries:
import maya.cmds as cmds
import maya.mel as mel
from functools import partial
# global variables to this module:
CLASS_NAME = "StickyLips"
TITLE = "m061_stickyLips"
DESCRIPTION = "m062_stickyLipsDesc"
ICON = "/Icons/sq_stickyLips.png"
SQSL_VERSION = "1.0"
class StickyLips():
def __init__(self, dpUIinst, langDic, langName):
# redeclaring variables
self.dpUIinst = dpUIinst
self.langDic = langDic
self.langName = langName
# call main function
self.dpMain(self)
def dpMain(self, *args):
self.edgeList = []
self.baseCurve = []
self.baseCurveA = []
self.baseCurveB = []
self.mainCurveA = []
self.mainCurveB = []
        self.curveLength = 0
self.maxIter = 0
self.clusterList = []
self.receptList = []
self.optionCtrl = "Option_Ctrl"
self.wireNodeList = []
if cmds.window('sqStickyLipsWindow', query=True, exists=True):
cmds.deleteUI('sqStickyLipsWindow', window=True)
cmds.window('sqStickyLipsWindow', title='sqStickyLips - v'+str(SQSL_VERSION)+' - UI', widthHeight=(300, 200), menuBar=False, sizeable=False, minimizeButton=True, maximizeButton=False)
cmds.showWindow('sqStickyLipsWindow')
slLayoutColumn = cmds.columnLayout('slLayoutColumn', adjustableColumn=True)
cmds.text("Load meshes:", align="left", parent=slLayoutColumn)
slLayoutA = cmds.rowColumnLayout('slLayoutA', numberOfColumns=2, columnWidth=[(1, 100), (2, 160)], parent=slLayoutColumn)
cmds.button(label="Recept A >>", command=partial(self.sqSLLoad, "A"), parent=slLayoutA)
self.receptA_TF = cmds.textField(parent=slLayoutA)
cmds.button(label="Recept B >>", command=partial(self.sqSLLoad, "B"), parent=slLayoutA)
self.receptB_TF = cmds.textField(parent=slLayoutA)
cmds.text("Select a closed edgeLoop and press the run button", parent=slLayoutColumn)
cmds.button(label="RUN - Generate Sticky Lips", command=self.sqGenerateStickyLips, backgroundColor=[0.3, 1, 0.7], parent=slLayoutColumn)
def sqSLLoad(self, recept, *args):
if recept == "A":
cmds.textField(self.receptA_TF, edit=True, text=cmds.ls(selection=True)[0])
if recept == "B":
cmds.textField(self.receptB_TF, edit=True, text=cmds.ls(selection=True)[0])
def sqGetRecepts(self, receptA=None, receptB=None, *args):
self.receptList = []
self.receptList.append(receptA)
self.receptList.append(receptB)
if receptA == None:
receptAName = cmds.textField(self.receptA_TF, query=True, text=True)
if cmds.objExists(receptAName):
self.receptList[0] = receptAName
if receptB == None:
receptBName = cmds.textField(self.receptB_TF, query=True, text=True)
if cmds.objExists(receptBName):
self.receptList[1] = receptBName
def sqGenerateCurves(self, *args):
self.edgeList = cmds.ls(selection=True, flatten=True)
        if self.edgeList:
self.baseCurve = cmds.polyToCurve(name="baseCurve", form=2, degree=1)[0]
cmds.select(self.baseCurve+".ep[*]")
cmds.insertKnotCurve(cmds.ls(selection=True, flatten=True), constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
toDeleteList = []
p = 2
for k in range((sideA+2), (sideB-1)):
if p%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(k)+"]")
toDeleteList.append(self.baseCurve+".cv["+str(k+len(pointListA)-1)+"]")
p = p+1
q = 2
m = sideA-2
if m >= 0:
while m >= 0:
if not m == sideA and not m == sideB:
if q%2 == 0:
toDeleteList.append(self.baseCurve+".cv["+str(m)+"]")
m = m-1
q = q+1
cmds.delete(toDeleteList)
cmds.insertKnotCurve([self.baseCurve+".u["+str(len(pointListA)-1)+"]", self.baseCurve+".ep["+str(len(pointListA)-1)+"]"], constructionHistory=True, curveOnSurface=True, numberOfKnots=1, addKnots=False, insertBetween=True, replaceOriginal=True)
pointListA, pointListB, sideA, sideB = self.sqGetPointLists()
posListA, posListB = [], []
for i in range(0, len(pointListA)-1):
posListA.append(cmds.xform(pointListA[i], query=True, worldSpace=True, translation=True))
posListB.append(cmds.xform(pointListB[i], query=True, worldSpace=True, translation=True))
self.mainCurveA = cmds.curve(name="StickyLips_Main_A_Crv", degree=1, point=posListA)
self.mainCurveB = cmds.curve(name="StickyLips_Main_B_Crv", degree=1, point=posListB)
cmds.rename(cmds.listRelatives(self.mainCurveA, children=True, shapes=True)[0], self.mainCurveA+"Shape")
cmds.rename(cmds.listRelatives(self.mainCurveB, children=True, shapes=True)[0], self.mainCurveB+"Shape")
cmds.select(self.mainCurveA+".cv[*]")
            self.curveLength = len(cmds.ls(selection=True, flatten=True))
cmds.select(clear=True)
self.sqCheckCurveDirection(self.mainCurveA)
self.sqCheckCurveDirection(self.mainCurveB)
self.baseCurveA = cmds.duplicate(self.mainCurveA, name=self.mainCurveA.replace("_Main_", "_Base_"))[0]
self.baseCurveB = cmds.duplicate(self.mainCurveB, name=self.mainCurveB.replace("_Main_", "_Base_"))[0]
cmds.delete(self.baseCurve)
self.maxIter = len(posListA)
cmds.group(self.mainCurveA, self.mainCurveB, self.baseCurveA, self.baseCurveB, name="StickyLips_StaticData_Grp")
else:
            mel.eval("warning \"Please, select a closed edgeLoop.\";")
def sqCheckCurveDirection(self, thisCurve, *args):
posMinX = cmds.xform(thisCurve+".cv[0]", query=True, worldSpace=True, translation=True)[0]
        posMaxX = cmds.xform(thisCurve+".cv["+str(self.curveLength-1)+"]", query=True, worldSpace=True, translation=True)[0]
if posMinX > posMaxX:
cmds.reverseCurve(thisCurve, constructionHistory=False, replaceOriginal=True)
def sqGetPointLists(self, *args):
cmds.select(self.baseCurve+".cv[*]")
pointList = cmds.ls(selection=True, flatten=True)
minX = 0
maxX = 0
sideA = 0
sideB = 0
for i in range(0, len(pointList)):
pointPosX = cmds.xform(pointList[i], query=True, worldSpace=True, translation=True)[0]
if pointPosX < minX:
minX = pointPosX
sideA = i
elif pointPosX > maxX:
maxX = pointPosX
sideB = i
if sideA > sideB:
sideC = sideA
sideA = sideB
sideB = sideC
pointListA = pointList[sideA:(sideB+1)]
pointListB = pointList[sideB:]
for j in range(0, (sideA+1)):
pointListB.append(pointList[j])
return pointListA, pointListB, sideA, sideB
def sqCreateClusters(self, curveA, curveB, *args):
self.clusterList = []
        for i in range(1, self.curveLength-1):
            self.clusterList.append(cmds.cluster([curveA+".cv["+str(i)+"]", curveB+".cv["+str(i)+"]"], name="StickyLips_"+str(i-1)+"_Cls")[1])
return self.clusterList
def sqGenerateMuscleLocators(self, *args):
muscleLoaded = True
if not cmds.pluginInfo('MayaMuscle.mll', query=True, loaded=True):
muscleLoaded = False
try:
cmds.loadPlugin('MayaMuscle.mll')
muscleLoaded = True
except:
print "Error: Can not load the Maya Muscle plugin!"
pass
if muscleLoaded:
minIndex = 0
minPosX = 1000000000000000 # just to avoid non centered characters
minPosId = 0
vertexPairList = []
muscleLocatorList = []
for e, edgeName in enumerate(self.edgeList):
tempCompList = cmds.polyListComponentConversion(edgeName, fromEdge=True, toVertex=True)
tempExpList = cmds.filterExpand(tempCompList, selectionMask=31, expand=True)
vertexPairList.append(tempExpList)
edgePosA = cmds.xform(tempExpList[0], query=True, worldSpace=True, translation=True)[0]
edgePosB = cmds.xform(tempExpList[1], query=True, worldSpace=True, translation=True)[0]
if edgePosA < minPosX:
minIndex = e
minPosX = edgePosA
minPosId = 0
if edgePosB < minPosX:
minIndex = e
minPosX = edgePosB
minPosId = 1
usedIndexList = []
usedIndexList.append(minIndex)
lastIndexUp = minIndex
lastIndexDown = 0
upEdgeList = []
upEdgeList.append(self.edgeList[minIndex])
downEdgeList = []
for i in range(0, len(vertexPairList)-1):
if not i == minIndex:
if vertexPairList[i][0] in vertexPairList[minIndex][minPosId] or vertexPairList[i][1] in vertexPairList[minIndex][minPosId]:
downEdgeList.append(self.edgeList[i])
usedIndexList.append(i)
lastIndexDown = i
for i in range(0, self.maxIter-2):
for j in range(0, len(vertexPairList)):
if not j in usedIndexList:
if vertexPairList[j][0] in vertexPairList[lastIndexUp] or vertexPairList[j][1] in vertexPairList[lastIndexUp]:
upEdgeList.append(self.edgeList[j])
usedIndexList.append(j)
lastIndexUp = j
break
for j in range(0, len(vertexPairList)):
if not j in usedIndexList:
if vertexPairList[j][0] in vertexPairList[lastIndexDown] or vertexPairList[j][1] in vertexPairList[lastIndexDown]:
downEdgeList.append(self.edgeList[j])
usedIndexList.append(j)
lastIndexDown = j
break
upMinusDown = len(upEdgeList) - len(downEdgeList)
downMinusUp = len(downEdgeList) - len(upEdgeList)
if upMinusDown > 1:
for i in range(0, upMinusDown):
if not len(upEdgeList) == (self.maxIter-3):
downEdgeList.append(upEdgeList[len(upEdgeList)-1])
upEdgeList = upEdgeList[:-1]
if downMinusUp > 1:
for i in range(0, downMinusUp):
if not len(upEdgeList) == (self.maxIter-3):
upEdgeList.append(downEdgeList[len(downEdgeList)-1])
downEdgeList = downEdgeList[:-1]
upEdgeList = upEdgeList[:self.maxIter-1]
downEdgeList = downEdgeList[:self.maxIter-1]
for k in range(0, self.maxIter-2):
cmds.select([upEdgeList[k], downEdgeList[k]])
# cmds.refresh()
# cmds.pause(seconds=1)
mel.eval("cMuscleSurfAttachSetup();")
                msa = cmds.rename("StickyLips_"+str(k)+"_MSA")
cmds.disconnectAttr(msa+"Shape.outRotate", msa+".rotate")
cmds.setAttr(msa+".rotateX", 0)
cmds.setAttr(msa+".rotateY", 0)
cmds.setAttr(msa+".rotateZ", 0)
muscleLocatorList.append(msa)
cmds.parent(self.clusterList[k], msa, absolute=True)
    def sqSetClustersZeroScale(self, *args):
if self.clusterList:
for item in self.clusterList:
cmds.setAttr(item+".scaleX", 0)
cmds.setAttr(item+".scaleY", 0)
cmds.setAttr(item+".scaleZ", 0)
    def sqCreateStickyLipsDeformers(self, *args):
baseMesh = None
mainCurveList = [self.mainCurveA, self.mainCurveB]
for mainCurve in mainCurveList:
if baseMesh == None:
baseMesh = cmds.duplicate(self.receptList[0], name=self.receptList[0]+"Base")[0]
cmds.setAttr(baseMesh+".visibility", 0)
wrapNode = cmds.deformer(mainCurve, name="StickyLips_Wrap", type="wrap")[0]
try:
cmds.connectAttr(self.receptList[0]+".dropoff", wrapNode+".dropoff[0]", force=True)
cmds.connectAttr(self.receptList[0]+".inflType", wrapNode+".inflType[0]", force=True)
cmds.connectAttr(self.receptList[0]+".smoothness", wrapNode+".smoothness[0]", force=True)
cmds.connectAttr(self.receptList[0]+"Shape.worldMesh[0]", wrapNode+".driverPoints[0]", force=True)
except:
pass
cmds.connectAttr(baseMesh+"Shape.worldMesh[0]", wrapNode+".basePoints[0]", force=True)
cmds.connectAttr(mainCurve+"Shape.worldMatrix[0]", wrapNode+".geomMatrix", force=True)
cmds.setAttr(wrapNode+".maxDistance", 1)
cmds.setAttr(wrapNode+".autoWeightThreshold", 1)
cmds.setAttr(wrapNode+".exclusiveBind", 1)
baseCurveList = [self.baseCurveA, self.baseCurveB]
for c, baseCurve in enumerate(baseCurveList):
wireNode = cmds.wire(self.receptList[1], name=baseCurve+"_Wire", groupWithBase=False, crossingEffect=0, localInfluence=0)[0]
cmds.connectAttr(mainCurveList[c]+"Shape.worldSpace[0]", wireNode+".baseWire[0]", force=True)
cmds.connectAttr(baseCurve+"Shape.worldSpace[0]", wireNode+".deformedWire[0]", force=True)
self.wireNodeList.append(wireNode)
wireLocList = []
for i in range(0, self.maxIter):
wireLocList.append(baseCurve+".u["+str(i)+"]")
cmds.dropoffLocator(1, 1, wireNode, wireLocList)
def sqCreateStickyLipsCtrlAttr(self, *args):
if not cmds.objExists(self.optionCtrl):
cmds.circle(name=self.optionCtrl, constructionHistory=False)
cmds.addAttr(self.optionCtrl, longName='stickyLips', attributeType='bool')
cmds.setAttr(self.optionCtrl+'.stickyLips', edit=True, keyable=True)
for i in range(0, self.maxIter):
cmds.addAttr(self.optionCtrl, longName="stickyLipsWireLocator"+str(i), attributeType='float', keyable=False)
for i in range(0, self.maxIter):
for wireNode in self.wireNodeList:
cmds.connectAttr(self.optionCtrl+".stickyLipsWireLocator"+str(i), wireNode+".wireLocatorEnvelope["+str(i)+"]")
slTextCurve = cmds.textCurves(ch=False, font="Arial|w400|h-08", text="StickyLips", name="StickyLips_Label_Txt")[0]
if "Shape" in slTextCurve:
slTextCurve = cmds.rename(slTextCurve, slTextCurve[:slTextCurve.find("Shape")])
t = 0
slCharTransformList = cmds.listRelatives(slTextCurve, children=True, type="transform")
for charTransform in slCharTransformList:
txValue = cmds.getAttr(charTransform+".tx")
sLTextShapeList = cmds.listRelatives(charTransform, allDescendents=True, type="nurbsCurve")
for i, textShape in enumerate(sLTextShapeList):
textShape = cmds.rename(textShape, "StickyLips_Txt_"+str(t)+"Shape")
cmds.parent(textShape, slTextCurve, shape=True, relative=True)
cmds.move(txValue, 0, 0, textShape+".cv[:]", relative=True)
t = t+1
cmds.delete(charTransform)
cmds.setAttr(slTextCurve+".translateX", -0.1)
cmds.setAttr(slTextCurve+".translateY", 0.25)
cmds.setAttr(slTextCurve+".scaleX", 0.1)
cmds.setAttr(slTextCurve+".scaleY", 0.1)
cmds.setAttr(slTextCurve+".scaleZ", 0.1)
cmds.setAttr(slTextCurve+".template", 1)
cmds.makeIdentity(slTextCurve, apply=True)
sideNameList = ["L", "R"]
for side in sideNameList:
bg = cmds.circle(name=side+"_StickyLips_Bg", normal=(0,0,1), radius=1, degree=1, sections=4, constructionHistory=False)[0]
cmds.setAttr(bg+".rotateZ", 45)
cmds.setAttr(bg+".translateX", 0.5)
cmds.makeIdentity(bg, apply=True)
cmds.setAttr(bg+".scaleX", 0.85)
cmds.setAttr(bg+".scaleY", 0.15)
cmds.makeIdentity(bg, apply=True)
cmds.setAttr(bg+".template", 1)
self.sliderCtrl = cmds.circle(name=side+"_StickyLips_Ctrl", normal=(0,0,1), radius=0.1, degree=3, constructionHistory=False)[0]
attrToHideList = ['ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']
for attr in attrToHideList:
cmds.setAttr(self.sliderCtrl+"."+attr, edit=True, lock=True, keyable=False)
cmds.transformLimits(self.sliderCtrl, translationX=(0,1), enableTranslationX=(1,1))
distPos = 1.0 / self.maxIter
for i in range(0, self.maxIter):
lPosA = (i * distPos)
lPosB = (lPosA + distPos)
rPosB = 1 - (i * distPos)
rPosA = (rPosB - distPos)
if i > 0:
lPosA = lPosA - (distPos*0.33)
rPosA = rPosA - (distPos*0.33)
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[0]+"_StickyLips_Ctrl.translateX", driverValue=lPosA, value=0, inTangentType="linear", outTangentType="linear")
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[0]+"_StickyLips_Ctrl.translateX", driverValue=lPosB, value=1, inTangentType="linear", outTangentType="linear")
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[1]+"_StickyLips_Ctrl.translateX", driverValue=rPosA, value=0, inTangentType="linear", outTangentType="linear")
cmds.setDrivenKeyframe(self.optionCtrl, attribute="stickyLipsWireLocator"+str(i), currentDriver=sideNameList[1]+"_StickyLips_Ctrl.translateX", driverValue=rPosB, value=1, inTangentType="linear", outTangentType="linear")
lSliderGrp = cmds.group(sideNameList[0]+"_StickyLips_Ctrl", sideNameList[0]+"_StickyLips_Bg", name=sideNameList[0]+"_StickyLips_Ctrl_Grp")
rSliderGrp = cmds.group(sideNameList[1]+"_StickyLips_Ctrl", sideNameList[1]+"_StickyLips_Bg", name=sideNameList[1]+"_StickyLips_Ctrl_Grp")
cmds.setAttr(rSliderGrp+".rotateZ", 180)
cmds.setAttr(rSliderGrp+".translateY", -0.25)
sliderGrp = cmds.group(lSliderGrp, rSliderGrp, slTextCurve, name="StickyLips_Ctrl_Grp")
def sqGenerateStickyLips(self, *args):
self.sqGetRecepts()
if self.receptList[0] == None or self.receptList[1] == None:
mel.eval("warning \"Please, load ReceptA and ReceptB targets to continue.\";")
else:
self.sqGenerateCurves()
self.sqCreateClusters(self.baseCurveA, self.baseCurveB)
self.sqSetClustersZeroScale()
self.sqGenerateMuscleLocators()
            self.sqCreateStickyLipsDeformers()
self.sqCreateStickyLipsCtrlAttr()
cmds.select(clear=True)
| SqueezeStudioAnimation/dpAutoRigSystem | dpAutoRigSystem/Extras/sqStickyLipsSetup.py | Python | gpl-2.0 | 20,524 |
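A hypothetical way to open the UI above from the Maya script editor (requires a running Maya session; the three constructor arguments normally come from the dpAutoRigSystem UI, so the placeholder values below are assumptions for a standalone test):

import sqStickyLipsSetup
# dpUIinst, langDic and langName are only stored by __init__ here,
# so placeholders are enough to bring up the window.
sqStickyLipsSetup.StickyLips(None, {}, "")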
#!/usr/bin/env python
import random
from itertools import izip as zip, count
PHONES = ['AA0', 'AA1', 'AA2', 'AE0', 'AE1', 'AE2', 'AH0', 'AH1', 'AH2', 'AO0', 'AO1', 'AO2', 'AW0', 'AW1', 'AW2', 'AY0', 'AY1', 'AY2', 'EH0', 'EH1', 'EH2', 'ER0', 'ER1', 'ER2', 'EY0', 'EY1', 'EY2', 'IH0', 'IH1', 'IH2', 'IY0', 'IY1', 'IY2', 'OW0', 'OW1', 'OW2', 'OY0', 'OY1', 'OY2', 'UH0', 'UH1', 'UH2', 'UW0', 'UW1', 'UW2']
STRESSED = ['AA1', 'AA2', 'AE1', 'AE2', 'AH1', 'AH2', 'AO1', 'AO2', 'AW1', 'AW2', 'AY1', 'AY2', 'EH1', 'EH2', 'ER1', 'ER2', 'EY1', 'EY2', 'IH1', 'IH2', 'IY1', 'IY2', 'OW1', 'OW2', 'OY1', 'OY2', 'UH1', 'UH2', 'UW1', 'UW2']
with open("./cmudict_nocred.txt","r") as f:
cmu = f.readlines()
def rhyme():
seed = raw_input('Seed Word: ').upper()
seed_entry = [entry for entry in cmu if seed == entry.split()[0]]
# ^ returns a list with one value
try:
seed_entry = seed_entry[0]
    except IndexError:
        print "I encountered a problem with " + seed
        print "I do not know how to pronounce this, and therefore cannot find a rhyme"
        return
# ^ sets seed_entry as a string instead of single item list
e = seed_entry.split()
# ^ splits phonemes into a list where [0] is the seed word
stressed_vowel_positions = [i for i, j in zip(count(), e) if j in STRESSED]
# ^ list comp that finds the index for each stressed vowel phoneme
try:
last_stressed_vowel = stressed_vowel_positions[-1]
except IndexError:
last_stressed_vowel = 0
# ^ determines the index of the last stressed vowel
# ^ exception for words with no stressed vowels (such as 'A', pronounced 'AH0')
backward_count_vowel = len(e) - last_stressed_vowel
    # ^ determines the index from the right instead of the left, so longer (more syllabic) words are still matched correctly
candidates = [entry.split()[0] for entry in cmu if entry.split()[-backward_count_vowel:] == e[-backward_count_vowel:]]
# ^ list comp that matches words that share identical phonemes from the last stressed vowel
# ^ i.e. it finds true rhymes!
print candidates
rhyme()
| JacksonBates/rhymer | rhyme.py | Python | gpl-2.0 | 2,132 |
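The matching rule used above (identical phonemes from the last stressed vowel onward) can be checked without the dictionary file; a self-contained sketch with two hard-coded CMUdict-style entries (the entries below are illustrative, not read from cmudict_nocred.txt):

def true_rhyme(entry_a, entry_b):
    # Each entry is "WORD PH1 PH2 ..."; compare tails from the last stressed vowel.
    a, b = entry_a.split(), entry_b.split()
    stressed = [i for i, ph in enumerate(a) if ph in STRESSED]
    tail = len(a) - (stressed[-1] if stressed else 0)
    return a[-tail:] == b[-tail:]

print true_rhyme("TOWER T AW1 ER0", "POWER P AW1 ER0")     # True
print true_rhyme("TOWER T AW1 ER0", "TABLE T EY1 B AH0 L") # False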
import unittest
from gourmet.importers import importer
class TestImporter (unittest.TestCase):
def setUp (self):
self.i = importer.Importer()
def _get_last_rec_ (self):
return self.i.added_recs[-1]
def testRecImport (self):
self.i.start_rec()
attrs = [('title','Foo'),('cuisine','Bar'),('yields',3),('yield_unit','cups')]
for att,val in attrs:
self.i.rec[att] = val
self.i.commit_rec()
rec = self._get_last_rec_()
for att,val in attrs:
self.assertEqual(getattr(rec,att),val)
def testIngredientImport (self):
self.i.start_rec()
self.i.rec['title']='Ingredient Import Test'
self.i.start_ing()
self.i.add_amt(2)
self.i.add_unit('cups')
self.i.add_item('water')
self.i.commit_ing()
self.i.commit_rec()
ings = self.i.rd.get_ings(self._get_last_rec_())
self.assertEqual(len(ings),1)
ing = ings[0]
self.assertEqual(ing.amount,2)
self.assertEqual(ing.unit,'cups')
self.assertEqual(ing.item,'water')
if __name__ == '__main__':
unittest.main()
| kirienko/gourmet | tests/test_importer.py | Python | gpl-2.0 | 1,162 |
# encoding: utf-8
# module PyQt4.QtGui
# from /usr/lib/python3/dist-packages/PyQt4/QtGui.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from .QGraphicsLayoutItem import QGraphicsLayoutItem
class QGraphicsLayout(QGraphicsLayoutItem):
""" QGraphicsLayout(QGraphicsLayoutItem parent=None) """
def activate(self): # real signature unknown; restored from __doc__
""" QGraphicsLayout.activate() """
pass
def addChildLayoutItem(self, QGraphicsLayoutItem): # real signature unknown; restored from __doc__
""" QGraphicsLayout.addChildLayoutItem(QGraphicsLayoutItem) """
pass
def count(self): # real signature unknown; restored from __doc__
""" QGraphicsLayout.count() -> int """
return 0
def getContentsMargins(self): # real signature unknown; restored from __doc__
""" QGraphicsLayout.getContentsMargins() -> (float, float, float, float) """
pass
def invalidate(self): # real signature unknown; restored from __doc__
""" QGraphicsLayout.invalidate() """
pass
def isActivated(self): # real signature unknown; restored from __doc__
""" QGraphicsLayout.isActivated() -> bool """
return False
def itemAt(self, p_int): # real signature unknown; restored from __doc__
""" QGraphicsLayout.itemAt(int) -> QGraphicsLayoutItem """
return QGraphicsLayoutItem
def removeAt(self, p_int): # real signature unknown; restored from __doc__
""" QGraphicsLayout.removeAt(int) """
pass
def setContentsMargins(self, p_float, p_float_1, p_float_2, p_float_3): # real signature unknown; restored from __doc__
""" QGraphicsLayout.setContentsMargins(float, float, float, float) """
pass
def updateGeometry(self): # real signature unknown; restored from __doc__
""" QGraphicsLayout.updateGeometry() """
pass
def widgetEvent(self, QEvent): # real signature unknown; restored from __doc__
""" QGraphicsLayout.widgetEvent(QEvent) """
pass
def __init__(self, QGraphicsLayoutItem_parent=None): # real signature unknown; restored from __doc__
pass
def __len__(self, *args, **kwargs): # real signature unknown
""" Return len(self). """
pass
| ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtGui/QGraphicsLayout.py | Python | gpl-2.0 | 2,337 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from rtfw.PropertySets import (AttributedList, Colours, Fonts, Papers,
MarginsPS, ShadingPS, BorderPS, FramePS,
TabPS, TextPS, ParagraphPS,
MarginsPropertySet, ShadingPropertySet,
BorderPropertySet, FramePropertySet,
TabPropertySet, TextPropertySet,
ParagraphPropertySet)
from rtfw.Elements import (Section, Inline, Paragraph, Cell, Image, Text,
                           Table, Document,
StyleSheet,
MakeDefaultStyleSheet,
TAB, LINE, TEXT, B, I, U,
RawCode,
PAGE_NUMBER, TOTAL_PAGES,
SECTION_PAGES, ARIAL_BULLET)
from rtfw.Styles import (TextStyle, ParagraphStyle)
from rtfw.Renderer import (Settings, Renderer)
| yeleman/rtfw | rtfw/__init__.py | Python | gpl-2.0 | 1,181 |
import numpy as np
import matplotlib.pylab as plt
datos = np.loadtxt( "C_HaloVoid_FOF-Tweb.dat" )
Nneigh = 5
for i in xrange(Nneigh):
plt.hist( datos[:,i*5], range=(0,50), bins=500, normed=True, alpha=0.5 )
print( "Percent of halos without %d-th neighbour = %1.2f"%(i+1,(datos[:,i*5]>150).sum()/(1.0*len(datos))*100))
plt.show()
| sbustamante/Distance2Void | checker.py | Python | gpl-2.0 | 340 |
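A synthetic stand-in for the check above, since C_HaloVoid_FOF-Tweb.dat is not included here (the column layout, with every 5th column holding the distance to the (i+1)-th neighbour and values above 150 meaning "no neighbour found", is an assumption read off the script):

import numpy as np
datos = np.random.uniform(0, 200, size=(1000, 25))  # fake data, 5 blocks of 5 columns
i = 0
frac = (datos[:, i*5] > 150).sum() / (1.0 * len(datos)) * 100
print("Percent of halos without %d-th neighbour = %1.2f" % (i+1, frac))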
import os
import sys
sys.path.append( '../' )
from PyRTF import *
def MakeExample1() :
doc = Document()
ss = doc.StyleSheet
section = Section()
doc.Sections.append( section )
# text can be added directly to the section
# a paragraph object is create as needed
section.append( 'Image Example 1' )
section.append( 'You can add images in one of two ways, either converting the '
'image each and every time like;' )
image = Image( 'image.jpg' )
section.append( Paragraph( image ) )
section.append( 'Or you can use the image object to convert the image and then '
'save it to a raw code element that can be included later.' )
fout = file( 'image_tmp.py', 'w' )
print >> fout, 'from PyRTF import RawCode'
print >> fout
fout.write( image.ToRawCode( 'TEST_IMAGE' ) )
fout.close()
import image_tmp
section.append( Paragraph( image_tmp.TEST_IMAGE ) )
section.append( 'Have a look in image_tmp.py for the converted RawCode.' )
section.append( 'here are some png files' )
for f in [ 'img1.png',
'img2.png',
'img3.png',
'img4.png' ] :
section.append( Paragraph( Image( f ) ) )
return doc
def OpenFile( name ) :
return file( '%s.rtf' % name, 'w' )
if __name__ == '__main__' :
DR = Renderer()
doc1 = MakeExample1()
DR.Write( doc1, OpenFile( 'Image1' ) )
print "Finished"
| lambdamusic/testproject | konproj/libs/PyRTF/examples/examples2.py | Python | gpl-2.0 | 1,338 |
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing OpenFOAM, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Xavier Besseron (University of Luxembourg)
@author: Ward Poelmans (Ghent University)
@author: Balazs Hajgato (Free University Brussels (VUB))
"""
import glob
import os
import re
import shutil
import stat
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, mkdir
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_OpenFOAM(EasyBlock):
"""Support for building and installing OpenFOAM."""
def __init__(self, *args, **kwargs):
"""Specify that OpenFOAM should be built in install dir."""
super(EB_OpenFOAM, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.wm_compiler = None
self.wm_mplib = None
self.openfoamdir = None
self.thrdpartydir = None
if 'extend' in self.name.lower():
if LooseVersion(self.version) >= LooseVersion('3.0'):
self.openfoamdir = 'foam-extend-%s' % self.version
else:
self.openfoamdir = 'OpenFOAM-%s-ext' % self.version
else:
self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])])
self.log.debug("openfoamdir: %s" % self.openfoamdir)
def extract_step(self):
"""Extract sources as expected by the OpenFOAM(-Extend) build scripts."""
super(EB_OpenFOAM, self).extract_step()
# make sure that the expected subdir is really there after extracting
# if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail
openfoam_installdir = os.path.join(self.installdir, self.openfoamdir)
if not os.path.exists(openfoam_installdir):
self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir)
try:
contents_installdir = os.listdir(self.installdir)
# it's one directory but has a wrong name
if len(contents_installdir) == 1 and os.path.isdir(os.path.join(self.installdir, contents_installdir[0])):
source = os.path.join(self.installdir, contents_installdir[0])
target = os.path.join(self.installdir, self.openfoamdir)
self.log.debug("Renaming %s to %s", source, target)
os.rename(source, target)
else:
mkdir(openfoam_installdir)
for fil in contents_installdir:
if fil != self.openfoamdir:
source = os.path.join(self.installdir, fil)
target = os.path.join(openfoam_installdir, fil)
self.log.debug("Moving %s to %s", source, target)
shutil.move(source, target)
os.chdir(openfoam_installdir)
except OSError, err:
raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err)
def patch_step(self):
"""Adjust start directory and start path for patching to correct directory."""
self.cfg['start_dir'] = os.path.join(self.installdir, self.openfoamdir)
super(EB_OpenFOAM, self).patch_step(beginpath=self.cfg['start_dir'])
def configure_step(self):
"""Configure OpenFOAM build by setting appropriate environment variables."""
# compiler & compiler flags
comp_fam = self.toolchain.comp_family()
extra_flags = ''
if comp_fam == toolchain.GCC: # @UndefinedVariable
self.wm_compiler = 'Gcc'
            if LooseVersion(get_software_version('GCC')) >= LooseVersion('4.8'):
# make sure non-gold version of ld is used, since OpenFOAM requires it
# see http://www.openfoam.org/mantisbt/view.php?id=685
extra_flags = '-fuse-ld=bfd'
# older versions of OpenFOAM-Extend require -fpermissive
if 'extend' in self.name.lower() and LooseVersion(self.version) < LooseVersion('2.0'):
extra_flags += ' -fpermissive'
elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable
self.wm_compiler = 'Icc'
# make sure -no-prec-div is used with Intel compilers
extra_flags = '-no-prec-div'
else:
raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER")
for env_var in ['CFLAGS', 'CXXFLAGS']:
env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags))
# patch out hardcoding of WM_* environment variables
# for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER'
for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]:
self.log.debug("Patching out hardcoded $WM_* env vars in %s", script)
# disable any third party stuff, we use EB controlled builds
regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")]
WM_env_var = ['WM_COMPILER', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR']
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
WM_env_var.append('WM_LABEL_SIZE')
for env_var in WM_env_var:
regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var,
r": ${\g<var>:=\g<val>}; export \g<var>"))
apply_regex_substitutions(script, regex_subs)
# inject compiler variables into wmake/rules files
ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*'))
langs = ['c', 'c++']
suffixes = ['', 'Opt']
wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes]
mpicc = os.environ['MPICC']
mpicxx = os.environ['MPICXX']
cc_seq = os.environ.get('CC_SEQ', os.environ['CC'])
cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX'])
if self.toolchain.mpi_family() == toolchain.OPENMPI:
# no -cc/-cxx flags supported in OpenMPI compiler wrappers
c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc)
cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx)
else:
# -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI)
c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq)
cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq)
comp_vars = {
# specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them
'cc': c_comp_cmd,
'CC': cxx_comp_cmd,
'cOPT': os.environ['CFLAGS'],
'c++OPT': os.environ['CXXFLAGS'],
}
for wmake_rules_file in wmake_rules_files:
fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file)
self.log.debug("Patching compiler variables in %s", fullpath)
regex_subs = []
for comp_var, newval in comp_vars.items():
regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval))
apply_regex_substitutions(fullpath, regex_subs)
# enable verbose build for debug purposes
# starting with openfoam-extend 3.2, PS1 also needs to be set
env.setvar("FOAM_VERBOSE", '1')
# installation directory
env.setvar("FOAM_INST_DIR", self.installdir)
# third party directory
self.thrdpartydir = "ThirdParty-%s" % self.version
# only if third party stuff is actually installed
if os.path.exists(self.thrdpartydir):
os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir)
env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir))
env.setvar("WM_COMPILER", self.wm_compiler)
# set to an MPI unknown by OpenFOAM, since we're handling the MPI settings ourselves (via mpicc, etc.)
# Note: this name must contain 'MPI' so the MPI version of the Pstream library is built (cf src/Pstream/Allwmake)
self.wm_mplib = "EASYBUILDMPI"
env.setvar("WM_MPLIB", self.wm_mplib)
# parallel build spec
env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel']))
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env.setvar("WM_LABEL_SIZE", '64')
else:
env.setvar("WM_LABEL_SIZE", '32')
# make sure lib/include dirs for dependencies are found
openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
if LooseVersion(self.version) < LooseVersion("2") or openfoam_extend_v3:
self.log.debug("List of deps: %s" % self.cfg.dependencies())
for dep in self.cfg.dependencies():
                dep_name = dep['name'].upper()
dep_root = get_software_root(dep['name'])
env.setvar("%s_SYSTEM" % dep_name, "1")
dep_vars = {
"%s_DIR": "%s",
"%s_BIN_DIR": "%s/bin",
"%s_LIB_DIR": "%s/lib",
"%s_INCLUDE_DIR": "%s/include",
}
for var, val in dep_vars.iteritems():
env.setvar(var % dep_name, val % dep_root)
else:
for depend in ['SCOTCH', 'METIS', 'CGAL', 'Paraview']:
dependloc = get_software_root(depend)
if dependloc:
if depend == 'CGAL' and get_software_root('Boost'):
env.setvar("CGAL_ROOT", dependloc)
env.setvar("BOOST_ROOT", get_software_root('Boost'))
else:
env.setvar("%s_ROOT" % depend.upper(), dependloc)
def build_step(self):
"""Build OpenFOAM using make after sourcing script to set environment."""
precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc")
# make directly in install directory
cmd_tmpl = "%(precmd)s && %(prebuildopts)s %(makecmd)s" % {
'precmd': precmd,
'prebuildopts': self.cfg['prebuildopts'],
'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'),
}
if 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
qa = {
"Proceed without compiling ParaView [Y/n]": 'Y',
"Proceed without compiling cudaSolvers? [Y/n]": 'Y',
}
noqa = [
".* -o .*",
"checking .*",
"warning.*",
"configure: creating.*",
"%s .*" % os.environ['CC'],
"wmake .*",
"Making dependency list for source file.*",
"\s*\^\s*", # warning indicator
"Cleaning .*",
]
run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True)
else:
run_cmd(cmd_tmpl % 'Allwmake', log_all=True, simple=True, log_output=True)
def install_step(self):
"""Building was performed in install dir, so just fix permissions."""
# fix permissions of OpenFOAM dir
fullpath = os.path.join(self.installdir, self.openfoamdir)
adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)
# fix permissions of ThirdParty dir and subdirs (also for 2.x)
# if the thirdparty tarball is installed
fullpath = os.path.join(self.installdir, self.thrdpartydir)
if os.path.exists(fullpath):
adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True)
adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True)
def sanity_check_step(self):
"""Custom sanity check for OpenFOAM"""
shlib_ext = get_shared_lib_ext()
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
int_size = 'Int64'
else:
int_size = 'Int32'
else:
int_size = ''
psubdir = "linux64%sDP%sOpt" % (self.wm_compiler, int_size)
openfoam_extend_v3 = 'extend' in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0')
if openfoam_extend_v3 or LooseVersion(self.version) < LooseVersion("2"):
toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir)
libsdir = os.path.join(self.openfoamdir, "lib", psubdir)
dirs = [toolsdir, libsdir]
else:
toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin")
libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib")
dirs = [toolsdir, libsdir]
# some randomly selected binaries
# if one of these is missing, it's very likely something went wrong
bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["foamExec", "paraFoam"]] + \
[os.path.join(toolsdir, "buoyant%sSimpleFoam" % x) for x in ["", "Boussinesq"]] + \
[os.path.join(toolsdir, "%sFoam" % x) for x in ["boundary", "engine", "sonic"]] + \
[os.path.join(toolsdir, "surface%s" % x) for x in ["Add", "Find", "Smooth"]] + \
[os.path.join(toolsdir, x) for x in ["deformedGeom", "engineSwirl", "modifyMesh",
"refineMesh", "vorticity"]]
# check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one
if 'extend' in self.name.lower():
libs = [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext)]
if LooseVersion(self.version) < LooseVersion('3.2'):
libs.extend([os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", "mpi"]])
else:
libs = [os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", "mpi"]] + \
[os.path.join(libsdir, x, "libptscotchDecomp.%s" % shlib_ext) for x in ["dummy", "mpi"]] +\
[os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext)] + \
[os.path.join(libsdir, "dummy", "libscotchDecomp.%s" % shlib_ext)]
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion("2.3.0"):
            # surfaceSmooth was replaced by surfaceLambdaMuSmooth in OpenFOAM v2.3.0
bins.remove(os.path.join(toolsdir, "surfaceSmooth"))
bins.append(os.path.join(toolsdir, "surfaceLambdaMuSmooth"))
custom_paths = {
'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins + libs,
'dirs': dirs,
}
super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Define extra environment variables required by OpenFOAM"""
txt = super(EB_OpenFOAM, self).make_module_extra()
env_vars = [
('WM_PROJECT_VERSION', self.version),
('FOAM_INST_DIR', self.installdir),
('WM_COMPILER', self.wm_compiler),
('WM_MPLIB', self.wm_mplib),
('FOAM_BASH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'bashrc')),
('FOAM_CSH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'cshrc')),
]
# OpenFOAM >= 3.0.0 can use 64 bit integers
if 'extend' not in self.name.lower() and LooseVersion(self.version) >= LooseVersion('3.0'):
if self.toolchain.options['i8']:
env_vars += [('WM_LABEL_SIZE', '64')]
else:
env_vars += [('WM_LABEL_SIZE', '32')]
for (env_var, val) in env_vars:
# check whether value is defined for compatibility with --module-only
if val:
txt += self.module_generator.set_environment(env_var, val)
return txt
| hpcleuven/easybuild-easyblocks | easybuild/easyblocks/o/openfoam.py | Python | gpl-2.0 | 18,326 |
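A standalone sketch of the WM_* patching that configure_step above performs on etc/bashrc (plain re only; apply_regex_substitutions is EasyBuild's file-editing helper, and the sample line is an assumption):

import re

line = "export WM_COMPILER=Gcc"
pattern = r"^(setenv|export) (?P<var>WM_COMPILER)[ =](?P<val>.*)$"
print(re.sub(pattern, r": ${\g<var>:=\g<val>}; export \g<var>", line))
# prints: : ${WM_COMPILER:=Gcc}; export WM_COMPILER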
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Plugin for FILMAFFINITY support
# -----------------------------------------------------------------------
# $Id$
# Version: 080607_01
#
# Notes: FilmAffinity plugin. You can add FilmAffinity.com information for video items
# with the plugin
# Activate with: plugin.activate('video.filmaffinity')
# And add the following lines to your configuration file:
# FILMAFFINITY_AUTOACCEPT_SINGLE_HIT = True
# It uses also directly the variables:
# - FILMAFFINITY_REMOVE_FROM_LABEL
# - FILMAFFINITY_REMOVE_FROM_SEARCHSTRING
# as the same words shall be removed also for FilmAffinity.
# You can also set filmaffinity_search on a key (e.g. '1') by setting
# EVENTS['menu']['1'] = Event(MENU_CALL_ITEM_ACTION, arg='filmaffinity_search_or_cover_search')
#
# Todo: - Update existing FXD file
# - DVD/VCD support (discset ??)
#
# Author: S. FABRE for Biboobox, http://www.lahiette.com/biboobox
# RE-Author: Jose Maria Franco Fraiz
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ----------------------------------------------------------------------- */
import logging
logger = logging.getLogger("freevo.video.plugins.filmaffinity")
import re
import socket
socket.setdefaulttimeout(30.0)
import urllib2, urlparse, commands
import sys
import os
import traceback
import menu
import config
import plugin
import time
from util import htmlenties2txt
from util import fxdparser
from gui.PopupBox import PopupBox
from util.fxdimdb import makeVideo, point_maker
#beautifulsoup module
from BeautifulSoup import BeautifulSoup
# headers for urllib2
txdata = None
txheaders = {
'User-Agent': 'freevo (%s)' % sys.platform,
'Accept-Language': 'es-es',
}
class PluginInterface(plugin.ItemPlugin):
"""
This plugin obtains movie information in Spanish from the FilmAffinity
website
Configuration::
plugin.activate('video.filmaffinity')
FILMAFFINITY_REMOVE_FROM_LABEL = ('\(.*?\)', '\[.*?\]', 'cd[0-9]+(-[0-9])?', 'title[0-9]+', 'by .*$')
FILMAFFINITY_REMOVE_FROM_SEARCHSTRING = ('spanish','xvid','dvdrip','parte','[0-9]*','dvdscreener','mp3')
FILMAFFINITY_AUTOACCEPT_SINGLE_HIT = True
"""
def __init__(self, license=None):
"""Initialise class instance"""
# these are considered as private variables - don't mess with them unless
        # no other choice is given
# fyi, the other choice always exists: add a subroutine or ask :)
if not config.SYS_USE_NETWORK:
self.reason = 'SYS_USE_NETWORK not enabled'
return
plugin.ItemPlugin.__init__(self)
def config(self):
return [
('FILMAFFINITY_REMOVE_FROM_LABEL', ('\(.*?\)', '\[.*?\]', 'cd[0-9]+(-[0-9])?', 'title[0-9]+', 'by .*$'), _('Remove matching of this regexps from item name')),
('FILMAFFINITY_REMOVE_FROM_SEARCHSTRING', ('spanish','xvid','dvdrip','parte','[0-9]*','dvdscreener','mp3'), _('Remove matching of this regexps from search string')),
('FILMAFFINITY_AUTOACCEPT_SINGLE_HIT', True, _('Accept search automatically if it has only one result'))
]
def initmyself(self):
self.isdiscset = False
self.title = ''
self.info = {}
self.image = None # full path image filename
self.image_urls = [] # possible image url list
self.image_url = None # final image url
self.fxdfile = None # filename, full path, WITHOUT extension
self.append = False
self.device = None
self.regexp = None
self.mpl_global_opt = None
self.media_id = None
self.file_opts = []
self.video = []
self.variant = []
self.parts = []
self.var_mplopt = []
self.var_names = []
#image_url_handler stuff
self.image_url_handler = {}
def searchFilmAffinity(self, name):
"""name (string), returns id list
Search for name and returns an id list with tuples:
(id , name, year)"""
# Clean internal variables
self.initmyself()
self.filmaffinity_id_list = []
quoted_name = urllib2.quote(name.strip())
regexp_tag = re.compile('<[^>]+>', re.I)
logger.debug('Request with: %s', quoted_name)
url = 'http://www.filmaffinity.com/es/search.php?stext=%s&stype=title' % quoted_name
req = urllib2.Request(url, txdata, txheaders)
searchstring = name
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError, error:
            raise FxdFilmaffinity_Net_Error(_('Connection error: ') + str(error))
regexp_getmultiple = re.compile('.*<b><a href="(/es/film.*\.html)">(.*?)</a></b>\s*\(([0-9]{4})\)\s*', re.I)
regexp_getsingle = re.compile('^<meta name="keywords" content="movie', re.I)
regexp_geturl = re.compile('.*<a href="/es/.*\.php\?movie_id=([0-9]*)',re.I)
multiple = True
for line in response.read().split("\n"):
#print line
if multiple:
mm = regexp_getmultiple.match(line)
if mm:
#print "Found film in line: %s" % line
link = mm.group(1)
name = mm.group(2)
year = mm.group(3)
self.filmaffinity_id_list += [ (link, name, year) ]
ms = regexp_getsingle.match(line)
if ms: multiple = False
else:
mu = regexp_geturl.match(line)
if mu:
link = "/es/film" + mu.group(1) + ".html"
self.filmaffinity_id_list += [ (link, name, '') ]
break
return self.filmaffinity_id_list
def guessFilmAffinity(self, filename, label=False):
"""Guess possible movies from filename. Same return as searchFilmAffinity"""
name = filename
for r in config.FILMAFFINITY_REMOVE_FROM_LABEL:
name = re.sub(r, '', name.lower())
name = vfs.basename(vfs.splitext(name)[0])
name = re.sub('([a-z])([A-Z])', point_maker, name)
name = re.sub('([a-zA-Z])([0-9])', point_maker, name)
name = re.sub('([0-9])([a-zA-Z])', point_maker, name.lower())
name = re.sub(',', ' ', name)
parts = re.split("[\._' -]", name)
name = ''
for p in parts:
if not p.lower() in config.FILMAFFINITY_REMOVE_FROM_SEARCHSTRING and \
not re.search('[^0-9A-Za-z]', p):
# originally: not re.search(p, '[A-Za-z]'):
# not sure what's meant with that
name += '%s ' % p
return self.searchFilmAffinity(name)
def getFilmAffinityPage(self, url):
"""url
Set an filmaffinity number for object, and fetch data"""
self.myurl = 'http://www.filmaffinity.com/' + urllib2.quote(urllib2.unquote(url))
logger.debug("Now trying to get %s", self.myurl)
req = urllib2.Request(self.myurl, txdata, txheaders)
try:
idpage = urllib2.urlopen(req)
except urllib2.HTTPError, error:
            raise FxdAllocine_Net_Error(_('Connection error: ') + str(error))
#print "Response: %s" % idpage.read()
self.parsedata(idpage, id)
idpage.close()
def setFxdFile(self, fxdfilename=None, overwrite=False):
"""
setFxdFile (string, full path)
Set fxd file to write to, may be omitted, may be an existing file
(data will be added) unless overwrite = True
"""
if fxdfilename:
if vfs.splitext(fxdfilename)[1] == '.fxd':
self.fxdfile = vfs.splitext(fxdfilename)[0]
else: self.fxdfile = fxdfilename
else:
if self.isdiscset:
self.fxdfile = vfs.join(config.OVERLAY_DIR, 'disc-set', self.getmedia_id(self.device))
else:
self.fxdfile = vfs.splitext(file)[0]
if not overwrite:
try:
vfs.open(self.fxdfile + '.fxd')
self.append = True
except:
pass
else:
self.append = False
# XXX: add this back in without using parseMovieFile
# if self.append and \
# parseMovieFile(self.fxdfile + '.fxd', None, []) == []:
# raise FxdAllocine_XML_Error("FXD file to be updated is invalid, please correct it.")
if not vfs.isdir(vfs.dirname(self.fxdfile)):
if vfs.dirname(self.fxdfile):
os.makedirs(vfs.dirname(self.fxdfile))
def filmaffinity_get_disc_searchstring(self, item):
name = item.media.label
name = re.sub('([a-z])([A-Z])', point_maker, name)
name = re.sub('([a-zA-Z])([0-9])', point_maker, name)
name = re.sub('([0-9])([a-zA-Z])', point_maker, name.lower())
parts = re.split("[\._' -]", name)
name = ''
for p in parts:
if p:
name += '%s ' % p
if name:
return name[:-1]
else:
return ''
def actions(self, item):
self.item = item
if item.type == 'video' and (not item.files or not item.files.fxd_file):
if item.mode == 'file' or (item.mode in ('dvd', 'vcd') and item.info.has_key('tracks')
and not item.media):
self.disc_set = False
return [ (self.filmaffinity_search , _('Search in FilmAffinity'),
'filmaffinity_search_or_cover_search') ]
elif item.mode in ('dvd', 'vcd') and item.info.has_key('tracks'):
self.disc_set = True
s = self.filmaffinity_get_disc_searchstring(self.item)
if s:
return [ (self.filmaffinity_search , _('Search in FilmAffinity [%s]') % s,
'filmaffinity_search_or_cover_search') ]
if item.type == 'dir' and item.media and item.media.mountdir.find(item.dir) == 0:
self.disc_set = True
s = self.filmaffinity_get_disc_searchstring(self.item)
if s:
return [ (self.filmaffinity_search , _('Search in FilmAffinity [%s]') % s,
'filmaffinity_search_or_cover_search') ]
return []
def filmaffinity_search(self, arg=None, menuw=None):
"""
search filmaffinity for this item
"""
box = PopupBox(text=_('Searching in FilmAffinity...'))
box.show()
items = []
try:
duplicates = []
if self.disc_set:
self.searchstring = self.item.media.label
else:
self.searchstring = self.item.name
for id, name, year in self.guessFilmAffinity(self.searchstring, self.disc_set):
try:
uname = Unicode(name)
for i in self.item.parent.play_items:
if i.name == uname:
if not i in duplicates:
duplicates.append(i)
except:
pass
items.append(menu.MenuItem('%s (%s)' % (htmlenties2txt(name), year),
self.filmaffinity_create_fxd, (id, year)))
except:
box.destroy()
box = PopupBox(text=_('Connection error: Probably connection timeout, try again'))
box.show()
time.sleep(2)
box.destroy()
traceback.print_exc()
return
box.destroy()
if config.FILMAFFINITY_AUTOACCEPT_SINGLE_HIT and len(items) == 1:
self.filmaffinity_create_fxd(arg=items[0].arg, menuw=menuw)
return
if items:
moviemenu = menu.Menu(_('FILMAFFINITY Query'), items)
menuw.pushmenu(moviemenu)
return
box = PopupBox(text=_('No info available'))
box.show()
time.sleep(2)
box.destroy()
return
def filmaffinity_menu_back(self, menuw):
"""
check how many menus we have to go back to see the item
"""
import directory
# check if we have to go one menu back (called directly) or
# two (called from the item menu)
back = 1
if menuw.menustack[-2].selected != self.item:
back = 2
# maybe we called the function directly because there was only one
# entry and we called it with an event
if menuw.menustack[-1].selected == self.item:
back = 0
# update the directory
if directory.dirwatcher:
directory.dirwatcher.scan()
# go back in menustack
for i in range(back):
menuw.delete_menu()
def filmaffinity_create_fxd(self, arg=None, menuw=None):
"""
create fxd file for the item
"""
box = PopupBox(text=_('Fetching movie information'))
box.show()
#if this exists we got a cdrom/dvdrom
if self.item.media and self.item.media.devicename:
devicename = self.item.media.devicename
else:
devicename = None
self.getFilmAffinityPage(arg[0])
if self.disc_set:
self.setDiscset(devicename, None)
else:
if self.item.subitems:
for i in range(len(self.item.subitems)):
video = makeVideo('file', 'f%s' % i,
os.path.basename(self.item.subitems[i].filename),
device=devicename)
self.setVideo(video)
else:
video = makeVideo('file', 'f1', os.path.basename(self.item.filename),
device=devicename)
self.setVideo(video)
self.setFxdFile(os.path.splitext(self.item.filename)[0])
self.writeFxd()
self.filmaffinity_menu_back(menuw)
box.destroy()
def writeFxd(self):
"""Write fxd file"""
#if fxdfile is empty, set it yourself
if not self.fxdfile:
self.setFxdFile()
try:
#should we add to an existing file?
if self.append:
if self.isdiscset:
self.update_discset()
else: self.update_movie()
else:
#fetch images
self.fetch_image()
#should we write a disc-set ?
if self.isdiscset:
self.write_discset()
else:
self.write_movie()
#check fxd
# XXX: add this back in without using parseMovieFile
# if parseMovieFile(self.fxdfile + '.fxd', None, []) == []:
# raise FxdImdb_XML_Error("""FXD file generated is invalid, please "+
# "post bugreport, tracebacks and fxd file.""")
except (IOError, FxdFilmaffinity_IO_Error), error:
raise FxdFilmaffinity_IO_Error('error saving the file: %s' % str(error))
def setDiscset(self, device, regexp, *file_opts, **mpl_global_opt):
"""
device (string), regexp (string), file_opts (tuple (mplayer-opts,file)),
mpl_global_opt (string)
Set media is dvd/vcd,
"""
if len(self.video) != 0 or len(self.variant) != 0:
raise FxdFilmaffinity_XML_Error("<movie> already used, can't use both "+
"<movie> and <disc-set>")
self.isdiscset = True
        if (not device and not regexp) or (device and regexp):
            raise FxdFilmaffinity_XML_Error("Must specify exactly one of media-id and regexp")
self.device = device
self.regexp = regexp
for opts in file_opts:
self.file_opts += [ opts ]
if mpl_global_opt and 'mplayer_opt' in mpl_global_opt:
self.mpl_global_opt = (mpl_global_opt['mplayer_opt'])
def isDiscset(self):
"""Check if fxd file describes a disc-set, returns 1 for true, 0 for false
None for invalid file"""
try:
file = vfs.open(self.fxdfile + '.fxd')
except IOError:
return None
content = file.read()
file.close()
if content.find('</disc-set>') != -1: return 1
return 0
#------ private functions below .....
def write_discset(self):
"""Write a <disc-set> to a fresh file"""
print "Discset not supported for the moment... Sorry"
def write_fxd_copyright(self, fxd, node):
        fxd.setcdata(node, "The information in this file is from Filmaffinity.com.\n"+
                           "Please visit http://www.filmaffinity.com for more information.\n")
fxd.add(fxd.XMLnode('source', [('url', "%s" % self.myurl)]), node, None)
def write_fxd_video(self, fxd, node):
fxd.setattr(node, 'title', self.title)
fxd.add(fxd.XMLnode('cover-img', (('source', self.image_url), ("test", "test")), self.image), node, None)
videonode = fxd.XMLnode('video')
fxd.add(videonode, node)
if self.item.subitems:
for i in range(len(self.item.subitems)):
fxd.add(fxd.XMLnode('file', [('id', 'f%s' % i)], os.path.basename(self.item.subitems[i].filename)), videonode, None)
else:
fxd.add(fxd.XMLnode('file', [('id', 'f1')], os.path.basename(self.item.filename)), videonode, None)
infonode = fxd.XMLnode('info')
fxd.add(infonode, node)
if self.info:
for k in self.info.keys():
fxd.add(fxd.XMLnode(k, [], self.info[k]), infonode, None)
def write_movie(self):
"""Write <movie> to fxd file"""
try:
parser = fxdparser.FXD(self.fxdfile + '.fxd')
parser.set_handler('copyright', self.write_fxd_copyright, 'w', True)
parser.set_handler('movie', self.write_fxd_video, 'w', True)
parser.save()
except:
print "fxd file %s corrupt" % self.fxdfile
traceback.print_exc()
def update_movie(self):
"""Updates an existing file, adds exftra dvd|vcd|file and variant tags"""
print "Update not supported for the moment... Sorry"
def update_discset(self):
"""Updates an existing file, adds extra disc in discset"""
print "Update not supported for the moment... Sorry"
def parsedata(self, results, id=0):
"""results (filmaffinity html page), filmaffinity_id
Returns tuple of (title, info(dict), image_url)"""
dvd = 0
inside_plot = None
self.image_url = ''
soup = BeautifulSoup(results.read(), convertEntities='html')
results.close()
img = soup.find('img',src=re.compile('.*-full\.jpg$'))
if img:
trs = img.findParent('table').findAll('tr')
img_ratings = soup.find('img',src=re.compile('imgs/ratings'))
else:
trs = None
img_ratings = None
# _debug_("Tag %s" % trs)
self.title = soup.find('img', src=re.compile('movie.gif$')).nextSibling.string.strip().encode('latin-1')
self.info['director'] = stripTags(soup.find(text='DIRECTOR').findParent('table').td.nextSibling.nextSibling.contents).strip()
self.info['year'] = soup.find(text='AÑO').parent.parent.parent.table.td.string.strip()
self.info['country'] = soup.find('img', src=re.compile('^\/imgs\/countries\/'))['title'].strip()
if img_ratings:
self.info['rating'] = img_ratings['alt'] + ' (' + trs[1].td.string + '/' + trs[4].td.string.strip('(')
self.info['tagline'] = soup.find(text='TÍTULO ORIGINAL').findParent('table').td.nextSibling.nextSibling.b.string.strip().encode('latin-1')
self.info['actor']= stripTags(soup.find(text='REPARTO').parent.parent.nextSibling.nextSibling.contents).strip()
sinopsis = soup.find(text='SINOPSIS')
if sinopsis:
td = sinopsis.findNext('td')
logger.debug('PLOT: %s', td.contents)
self.info['plot'] = '\n'.join([td.string for td in td.findAll(text=True)]).strip().encode('latin-1')
genero = soup.find(text='GÉNERO')
if genero:
td = genero.findNext('td')
logger.debug('GENRE: %s', td.contents)
self.info['genre'] = '/'.join([td.string for td in td.findAll('a')]).strip().encode('latin-1')
#self.imagefile = self.tmppath + vfs.basename(self.title)
# filmaffinity renders two types of html code. The new one
# with an <a> tag to show a big image and the old one without it
#
if img:
if img.parent.has_key('href'):
self.image_url = img.parent['href']
else:
self.image_url = img['src']
return (self.title, self.info, self.image_url)
def fetch_image(self):
"""Fetch the best image"""
if (len(self.image_url) == 0): # No images
return
self.image = (self.fxdfile + '.jpg')
req = urllib2.Request(self.image_url, txdata, txheaders)
r = urllib2.urlopen(req)
i = vfs.open(self.image, 'w')
i.write(r.read())
i.close()
r.close()
print "Downloaded cover image from Filmaffinity.com"
print "Freevo knows nothing about the copyright of this image, please"
print "go to Filmaffinity.com to check for more informations about private."
print "use of this image"
def setVideo(self, *videos, **mplayer_opt):
"""
videos (tuple (type, id-ref, device, mplayer-opts, file/param) (multiple allowed),
global_mplayer_opts
Set media file(s) for fxd
"""
if self.isdiscset:
raise FxdFilmaffinity_XML_Error("<disc-set> already used, can't use both "+
"<movie> and <disc-set>")
if videos:
for video in videos:
self.video += [ video ]
        if mplayer_opt and 'mplayer_opt' in mplayer_opt:
self.mpl_global_opt = mplayer_opt['mplayer_opt']
class Error(Exception):
"""Base class for exceptions in Filmaffinity_Fxd"""
def __str__(self):
return self.message
def __init__(self, message):
self.message = message
class FxdFilmaffinity_Error(Error):
"""used to raise exceptions"""
pass
class FxdFilmaffinity_XML_Error(Error):
"""used to raise exceptions"""
pass
class FxdFilmaffinity_IO_Error(Error):
"""used to raise exceptions"""
pass
class FxdFilmaffinity_Net_Error(Error):
"""used to raise exceptions"""
pass
def stripTags(c):
str_list = []
for num in xrange(len(c)):
str_list.append(c[num].string)
return ''.join(str_list)
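# A hedged usage sketch of stripTags, assuming the same BeautifulSoup 3.x API
# used above (the HTML fragment is hypothetical): it joins the .string of each
# node in a contents list.
#
#   soup = BeautifulSoup('<td><b>Jane</b>, <i>Doe</i></td>')
#   stripTags(soup.td.contents)   # -> u'Jane, Doe'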
|
freevo/freevo1
|
src/video/plugins/filmaffinity.py
|
Python
|
gpl-2.0
| 23,935
|
# -*- coding: utf-8 -*-
from ...analysis.parameters.glucose_dose import GlucoseGramsDose
from ...analysis.parameters.carb_dose import CarbsDoses
class ParametersGathering(object):
"""
    Gathers the objects that extract parameters
"""
def __init__(self, context, basic_gathering):
        self._c = context
#self.carbs_and_doses = CarbsDoses(context, start_dt, end_dt, cf, meal)
#self.glucose_and_doses = GlucoseGramsDose(carbs_doses, body_traits)
@property
def context(self):
return self._c
|
koyadovic/Dia
|
predictive/systems/statistical/analysis/bundles/parameters.py
|
Python
|
gpl-2.0
| 592
|
#!/usr/bin/env python
#
# $File: genoStru.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
pop = sim.Population(size=[2, 3], ploidy=2, loci=[5, 10],
lociPos=list(range(0, 5)) + list(range(0, 20, 2)), chromNames=['Chr1', 'Chr2'],
alleleNames=['A', 'C', 'T', 'G'])
# access genotypic information from the sim.Population
pop.ploidy()
pop.ploidyName()
pop.numChrom()
pop.locusPos(2)
pop.alleleName(1)
# access from an individual
ind = pop.individual(2)
ind.numLoci(1)
ind.chromName(0)
ind.locusName(1)
# utility functions
ind.chromBegin(1)
ind.chromByName('Chr2')
# loci pos can be unordered within each chromosome
pop = sim.Population(loci=[2, 3], lociPos=[3, 1, 1, 3, 2],
lociNames=['loc%d' % x for x in range(5)])
pop.lociPos()
pop.lociNames()
|
BoPeng/simuPOP
|
docs/genoStru.py
|
Python
|
gpl-2.0
| 1,769
|
# Common dialog code.
#
# Copyright (C) 2007, 2008 Red Hat, Inc. All rights reserved.
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program; if
# not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Red Hat Author: Miloslav Trmac <[email protected]>
import os
import gtk.glade
import settings
__all__ = ('DialogBase',)
class DialogBase(object):
    '''Common utilities for dialogs.'''
def __init__(self, toplevel_name, parent, notebook_name = None):
'''Create a window from the glade file and get references to widgets.
If notebook_name is not None, use it in validate_values(). Make the
window transient for parent.
'''
glade_xml = gtk.glade.XML(settings.glade_file_path, toplevel_name)
for name in self._glade_widget_names:
w = glade_xml.get_widget(name)
assert w is not None, 'Widget %s not found in glade file' % name
setattr(self, name, w)
# This name is special :)
self.window = glade_xml.get_widget(toplevel_name)
if parent is not None:
self.window.set_transient_for(parent)
if notebook_name is None:
self.__notebook_widget = None
else:
self.__notebook_widget = glade_xml.get_widget(notebook_name)
assert self.__notebook_widget is not None
def destroy(self):
'''Destroy the dialog.'''
self.window.destroy()
def _validate_get_failure(self):
'''Check whether the window state is a valid configuration.
Return None if it is valid. Otherwise, return (message, notebook page
index or None, widget).
'''
raise NotImplementedError()
def _validate_values(self):
'''Check whether the dialog state is a valid configuration.
Return True if it is valid. Otherwise, display an error message and
return False.
'''
a = self._validate_get_failure()
if a is None:
return True
(msg, page, widget) = a
if self.__notebook_widget is not None:
self.__notebook_widget.set_current_page(page)
self._modal_error_dialog(msg)
widget.grab_focus()
return False
def _modal_error_dialog(self, msg):
'''Show a modal error dialog.'''
dlg = gtk.MessageDialog(self.window, gtk.DIALOG_DESTROY_WITH_PARENT,
gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, msg)
dlg.run()
dlg.destroy()
def _radio_set(self, value, pairs):
'''Update the "active" state of several toggle buttons.
The pairs parameter is a tuple of (widget name, expected value) pairs.
Expected value is either a single value, or a tuple of possible values.
'''
for (name, expected) in pairs:
if type(expected) == tuple:
active = value in expected
else:
active = value == expected
getattr(self, name).set_active(active)
def _radio_get(self, pairs):
'''Get the "active" button from a group of radio buttons.
The pairs parameter is a tuple of (widget name, return value) pairs.
If no widget is active, an assertion will fail.
'''
for (name, value) in pairs:
if getattr(self, name).get_active():
return value
assert False, 'No widget is active'
def _setup_browse_button(self, button, entry, title, action):
'''Set up a "Browse" button for a path entry.'''
button.connect('clicked', self.__browse_button_clicked, entry, title,
action)
def __browse_button_clicked(self, unused, entry, title, action):
if action == gtk.FILE_CHOOSER_ACTION_SAVE:
stock_accept = gtk.STOCK_SAVE
else:
stock_accept = gtk.STOCK_OPEN
dlg = gtk.FileChooserDialog(title, self.window, action,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
stock_accept, gtk.RESPONSE_ACCEPT))
path = entry.get_text()
if action == gtk.FILE_CHOOSER_ACTION_SAVE:
dlg.set_current_folder(os.path.dirname(path))
dlg.set_current_name(os.path.basename(path))
else:
dlg.set_filename(path)
r = dlg.run()
if r == gtk.RESPONSE_ACCEPT:
entry.set_text(dlg.get_filename())
dlg.destroy()
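    # A minimal usage sketch for the radio helpers above; the widget names
    # 'radio_daily' and 'radio_archive' are hypothetical, not taken from the
    # glade file:
    #
    #   self._radio_set(mode, (('radio_daily', 'daily'),
    #                          ('radio_archive', ('weekly', 'monthly'))))
    #   mode = self._radio_get((('radio_daily', 'daily'),
    #                           ('radio_archive', 'weekly')))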
|
ystk/debian-audit
|
system-config-audit/src/dialog_base.py
|
Python
|
gpl-2.0
| 5,208
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
ilk_sayisal_deger = 10
ilk_ondalikli_deger = 3.14
ilk_mantiksal_deger = True
print ilk_sayisal_deger
|
ozgebarbaros/ab2015PythonKurs
|
1.gun/1.ilk_program/verilen_ilk_deger.py
|
Python
|
gpl-2.0
| 147
|
import numpy as np
from .fingerprint import Fingerprint
class RelativeLocationFingerprint(Fingerprint):
def trans_func_(self, row):
values = row
max_value = max(values)
features = []
for i in range(0, len(values)):
for j in range(0, len(values)):
if i == j:
continue
r = values[i] / float(values[j])
features.append(r)
return features
|
rloliveirajr/sklearn_transformers
|
trans4mers/feature_extraction/relative_location_fingerprint.py
|
Python
|
gpl-2.0
| 463
|
from . import minic_ast
class PrettyGenerator(object):
def __init__(self):
# Statements start with indentation of self.indent_level spaces, using
# the _make_indent method
#
self.indent_level = 0
def _make_indent(self):
return ' ' * self.indent_level
def visit(self, node):
method = 'visit_' + node.__class__.__name__
return getattr(self, method, self.generic_visit)(node)
def generic_visit(self, node):
#~ print('generic:', type(node))
if node is None:
return ''
else:
return ''.join(self.visit(c) for c_name, c in node.children())
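# A minimal usage sketch, assuming a hypothetical minic_ast node class named
# Constant: subclasses add visit_<ClassName> methods, and visit() dispatches on
# the node's class name, falling back to generic_visit.
#
#   class MyGenerator(PrettyGenerator):
#       def visit_Constant(self, node):
#           return str(node.value)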
|
martylee/Python
|
CSC410-Project-1-master/minic/pretty_minic.py
|
Python
|
gpl-2.0
| 658
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Converts a color scheme texture image to json arrays
.. moduleauthor:: Mihai Andrei <[email protected]>
"""
import numpy
import Image
# See TVB-985
if not hasattr(Image, 'open'):
from Image import Image
def color_texture_to_list(img_pth, img_width, band_height):
"""
    :param img_pth: Path to the texture
:param img_width: Texture width
:param band_height: Height of a color scheme band
:return: A list of img_width/band_height color schemes. A scheme is a list of img_width colors
"""
im = Image.open(img_pth)
ima = numpy.asarray(im)
if ima.shape != (img_width, img_width, 4):
raise ValueError("unexpected image shape " + str(ima.shape))
tex_vs = [(i * band_height + 0.5)/img_width for i in xrange(img_width/band_height)]
color_schemes = []
for v in tex_vs:
idx = int(v * img_width)
color_schemes.append(ima[idx, :, :3].tolist())
return color_schemes
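# A minimal usage sketch; the file name, texture width and band height below
# are hypothetical examples, not values taken from TVB:
if __name__ == '__main__':
    schemes = color_texture_to_list('color_schemes.png', 256, 8)
    print len(schemes), 'schemes of', len(schemes[0]), 'colors each'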
|
rajul/tvb-framework
|
tvb/core/services/texture_to_json.py
|
Python
|
gpl-2.0
| 2,352
|
#!/usr/bin/python
"""
pyNEAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
## Simple XOR experiment. This should be the minimum amount of effort required
## to create an experiment with pyNEAT.
import pyNEAT
import math
#import profile
class XORTest(pyNEAT.Experiment):
def __init__(self):
pyNEAT.Experiment.__init__(self, 'XOR', 'xorstartgenes')
self.inputs = [[1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
self.targets = [0.0, 1.0, 1.0, 0.0]
def evaluate(self, network):
outputs = network.activate(inputs=self.inputs)
errorSum = 0
winner = True
for i in range(len(self.targets)):
target = self.targets[i]
output = outputs[i][0]
errorSum += math.fabs(output - target)
if (target > 0.5 and output < 0.5) or \
(target < 0.5 and output > 0.5):
winner = False
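        # Standard NEAT XOR fitness: the total absolute error over the four
        # cases is at most 4, so (4 - error)^2 rewards low error, with a
        # maximum fitness of 16 for a perfect network.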
fitness = (4.0 - errorSum) ** 2
error = errorSum
return fitness, outputs, error, winner
if __name__ == '__main__':
pyNEAT.loadConfiguration('xor.ne')
xorTest = XORTest()
xorTest.run(useGUI=True)
#profile.run("xorTest.run()")
|
liquidkarma/pyneat
|
examples/xor/xor_neat.py
|
Python
|
gpl-2.0
| 1,835
|
# screensaverpause - pauses Exaile playback on screensaver activation
# Copyright (C) 2009-2011 Johannes Sasongko <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
import dbus, gtk
from xl import event, player, settings
SERVICES = [
dict( # GNOME
bus_name='org.gnome.ScreenSaver',
path='/org/gnome/ScreenSaver',
dbus_interface='org.gnome.ScreenSaver',
),
dict( # KDE
bus_name='org.freedesktop.ScreenSaver',
path='/',
dbus_interface='org.freedesktop.ScreenSaver',
),
]
import prefs
def get_preferences_pane():
return prefs
matches = set()
bus = None
was_playing = None
def screensaver_active_changed(is_active):
global was_playing
if is_active:
was_playing = player.PLAYER.is_playing()
player.PLAYER.pause()
elif was_playing and settings.get_option("screensaverpause/unpause", 0):
player.PLAYER.unpause()
def enable(exaile):
if exaile.loading:
event.add_callback(_enable, 'exaile_loaded')
else:
_enable()
def _enable(*a):
global bus
bus = dbus.SessionBus()
for service in SERVICES:
matches.add(bus.add_signal_receiver(screensaver_active_changed,
signal_name='ActiveChanged', **service))
def disable(exaile):
if bus is None: return
for match in frozenset(matches):
match.remove()
matches.remove(match)
def test():
import glib, gobject
gobject.threads_init()
import dbus.mainloop.glib as dbgl
dbgl.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SessionBus()
for service in SERVICES:
try:
proxy = bus.get_object(service['bus_name'], service['path'],
follow_name_owner_changes=True)
except dbus.DBusException:
continue
break
else:
return None
assert proxy
interface = dbus.Interface(proxy, service['dbus_interface'])
mainloop = glib.MainLoop()
def active_changed(new_value):
if not new_value:
mainloop.quit()
interface.connect_to_signal('ActiveChanged', screensaver_active_changed)
# For some reason Lock never returns.
interface.Lock(ignore_reply=True)
mainloop.run()
if __name__ == '__main__':
test()
# vi: et sts=4 sw=4 tw=80
|
eri-trabiccolo/exaile
|
plugins/screensaverpause/__init__.py
|
Python
|
gpl-2.0
| 3,410
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb
from datetime import *
# Creates an empty matrix with the given number of rows (filas) and columns (columnas)
def Matriz_vacia(filas, columnas):
A = []
for i in range(0,filas):
A.append([0]*columnas)
return A
# Shows how the chosen author's commit activity is distributed over time
def activity_info(list_commits_author, authors, author):
print list_commits_author[author]
hours = Matriz_vacia(2, 24)
hours[0] = range(1,25)
for aux in authors[author]:
hours[1][int(aux[1].hour)-1] += 1
print hours[0]
print hours[1]
list_projects = []
list_projects.append('vizgrimoire_vizgrimoirer_cvsanaly')
list_projects.append('openstack_tempest_cvsanaly')
list_projects.append('twbs_bootstrap_cvsanaly')
con = MySQLdb.connect(host='localhost', user='tthebosss', passwd='1234', \
db=list_projects[1])
cursor = con.cursor()
cursor.execute('SELECT COUNT(*) FROM people')
tot_authors = int(cursor.fetchall()[0][0])
cursor.execute('SELECT MIN(date) FROM scmlog')
date_min = cursor.fetchall()[0][0]
date_min = date_min.year
cursor.execute('SELECT MAX(date) FROM scmlog')
date_max = cursor.fetchall()[0][0]
date_max = date_max.year
period = range(date_min,date_max)
period.append(date_max)
authors = []
for i in range(1,tot_authors+1):
query = ('SELECT author_id, date FROM scmlog '
'WHERE author_id = %s ORDER BY date')
cursor.execute(query, i)
authors.append(cursor.fetchall())
list_commits_author = []
for author in authors:
#OH=Office Hour, AO=After Office, LN=Late Night
activity = [0, 0, 0] # [OH, AO, LN]
for commit in author:
if commit[1].hour >= 9 and commit[1].hour < 17: #OH
activity[0] += 1
elif commit[1].hour >= 1 and commit[1].hour < 9: #LN
activity[2] += 1
else: #AO
activity[1] += 1
list_commits_author.append(activity)
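# For example, an author with commits at 10:00, 18:00 and 03:00 ends up with
# activity == [1, 1, 1]: one office-hour, one after-office, one late-night.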
author = 42
activity_info(list_commits_author, authors, author)
cursor.close()
con.close()
|
ccervigon/codigo_pfc
|
otros_scripts/author_clasif.py
|
Python
|
gpl-2.0
| 2,056
|
"""
Python Archive Reader 1.0
https://github.com/PastebinArchiveReader/PAR
"""
import requests
from bs4 import BeautifulSoup
import urllib
import argparse
import time
# add parsing functionality to provide files
parser = argparse.ArgumentParser(description="Script to download pastebin.com archives",
epilog='''You can download different archives from pastebin.com with this script.
Simply specify a language, extension and path.''')
parser.add_argument("-l", "--language", dest="language", help="specify the programming language",
metavar="python, csharp, cpp, etc.")
parser.add_argument("-e", "--extension", dest="extension", help="file extension of the language",
metavar="extension")
parser.add_argument("-p", "--path", dest="path", help="where to save the downloaded files",
metavar="/home/anon/scripts/")
args = parser.parse_args()
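# A hypothetical invocation (paths are illustrative):
#   python PAR.py -l python -e .py -p /home/anon/scripts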
url = "http://pastebin.com/archive/" + args.language
while 1:
source = requests.get(url)
soup = BeautifulSoup(source.text)
for link in soup.find_all('a'):
if len(link.get('href')) == 9:
if link.get('href') != "/settings": # "/settings" is just a 9-characters configuration file from Pastebin.com. Pointless.
ID = link.get('href')
paste = link.get('href').replace('/', '')
paste = "http://www.pastebin.com/raw.php?i=" + paste
print("[?] {}".format(paste))
downloaded_file = args.path + "/" + ID + args.extension
urllib.urlretrieve(paste, downloaded_file)
print("[!] Downloaded !\n")
time.sleep(3.5) # If the delay is smaller, Pastebin.com will block your IP
print("Finished !")
time.sleep(1)
print("Restarting...")
|
PastebinArchiveReader/PAR
|
PAR.py
|
Python
|
gpl-2.0
| 1,869
|
from typing import Any
from binary_tree import BinaryTree, BinaryTreeNode
class AvlTree(BinaryTree):
def __init__(self, value):
super(AvlTree, self).__init__(value)
self.height = 1
def _left_rotate(self, node: BinaryTreeNode) -> BinaryTreeNode:
"""
Rotate node left
        :param node: Node to rotate
:return: Updated node
"""
y: BinaryTreeNode = node.right
t2: BinaryTreeNode = y.left
# Perform rotation
y.left = node
node.right = t2
# Update heights
node.height = 1 + max(self.get_height(node.left),
self.get_height(node.right))
y.height = 1 + max(self.get_height(y.left),
self.get_height(y.right))
# Return the new root
return y
def _right_rotate(self, node: BinaryTreeNode) -> BinaryTreeNode:
"""
Rotate Right
:param node: Node to rotate
:return: Updated Node
"""
y: BinaryTreeNode = node.left
t3: BinaryTreeNode = y.right
# Perform rotation
y.right = node
node.left = t3
# Update heights
node.height = 1 + max(self.get_height(node.left),
self.get_height(node.right))
y.height = 1 + max(self.get_height(y.left),
self.get_height(y.right))
# Return the new root
return y
def _get_balance(self, node: BinaryTreeNode) -> int:
"""
Get the balance at the node
:param node: Node to check
:return: Balance
"""
if not node:
return 0
return self.get_height(node.left) - self.get_height(node.right)
def _remove_node(self, tree: BinaryTreeNode, value: Any) -> BinaryTreeNode:
"""
Remove a node from the tree
:param tree: Tree to remove from within
:param value: Value to remove
:return: Updated tree
"""
if None is tree:
return tree
else:
if value < tree.value:
tree.left = self._remove_node(tree.left, value)
elif value > tree.value:
tree.right = self._remove_node(tree.right, value)
else:
if None is tree.right:
return tree.left
if None is tree.left:
return tree.right
# Get Min Right Value
temp_node = tree.right
the_min = temp_node.value
while temp_node.left:
temp_node = temp_node.left
the_min = temp_node.value
# Now remove it from the right tree
tree.right = self._remove_node(tree.right, the_min)
tree.value = the_min
if tree is None:
return tree
# Step 2 - Update the height of the
# ancestor node
tree.height = 1 + max(self.get_height(tree.left),
self.get_height(tree.right))
# Step 3 - Get the balance factor
balance = self._get_balance(tree)
# Step 4 - If the node is unbalanced,
# then try out the 4 cases
# Case 1 - Left Left
if balance > 1 and self._get_balance(tree.left) >= 0:
return self._right_rotate(tree)
# Case 2 - Right Right
if balance < -1 and self._get_balance(tree.right) <= 0:
return self._left_rotate(tree)
# Case 3 - Left Right
        if balance > 1 and self._get_balance(tree.left) < 0:
tree.left = self._left_rotate(tree.left)
return self._right_rotate(tree)
# Case 4 - Right Left
if balance < -1 and self._get_balance(tree.right) > 0:
tree.right = self._right_rotate(tree.right)
return self._left_rotate(tree)
return tree
def _add_node(self, node: BinaryTreeNode, value: Any) -> BinaryTreeNode:
if None is node:
return BinaryTreeNode(value)
else:
if node.value < value:
node.right = self._add_node(node.right, value)
elif node.value > value:
node.left = self._add_node(node.left, value)
node.height = 1 + max(self.get_height(node.left),
self.get_height(node.right))
# Step 3 - Get the balance factor
balance = self._get_balance(node)
# Step 4 - If the node is unbalanced,
# then try out the 4 cases
# Case 1 - Left Left
if balance > 1 and value < node.left.value:
return self._right_rotate(node)
# Case 2 - Right Right
if balance < -1 and value > node.right.value:
return self._left_rotate(node)
# Case 3 - Left Right
if balance > 1 and value > node.left.value:
node.left = self._left_rotate(node.left)
return self._right_rotate(node)
# Case 4 - Right Left
if balance < -1 and value < node.right.value:
node.right = self._right_rotate(node.right)
return self._left_rotate(node)
return node
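# A standalone sketch of the left rotation used by _left_rotate above; the
# _DemoNode class and helper are illustrative additions, not part of AvlTree
# (heights are omitted to keep the rotation itself visible).
class _DemoNode(object):
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right


def _demo_left_rotate(node):
    # The right child y becomes the subtree root; node adopts y's old left child.
    y = node.right
    node.right, y.left = y.left, node
    return y


if __name__ == '__main__':
    # A right-heavy chain 1 -> 2 -> 3 rebalances to 2 with children 1 and 3.
    root = _DemoNode(1, right=_DemoNode(2, right=_DemoNode(3)))
    root = _demo_left_rotate(root)
    assert (root.value, root.left.value, root.right.value) == (2, 1, 3)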
|
jidol/Examples
|
src/avl_tree.py
|
Python
|
gpl-2.0
| 5,481
|
from phantom_team.strategy.formation import positions
from smsoccer.strategy import formation
from superman import SuperMan
from smsoccer.players.abstractplayer import AbstractPlayer
from smsoccer.strategy.formation import player_position
from smsoccer.world.world_model import WorldModel, PlayModes
class AtackAgent(AbstractPlayer, SuperMan):
"""
    This is a demo of how to extend the AbstractPlayer and implement the
    think method. New developments are encouraged to follow the same pattern.
"""
def __init__(self, visualization=False):
AbstractPlayer.__init__(self)
SuperMan.__init__(self)
self.visualization = visualization
if visualization:
from smsoccer.util.fielddisplay import FieldDisplay
self.display = FieldDisplay()
self.current_time = 0
self.drib = True
def think(self):
"""
Performs a single step of thinking for our agent. Gets called on every
iteration of our think loop.
"""
self.update_super()
self.wm.ah.say('"hello hello"')
print len(self.wm.team_message_queue)
if self.visualization:
if self.wm.abs_coords[0] is None:
return
self.display.clear()
self.display.draw_robot(self.wm.abs_coords, self.wm.abs_body_dir)
if self.wm.ball is not None:
self.display.draw_circle(self.wm.get_object_absolute_coords(self.wm.ball), 4)
# print self.wm.ball.direction, self.wm.ball.distance
self.display.show()
# take places on the field by uniform number
if not self.in_kick_off_formation:
# Teleport to right position
self.teleport_to_point(positions[3])
# turns to attack field
if self.wm.side == WorldModel.SIDE_R:
self.wm.ah.turn(180)
# Player is ready in formation
self.in_kick_off_formation = True
return
# kick off!
if self.wm.play_mode == PlayModes.BEFORE_KICK_OFF:
# player 9 takes the kick off
if self.wm.uniform_number == 9:
if self.is_ball_kickable():
# kick with 100% extra effort at enemy goal
self.kick_to(self.goal_pos, 1.0)
# print self.goal_pos
else:
# move towards ball
if self.wm.ball is not None:
if self.wm.ball.direction is not None \
and -7 <= self.wm.ball.direction <= 7:
self.wm.ah.dash(50)
else:
self.wm.turn_body_to_point((0, 0))
# turn to ball if we can see it, else face the enemy goal
if self.wm.ball is not None:
self.turn_neck_to_object(self.wm.ball)
return
# attack!
else:
# self.wm.ah.dash(50)
# return
            # If not a new cycle
# if self.current_time == self.wm.sim_time:
# return
# self.current_time = self.wm.sim_time
# print self.wm.sim_time
# if self.wm.abs_coords is not None:
# self.dash_to_point((50,25))
# return
# find the ball
if self.drib:
if self.wm.ball is None or self.wm.ball.direction is None:
self.wm.ah.turn(35)
return
self.drib = self.dribbling_to((35, 15))
# # kick it at the enemy goal
# if self.is_ball_kickable():
#
# # angle = cut_angle(angle_between_points(self.wm.abs_coords, self.goal_pos)) - cut_angle(self.wm.abs_body_dir)
# # self.wm.ah.kick(20, angle)
# self.kick_to((0, 20))
# return
# else:
# # move towards ball
# if -7 <= self.wm.ball.direction <= 7:
# self.wm.ah.dash(5 * self.wm.ball.distance + 20)
# else:
# # face ball
# self.wm.ah.turn(self.wm.ball.direction / 2)
#
# return
|
dsaldana/phantoms_soccer2d
|
phantom_team/players/atack_agent.py
|
Python
|
gpl-2.0
| 4,311
|
import re
from xcsoar.mapgen.waypoints.waypoint import Waypoint
from xcsoar.mapgen.waypoints.list import WaypointList
def __parse_line(line, bounds = None):
if line.startswith('$'): return None
lat = line[45:52]
lat_neg = lat.startswith('S')
lat = float(lat[1:3]) + float(lat[3:5]) / 60. + float(lat[5:7]) / 3600.
if lat_neg: lat = -lat
if bounds and (lat > bounds.top or lat < bounds.bottom): return None
lon = line[52:60]
lon_neg = lon.startswith('W')
lon = float(lon[1:4]) + float(lon[4:6]) / 60. + float(lon[6:8]) / 3600.
if lon_neg: lon = -lon
if bounds and (lon > bounds.right or lon < bounds.left): return None
wp = Waypoint()
wp.lat = lat
wp.lon = lon
elev = line[41:45].strip()
if elev != '': wp.altitude = float(elev)
else: wp.altitude = 0.0
wp.short_name = line[:6]
if wp.short_name.endswith('1'): wp.type = 'airport'
elif wp.short_name.endswith('2'): wp.type = 'outlanding'
wp.short_name = wp.short_name.strip()
wp.name = line[7:41].strip()
if 'GLD' in wp.name: wp.type = 'glider_site'
if 'ULM' in wp.name: wp.type = 'ulm'
pos = -1
if '#' in wp.name: pos = wp.name.find('#')
if '*' in wp.name: pos = wp.name.find('*')
if pos > -1:
data = wp.name[pos + 1:]
wp.name = wp.name[:pos].strip()
icao = data[:4]
if not icao.startswith('GLD') and not icao.startswith('ULM'): wp.icao = icao
if data[4:5] == 'A': wp.surface = 'asphalt'
elif data[4:5] == 'C': wp.surface = 'concrete'
elif data[4:5] == 'L': wp.surface = 'loam'
elif data[4:5] == 'S': wp.surface = 'sand'
elif data[4:5] == 'Y': wp.surface = 'clay'
elif data[4:5] == 'G': wp.surface = 'gras'
elif data[4:5] == 'V': wp.surface = 'gravel'
elif data[4:5] == 'D': wp.surface = 'dirt'
runway_len = data[5:8].strip()
if runway_len != '':
wp.runway_len = int(runway_len) * 10
runway_dir = data[8:10].strip()
if runway_dir != '':
wp.runway_dir = int(runway_dir) * 10
freq = data[12:17].strip()
if len(freq) == 5:
if freq.endswith('2') or freq.endswith('7'): freq += '5'
else: freq += '0'
wp.freq = float(freq) / 1000.
if wp.name.endswith('GLD'):
wp.name = wp.name[:-3].strip()
else:
wp.name = wp.name.rstrip('!?1 ')
if re.search('(^|\s)BERG($|\s)', wp.name): wp.type = 'mountain top'
if re.search('(^|\s)COL($|\s)', wp.name): wp.type = 'mountain pass'
if re.search('(^|\s)PASS($|\s)', wp.name): wp.type = 'mountain pass'
if re.search('(^|\s)TOP($|\s)', wp.name): wp.type = 'mountain top'
if re.search('(\s)A(\d){0,3}($|\s)', wp.name): wp.type = 'highway exit'
if re.search('(\s)AB(\d){0,3}($|\s)', wp.name): wp.type = 'highway exit'
if re.search('(\s)BAB(\d){0,3}($|\s)', wp.name): wp.type = 'highway exit'
if re.search('(\s)(\w){0,3}XA(\d){0,3}($|\s)', wp.name): wp.type = 'highway cross'
if re.search('(\s)(\w){0,3}YA(\d){0,3}($|\s)', wp.name): wp.type = 'highway junction'
if re.search('(\s)STR($|\s)', wp.name): wp.type = 'road'
if re.search('(\s)SX($|\s)', wp.name): wp.type = 'road cross'
if re.search('(\s)SY($|\s)', wp.name): wp.type = 'road junction'
if re.search('(\s)EX($|\s)', wp.name): wp.type = 'railway cross'
if re.search('(\s)EY($|\s)', wp.name): wp.type = 'railway junction'
if re.search('(\s)TR($|\s)', wp.name): wp.type = 'gas station'
if re.search('(\s)BF($|\s)', wp.name): wp.type = 'railway station'
if re.search('(\s)RS($|\s)', wp.name): wp.type = 'railway station'
if re.search('(\s)BR($|\s)', wp.name): wp.type = 'bridge'
if re.search('(\s)TV($|\s)', wp.name): wp.type = 'tower'
if re.search('(\s)KW($|\s)', wp.name): wp.type = 'powerplant'
wp.name = wp.name.title()
while ' ' in wp.name:
wp.name = wp.name.replace(' ', ' ')
    wp.country_code = line[60:62].strip()
return wp
def parse_welt2000_waypoints(lines, bounds = None):
waypoint_list = WaypointList()
for line in lines:
wp = __parse_line(line, bounds)
if wp: waypoint_list.append(wp)
return waypoint_list
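# A minimal sketch of the coordinate decoding done in __parse_line; the sample
# field below is hypothetical:
if __name__ == '__main__':
    lat = 'N471234'  # hemisphere letter, then DDMMSS
    value = float(lat[1:3]) + float(lat[3:5]) / 60. + float(lat[5:7]) / 3600.
    if lat.startswith('S'):
        value = -value
    print('%.5f' % value)  # -> 47.20944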
|
TobiasLohner/mapgen
|
lib/xcsoar/mapgen/waypoints/welt2000_reader.py
|
Python
|
gpl-2.0
| 4,373
|
import praw
r = praw.Reddit(user_agent='kumaX')
#r.login('kumaX','Sho3lick')
submissions = r.get_subreddit('worldnews').get_top()
print [str(x) for x in submissions]
|
fro391/Investing
|
PRAW/Praw.py
|
Python
|
gpl-2.0
| 168
|
from rest_framework import serializers, fields
from . import models
class TypeSerializer(serializers.ModelSerializer):
class Meta:
model = models.Type
class IssueSerializer(serializers.ModelSerializer):
class Meta:
model = models.Issue
address = fields.CharField(read_only=True)
postal_code = fields.CharField(read_only=True)
city = fields.CharField(read_only=True)
state = fields.CharField(read_only=True)
county = fields.CharField(read_only=True)
country = fields.CharField(read_only=True)
type = serializers.PrimaryKeyRelatedField(queryset=models.Type.objects.all())
type_nested = TypeSerializer(read_only=True, source='type')
class PopulateExternalSerializer(serializers.Serializer):
city = fields.CharField(write_only=True)
|
initios/hackvg-cityreport-backend
|
core/serializers.py
|
Python
|
gpl-2.0
| 798
|
# -*- python-indent: 4; coding: iso-8859-1; mode: python -*-
# Copyright (C) 2008 Cedric Pinson, Jeremy Moles
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Cedric Pinson <[email protected]>
# Jeremy Moles <[email protected]>
import bpy
import json
import mathutils
Matrix = mathutils.Matrix
Vector = mathutils.Vector
FLOATPRE = 5
CONCAT = lambda s, j="": j.join(str(v) for v in s)
STRFLT = lambda f: "%%.%df" % FLOATPRE % float(f)
INDENT = 2
VERSION = (0, 0, 0)
def findNode(name, root):
if root.name == name:
return root
    if not isinstance(root, Group):
return None
for i in root.children:
found = findNode(name, i)
if found is not None:
return found
return None
def findMaterial(name, root):
if root.stateset is not None:
for i in root.stateset.attributes:
            if isinstance(i, Material) and i.name == name:
return i
    if isinstance(root, Geode):
for i in root.drawables:
found = findMaterial(name, i)
if found is not None:
return found
    if isinstance(root, Group):
for i in root.children:
found = findMaterial(name, i)
if found is not None:
return found
return None
class Writer(object):
instances = {}
wrote_elements = {}
file_object = None
def __init__(self, comment=None):
object.__init__(self)
self.comment = comment
self.indent_level = 0
self.counter = len(Writer.instances)
Writer.instances[self] = True
def writeFile(self, output):
self.writeHeader(output)
self.write(output)
def writeHeader(self, output):
output.write("#Ascii Scene\n".encode('utf-8'))
output.write("#Version 92\n".encode('utf-8'))
output.write(("#Generator osgexport %d.%d.%d\n\n" % VERSION).encode('utf-8'))
def write(self, output):
Writer.serializeInstanceOrUseIt(self, output)
def encode(self, string):
text = string.replace("\t", "") \
.replace("#", (" " * INDENT)) \
.replace("$", (" " * (INDENT * self.indent_level)))
return text.encode('utf-8')
def writeMatrix(self, output, matrix):
        # Blender 2.62 changed mathutils.Matrix indexing (element access was
        # effectively transposed), hence the two write orders below.
        if bpy.app.version >= (2, 62):
for i in range(0, 4):
output.write(self.encode("$##%s %s %s %s\n" % (STRFLT(matrix[0][i]),
STRFLT(matrix[1][i]),
STRFLT(matrix[2][i]),
STRFLT(matrix[3][i]))))
else:
for i in range(0, 4):
output.write(self.encode("$##%s %s %s %s\n" % (STRFLT(matrix[i][0]),
STRFLT(matrix[i][1]),
STRFLT(matrix[i][2]),
STRFLT(matrix[i][3]))))
output.write(self.encode("$#}\n"))
@staticmethod
def resetWriter():
Writer.instances = {}
ArrayData.instance = 0
Object.instance = 0
@staticmethod
def serializeInstanceOrUseIt(obj, output):
if obj in Writer.wrote_elements and \
hasattr(obj, "uniqueID") and \
obj.uniqueID is not None and \
hasattr(obj, 'serializeReference'):
return obj.serializeReference(output)
Writer.wrote_elements[obj] = True
return obj.serialize(output)
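        # For example, a Material shared by two geodes is serialized in full
        # only once; later references emit just the UniqueID block written by
        # serializeReference.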
class Object(Writer):
instance = 0
def __init__(self, *args, **kwargs):
Writer.__init__(self, *args)
self.dataVariance = "UNKNOWN"
self.name = kwargs.get('name', "None")
self.uniqueID = None
self.userdata = None
def generateID(self):
self.uniqueID = Object.instance
Object.instance += 1
def copyFrom(self, obj):
self.name = obj.name
self.dataVariance = obj.dataVariance
def serializeReference(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
output.write(self.encode("$#UniqueID %d\n" % self.uniqueID))
output.write(self.encode("$}\n"))
def getOrCreateUserData(self):
if self.userdata is None:
self.userdata = DefaultUserDataContainer()
return self.userdata
def getNameSpaceClass(self):
return "{}::{}".format(self.nameSpace(), self.className())
def setName(self, name):
self.name = name
def className(self):
return "Object"
def nameSpace(self):
return "osg"
def serializeContent(self, output):
if self.uniqueID is not None:
output.write(self.encode("$#UniqueID {}\n".format(self.uniqueID)))
if self.name is not "None":
output.write(self.encode("$#Name \"{}\"\n".format(self.name)))
if self.dataVariance is not "UNKNOWN":
output.write(self.encode("$#DataVariance {}\n".format(self.dataVariance)))
if self.userdata is not None:
output.write(self.encode("$#UserDataContainer TRUE {\n"))
self.userdata.indent_level = self.indent_level + 2
self.userdata.write(output)
output.write(self.encode("$#}\n"))
class StringValueObject(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self)
self.generateID()
self.key = args[0]
self.value = args[1]
def className(self):
return "StringValueObject"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
output.write(self.encode("$#Name %s\n" % json.dumps(self.key)))
output.write(self.encode("$#Value %s\n" % json.dumps(self.value)))
output.write(self.encode("$}\n"))
class DefaultUserDataContainer(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.value = []
def append(self, value):
self.value.append(value)
def className(self):
return "DefaultUserDataContainer"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#UDC_UserObjects %d {\n" % len(self.value)))
for s in self.value:
s.indent_level = self.indent_level + 2
s.write(output)
output.write(self.encode("$#}\n"))
class UpdateMatrixTransform(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.stacked_transforms = []
def className(self):
return "UpdateMatrixTransform"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#StackedTransforms %d {\n" % len(self.stacked_transforms)))
for s in self.stacked_transforms:
s.indent_level = self.indent_level + 2
s.write(output)
output.write(self.encode("$#}\n"))
class UpdateMaterial(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
def className(self):
return "UpdateMaterial"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
output.write(self.encode("$}\n"))
class StackedMatrixElement(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
if self.name == "None":
self.name = "matrix"
m = Matrix().to_4x4()
m.identity()
self.matrix = kwargs.get('matrix', m)
def className(self):
return "StackedMatrixElement"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Matrix {\n"))
self.writeMatrix(output, self.matrix)
class StackedTranslateElement(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.translate = Vector((0, 0, 0))
self.name = "translate"
def className(self):
return "StackedTranslateElement"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Translate %s %s %s\n" % (STRFLT(self.translate[0]),
STRFLT(self.translate[1]),
STRFLT(self.translate[2]))))
class StackedScaleElement(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.scale = Vector((1, 1, 1))
self.name = "scale"
def className(self):
return "StackedScaleElement"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Scale %s %s %s\n" % (STRFLT(self.scale[0]),
STRFLT(self.scale[1]),
STRFLT(self.scale[2]))))
class StackedRotateAxisElement(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.axis = kwargs.get('axis', Vector((1, 0, 0)))
self.angle = kwargs.get('angle', 0)
def className(self):
return "StackedRotateAxisElement"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Axis %s %s %s\n" % (STRFLT(self.axis[0]),
STRFLT(self.axis[1]),
STRFLT(self.axis[2]))))
output.write(self.encode("$#Angle %s\n" % (STRFLT(self.angle))))
class StackedQuaternionElement(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
m = Matrix().to_4x4()
m.identity()
self.quaternion = m.to_quaternion()
self.name = "quaternion"
def className(self):
return "StackedQuaternionElement"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Quaternion %s %s %s %s\n" % (STRFLT(self.quaternion.x),
STRFLT(self.quaternion.y),
STRFLT(self.quaternion.z),
STRFLT(self.quaternion.w))))
class UpdateBone(UpdateMatrixTransform):
def __init__(self, *args, **kwargs):
UpdateMatrixTransform.__init__(self, *args, **kwargs)
def nameSpace(self):
return "osgAnimation"
def className(self):
return "UpdateBone"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
UpdateMatrixTransform.serializeContent(self, output)
output.write(self.encode("$}\n"))
class UpdateSkeleton(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
def className(self):
return "UpdateSkeleton"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
output.write(self.encode("$}\n"))
class Node(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.cullingActive = "TRUE"
self.stateset = None
self.update_callbacks = []
def className(self):
return "Node"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
if len(self.update_callbacks) > 0:
output.write(self.encode("$#UpdateCallback TRUE {\n"))
for i in self.update_callbacks:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
if self.stateset is not None:
output.write(self.encode("$#StateSet TRUE {\n"))
self.stateset.indent_level = self.indent_level + 2
self.stateset.write(output)
output.write(self.encode("$#}\n"))
class Geode(Node):
def __init__(self, *args, **kwargs):
Node.__init__(self, *args, **kwargs)
self.drawables = []
def setName(self, name):
self.name = self.className() + name
def className(self):
return "Geode"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
Node.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Drawables %d {\n" % (len(self.drawables))))
for i in self.drawables:
if i is not None:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
class Group(Node):
def __init__(self, *args, **kwargs):
Node.__init__(self, *args, **kwargs)
self.children = []
def className(self):
return "Group"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
Node.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
if len(self.children) > 0:
output.write(self.encode("$#Children %d {\n" % (len(self.children))))
for i in self.children:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
class MatrixTransform(Group):
def __init__(self, *args, **kwargs):
Group.__init__(self, *args, **kwargs)
self.matrix = Matrix().to_4x4()
self.matrix.identity()
def className(self):
return "MatrixTransform"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
Node.serializeContent(self, output)
Group.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Matrix {\n"))
self.writeMatrix(output, self.matrix)
class StateAttribute(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.update_callbacks = []
def className(self):
return "StateAttribute"
def serializeContent(self, output):
Object.serializeContent(self, output)
if len(self.update_callbacks) > 0:
output.write(self.encode("$#UpdateCallback TRUE {\n"))
for i in self.update_callbacks:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
class StateTextureAttribute(StateAttribute):
def __init__(self, *args, **kwargs):
StateAttribute.__init__(self, *args, **kwargs)
self.unit = 0
def className(self):
return "StateTextureAttribute"
def serializeContent(self, output):
StateAttribute.serializeContent(self, output)
class Light(StateAttribute):
def __init__(self, *args, **kwargs):
StateAttribute.__init__(self, *args, **kwargs)
self.light_num = 0
self.ambient = (0.0, 0.0, 0.0, 1.0)
self.diffuse = (0.8, 0.8, 0.8, 1.0)
self.specular = (1.0, 1.0, 1.0, 1.0)
self.position = (0.0, 0.0, 1.0, 0.0)
self.direction = (0.0, 0.0, -1.0)
self.spot_exponent = 0.0
self.spot_cutoff = 180.0
self.constant_attenuation = 1.0
self.linear_attenuation = 0.0
self.quadratic_attenuation = 0.0
def className(self):
return "Light"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
StateAttribute.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#LightNum %s\n" % self.light_num))
output.write(self.encode("$#Ambient %s %s %s %s\n" % (STRFLT(self.ambient[0]),
STRFLT(self.ambient[1]),
STRFLT(self.ambient[2]),
STRFLT(self.ambient[3]))))
output.write(self.encode("$#Diffuse %s %s %s %s\n" % (STRFLT(self.diffuse[0]),
STRFLT(self.diffuse[1]),
STRFLT(self.diffuse[2]),
STRFLT(self.diffuse[3]))))
output.write(self.encode("$#Specular %s %s %s %s\n" % (STRFLT(self.specular[0]),
STRFLT(self.specular[1]),
STRFLT(self.specular[2]),
STRFLT(self.specular[3]))))
output.write(self.encode("$#Position %s %s %s %s\n" % (STRFLT(self.position[0]),
STRFLT(self.position[1]),
STRFLT(self.position[2]),
STRFLT(self.position[3]))))
output.write(self.encode("$#Direction %s %s %s\n" % (STRFLT(self.direction[0]),
STRFLT(self.direction[1]),
STRFLT(self.direction[2]))))
output.write(self.encode("$#ConstantAttenuation %s\n" % STRFLT(self.constant_attenuation)))
output.write(self.encode("$#LinearAttenuation %s\n" % STRFLT(self.linear_attenuation)))
output.write(self.encode("$#QuadraticAttenuation %s\n" % STRFLT(self.quadratic_attenuation)))
output.write(self.encode("$#SpotExponent %s\n" % STRFLT(self.spot_exponent)))
output.write(self.encode("$#SpotCutoff %s\n" % STRFLT(self.spot_cutoff)))
class LightSource(Group):
def __init__(self, *args, **kwargs):
Group.__init__(self, *args, **kwargs)
self.light = Light()
self.cullingActive = "FALSE"
def className(self):
return "LightSource"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
Node.serializeContent(self, output)
Group.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
if self.light is not None:
output.write(self.encode("$#Light TRUE {\n"))
self.light.indent_level = self.indent_level + 2
self.light.write(output)
output.write(self.encode("$#}\n"))
class Texture2D(StateTextureAttribute):
def __init__(self, *args, **kwargs):
StateTextureAttribute.__init__(self, *args, **kwargs)
self.source_image = None
self.file = "none"
self.wrap_s = "REPEAT"
self.wrap_t = "REPEAT"
self.wrap_r = "REPEAT"
self.min_filter = "LINEAR_MIPMAP_LINEAR"
self.mag_filter = "LINEAR"
self.internalFormatMode = "USE_IMAGE_DATA_FORMAT"
def className(self):
return "Texture2D"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
StateTextureAttribute.serializeContent(self, output)
output.write(self.encode("$#WRAP_S %s\n" % self.wrap_s))
output.write(self.encode("$#WRAP_T %s\n" % self.wrap_t))
output.write(self.encode("$#WRAP_R %s\n" % self.wrap_r))
output.write(self.encode("$#MIN_FILTER %s\n" % self.min_filter))
output.write(self.encode("$#MAG_FILTER %s\n" % self.mag_filter))
image = Image(filename=self.file)
output.write(self.encode("$#Image TRUE {\n"))
image.indent_level = self.indent_level + 1
image.write(output)
output.write(self.encode("$#}\n"))
class Image(Object):
def __init__(self, *args, **kwargs):
self.filename = kwargs.get("filename")
Object.__init__(self, *args, **kwargs)
self.generateID()
def serialize(self, output):
Object.serializeContent(self, output)
output.write(self.encode("$#FileName \"%s\"\n" % self.filename))
output.write(self.encode("$#WriteHint 0 2\n"))
class Material(StateAttribute):
def __init__(self, *args, **kwargs):
StateAttribute.__init__(self, *args, **kwargs)
diffuse_energy = 0.8
self.colormode = "OFF"
self.emission = (0.0, 0.0, 0.0, 1.0)
self.ambient = (0.0, 0.0, 0.0, 1.0)
self.diffuse = (0.8 * diffuse_energy, 0.8 * diffuse_energy, 0.8 * diffuse_energy, 1.0)
self.specular = (0.5, 0.5, 0.5, 1.0)
        self.shininess = 40 / (512 / 128)  # Blender stores shininess up to 512, OpenGL caps it at 128
def className(self):
return "Material"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
StateAttribute.serializeContent(self, output)
output.write(self.encode("$#Ambient TRUE Front %s %s %s %s Back %s %s %s %s\n" % (STRFLT(self.ambient[0]),
STRFLT(self.ambient[1]),
STRFLT(self.ambient[2]),
STRFLT(self.ambient[3]),
STRFLT(self.ambient[0]),
STRFLT(self.ambient[1]),
STRFLT(self.ambient[2]),
STRFLT(self.ambient[3]))))
output.write(self.encode("$#Diffuse TRUE Front %s %s %s %s Back %s %s %s %s\n" % (STRFLT(self.diffuse[0]),
STRFLT(self.diffuse[1]),
STRFLT(self.diffuse[2]),
STRFLT(self.diffuse[3]),
STRFLT(self.diffuse[0]),
STRFLT(self.diffuse[1]),
STRFLT(self.diffuse[2]),
STRFLT(self.diffuse[3]))))
output.write(self.encode("$#Specular TRUE Front %s %s %s %s Back %s %s %s %s\n" % (STRFLT(self.specular[0]),
STRFLT(self.specular[1]),
STRFLT(self.specular[2]),
STRFLT(self.specular[3]),
STRFLT(self.specular[0]),
STRFLT(self.specular[1]),
STRFLT(self.specular[2]),
STRFLT(self.specular[3]))))
output.write(self.encode("$#Emission TRUE Front %s %s %s %s Back %s %s %s %s\n" % (STRFLT(self.emission[0]),
STRFLT(self.emission[1]),
STRFLT(self.emission[2]),
STRFLT(self.emission[3]),
STRFLT(self.emission[0]),
STRFLT(self.emission[1]),
STRFLT(self.emission[2]),
STRFLT(self.emission[3]))))
output.write(self.encode("$#Shininess TRUE Front %s Back %s\n" % (STRFLT(self.shininess),
STRFLT(self.shininess))))
class LightModel(StateAttribute):
def __init__(self, *args, **kwargs):
StateAttribute.__init__(self, *args, **kwargs)
self.local_viewer = "FALSE"
self.color_control = "SEPARATE_SPECULAR_COLOR"
self.ambient = (0.2, 0.2, 0.2, 1.0)
def className(self):
return "LightModel"
def nameSpace(self):
return "osg"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
StateAttribute.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#AmbientIntensity %s %s %s %s\n" % (STRFLT(self.ambient[0]),
STRFLT(self.ambient[1]),
STRFLT(self.ambient[2]),
STRFLT(self.ambient[3]))))
output.write(self.encode("$#ColorControl %s\n" % self.color_control))
output.write(self.encode("$#LocalViewer %s\n" % self.local_viewer))
class StateSet(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.modes = {}
self.attributes = []
self.texture_attributes = {}
def getMaxTextureUnitUsed(self):
max_texture_unit = 0
for i in self.texture_attributes.keys():
if i > max_texture_unit:
max_texture_unit = i
return max_texture_unit
def className(self):
return "StateSet"
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.getNameSpaceClass())))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
if len(self.modes) > 0:
output.write(self.encode("$#ModeList %d {\n" % (len(self.modes))))
for i in self.modes.items():
if i is not None:
output.write(self.encode("$##%s %s\n" % i))
output.write(self.encode("$#}\n"))
if len(self.attributes) > 0:
output.write(self.encode("$#AttributeList %d {\n" % (len(self.attributes))))
for i in self.attributes:
if i is not None:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$##Value OFF\n"))
output.write(self.encode("$#}\n"))
if len(self.texture_attributes) > 0:
max_texture_used = self.getMaxTextureUnitUsed()
output.write(self.encode("$#TextureModeList %d {\n" % (1 + max_texture_used)))
for i in range(0, max_texture_used + 1):
if i in self.texture_attributes:
output.write(self.encode("$##Data 1 {\n"))
output.write(self.encode("$###GL_TEXTURE_2D ON\n"))
output.write(self.encode("$##}\n"))
else:
output.write(self.encode("$##Data 0\n"))
output.write(self.encode("$#}\n"))
output.write(self.encode("$#TextureAttributeList %d {\n" % (1 + max_texture_used)))
for i in range(0, max_texture_used + 1):
if i in self.texture_attributes:
attributes = self.texture_attributes.get(i)
output.write(self.encode("$##Data %d {\n" % len(attributes)))
for a in attributes:
if a is not None:
a.indent_level = self.indent_level + 3
a.write(output)
output.write(self.encode("$###Value OFF\n"))
output.write(self.encode("$##}\n"))
else:
output.write(self.encode("$##Data 0\n"))
output.write(self.encode("$#}\n"))
class ArrayData(Object):
instance = 0
def __init__(self, *args, **kwargs):
Object.__init__(self)
self.array = kwargs.get('array')
self.type = kwargs.get('type')
self.uniqueID = ArrayData.instance
ArrayData.instance += 1
def serializeReference(self, output):
output.write(self.encode("$Array TRUE ArrayID %d\n" % self.uniqueID))
def serialize(self, output):
output.write(self.encode("$Array TRUE ArrayID %s %s %d {\n" % (self.uniqueID, self.type, len(self.array))))
        dim = len(self.array[0]) if self.array else 0  # guard against empty arrays
for i in self.array:
if dim == 3:
output.write(self.encode("$#%s %s %s\n" % (STRFLT(i[0]), STRFLT(i[1]), STRFLT(i[2]))))
elif dim == 2:
output.write(self.encode("$#%s %s\n" % (STRFLT(i[0]), STRFLT(i[1]))))
elif dim == 4:
output.write(self.encode("$#%s %s %s %s\n" % (STRFLT(i[0]), STRFLT(i[1]), STRFLT(i[2]), STRFLT(i[3]))))
output.write(self.encode("$}\n"))
class VertexAttributeData(Writer):
def __init__(self, *args, **kwargs):
Writer.__init__(self)
self.array = None
if kwargs.get("array") is not None:
self.array = ArrayData(array=kwargs.get('array', None),
type=kwargs.get('type', None))
def getArray(self):
return self.array.array
def serialize(self, output):
output.write(self.encode("$%s {\n" % (self.className())))
if self.array is None:
output.write(self.encode("$#Array FALSE\n"))
else:
self.array.indent_level = self.indent_level + 1
self.array.write(output)
output.write(self.encode("$#Indices FALSE\n"))
output.write(self.encode("$#Binding BIND_PER_VERTEX\n"))
output.write(self.encode("$#Normalize 0\n"))
output.write(self.encode("$}\n"))
class VertexArray(VertexAttributeData):
def __init__(self, *args, **kwargs):
kwargs["array"] = kwargs.get("array", [])
kwargs["type"] = "Vec3fArray"
VertexAttributeData.__init__(self, *args, **kwargs)
def className(self):
return "VertexData"
class NormalArray(VertexArray):
def __init__(self, *args, **kwargs):
VertexArray.__init__(self, *args, **kwargs)
def className(self):
return "NormalData"
class ColorArray(VertexAttributeData):
def __init__(self, *args, **kwargs):
kwargs["type"] = "Vec3fArray"
kwargs["array"] = kwargs.get("array", [])
VertexAttributeData.__init__(self, *args, **kwargs)
def className(self):
return "ColorData"
class TexCoordArray(VertexAttributeData):
def __init__(self, *args, **kwargs):
kwargs["array"] = kwargs.get("array", [])
kwargs["type"] = "Vec2fArray"
VertexAttributeData.__init__(self, *args, **kwargs)
self.index = 0
def className(self):
return "Data"
class DrawElements(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.indexes = []
self.type = None
    def getSizeArray(self):
        # Pick the smallest index type that can hold every index.
        # (Returning "DrawElementsUInt" unconditionally would also work,
        # at the cost of larger files.)
        element = "DrawElementsUByte"
        for i in self.indexes:
            if i > 65535:
                return "DrawElementsUInt"
            elif i > 255:
                element = "DrawElementsUShort"
        return element
def className(self):
return "DrawElements"
def serialize(self, output):
element = self.getSizeArray()
output.write(self.encode("$#%s %s %s {\n" % (element, self.type, str(len(self.indexes)))))
n = 1
if self.type == "GL_TRIANGLES":
n = 3
if self.type == "GL_QUADS":
n = 4
total = int(len(self.indexes) / n)
for i in range(0, total):
output.write(self.encode("$##"))
for a in range(0, n):
output.write(self.encode("%s " % self.indexes[i * n + a]))
output.write(self.encode("\n"))
output.write(self.encode("$#}\n"))
class Geometry(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.primitives = []
self.vertexes = None
self.normals = None
self.colors = None
self.uvs = {}
self.stateset = None
def className(self):
return "Geometry"
def copyFrom(self, geometry):
Object.copyFrom(self, geometry)
self.primitives = geometry.primitives
self.vertexes = geometry.vertexes
self.normals = geometry.normals
self.colors = geometry.colors
self.uvs = geometry.uvs
self.stateset = geometry.stateset
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
if self.stateset is not None:
output.write(self.encode("$#StateSet TRUE {\n"))
self.stateset.indent_level = self.indent_level + 2
self.stateset.write(output)
output.write(self.encode("$#}\n"))
if len(self.primitives):
output.write(self.encode("$#PrimitiveSetList %d {\n" % (len(self.primitives))))
for i in self.primitives:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
if self.vertexes:
self.vertexes.indent_level = self.indent_level + 1
self.vertexes.write(output)
if self.normals:
self.normals.indent_level = self.indent_level + 1
self.normals.write(output)
if self.colors:
self.colors.indent_level = self.indent_level + 1
self.colors.write(output)
if len(self.uvs) > 0:
output.write(self.encode("$#TexCoordData %d {\n" % (len(self.uvs))))
for i in self.uvs.values():
if i:
i.indent_level = self.indent_level + 2
i.write(output)
else:
emptyTexCoord = TexCoordArray()
emptyTexCoord.indent_level = self.indent_level + 2
emptyTexCoord.write(output)
output.write(self.encode("$#}\n"))
# animation node ######################################
class Bone(MatrixTransform):
def __init__(self, skeleton=None, bone=None, parent=None, **kwargs):
MatrixTransform.__init__(self, **kwargs)
self.dataVariance = "DYNAMIC"
self.parent = parent
self.skeleton = skeleton
self.bone = bone
self.inverse_bind_matrix = Matrix().to_4x4().identity()
def buildBoneChildren(self):
if self.skeleton is None or self.bone is None:
return
self.setName(self.bone.name)
update_callback = UpdateBone()
update_callback.setName(self.name)
self.update_callbacks.append(update_callback)
bone_matrix = self.bone.matrix_local.copy()
if self.parent:
parent_matrix = self.bone.parent.matrix_local.copy()
bone_matrix = parent_matrix.inverted() * bone_matrix
# add bind matrix in localspace callback
update_callback.stacked_transforms.append(StackedMatrixElement(name="bindmatrix", matrix=bone_matrix))
update_callback.stacked_transforms.append(StackedTranslateElement())
update_callback.stacked_transforms.append(StackedQuaternionElement())
update_callback.stacked_transforms.append(StackedScaleElement())
self.bone_inv_bind_matrix_skeleton = self.bone.matrix_local.copy().inverted()
if not self.bone.children:
return
for boneChild in self.bone.children:
b = Bone(self.skeleton, boneChild, self)
self.children.append(b)
b.buildBoneChildren()
def getMatrixInArmatureSpace(self):
return self.bone.matrix_local
def collect(self, d):
d[self.name] = self
for boneChild in self.children:
boneChild.collect(d)
def className(self):
return "Bone"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
Node.serializeContent(self, output)
Group.serializeContent(self, output)
MatrixTransform.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
matrix = self.bone_inv_bind_matrix_skeleton.copy()
output.write(self.encode("$#InvBindMatrixInSkeletonSpace {\n"))
self.writeMatrix(output, matrix)
class Skeleton(MatrixTransform):
def __init__(self, name="", matrix=None):
MatrixTransform.__init__(self)
self.boneDict = {}
self.matrix = matrix
self.setName(name)
self.update_callbacks = []
self.update_callbacks.append(UpdateSkeleton())
def collectBones(self):
self.boneDict = {}
for bone in self.children:
bone.collect(self.boneDict)
def getMatrixInArmatureSpace(self):
return self.matrix
def className(self):
return "Skeleton"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
Node.serializeContent(self, output)
Group.serializeContent(self, output)
MatrixTransform.serializeContent(self, output)
output.write(self.encode("$}\n"))
class RigGeometry(Geometry):
def __init__(self, *args, **kwargs):
Geometry.__init__(self, *args, **kwargs)
self.groups = {}
self.dataVariance = "DYNAMIC"
self.sourcegeometry = None
def className(self):
return "RigGeometry"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
Geometry.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#InfluenceMap %d {\n" % len(self.groups)))
if len(self.groups) > 0:
for name, grp in self.groups.items():
grp.indent_level = self.indent_level + 2
grp.write(output)
output.write(self.encode("$#}\n"))
if self.sourcegeometry is not None:
output.write(self.encode("$#SourceGeometry TRUE {\n"))
self.sourcegeometry.indent_level = self.indent_level + 2
self.sourcegeometry.write(output)
output.write(self.encode("$#}\n"))
class AnimationManagerBase(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.animations = []
def className(self):
return "AnimationManagerBase"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Animations %d {\n" % len(self.animations)))
for i in self.animations:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
class BasicAnimationManager(AnimationManagerBase):
def __init__(self, *args, **kwargs):
AnimationManagerBase.__init__(self, *args, **kwargs)
def className(self):
return "BasicAnimationManager"
def serialize(self, output):
AnimationManagerBase.serialize(self, output)
def serializeContent(self, output):
AnimationManagerBase.serializeContent(self, output)
class VertexGroup(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.vertexes = []
self.targetGroupName = "None"
def className(self):
return "VertexGroup"
def serialize(self, output):
self.setName(self.targetGroupName)
output.write(self.encode("$VertexInfluence \"%s\" %d {\n" % (self.targetGroupName, len(self.vertexes))))
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
for i in self.vertexes:
output.write(self.encode("$#%s %s\n" % (i[0], STRFLT(i[1]))))
class Animation(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.generateID()
self.channels = []
def className(self):
return "Animation"
def nameSpace(self):
return "osgAnimation"
def serialize(self, output):
output.write(self.encode("$%s {\n" % self.getNameSpaceClass()))
Object.serializeContent(self, output)
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Channels %d {\n" % len(self.channels)))
for i in self.channels:
i.indent_level = self.indent_level + 2
i.write(output)
output.write(self.encode("$#}\n"))
class Channel(Object):
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.keys = []
self.target = "none"
self.type = "Unknown"
def generateID(self):
return None
def className(self):
return "Channel"
def serialize(self, output):
output.write(self.encode("$Type %s {\n" % self.type))
self.serializeContent(output)
output.write(self.encode("$}\n"))
def serializeContent(self, output):
output.write(self.encode("$#Name %s\n" % self.name))
output.write(self.encode("$#TargetName %s\n" % self.target))
output.write(self.encode("$#KeyFrameContainer TRUE %d {\n" % (len(self.keys))))
for i in self.keys:
output.write(self.encode("$##"))
for a in range(0, len(i)):
output.write(self.encode(" %s" % (STRFLT(i[a]))))
output.write(self.encode("\n"))
output.write(self.encode("$#}\n"))
|
sketchfab/osgexport
|
blender-2.5/exporter/osg/osgobject.py
|
Python
|
gpl-2.0
| 48,200
|
# Valid-IP-checker-
# This program checks whether a given IPv4 address is valid
def ip_checkv4(ip):
    parts = ip.split(".")
    if len(parts) != 4:
        return "invalid IP: it should have exactly 4 octets"
    try:
        a, b, c, d = [int(part) for part in parts]
    except ValueError:
        return "invalid IP: every octet must be a number"
    if a <= 0 or a == 127:
        return "invalid IP address"
    elif d <= 0:
        return "host id should not be 0 or less than zero"
    elif a >= 255:
        return "octet A should be between 1 and 254"
    elif b >= 255 or b < 0:
        return "octet B should be between 0 and 254"
    elif c >= 255 or c < 0:
        return "octet C should be between 0 and 254"
    elif d >= 255:
        return "octet D should be between 1 and 254"
    else:
        return "Valid IP address: " + ip
p = raw_input("Enter IP address: ")
print ip_checkv4(p)
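# A shorter stdlib-based variant (a sketch, not the author's code): Python 2's
# socket.inet_aton parses dotted-quad addresses, but it also accepts shorthand
# like "1.2.3", so the octet-count check is still needed.
#
#   import socket
#   def is_valid_ipv4(ip):
#       try:
#           socket.inet_aton(ip)
#           return len(ip.split(".")) == 4
#       except socket.error:
#           return False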
|
0101a/Valid-IP-checker-
|
Ipv4.py
|
Python
|
gpl-2.0
| 918
|
""" A hierarchical Bayesian approach to linear regression with error in both X and Y.
"""
from linmix import LinMix
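# Minimal usage sketch (an assumption based on LinMix's published interface,
# not part of this file): fit y = alpha + beta * x with measurement errors
# xsig and ysig on both axes, then read the posterior chains.
#
#   lm = LinMix(x, y, xsig=xsig, ysig=ysig)
#   lm.run_mcmc(silent=True)
#   alpha, beta = lm.chain['alpha'].mean(), lm.chain['beta'].mean()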
|
martadesimone/Protoplanetarydisks
|
linmix/linmix/__init__.py
|
Python
|
gpl-2.0
| 117
|
from cell import Cell
from twodarray import TwoDArray
from time import sleep
from configparser import config
import random
class Life(object):
def __init__(self, height, width):
self.area = TwoDArray(width, height)
self.buffer_area = TwoDArray(width, height)
self.rows = height
self.cols = width
for x in range(self.area.width):
for y in range(self.area.height):
self.area.set(x, y, Cell())
self.buffer_area.set(x, y, Cell())
@staticmethod
def copy_cells(from_, to_):
for x in range(from_.width):
for y in range(from_.height):
to_.get(x, y).set_state(from_.get(x, y).get_state())
def __repr__(self):
        return self.__str__()
def __str__(self):
result = []
for cell in self.area:
result.append(str(cell))
result.append(' ') # TODO: not here...
return ''.join(result)
    def get_alive_neighbours(self, area, x, y):
        neighbours = 0
        for offset_x in range(-1, 2):
            for offset_y in range(-1, 2):
                if offset_x == offset_y == 0:
                    continue
                nx, ny = x + offset_x, y + offset_y
                if nx < 0 or ny < 0:
                    # guard against negative indices, which plain list
                    # indexing would silently wrap to the far edge
                    continue
                try:
                    current_cell = area.get(nx, ny)
                    if current_cell.is_alive():
                        neighbours += 1
                except IndexError:
                    pass
        return neighbours
def evolve(self):
Life.copy_cells(self.area, self.buffer_area)
for cell_num_x in range(self.area.width):
for cell_num_y in range(self.area.height):
neighbours = self.get_alive_neighbours(self.area, cell_num_x, cell_num_y)
curr_cell = self.buffer_area.get(cell_num_x, cell_num_y)
if ( neighbours == 3 and curr_cell.is_dead() ) or ( curr_cell.is_alive() and ( neighbours < 2 or neighbours > 3 ) ):
curr_cell.flip_state()
Life.copy_cells(self.buffer_area, self.area)
def randomize(self):
for cell in self.area:
if random.random() < float(config.random_alive_chance):
cell.set_alive()
else:
cell.set_dead()
def play_forever(self):
while 1:
print
print self
self.evolve()
            # for debugging only; keep commented out otherwise
#sys.exit(0)
sleep(float(config.evolve_interval))
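# Usage sketch (assumes the config module provides random_alive_chance and
# evolve_interval, as read above):
#
#   game = Life(height=24, width=80)
#   game.randomize()
#   game.play_forever()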
|
migglu/life
|
src/life.py
|
Python
|
gpl-2.0
| 2,013
|
# simplebuttons4.py
# Button with a border...
import sys
sys.path.append("../..")
from wax import *
WaxConfig.default_font = ("Verdana", 9)
class MainFrame(Frame):
def Body(self):
b = Button(self, "one")
b.SetSize((80, 80))
self.AddComponent(b, expand='both', border=15)
self.Pack()
app = Application(MainFrame)
app.MainLoop()
|
MSMBA/msmba-workflow
|
msmba-workflow/srclib/wax/examples/simplebuttons4.py
|
Python
|
gpl-2.0
| 368
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class RequisitosGarantia(models.Model):
_name = 'solicitudes.requisitos_garantia'
    solicitudes_id = fields.Many2one('solicitudes.solicitudes', string="File number")
    documentos_garantia_id = fields.Many2one('politicas.documentos_garantia', string="Document type")
    documento = fields.Binary(string='Document')
    observaciones = fields.Char(string='Remarks')
    valido = fields.Boolean(string='Valid')
    solicitudes_tipos_garantia_id = fields.Many2one(string='Guarantee', related='solicitudes_id.propuestas_tipos_garantia_id', readonly=True)
|
sani-coop/tinjaca
|
addons/solicitudes/models/requisitos_garantia.py
|
Python
|
gpl-2.0
| 648
|
#!/usr/bin/env python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Setuptools-based setup script for MDAnalysis.
A working installation of NumPy <http://numpy.scipy.org> is required.
For a basic installation just type the command::
python setup.py install
For more in-depth instructions, see the installation section at the
MDAnalysis Wiki:
https://github.com/MDAnalysis/mdanalysis/wiki/INSTALL
Also feel free to ask on the MDAnalysis mailing list for help:
http://groups.google.com/group/mdnalysis-discussion
(Note that the group really is called `mdnalysis-discussion' because
Google groups forbids any name that contains the string `anal'.)
"""
from __future__ import print_function
from setuptools import setup, Extension, find_packages
from distutils.ccompiler import new_compiler
import codecs
import os
import sys
import shutil
import tempfile
import warnings
# Make sure I have the right Python version.
if sys.version_info[:2] < (2, 7):
print('MDAnalysis requires Python 2.7 or better. Python {0:d}.{1:d} detected'.format(*
sys.version_info[:2]))
print('Please upgrade your version of Python.')
sys.exit(-1)
if sys.version_info[0] < 3:
import ConfigParser as configparser
open_kwargs = {}
else:
import configparser
open_kwargs = {'encoding': 'utf-8'}
# Handle cython modules
try:
from Cython.Distutils import build_ext
cython_found = True
cmdclass = {'build_ext': build_ext}
except ImportError:
cython_found = False
cmdclass = {}
# NOTE: keep in sync with MDAnalysis.__version__ in version.py
RELEASE = "0.16.0"
is_release = 'dev' not in RELEASE
if cython_found:
# cython has to be >=0.16 to support cython.parallel
import Cython
from Cython.Build import cythonize
from distutils.version import LooseVersion
required_version = "0.16"
if not LooseVersion(Cython.__version__) >= LooseVersion(required_version):
# We don't necessarily die here. Maybe we already have
# the cythonized '.c' files.
print("Cython version {0} was found but won't be used: version {1} "
"or greater is required because it offers a handy "
"parallelization module".format(
Cython.__version__, required_version))
cython_found = False
del Cython
del LooseVersion
else:
if not is_release:
print("*** package: Cython not found ***")
print("MDAnalysis requires cython for development builds")
sys.exit(1)
class Config(object):
"""Config wrapper class to get build options
This class looks for options in the environment variables and the
    'setup.cfg' file. The order in which we look for an option is:
    1. environment variable
    2. value set in 'setup.cfg'
    3. given default
Environment variables should start with 'MDA_' and be all uppercase.
Values passed to environment variables are checked (case-insensitively)
for specific strings with boolean meaning: 'True' or '1' will cause `True`
to be returned. '0' or 'False' cause `False` to be returned.
"""
def __init__(self, fname='setup.cfg'):
if os.path.exists(fname):
self.config = configparser.SafeConfigParser()
self.config.read(fname)
def get(self, option_name, default=None):
environ_name = 'MDA_' + option_name.upper()
if environ_name in os.environ:
val = os.environ[environ_name]
if val.upper() in ('1', 'TRUE'):
return True
elif val.upper() in ('0', 'FALSE'):
return False
return val
try:
option = self.config.get('options', option_name)
return option
except configparser.NoOptionError:
return default
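# Usage sketch for the three lookup tiers: with MDA_USE_OPENMP=0 exported in
# the environment, the first call returns False no matter what setup.cfg says.
#
#   conf = Config()
#   conf.get('use_openmp', default=True)    # env var > setup.cfg > default
#   conf.get('debug_cflags', default=False)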
class MDAExtension(Extension, object):
"""Derived class to cleanly handle setup-time (numpy) dependencies.
"""
# The only setup-time numpy dependency comes when setting up its
# include dir.
# The actual numpy import and call can be delayed until after pip
# has figured it must install numpy.
# This is accomplished by passing the get_numpy_include function
# as one of the include_dirs. This derived Extension class takes
# care of calling it when needed.
def __init__(self, *args, **kwargs):
self._mda_include_dirs = []
super(MDAExtension, self).__init__(*args, **kwargs)
@property
def include_dirs(self):
if not self._mda_include_dirs:
for item in self._mda_include_dir_args:
try:
self._mda_include_dirs.append(item()) #The numpy callable
except TypeError:
self._mda_include_dirs.append(item)
return self._mda_include_dirs
@include_dirs.setter
def include_dirs(self, val):
self._mda_include_dir_args = val
def get_numpy_include():
# Obtain the numpy include directory. This logic works across numpy
# versions.
# setuptools forgets to unset numpy's setup flag and we get a crippled
# version of it unless we do it ourselves.
try:
        # Python 3 renamed the ``__builtin__`` module into ``builtins``.
# Here we import the python 2 or the python 3 version of the module
# with the python 3 name. This could be done with ``six`` but that
# module may not be installed at that point.
import __builtin__ as builtins
except ImportError:
import builtins
builtins.__NUMPY_SETUP__ = False
try:
import numpy as np
except ImportError:
print('*** package "numpy" not found ***')
print('MDAnalysis requires a version of NumPy (>=1.10.4), even for setup.')
print('Please get it from http://numpy.scipy.org/ or install it through '
'your package manager.')
sys.exit(-1)
return np.get_include()
def hasfunction(cc, funcname, include=None, extra_postargs=None):
# From http://stackoverflow.com/questions/
# 7018879/disabling-output-when-compiling-with-distutils
tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
devnull = oldstderr = None
try:
try:
fname = os.path.join(tmpdir, 'funcname.c')
with open(fname, 'w') as f:
if include is not None:
f.write('#include {0!s}\n'.format(include))
f.write('int main(void) {\n')
f.write(' {0!s};\n'.format(funcname))
f.write('}\n')
# Redirect stderr to /dev/null to hide any error messages
# from the compiler.
# This will have to be changed if we ever have to check
# for a function on Windows.
devnull = open('/dev/null', 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(devnull.fileno(), sys.stderr.fileno())
objects = cc.compile([fname], output_dir=tmpdir,
extra_postargs=extra_postargs)
cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
except Exception:
return False
return True
finally:
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if devnull is not None:
devnull.close()
shutil.rmtree(tmpdir)
def detect_openmp():
"""Does this compiler support OpenMP parallelization?"""
print("Attempting to autodetect OpenMP support... ", end="")
compiler = new_compiler()
compiler.add_library('gomp')
include = '<omp.h>'
extra_postargs = ['-fopenmp']
hasopenmp = hasfunction(compiler, 'omp_get_num_threads()', include=include,
extra_postargs=extra_postargs)
if hasopenmp:
print("Compiler supports OpenMP")
else:
print("Did not detect OpenMP support.")
return hasopenmp
def extensions(config):
# dev installs must build their own cythonized files.
use_cython = config.get('use_cython', default=not is_release)
use_openmp = config.get('use_openmp', default=True)
if config.get('debug_cflags', default=False):
extra_compile_args = '\
-std=c99 -pedantic -Wall -Wcast-align -Wcast-qual -Wpointer-arith \
-Wchar-subscripts -Winline -Wnested-externs -Wbad-function-cast \
-Wunreachable-code -Werror'
define_macros = [('DEBUG', '1')]
else:
extra_compile_args = ''
define_macros = []
# Needed for large-file seeking under 32bit systems (for xtc/trr indexing
# and access).
largefile_macros = [
('_LARGEFILE_SOURCE', None),
('_LARGEFILE64_SOURCE', None),
('_FILE_OFFSET_BITS', '64')
]
has_openmp = detect_openmp()
if use_openmp and not has_openmp:
        print('No OpenMP-compatible compiler found; defaulting to serial build.')
parallel_args = ['-fopenmp'] if has_openmp and use_openmp else []
parallel_libraries = ['gomp'] if has_openmp and use_openmp else []
parallel_macros = [('PARALLEL', None)] if has_openmp and use_openmp else []
if use_cython:
print('Will attempt to use Cython.')
if not cython_found:
print("Couldn't find a Cython installation. "
"Not recompiling cython extensions.")
use_cython = False
else:
print('Will not attempt to use Cython.')
source_suffix = '.pyx' if use_cython else '.c'
# The callable is passed so that it is only evaluated at install time.
include_dirs = [get_numpy_include]
dcd = MDAExtension('coordinates._dcdmodule',
['MDAnalysis/coordinates/src/dcd.c'],
include_dirs=include_dirs + ['MDAnalysis/coordinates/include'],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
dcd_time = MDAExtension('coordinates.dcdtimeseries',
['MDAnalysis/coordinates/dcdtimeseries' + source_suffix],
include_dirs=include_dirs + ['MDAnalysis/coordinates/include'],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
distances = MDAExtension('lib.c_distances',
['MDAnalysis/lib/c_distances' + source_suffix],
include_dirs=include_dirs + ['MDAnalysis/lib/include'],
libraries=['m'],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
distances_omp = MDAExtension('lib.c_distances_openmp',
['MDAnalysis/lib/c_distances_openmp' + source_suffix],
include_dirs=include_dirs + ['MDAnalysis/lib/include'],
libraries=['m'] + parallel_libraries,
define_macros=define_macros + parallel_macros,
extra_compile_args=parallel_args,
extra_link_args=parallel_args)
qcprot = MDAExtension('lib.qcprot',
['MDAnalysis/lib/qcprot' + source_suffix],
include_dirs=include_dirs,
extra_compile_args=["-O3", "-ffast-math"])
transformation = MDAExtension('lib._transformations',
['MDAnalysis/lib/src/transformations/transformations.c'],
libraries=['m'],
define_macros=define_macros,
include_dirs=include_dirs,
extra_compile_args=extra_compile_args)
libmdaxdr = MDAExtension('lib.formats.libmdaxdr',
sources=['MDAnalysis/lib/formats/libmdaxdr' + source_suffix,
'MDAnalysis/lib/formats/src/xdrfile.c',
'MDAnalysis/lib/formats/src/xdrfile_xtc.c',
'MDAnalysis/lib/formats/src/xdrfile_trr.c',
'MDAnalysis/lib/formats/src/trr_seek.c',
'MDAnalysis/lib/formats/src/xtc_seek.c',
],
include_dirs=include_dirs + ['MDAnalysis/lib/formats/include',
'MDAnalysis/lib/formats'],
define_macros=largefile_macros)
util = MDAExtension('lib.formats.cython_util',
sources=['MDAnalysis/lib/formats/cython_util' + source_suffix],
include_dirs=include_dirs)
encore_utils = MDAExtension('analysis.encore.cutils',
sources = ['MDAnalysis/analysis/encore/cutils' + source_suffix],
include_dirs = include_dirs,
extra_compile_args = ["-O3", "-ffast-math"])
ap_clustering = MDAExtension('analysis.encore.clustering.affinityprop',
sources = ['MDAnalysis/analysis/encore/clustering/affinityprop' + source_suffix, 'MDAnalysis/analysis/encore/clustering/src/ap.c'],
include_dirs = include_dirs+['MDAnalysis/analysis/encore/clustering/include'],
libraries=["m"],
extra_compile_args=["-O3", "-ffast-math","-std=c99"])
spe_dimred = MDAExtension('analysis.encore.dimensionality_reduction.stochasticproxembed',
sources = ['MDAnalysis/analysis/encore/dimensionality_reduction/stochasticproxembed' + source_suffix, 'MDAnalysis/analysis/encore/dimensionality_reduction/src/spe.c'],
include_dirs = include_dirs+['MDAnalysis/analysis/encore/dimensionality_reduction/include'],
libraries=["m"],
extra_compile_args=["-O3", "-ffast-math","-std=c99"])
pre_exts = [dcd, dcd_time, distances, distances_omp, qcprot,
transformation, libmdaxdr, util, encore_utils,
ap_clustering, spe_dimred]
cython_generated = []
if use_cython:
extensions = cythonize(pre_exts)
for pre_ext, post_ext in zip(pre_exts, extensions):
for source in post_ext.sources:
if source not in pre_ext.sources:
cython_generated.append(source)
else:
#Let's check early for missing .c files
extensions = pre_exts
for ext in extensions:
for source in ext.sources:
if not (os.path.isfile(source) and
os.access(source, os.R_OK)):
raise IOError("Source file '{}' not found. This might be "
"caused by a missing Cython install, or a "
"failed/disabled Cython build.".format(source))
return extensions, cython_generated
def dynamic_author_list():
"""Generate __authors__ from AUTHORS
This function generates authors.py that contains the list of the
authors from the AUTHORS file. This avoids having that list maintained in
several places. Note that AUTHORS is sorted chronologically while we want
__authors__ in authors.py to be sorted alphabetically.
The authors are written in AUTHORS as bullet points under the
"Chronological list of authors" title.
"""
authors = []
with codecs.open('AUTHORS', encoding='utf-8') as infile:
# An author is a bullet point under the title "Chronological list of
# authors". We first want move the cursor down to the title of
# interest.
for line_no, line in enumerate(infile, start=1):
if line[:-1] == "Chronological list of authors":
break
else:
# If we did not break, it means we did not find the authors.
raise IOError('EOF before the list of authors')
# Skip the next line as it is the title underlining
line = next(infile)
line_no += 1
if line[:4] != '----':
raise IOError('Unexpected content on line {0}, '
'should be a string of "-".'.format(line_no))
# Add each bullet point as an author until the next title underlining
for line in infile:
if line[:4] in ('----', '====', '~~~~'):
# The previous line was a title, hopefully it did not start as
# a bullet point so it got ignored. Since we hit a title, we
# are done reading the list of authors.
break
elif line.strip()[:2] == '- ':
# This is a bullet point, so it should be an author name.
name = line.strip()[2:].strip()
authors.append(name)
# So far, the list of authors is sorted chronologically. We want it
    # sorted alphabetically by last name.
authors.sort(key=lambda name: name.split()[-1])
# Move Naveen and Elizabeth first, and Oliver last.
authors.remove('Naveen Michaud-Agrawal')
authors.remove('Elizabeth J. Denning')
authors.remove('Oliver Beckstein')
authors = (['Naveen Michaud-Agrawal', 'Elizabeth J. Denning']
+ authors + ['Oliver Beckstein'])
# Write the authors.py file.
out_path = 'MDAnalysis/authors.py'
with codecs.open(out_path, 'w', encoding='utf-8') as outfile:
# Write the header
header = '''\
#-*- coding:utf-8 -*-
# This file is generated from the AUTHORS file during the installation process.
# Do not edit it as your changes will be overwritten.
'''
print(header, file=outfile)
# Write the list of authors as a python list
template = u'__authors__ = [\n{}\n]'
author_string = u',\n'.join(u' u"{}"'.format(name)
for name in authors)
print(template.format(author_string), file=outfile)
if __name__ == '__main__':
try:
dynamic_author_list()
except (OSError, IOError):
warnings.warn('Cannot write the list of authors.')
with open("SUMMARY.txt") as summary:
LONG_DESCRIPTION = summary.read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
'Programming Language :: C',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries :: Python Modules',
]
config = Config()
exts, cythonfiles = extensions(config)
setup(name='MDAnalysis',
version=RELEASE,
description='An object-oriented toolkit to analyze molecular dynamics '
'trajectories generated by CHARMM, Gromacs, NAMD, LAMMPS, or Amber.',
long_description=LONG_DESCRIPTION,
author='Naveen Michaud-Agrawal',
author_email='[email protected]',
maintainer='Richard Gowers',
maintainer_email='[email protected]',
url='http://www.mdanalysis.org',
download_url='https://github.com/MDAnalysis/mdanalysis/releases',
provides=['MDAnalysis'],
license='GPL 2',
packages=find_packages(),
package_dir={'MDAnalysis': 'MDAnalysis'},
ext_package='MDAnalysis',
ext_modules=exts,
classifiers=CLASSIFIERS,
cmdclass=cmdclass,
requires=['numpy (>=1.10.4)', 'biopython', 'mmtf (>=1.0.0)',
'networkx (>=1.0)', 'GridDataFormats (>=0.3.2)', 'joblib'],
# all standard requirements are available through PyPi and
# typically can be installed without difficulties through setuptools
setup_requires=[
'numpy>=1.9.3',
],
install_requires=[
'numpy>=1.10.4',
'biopython>=1.59',
'networkx>=1.0',
'GridDataFormats>=0.3.2',
'six>=1.4.0',
'mmtf-python>=1.0.0',
'joblib',
],
# extras can be difficult to install through setuptools and/or
# you might prefer to use the version available through your
# packaging system
extras_require={
'AMBER': ['netCDF4>=1.0'], # for AMBER netcdf, also needs HDF5
# and netcdf-4
'analysis': [
'matplotlib>=1.5.1',
'scipy',
'seaborn', # for annotated heat map and nearest neighbor
# plotting in PSA
'sklearn', # For clustering and dimensionality reduction
# functionality in encore
],
},
test_suite="MDAnalysisTests",
tests_require=[
'nose>=1.3.7',
'MDAnalysisTests=={0}'.format(RELEASE), # same as this release!
],
zip_safe=False, # as a zipped egg the *.so files are not found (at
# least in Ubuntu/Linux)
)
# Releases keep their cythonized stuff for shipping.
if not config.get('keep_cythonized', default=is_release):
for cythonized in cythonfiles:
try:
os.unlink(cythonized)
except OSError as err:
print("Warning: failed to delete cythonized file {0}: {1}. "
"Moving on.".format(cythonized, err.strerror))
|
alejob/mdanalysis
|
package/setup.py
|
Python
|
gpl-2.0
| 22,845
|
# -*- coding: utf-8 -*-
import ctypes
from cpylmnl.linux.netfilter import nfnetlinkh as nfnl
from cpylmnl.linux.netfilter import nfnetlink_compath as nfnlcm
try:
from enum import Enum
except ImportError:
Enum = object
# enum
CONNTRACK = nfnl.NFNL_SUBSYS_CTNETLINK
EXPECT = nfnl.NFNL_SUBSYS_CTNETLINK_EXP
# Subscribe to all possible conntrack event groups. Use this
# flag in case that you want to catch up all the possible
# events. Do not use this flag for dumping or any other
# similar operation.
NFCT_ALL_CT_GROUPS = (nfnlcm.NF_NETLINK_CONNTRACK_NEW
|nfnlcm.NF_NETLINK_CONNTRACK_UPDATE
|nfnlcm.NF_NETLINK_CONNTRACK_DESTROY)
## conntrack attributes
class ConntrackAttr(Enum):
    ATTR_ORIG_IPV4_SRC = 0 # u32 bits
ATTR_IPV4_SRC = ATTR_ORIG_IPV4_SRC # alias
ATTR_ORIG_IPV4_DST = 1 # u32 bits
ATTR_IPV4_DST = ATTR_ORIG_IPV4_DST # alias
ATTR_REPL_IPV4_SRC = 2 # u32 bits
ATTR_REPL_IPV4_DST = 3 # u32 bits
ATTR_ORIG_IPV6_SRC = 4 # u128 bits
ATTR_IPV6_SRC = ATTR_ORIG_IPV6_SRC # alias
ATTR_ORIG_IPV6_DST = 5 # u128 bits
ATTR_IPV6_DST = ATTR_ORIG_IPV6_DST # alias
ATTR_REPL_IPV6_SRC = 6 # u128 bits
ATTR_REPL_IPV6_DST = 7 # u128 bits
ATTR_ORIG_PORT_SRC = 8 # u16 bits
ATTR_PORT_SRC = ATTR_ORIG_PORT_SRC # alias
ATTR_ORIG_PORT_DST = 9 # u16 bits
ATTR_PORT_DST = ATTR_ORIG_PORT_DST # alias
ATTR_REPL_PORT_SRC = 10 # u16 bits
ATTR_REPL_PORT_DST = 11 # u16 bits
ATTR_ICMP_TYPE = 12 # u8 bits
ATTR_ICMP_CODE = 13 # u8 bits
ATTR_ICMP_ID = 14 # u16 bits
ATTR_ORIG_L3PROTO = 15 # u8 bits
ATTR_L3PROTO = ATTR_ORIG_L3PROTO # alias
ATTR_REPL_L3PROTO = 16 # u8 bits
ATTR_ORIG_L4PROTO = 17 # u8 bits
ATTR_L4PROTO = ATTR_ORIG_L4PROTO # alias
ATTR_REPL_L4PROTO = 18 # u8 bits
ATTR_TCP_STATE = 19 # u8 bits
ATTR_SNAT_IPV4 = 20 # u32 bits
ATTR_DNAT_IPV4 = 21 # u32 bits
ATTR_SNAT_PORT = 22 # u16 bits
ATTR_DNAT_PORT = 23 # u16 bits
ATTR_TIMEOUT = 24 # u32 bits
ATTR_MARK = 25 # u32 bits
ATTR_ORIG_COUNTER_PACKETS = 26 # u64 bits
ATTR_REPL_COUNTER_PACKETS = 27 # u64 bits
ATTR_ORIG_COUNTER_BYTES = 28 # u64 bits
ATTR_REPL_COUNTER_BYTES = 29 # u64 bits
ATTR_USE = 30 # u32 bits
ATTR_ID = 31 # u32 bits
ATTR_STATUS = 32 # u32 bits
ATTR_TCP_FLAGS_ORIG = 33 # u8 bits
ATTR_TCP_FLAGS_REPL = 34 # u8 bits
ATTR_TCP_MASK_ORIG = 35 # u8 bits
ATTR_TCP_MASK_REPL = 36 # u8 bits
ATTR_MASTER_IPV4_SRC = 37 # u32 bits
ATTR_MASTER_IPV4_DST = 38 # u32 bits
ATTR_MASTER_IPV6_SRC = 39 # u128 bits
ATTR_MASTER_IPV6_DST = 40 # u128 bits
ATTR_MASTER_PORT_SRC = 41 # u16 bits
ATTR_MASTER_PORT_DST = 42 # u16 bits
ATTR_MASTER_L3PROTO = 43 # u8 bits
ATTR_MASTER_L4PROTO = 44 # u8 bits
ATTR_SECMARK = 45 # u32 bits
ATTR_ORIG_NAT_SEQ_CORRECTION_POS = 46 # u32 bits
ATTR_ORIG_NAT_SEQ_OFFSET_BEFORE = 47 # u32 bits
ATTR_ORIG_NAT_SEQ_OFFSET_AFTER = 48 # u32 bits
ATTR_REPL_NAT_SEQ_CORRECTION_POS = 49 # u32 bits
ATTR_REPL_NAT_SEQ_OFFSET_BEFORE = 50 # u32 bits
ATTR_REPL_NAT_SEQ_OFFSET_AFTER = 51 # u32 bits
ATTR_SCTP_STATE = 52 # u8 bits
ATTR_SCTP_VTAG_ORIG = 53 # u32 bits
ATTR_SCTP_VTAG_REPL = 54 # u32 bits
ATTR_HELPER_NAME = 55 # string (30 bytes max)
ATTR_DCCP_STATE = 56 # u8 bits
ATTR_DCCP_ROLE = 57 # u8 bits
ATTR_DCCP_HANDSHAKE_SEQ = 58 # u64 bits
ATTR_TCP_WSCALE_ORIG = 59 # u8 bits
ATTR_TCP_WSCALE_REPL = 60 # u8 bits
ATTR_ZONE = 61 # u16 bits
ATTR_SECCTX = 62 # string
ATTR_TIMESTAMP_START = 63 # u64 bits, linux >= 2.6.38
ATTR_TIMESTAMP_STOP = 64 # u64 bits, linux >= 2.6.38
ATTR_HELPER_INFO = 65 # variable length
ATTR_CONNLABELS = 66 # variable length
ATTR_CONNLABELS_MASK = 67 # variable length
ATTR_MAX = 68
## conntrack attribute groups
class ConntrackAttrGrp(Enum):
ATTR_GRP_ORIG_IPV4 = 0 # struct nfct_attr_grp_ipv4
ATTR_GRP_REPL_IPV4 = 1 # struct nfct_attr_grp_ipv4
ATTR_GRP_ORIG_IPV6 = 2 # struct nfct_attr_grp_ipv6
ATTR_GRP_REPL_IPV6 = 3 # struct nfct_attr_grp_ipv6
ATTR_GRP_ORIG_PORT = 4 # struct nfct_attr_grp_port
ATTR_GRP_REPL_PORT = 5 # struct nfct_attr_grp_port
ATTR_GRP_ICMP = 6 # struct nfct_attr_grp_icmp
ATTR_GRP_MASTER_IPV4 = 7 # struct nfct_attr_grp_ipv4
ATTR_GRP_MASTER_IPV6 = 8 # struct nfct_attr_grp_ipv6
ATTR_GRP_MASTER_PORT = 9 # struct nfct_attr_grp_port
ATTR_GRP_ORIG_COUNTERS = 10 # struct nfct_attr_grp_ctrs
ATTR_GRP_REPL_COUNTERS = 11 # struct nfct_attr_grp_ctrs
ATTR_GRP_ORIG_ADDR_SRC = 12 # union nfct_attr_grp_addr
ATTR_GRP_ORIG_ADDR_DST = 13 # union nfct_attr_grp_addr
ATTR_GRP_REPL_ADDR_SRC = 14 # union nfct_attr_grp_addr
ATTR_GRP_REPL_ADDR_DST = 15 # union nfct_attr_grp_addr
ATTR_GRP_MAX = 16
class AttrGrpIpv4(ctypes.Structure):
"""struct nfct_attr_grp_ipv4
uint32_t src, dst
"""
_fields_ = [("src", ctypes.c_uint32),
("dst", ctypes.c_uint32)]
class AttrGrpIpv6(ctypes.Structure):
"""struct nfct_attr_grp_ipv6
uint32_t src[4], dst[4]
"""
_fields_ = [("src", (ctypes.c_uint32 * 4)),
("dst", (ctypes.c_uint32 * 4))]
class AttrGrpPort(ctypes.Structure):
"""struct nfct_attr_grp_port
uint16_t sport, dport
"""
_fields_ = [("sport", ctypes.c_uint16),
("dport", ctypes.c_uint16)]
class AttrGrpIcmp(ctypes.Structure):
"""struct nfct_attr_grp_icmp
"""
_fields_ = [("id", ctypes.c_uint16), # uint16_t id
("code", ctypes.c_uint8), # uint8_t code, type
("type", ctypes.c_uint8)]
class AttrGrpCtrs(ctypes.Structure):
"""struct nfct_attr_grp_ctrs
"""
_fields_ = [("packets", ctypes.c_uint64), # uint64_t packets
("bytes", ctypes.c_uint64)] # uint64_t bytes
class AttrGrpAddr(ctypes.Union):
"""union nfct_attr_grp_addr
"""
_fields_ = [("ip", ctypes.c_uint32), # uint32_t ip
("ip6", (ctypes.c_uint32 * 4)), # uint32_t ip6[4]
("addr", (ctypes.c_uint32 * 4))] # uint32_t addr[4]
## message type
# enum nf_conntrack_msg_type
class ConntrackMsgType(Enum):
NFCT_T_UNKNOWN = 0
NFCT_T_NEW_BIT = 0
NFCT_T_NEW = (1 << NFCT_T_NEW_BIT)
NFCT_T_UPDATE_BIT = 1
NFCT_T_UPDATE = (1 << NFCT_T_UPDATE_BIT)
NFCT_T_DESTROY_BIT = 2
NFCT_T_DESTROY = (1 << NFCT_T_DESTROY_BIT)
NFCT_T_ALL = NFCT_T_NEW | NFCT_T_UPDATE | NFCT_T_DESTROY
NFCT_T_ERROR_BIT = 31
NFCT_T_ERROR = (1 << NFCT_T_ERROR_BIT)
## set option
# enum
NFCT_SOPT_UNDO_SNAT = 0
NFCT_SOPT_UNDO_DNAT = 1
NFCT_SOPT_UNDO_SPAT = 2
NFCT_SOPT_UNDO_DPAT = 3
NFCT_SOPT_SETUP_ORIGINAL = 4
NFCT_SOPT_SETUP_REPLY = 5
__NFCT_SOPT_MAX = 6
NFCT_SOPT_MAX = (__NFCT_SOPT_MAX - 1)
## get option
# enum
NFCT_GOPT_IS_SNAT = 0
NFCT_GOPT_IS_DNAT = 1
NFCT_GOPT_IS_SPAT = 2
NFCT_GOPT_IS_DPAT = 3
__NFCT_GOPT_MAX = 4
NFCT_GOPT_MAX = (__NFCT_GOPT_MAX - 1)
## print
## output type
# enum
NFCT_O_PLAIN = 0
NFCT_O_DEFAULT = NFCT_O_PLAIN
NFCT_O_XML = 1
NFCT_O_MAX = 2
## output flags
# enum
NFCT_OF_SHOW_LAYER3_BIT = 0
NFCT_OF_SHOW_LAYER3 = (1 << NFCT_OF_SHOW_LAYER3_BIT)
NFCT_OF_TIME_BIT = 1
NFCT_OF_TIME = (1 << NFCT_OF_TIME_BIT)
NFCT_OF_ID_BIT = 2
NFCT_OF_ID = (1 << NFCT_OF_ID_BIT)
NFCT_OF_TIMESTAMP_BIT = 3
NFCT_OF_TIMESTAMP = (1 << NFCT_OF_TIMESTAMP_BIT)
## comparison
# enum
NFCT_CMP_ALL = 0
NFCT_CMP_ORIG = (1 << 0)
NFCT_CMP_REPL = (1 << 1)
NFCT_CMP_TIMEOUT_EQ = (1 << 2)
NFCT_CMP_TIMEOUT_GT = (1 << 3)
NFCT_CMP_TIMEOUT_GE = (NFCT_CMP_TIMEOUT_EQ | NFCT_CMP_TIMEOUT_GT)
NFCT_CMP_TIMEOUT_LT = (1 << 4)
NFCT_CMP_TIMEOUT_LE = (NFCT_CMP_TIMEOUT_EQ | NFCT_CMP_TIMEOUT_LT)
NFCT_CMP_MASK = (1 << 5)
NFCT_CMP_STRICT = (1 << 6)
## copy
# enum
NFCT_CP_ALL = 0
NFCT_CP_ORIG = (1 << 0)
NFCT_CP_REPL = (1 << 1)
NFCT_CP_META = (1 << 2)
NFCT_CP_OVERRIDE = (1 << 3)
## event filtering
class FilterProto(ctypes.Structure):
"""struct nfct_filter_proto
"""
_fields_ = [("proto", ctypes.c_uint16), # uint16_t proto
("state", ctypes.c_uint16)] # uint16_t state
class FilterIpv4(ctypes.Structure):
"""struct nfct_filter_ipv4
"""
_fields_ = [("addr", ctypes.c_uint32), # uint32_t addr
("mask", ctypes.c_uint32)] # uint32_t mask
class FilterIpv6(ctypes.Structure):
"""struct nfct_filter_ipv6
"""
_fields_ = [("addr", (ctypes.c_uint32 * 4)), # uint32_t addr[4]
("mask", (ctypes.c_uint32 * 4))] # uint32_t mask[4]
class FilterAttr(Enum):
NFCT_FILTER_L4PROTO = 0 # uint32_t
NFCT_FILTER_L4PROTO_STATE = 1 # struct nfct_filter_proto
NFCT_FILTER_SRC_IPV4 = 2 # struct nfct_filter_ipv4
NFCT_FILTER_DST_IPV4 = 3 # struct nfct_filter_ipv4
NFCT_FILTER_SRC_IPV6 = 4 # struct nfct_filter_ipv6
NFCT_FILTER_DST_IPV6 = 5 # struct nfct_filter_ipv6
NFCT_FILTER_MARK = 6 # struct nfct_filter_dump_mark
NFCT_FILTER_MAX = 7
class FilterLogic(Enum):
NFCT_FILTER_LOGIC_POSITIVE = 0
NFCT_FILTER_LOGIC_NEGATIVE = 1
NFCT_FILTER_LOGIC_MAX = 2
## dump filtering
class FilterDumpMark(ctypes.Structure):
"""struct nfct_filter_dump_mark
"""
_fields_ = [("val", ctypes.c_uint32), # uint32_t val
("mask", ctypes.c_uint32)] # uint32_t mask
class FilterDumpAttr(Enum):
NFCT_FILTER_DUMP_MARK = 0 # struct nfct_filter_dump_mark
NFCT_FILTER_DUMP_L3NUM = 1 # uint8_t
NFCT_FILTER_DUMP_MAX = 2
## expect attributes
class ExpectAttr(Enum):
ATTR_EXP_MASTER = 0 # pointer to conntrack object
ATTR_EXP_EXPECTED = 1 # pointer to conntrack object
ATTR_EXP_MASK = 2 # pointer to conntrack object
ATTR_EXP_TIMEOUT = 3 # u32 bits
ATTR_EXP_ZONE = 4 # u16 bits
ATTR_EXP_FLAGS = 5 # u32 bits
ATTR_EXP_HELPER_NAME = 6 # string (16 bytes max)
ATTR_EXP_CLASS = 7 # u32 bits
ATTR_EXP_NAT_TUPLE = 8 # pointer to conntrack object
ATTR_EXP_NAT_DIR = 9 # u8 bits
ATTR_EXP_FN = 10 # string
ATTR_EXP_MAX = 11
|
chamaken/cpylmnfct
|
cpylmnfct/_libnetfilter_conntrackh.py
|
Python
|
gpl-2.0
| 13,117
|
from flask import Flask
from flask.ext import restful
from flask.ext.restful import Resource, reqparse
from lxml import html
import urllib2
app = Flask(__name__)
api = restful.Api(app)
parser = reqparse.RequestParser()
parser.add_argument('url', type=str, location='form')
parser.add_argument('xpath', type=str, location='form')
parser.add_argument('attribute', type=str, location='form')
class SimpleExtractor(Resource):
def post(self, **kwargs):
args = parser.parse_args()
source_url = args['url']
element_xpath = args['xpath']
element_attribute = args['attribute']
result = self.parse_html(source_url, element_xpath, element_attribute)
        results = {'elements': [{'value': result}]}
        # flask-restful serializes dicts itself; returning the raw dict
        # avoids double-encoding the JSON payload
        return results
    def get(self):
        # 'result' was undefined here (a NameError); GET carries no form
        # parameters, so just point callers at the POST endpoint
        return {'elements': [], 'message': 'POST url, xpath and attribute to /extract'}
    def parse_html(self, source_url, element_xpath="//title", element_attribute=None):
request = urllib2.urlopen(source_url)
page = request.read()
tree = html.fromstring(page)
elements = tree.xpath(element_xpath)
if len(elements) == 0:
return ''
elem_value = elements[0].attrib[element_attribute] if element_attribute else elements[0].text
return elem_value
class BaseExtractor(Resource):
def get(self):
return {'value':'A simple extraction service'}
api.add_resource(BaseExtractor, '/')
api.add_resource(SimpleExtractor, '/extract')
if __name__ == '__main__':
app.run(debug=True)
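# Usage sketch (assuming the service runs on Flask's default 127.0.0.1:5000):
#
#   import requests
#   r = requests.post('http://127.0.0.1:5000/extract',
#                     data={'url': 'http://example.com',
#                           'xpath': '//title',
#                           'attribute': ''})
#   print r.json()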
|
sparkica/simex-service
|
service.py
|
Python
|
gpl-2.0
| 1,442
|
# coding:utf-8
'''
8b d8 88
Y8, ,8P ,d 88
Y8, ,8P 88 88
"8aa8" ,adPPYba, 88 88 MM88MMM 88,dPPYba,
`88' a8" "8a 88 88 88 88P' "8a
88 8b d8 88 88 88 88 88
88 "8a, ,a8" "8a, ,a88 88, 88 88
88 `"YbbdP"' `"YbbdP'Y8 "Y888 88 88
88888888ba 88
88 "8b 88
88 ,8P 88
88aaaaaa8P' 8b,dPPYba, ,adPPYba, ,adPPYYba, ,adPPYb,88
88""""""8b, 88P' "Y8 a8P_____88 "" `Y8 a8" `Y88
88 `8b 88 8PP""""""" ,adPPPPP88 8b 88
88 a8P 88 "8b, ,aa 88, ,88 "8a, ,d88
88888888P" 88 `"Ybbd8"' `"8bbdP"Y8 `"8bbdP"Y8
'''
import os
import requests
import json
import sys
import re
import datetime
import time
from bs4 import BeautifulSoup
from tabulate import tabulate
reload(sys)
sys.setdefaultencoding('utf8')
class KillMail_Character(object):
    def __init__(self, url):
        # Check network connectivity and confirm the subject (pilot/corp/alliance)
        self.URL_kill = url
        global client
        client = requests.session()
        r = client.get(self.URL_kill)
        if r.status_code == requests.codes.ok:
            print 'network connection OK'
            soup = BeautifulSoup(r.text, 'html.parser')
            ans = soup.find('div', class_='content')
            if ans.span:
                print 'found', url.split('/')[-3], ans.span.text
                self.URL_lost = self.URL_kill + '/losts/'
                self.URL_atk = self.URL_kill + '/atk/'
            else:
                print 'please check the pilot/corp/alliance URL'
                sys.exit()
        else:
            print 'network connection error'
            r.raise_for_status()
    def latest_km(self, time=1):
        # Return basic killmail info from the first page of the subject's
        # "kills", "losses" and "assists" tabs
        killmail = {'kill': [], 'lost': [], 'atk': []}
        if self.URL_kill:
            killmail['kill'] = self.get_km_basic_info(self.URL_kill)
            print 'fetched latest kill KMs:', len(killmail['kill'])
            killmail['lost'] = self.get_km_basic_info(self.URL_lost)
            print 'fetched latest loss KMs:', len(killmail['lost'])
            killmail['atk'] = self.get_km_basic_info(self.URL_atk)
            print 'fetched latest assist KMs:', len(killmail['atk'])
        else:
            print 'ERROR'
        return killmail
def get_km_basic_info(self, url):
        # Return basic info for every killmail listed on the page at url.
        # NOTE: the Chinese string literals below match text scraped from
        # kb.ceve-market.org and must stay untranslated.
r = client.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
a = soup.find('table',id='kbtable').find('tbody').find_all('tr')
info = []
url_info = []
killmail = []
for km in a[:-1]:
for n in km.find_all('td'):
if n.find_all(href=re.compile("/pilot/0")) != []:
info.append('未知角色')
if n.find_all(style='color:grey') != []:
null_alli_tag = 1
else:
null_alli_tag = 0
if n.find_all(href=re.compile("/corp/None")) != []:
null_corp_tag = 1
else:
null_corp_tag = 0
for text in n.text.split('\n'):
if len(text) != 0:
if text == '无联盟' and null_alli_tag == 1:
info.append('无联盟(NULL)')
if null_corp_tag == 1:
info.append('无军团(NULL)')
elif text == '未知':
pass
else:
info.append(text)
url_info.append(km['onclick'].split('\'')[-2])
while info:
killmail.append({
'victim_ship': info.pop(0),
'victim_shiptype': info.pop(0),
'victim': info.pop(0),
'victim_alli': info.pop(0),
'victim_corp': info.pop(0),
'final_blow': info.pop(0),
'alliance': info.pop(0),
'corp': info.pop(0),
'location': info.pop(0),
'time': info.pop(0),
'url': url_info.pop(0)
})
return killmail
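# Usage sketch (the URL shape is inferred from the code above; <id> is a
# placeholder for a pilot id on kb.ceve-market.org):
#
#   km = KillMail_Character('http://kb.ceve-market.org/pilot/<id>/')
#   latest = km.latest_km()
#   for k in latest['kill']:
#       print k['time'], k['victim'], k['victim_ship']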
class KillMail_Details(object):
def __init__(self, url):
if url:
self.url = 'http://kb.ceve-market.org' + url
global client
global soup
client = requests.session()
r = client.get(self.url)
            if r.status_code == requests.codes.ok:
                print 'network connection OK'
                soup = BeautifulSoup(r.text, 'html.parser')
                self.data_set = soup.find_all('div', class_='customjumbotron')
            else:
                print 'network connection error'
                r.raise_for_status()
        else:
            print 'killmail URL is missing or malformed'
            sys.exit()
def km_victim_detail(self):
        # Return the victim's details
info = map(lambda x: x.strip(), filter(
lambda x: len(x.strip()) > 0, self.data_set[0].text.split('\n')))
info.pop(info.index('相关击坠'))
killmail_info = {
'victim_character': info.pop(0)[2:],
'corp': info.pop(0)[2:],
'alliance': info.pop(0)[2:],
'ship': info.pop(0)[2:],
'time': info.pop(0)[2:],
'location': info.pop(0)[2:],
'damage_taken': info.pop(0)[4:],
'value': info.pop(0)[4:]
}
return killmail_info
def km_lastblow_detail(self):
        # Return details of the pilot who landed the final blow
info = map(lambda x: x.strip(), filter(
lambda x: len(x.strip()) > 0, self.data_set[1].text.split('\n')))
killmail_info = {
'last_blow_character': info.pop(0),
'corp': info.pop(0),
'alliance': info.pop(0)
}
        # Matching ship and weapon info (from the img title attributes)
info = map(lambda x: x.get('title'), filter(
lambda x: x.get('title'), self.data_set[1].find_all('img')))
killmail_info.update({'ship': info.pop(0)})
killmail_info.update({'weapon': info.pop(0)})
return killmail_info
def km_comp_detail(self):
        # Damage-source composition of the killmail, grouped by alliance and by ship
info_set = self.data_set[2].find_all('td')
info = map(lambda x: x.strip(), filter(
lambda x: len(x.strip()) > 0, info_set[0].text.split('\n')))
killmail_info = {'com_alliance': info}
info = map(lambda x: x.strip(), filter(
lambda x: len(x.strip()) > 0, info_set[1].text.split('\n')))
killmail_info.update({'com_ship': info})
return killmail_info
def km_involve_detail(self):
        # Per-attacker damage details
info = []
killmail_info = []
info = map(lambda x: x.strip(), filter(
lambda x: len(x.strip()) > 0, self.data_set[3].text.split('\n')))
info.pop(info.index('最后一击!'))
while info:
killmail_info.append({
'character': info.pop(0),
'corp': info.pop(0),
'alliance': info.pop(0),
'damage': info.pop(0)
})
        # Matching ship and weapon info (from the img title attributes)
info = map(lambda x: x.get('title'), filter(
lambda x: x.get('title'), self.data_set[3].find_all('img')))
for n in xrange(len(killmail_info)):
killmail_info[n].update({
'ship': info.pop(0),
'weapon': info.pop(0)
})
return killmail_info
def km_fit_detail(self):
        # High/mid/low slot fitting details plus lost-value breakdown
data_set = soup.find(id='kl-detail-shipdetails')
info = []
killmail = {}
for text in data_set.text.split('\n'):
if len(text.strip()) != 0:
if "装配" in text.strip() or "无人机挂舱" in text.strip() \
or "货舱" in text.strip() or "其他" in text.strip() or "损失价值" in text.strip():
info.append([])
if text.strip() != "价值":
info[-1].append(text.strip())
for n in info[:-1]:
slot_name = n.pop(0)
killmail[slot_name] = []
while n:
killmail[slot_name].append({
'item': n.pop(0),
'num': n.pop(0),
'value': n.pop(0)
})
killmail['values'] = []
while info[-1]:
killmail['values'].append({
'type': info[-1].pop(0),
'value': info[-1].pop(0)
})
return killmail
def km_fit_info(self):
        # Detailed fitting parameters
data_set = soup.find('div', id='attr_Panel').find(
'div', class_='row col-md-12')
data_set = filter(
lambda x: len(x.strip()) != 0, data_set.text.split('\n'))
        # Engineering section, 6 rows:
        # engineering, capacitor, recharge rate, CPU, powergrid, modified values
for thing in data_set[0:6]:
print thing
        # Offense section, 4 rows:
        # offense, turret DPS, missile DPS, smartbomb DPS
for thing in data_set[6:10]:
print thing
        # Defense section, 20 rows:
        # defense, omni-effective/peak/sustained defense, 'effective HP' label, min/blended/max effective HP
        # 5 shield rows: HP, EM, thermal, kinetic, explosive
        # 5 armor rows: HP, EM, thermal, kinetic, explosive
        # 5 hull rows: HP, EM, thermal, kinetic, explosive
for thing in data_set[10:30]:
print thing
        # Repair section, 9 rows:
        # repair, shield repair x2, armor repair x2, hull repair x2, passive shield recharge x2
for thing in data_set[30:39]:
print thing
        # Navigation section, 6 rows:
        # navigation, max velocity, agility, align time, warp speed, warp disruption resistance strength
for thing in data_set[39:45]:
print thing
        # Targeting section, 5 rows:
        # targeting, max locked targets/sensor strength, lock range, scan resolution, signature radius
for thing in data_set[45:50]:
print thing
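# Minimal usage sketch for KillMail_Details. The path below is a hypothetical
# example of the relative detail-page URL scraped into the 'url' field above:
#
# details = KillMail_Details('/kill/123456/')
# victim = details.km_victim_detail()
# print victim['ship'], victim['damage_taken']
# for attacker in details.km_involve_detail():
#     print attacker['character'], attacker['damage']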
class KillMail_Search():
# seems broken
def __init__(self):
self.url = 'http://kb.ceve-market.org/ajax_search/'
global client
global soup
client = requests.session()
r = client.get(self.url)
if r.status_code == requests.codes.ok:
            print 'Network connection OK'
soup = BeautifulSoup(r.text, 'html.parser')
self.data_set = soup.find_all('div', class_='customjumbotron')
else:
            print 'Network connection error'
r.raise_for_status()
def search(self, type, name):
URL = self.url
client.get(URL)
csrftoken_cookies = client.cookies['csrftoken']
end_time = time.strftime('%Y-%m-%d %H:%M')
start_time = datetime.datetime.now() + datetime.timedelta(days=-1)
search_data = dict(
searchtype=type,
name=name,
type='lost',
shiptype='shiptype',
systemtype='sys',
ship='',
system='',
starttime='',
endtime='',
prev='',
next='',
csrfmiddlewaretoken=csrftoken_cookies,
)
r = client.post(URL, data=search_data, headers=dict(Referer=URL))
soup = BeautifulSoup(r.text, 'html.parser')
a = soup.find('table',id='kbtable').find('tbody').find_all('tr')
info = []
url_info = []
killmail = []
for km in a[:-1]:
for n in km.find_all('td'):
if n.find_all(href=re.compile("/pilot/0")) != []:
info.append('未知角色')
if n.find_all(style='color:grey') != []:
null_alli_tag = 1
else:
null_alli_tag = 0
if n.find_all(href=re.compile("/corp/None")) != []:
null_corp_tag = 1
else:
null_corp_tag = 0
for text in n.text.split('\n'):
if len(text) != 0:
if text == '无联盟' and null_alli_tag == 1:
info.append('无联盟(NULL)')
if null_corp_tag == 1:
info.append('无军团(NULL)')
elif text == '未知':
pass
else:
info.append(text)
url_info.append(km['onclick'].split('\'')[-2])
while info:
killmail.append({
'victim_ship': info.pop(0),
'victim_shiptype': info.pop(0),
'victim': info.pop(0),
'victim_alli': info.pop(0),
'victim_corp': info.pop(0),
'final_blow': info.pop(0),
'alliance': info.pop(0),
'corp': info.pop(0),
'location': info.pop(0),
'time': info.pop(0),
'url': url_info.pop(0)
})
return killmail
class KillMail_LeaderBoard():
def __init__(self):
self.url = 'http://kb.ceve-market.org/rank/'
global client
client = requests.session()
r = client.get(self.url)
if r.status_code == requests.codes.ok:
            print 'Network connection OK'
soup = BeautifulSoup(r.text, 'html.parser')
self.data_set = soup.find_all('div', class_='col-md-6')
else:
            print 'Network connection error'
r.raise_for_status()
def top_win_character(self):
        # 30-day overall kill-value ranking for characters
info = filter(lambda x: x, self.data_set[0].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0).decode('utf8'),
'value': info.pop(0)
})
return organized_info
def top_lose_character(self):
        # 30-day loss ranking for characters
info = filter(lambda x: x, self.data_set[1].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0)
})
return organized_info
def top_win_corp(self):
        # 30-day overall kill-value ranking for corporations
info = filter(lambda x: x, self.data_set[2].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0)
})
return organized_info
def top_lose_corp(self):
        # 30-day loss ranking for corporations
info = filter(lambda x: x, self.data_set[3].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0)
})
return organized_info
def top_win_alliance(self):
        # 30-day overall kill-value ranking for alliances
info = filter(lambda x: x, self.data_set[4].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0)
})
return organized_info
def top_lose_alliance(self):
        # 30-day loss ranking for alliances
info = filter(lambda x: x, self.data_set[5].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0)
})
return organized_info
def top_blackhand_corp(self):
info = filter(lambda x: x, self.data_set[6].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0),
'ratio': info.pop(0)
})
return organized_info
def top_blackhand_alliance(self):
info = filter(lambda x: x, self.data_set[7].text.split('\n'))[1:]
organized_info = []
while info:
organized_info.append({
'rank': info.pop(0),
'name': info.pop(0),
'value': info.pop(0),
'ratio': info.pop(0)
})
return organized_info
def losers_233(self):
r = client.get('http://kb.ceve-market.org/233/')
soup = BeautifulSoup(r.text, 'html.parser')
data_set = soup.find_all('tbody')
info = []
url_info = []
killmail = []
for n in data_set[0].find_all('td'):
            # Reset the flags for every cell so values never leak from the
            # previous <td> (mirrors the handling in get_km_basic_info)
            if n.find_all(href=re.compile("/pilot/0")) != []:
                info.append('未知角色')
            if n.find_all(style='color:grey') != []:
                null_alli_tag = 1
            else:
                null_alli_tag = 0
            if n.find_all(href=re.compile("/corp/None")) != []:
                null_corp_tag = 1
            else:
                null_corp_tag = 0
for text in n.text.split('\n'):
if len(text) != 0:
if text == '无联盟' and null_alli_tag == 1:
info.append('无联盟(NULL)')
if null_corp_tag == 1:
info.append('无军团(NULL)')
elif text == '未知':
pass
else:
info.append(text)
for n in data_set[0].find_all('tr'):
url_info.append(n['onclick'].split('\'')[-2])
while info:
killmail.append({
'victim_ship': info.pop(0),
'victim_shiptype': info.pop(0),
'victim': info.pop(0),
'victim_alli': info.pop(0),
'victim_corp': info.pop(0),
'final_blow': info.pop(0),
'alliance': info.pop(0),
'corp': info.pop(0),
'location': info.pop(0),
'time': info.pop(0),
'value': info.pop(0)
})
return killmail
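# Usage sketch (network access assumed; the key names match the dicts built
# by the methods above):
#
# board = KillMail_LeaderBoard()
# for entry in board.top_win_character()[:10]:
#     print entry['rank'], entry['name'], entry['value']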
class KillMail_FrontPage():
def __init__(self):
self.base_url = 'http://kb.ceve-market.org/'
self.url = self.base_url
global client
client = requests.session()
def get_km_basic_info(self, url):
        # Return the basic info of every killmail listed on the page at the given URL
r = client.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
a = soup.find('table',id='kbtable').find('tbody').find_all('tr')
info = []
url_info = []
killmail = []
for km in a[:-1]:
for n in km.find_all('td'):
if n.find_all(href=re.compile("/pilot/0")) != []:
info.append('未知角色')
if n.find_all(style='color:grey') != []:
null_alli_tag = 1
else:
null_alli_tag = 0
if n.find_all(href=re.compile("/corp/None")) != []:
null_corp_tag = 1
else:
null_corp_tag = 0
for text in n.text.split('\n'):
if len(text) != 0:
if text == '无联盟' and null_alli_tag == 1:
info.append('无联盟(NULL)')
if null_corp_tag == 1:
info.append('无军团(NULL)')
elif text == '未知':
pass
else:
info.append(text)
url_info.append(km['onclick'].split('\'')[-2])
while info:
killmail.append({
'victim_ship': info.pop(0),
'victim_shiptype': info.pop(0),
'victim': info.pop(0),
'victim_alli': info.pop(0),
'victim_corp': info.pop(0),
'final_blow': info.pop(0),
'alliance': info.pop(0),
'corp': info.pop(0),
'location': info.pop(0),
'time': info.pop(0),
'url': url_info.pop(0)
})
return killmail
def update_url(self, url):
client = requests.session()
r = client.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
a = soup.find_all('a')
for n in a:
            if '下一页' in n.text.strip():  # '下一页' means "next page"
url = self.base_url + n.get('href')
break
return url
class KillMail_spider(object):
def __init__(self, date):
self.spider = KillMail_FrontPage()
        # Convert the chosen date string into datetime bounds for the day
self.date = date
self.start_date = datetime.datetime.strptime(date, "%Y-%m-%d")
self.end_date = datetime.datetime.combine(
self.start_date, datetime.time.max)
print self.start_date, self.end_date
with open(self.date + '.json', 'w') as f:
f.write(json.dumps([], ensure_ascii=False, indent=1))
        # Check for the URL file of the previously crawled day (the following
        # calendar day, since killmails are listed newest first)
self.last_date = (
self.start_date+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
if os.path.isfile(self.last_date + '_url.json'):
            print "found the previous day's url file"
            print "loading its last url"
with open(self.last_date + '_url.json', 'r') as f:
url_data = json.load(f)
self.find_start_url(url_data[-1])
else:
            print "no url file found"
            print "starting from the first page"
self.find_start_url('http://kb.ceve-market.org/')
def find_start_url(self, url):
info = self.spider.get_km_basic_info(url)
        # Check whether the starting page for this date's killmails has been found
last_km_date = datetime.datetime.strptime(
info[-1]['time'].split(' ')[0], "%Y-%m-%d")
if last_km_date != self.start_date:
            print 'no valid killmails on this page, last km is', info[-1]['time']
next_url = self.spider.update_url(url)
self.find_start_url(next_url)
else:
            print 'Found valid killmails on this page'
print 'Now url has been set to', url
self.url = url
self.url_set = [self.url]
print '==============================='
print 'Now start to find url set'
self.find_end_url(self.url)
def find_end_url(self, url):
info = self.spider.get_km_basic_info(url)
self.fetch_km(url)
        # Check whether the last page for this date's killmails has been reached
last_km_date = datetime.datetime.strptime(
info[-1]['time'].split(' ')[0], "%Y-%m-%d")
if last_km_date == self.start_date:
            print 'page still within the target date, last km is', info[-1]['time']
next_url = self.spider.update_url(url)
self.url_set.append(next_url)
self.find_end_url(next_url)
else:
            print 'found killmails from an earlier date; this day is complete'
            with open(self.date + '_url.json', 'w') as f:
                f.write(json.dumps(self.url_set, ensure_ascii=False, indent=1))
            print 'Stored the URLs to file'
def url_fetch(self):
        # Does a URL file for this date already exist?
file_name = self.date + '_url.json'
if os.path.isfile(file_name):
            print "found url file"
with open(file_name, 'r') as f:
url_data = json.load(f)
print 'start fetching'
for n in url_data:
self.fetch_km(n)
print 'loading next page...'
else:
            print "cannot find today's url file"
def fetch_km(self, url):
file_name = self.date + '.json'
info = self.spider.get_km_basic_info(url)
info = self.check_km(info)
if os.path.isfile(file_name):
with open(file_name, 'r') as f:
old_data = json.load(f)
with open(self.date + '.json', 'w') as f:
f.write(
json.dumps(old_data+info, ensure_ascii=False, indent=1))
print 'now we have', len(old_data + info), 'killmails.'
else:
with open(self.date + '.json', 'w') as f:
f.write(json.dumps(info, ensure_ascii=False, indent=1))
def check_km(self, info):
        # If the first and last killmails on the page fall on different dates,
        # trim the head or the tail of the queue accordingly
if datetime.datetime.strptime(info[0]['time'].split(' ')[0], "%Y-%m-%d") > datetime.datetime.strptime(info[-1]['time'].split(' ')[0], "%Y-%m-%d"):
            print 'page spans more than one date'
if datetime.datetime.strptime(info[-1]['time'].split(' ')[0], "%Y-%m-%d") != self.start_date:
print 'this is the end'
return self.km_end(info)
else:
                print 'this is the front'
return self.km_front(info)
else:
print 'this is the same day\'s page'
return info
def km_front(self, info):
        # Trim the head: keep only killmails that fall before the end of the target day
data = []
for n in info:
if datetime.datetime.strptime(n['time'].strip(), "%Y-%m-%d %H:%M:%S") < self.end_date:
data.append(n)
return data
def km_end(self, info):
        # Trim the tail: keep only killmails that fall after the start of the target day
data = []
for n in info:
if datetime.datetime.strptime(n['time'].strip(), "%Y-%m-%d %H:%M:%S") > self.start_date:
data.append(n)
return data
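# Minimal driver sketch, assuming this module is run directly; the date is a
# hypothetical example. Constructing KillMail_spider already performs the
# crawl for that day, writing '<date>.json' and '<date>_url.json'
# (url_fetch() would re-append the same killmails, so it is not called here).
if __name__ == '__main__':
    KillMail_spider('2015-01-01')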
|
YouthBread/CEVE-KB
|
kb.py
|
Python
|
gpl-2.0
| 26,992
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bibauthorid HTML templates"""
# pylint: disable=W0105
# pylint: disable=C0301
from flask import url_for
#from cgi import escape
#from urllib import quote
#
import invenio.legacy.bibauthorid.config as bconfig
from invenio.config import CFG_SITE_LANG
from invenio.config import CFG_SITE_URL
from invenio.config import CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
from invenio.modules.formatter import format_record
from invenio.legacy.bibrecord import get_fieldvalues
from invenio.legacy.bibauthorid.config import EXTERNAL_SYSTEMS_LIST
from invenio.legacy.bibauthorid.webapi import get_person_redirect_link, get_canonical_id_from_person_id, get_person_names_from_id
from invenio.legacy.bibauthorid.webapi import get_personiID_external_ids
from invenio.legacy.bibauthorid.frontinterface import get_uid_from_personid
from invenio.legacy.bibauthorid.frontinterface import get_bibrefrec_name_string
from invenio.legacy.bibauthorid.frontinterface import get_canonical_id_from_personid
from invenio.base.i18n import gettext_set_language, wash_language
from invenio.legacy.webuser import get_email
from invenio.utils.html import escape_html
#from invenio.utils.text import encode_for_xml
from flask import session
class Template:
"""Templating functions used by aid"""
def __init__(self, language=CFG_SITE_LANG):
"""Set defaults for all aid template output"""
self.language = language
self._ = gettext_set_language(wash_language(language))
def tmpl_person_detail_layout(self, content):
'''
writes HTML content into the person css container
@param content: HTML content
@type content: string
@return: HTML code
@rtype: string
'''
html = []
h = html.append
h('<div id="aid_person">')
h(content)
h('</div>')
return "\n".join(html)
def tmpl_transaction_box(self, teaser_key, messages, show_close_btn=True):
'''
Creates a notification box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param messages: list of keys to a dict which return the message to display in the box
@type messages: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
transaction_teaser_dict = { 'success': 'Success!',
'failure': 'Failure!' }
transaction_message_dict = { 'confirm_success': '%s transaction%s successfully executed.',
'confirm_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.',
'reject_success': '%s transaction%s successfully executed.',
'reject_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.',
'reset_success': '%s transaction%s successfully executed.',
'reset_failure': '%s transaction%s failed. The system may have been updating during your operation. Please try again or contact %s to obtain help.' }
teaser = self._(transaction_teaser_dict[teaser_key])
html = []
h = html.append
for key in transaction_message_dict.keys():
same_kind = [mes for mes in messages if mes == key]
trans_no = len(same_kind)
if trans_no == 0:
continue
elif trans_no == 1:
args = [trans_no, '']
else:
args = [trans_no, 's']
color = ''
if teaser_key == 'failure':
color = 'background: #FC2626;'
args.append(CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL)
message = self._(transaction_message_dict[key] % tuple(args))
h('<div id="aid_notification_' + key + '" class="ui-widget ui-alert">')
h(' <div style="%s margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">' % (color))
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s' % (teaser, message))
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify" style="border-style: none;">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
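    # Illustrative call, an assumption about typical use rather than part of
    # the original module: rendering a box for two successful confirmations
    # would look roughly like
    #   Template().tmpl_transaction_box('success', ['confirm_success', 'confirm_success'])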
def tmpl_notification_box(self, teaser_key, message_key, bibrefs, show_close_btn=True):
'''
Creates a notification box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param message_key: key to a dict which returns the message to display in the box
@type message_key: string
@param bibrefs: bibrefs which are about to be assigned
@type bibrefs: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
notification_teaser_dict = {'info': 'Info!' }
notification_message_dict = {'attribute_papers': 'You are about to attribute the following paper%s:' }
teaser = self._(notification_teaser_dict[teaser_key])
arg = ''
if len(bibrefs) > 1:
arg = 's'
message = self._(notification_message_dict[message_key] % (arg) )
html = []
h = html.append
h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
h(' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s' % (teaser, message))
h("<ul>")
for paper in bibrefs:
if ',' in paper:
pbibrec = paper.split(',')[1]
else:
pbibrec = paper
h("<li>%s</li>" % (format_record(int(pbibrec), "ha")))
h("</ul>")
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
def tmpl_error_box(self, teaser_key, message_key, show_close_btn=True):
'''
Creates an error box based on the jQuery UI style
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param message_key: key to a dict which returns the message to display in the box
@type message_key: string
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
error_teaser_dict = {'sorry': 'Sorry.',
'error': 'Error:' }
error_message_dict = {'check_entries': 'Please check your entries.',
'provide_transaction': 'Please provide at least one transaction.' }
teaser = self._(error_teaser_dict[teaser_key])
message = self._(error_message_dict[message_key])
html = []
h = html.append
h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
h(' <div style="background: #FC2626; margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-error ui-corner-all">')
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-alert"></span>')
h(' <strong>%s</strong> %s' % (teaser, message))
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
def tmpl_ticket_box(self, teaser_key, message_key, trans_no, show_close_btn=True):
'''
Creates a semi-permanent box informing about ticket
status notifications
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param message_key: key to a dict which returns the message to display in the box
@type message_key: string
@param trans_no: number of transactions in progress
@type trans_no: integer
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
ticket_teaser_dict = {'in_process': 'Claim in process!' }
ticket_message_dict = {'transaction': 'There %s %s transaction%s in progress.' }
teaser = self._(ticket_teaser_dict[teaser_key])
if trans_no == 1:
args = ['is', trans_no, '']
else:
args = ['are', trans_no, 's']
message = self._(ticket_message_dict[message_key] % tuple(args))
html = []
h = html.append
h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
h(' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s ' % (teaser, message))
h('<a rel="nofollow" id="checkout" href="action?checkout=True">' + self._('Click here to review the transactions.') + '</a>')
h('<br>')
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
h(' </div>')
h('</div>')
return "\n".join(html)
def tmpl_search_ticket_box(self, teaser_key, message_key, bibrefs, show_close_btn=False):
'''
Creates a box informing about a claim in progress for
the search.
@param teaser_key: key to a dict which returns the teaser
@type teaser_key: string
@param message_key: key to a dict which returns the message to display in the box
@type message_key: string
@param bibrefs: bibrefs which are about to be assigned
@type bibrefs: list of strings
@param show_close_btn: display close button [x]
@type show_close_btn: boolean
@return: HTML code
@rtype: string
'''
error_teaser_dict = {'person_search': 'Person search for assignment in progress!' }
error_message_dict = {'assign_papers': 'You are searching for a person to assign the following paper%s:' }
teaser = self._(error_teaser_dict[teaser_key])
arg = ''
if len(bibrefs) > 1:
arg = 's'
message = self._(error_message_dict[message_key] % (arg) )
html = []
h = html.append
h('<div id="aid_notification_' + teaser_key + '" class="ui-widget ui-alert">')
h(' <div style="margin-top: 20px; padding: 0pt 0.7em;" class="ui-state-highlight ui-corner-all">')
h(' <p><span style="float: left; margin-right: 0.3em;" class="ui-icon ui-icon-info"></span>')
h(' <strong>%s</strong> %s ' % (teaser, message))
h("<ul>")
for paper in bibrefs:
if ',' in paper:
pbibrec = paper.split(',')[1]
else:
pbibrec = paper
h("<li>%s</li>"
% (format_record(int(pbibrec), "ha")))
h("</ul>")
h('<a rel="nofollow" id="checkout" href="action?cancel_search_ticket=True">' + self._('Quit searching.') + '</a>')
if show_close_btn:
h(' <span style="float:right; margin-right: 0.3em;"><a rel="nofollow" href="#" class="aid_close-notify">X</a></span></p>')
h(' </div>')
h('</div>')
h('<p> </p>')
return "\n".join(html)
def tmpl_meta_includes(self, kill_browser_cache=False):
'''
Generates HTML code for the header section of the document
META tags to kill browser caching
Javascript includes
CSS definitions
@param kill_browser_cache: Do we want to kill the browser cache?
@type kill_browser_cache: boolean
'''
js_path = "%s/js" % CFG_SITE_URL
imgcss_path = "%s/img" % CFG_SITE_URL
result = []
        # Add a browser cache killer; otherwise some notifications are not
        # displayed outside of the session.
if kill_browser_cache:
result = [
'<META HTTP-EQUIV="Pragma" CONTENT="no-cache">',
'<META HTTP-EQUIV="Cache-Control" CONTENT="no-cache">',
'<META HTTP-EQUIV="Pragma-directive" CONTENT="no-cache">',
'<META HTTP-EQUIV="Cache-Directive" CONTENT="no-cache">',
'<META HTTP-EQUIV="Expires" CONTENT="0">']
scripts = ["jquery-ui.min.js",
"jquery.form.js",
"jquery.dataTables.min.js",
]
result.append('<link rel="stylesheet" type="text/css" href='
'"%s/jquery-ui/themes/smoothness/jquery-ui.css" />'
% (imgcss_path))
result.append('<link rel="stylesheet" type="text/css" href='
'"%s/datatables_jquery-ui.css" />'
% (imgcss_path))
result.append('<link rel="stylesheet" type="text/css" href='
'"%s" />'
% (url_for('authorids.static',
filename='css/authorids/base.css'), ))
for script in scripts:
result.append('<script type="text/javascript" src="%s/%s">'
'</script>' % (js_path, script))
result.append('<script type="text/javascript" src="%s">'
'</script>' % (url_for('authorids.static',
filename='js/authorids/base.js')))
return "\n".join(result)
def tmpl_author_confirmed(self, bibref, pid, verbiage_dict={'alt_confirm':'Confirmed.',
'confirm_text':'This record assignment has been confirmed.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Repeal!',
'repeal_text':'Repeal record assignment',
'to_other_text':'Assign to another person',
'alt_to_other':'To other person!'
},
show_reset_button=True):
'''
Generate play per-paper links for the table for the
status "confirmed"
@param bibref: construct of unique ID for this author on this paper
@type bibref: string
@param pid: the Person ID
@type pid: int
@param verbiage_dict: language for the link descriptions
@type verbiage_dict: dict
'''
stri = ('<!--2!--><span id="aid_status_details"> '
'<img src="%(url)s/img/aid_check.png" alt="%(alt_confirm)s" />'
'%(confirm_text)s <br>')
if show_reset_button:
stri = stri + (
'<a rel="nofollow" id="aid_reset_gr" class="aid_grey" href="%(url)s/person/action?reset=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_reset_gray.png" alt="%(alt_forget)s" style="margin-left:22px;" />'
'%(forget_text)s</a><br>')
stri = stri + (
'<a rel="nofollow" id="aid_repeal" class="aid_grey" href="%(url)s/person/action?repeal=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_reject_gray.png" alt="%(alt_repeal)s" style="margin-left:22px;"/>'
'%(repeal_text)s</a><br>'
'<a rel="nofollow" id="aid_to_other" class="aid_grey" href="%(url)s/person/action?to_other_person=True&selection=%(ref)s">'
'<img src="%(url)s/img/aid_to_other_gray.png" alt="%(alt_to_other)s" style="margin-left:22px;"/>'
'%(to_other_text)s</a> </span>')
return (stri
% ({'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid,
'alt_confirm':verbiage_dict['alt_confirm'],
'confirm_text':verbiage_dict['confirm_text'],
'alt_forget':verbiage_dict['alt_forget'],
'forget_text':verbiage_dict['forget_text'],
'alt_repeal':verbiage_dict['alt_repeal'],
'repeal_text':verbiage_dict['repeal_text'],
'to_other_text':verbiage_dict['to_other_text'],
'alt_to_other':verbiage_dict['alt_to_other']}))
def tmpl_author_repealed(self, bibref, pid, verbiage_dict={'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Rejected!',
'repeal_text':'Repeal this record assignment.',
'to_other_text':'Assign to another person',
'alt_to_other':'To other person!'
} ):
'''
Generate play per-paper links for the table for the
status "repealed"
@param bibref: construct of unique ID for this author on this paper
@type bibref: string
@param pid: the Person ID
@type pid: int
@param verbiage_dict: language for the link descriptions
@type verbiage_dict: dict
'''
stri = ('<!---2!--><span id="aid_status_details"> '
'<img src="%(url)s/img/aid_reject.png" alt="%(alt_repeal)s" />'
'%(repeal_text)s <br>'
'<a rel="nofollow" id="aid_confirm" class="aid_grey" href="%(url)s/person/action?confirm=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_check_gray.png" alt="%(alt_confirm)s" style="margin-left: 22px;" />'
'%(confirm_text)s</a><br>'
'<a rel="nofollow" id="aid_to_other" class="aid_grey" href="%(url)s/person/action?to_other_person=True&selection=%(ref)s">'
'<img src="%(url)s/img/aid_to_other_gray.png" alt="%(alt_to_other)s" style="margin-left:22px;"/>'
'%(to_other_text)s</a> </span>')
return (stri
% ({'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid,
'alt_confirm':verbiage_dict['alt_confirm'],
'confirm_text':verbiage_dict['confirm_text'],
'alt_forget':verbiage_dict['alt_forget'],
'forget_text':verbiage_dict['forget_text'],
'alt_repeal':verbiage_dict['alt_repeal'],
'repeal_text':verbiage_dict['repeal_text'],
'to_other_text':verbiage_dict['to_other_text'],
'alt_to_other':verbiage_dict['alt_to_other']}))
def tmpl_author_undecided(self, bibref, pid, verbiage_dict={'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_repeal':'Rejected!',
'repeal_text':'This record has been repealed.',
'to_other_text':'Assign to another person',
'alt_to_other':'To other person!'
},
show_reset_button=True):
'''
Generate play per-paper links for the table for the
status "no decision taken yet"
@param bibref: construct of unique ID for this author on this paper
@type bibref: string
@param pid: the Person ID
@type pid: int
@param verbiage_dict: language for the link descriptions
@type verbiage_dict: dict
'''
#batchprocess?mconfirm=True&bibrefs=['100:17,16']&pid=1
string = ('<!--0!--><span id="aid_status_details"> '
'<a rel="nofollow" id="aid_confirm" href="%(url)s/person/action?confirm=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_check.png" alt="%(alt_confirm)s" />'
'%(confirm_text)s</a><br />'
'<a rel="nofollow" id="aid_repeal" href="%(url)s/person/action?repeal=True&selection=%(ref)s&pid=%(pid)s">'
'<img src="%(url)s/img/aid_reject.png" alt="%(alt_repeal)s" />'
'%(repeal_text)s</a> <br />'
'<a rel="nofollow" id="aid_to_other" href="%(url)s/person/action?to_other_person=True&selection=%(ref)s">'
'<img src="%(url)s/img/aid_to_other.png" alt="%(alt_to_other)s" />'
'%(to_other_text)s</a> </span>')
return (string
% ({'url': CFG_SITE_URL, 'ref': bibref, 'pid': pid,
'alt_confirm':verbiage_dict['alt_confirm'],
'confirm_text':verbiage_dict['confirm_text'],
'alt_repeal':verbiage_dict['alt_repeal'],
'repeal_text':verbiage_dict['repeal_text'],
'to_other_text':verbiage_dict['to_other_text'],
'alt_to_other':verbiage_dict['alt_to_other']}))
def tmpl_open_claim(self, bibrefs, pid, last_viewed_pid,
search_enabled=True):
'''
Generate entry page for "claim or attribute this paper"
        @param bibrefs: constructs of unique IDs for the authors on these papers
        @type bibrefs: list of strings
@param pid: the Person ID
@type pid: int
@param last_viewed_pid: last ID that had been subject to an action
@type last_viewed_pid: int
'''
t_html = []
h = t_html.append
h(self.tmpl_notification_box('info', 'attribute_papers', bibrefs, show_close_btn=False))
h('<p> ' + self._('Your options') + ': </p>')
bibs = ''
for paper in bibrefs:
if bibs:
bibs = bibs + '&'
bibs = bibs + 'selection=' + str(paper)
if pid > -1:
h('<a rel="nofollow" id="clam_for_myself" href="%s/person/action?confirm=True&%s&pid=%s"> ' % (CFG_SITE_URL, bibs, str(pid)) )
h(self._('Claim for yourself') + ' </a> <br>')
if last_viewed_pid:
h('<a rel="nofollow" id="clam_for_last_viewed" href="%s/person/action?confirm=True&%s&pid=%s"> ' % (CFG_SITE_URL, bibs, str(last_viewed_pid[0])) )
h(self._('Attribute to') + ' %s </a> <br>' % (last_viewed_pid[1]) )
if search_enabled:
h('<a rel="nofollow" id="claim_search" href="%s/person/action?to_other_person=True&%s"> ' % (CFG_SITE_URL, bibs))
h(self._('Search for a person to attribute the paper to') + ' </a> <br>')
return "\n".join(t_html)
def __tmpl_admin_records_table(self, form_id, person_id, bibrecids, verbiage_dict={'no_doc_string':'Sorry, there are currently no documents to be found in this category.',
'b_confirm':'Confirm',
'b_repeal':'Repeal',
'b_to_others':'Assign to other person',
'b_forget':'Forget decision'},
buttons_verbiage_dict={'mass_buttons':{'no_doc_string':'Sorry, there are currently no documents to be found in this category.',
'b_confirm':'Confirm',
'b_repeal':'Repeal',
'b_to_others':'Assign to other person',
'b_forget':'Forget decision'},
'record_undecided':{'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_repeal':'Rejected!',
'repeal_text':'This record has been repealed.'},
'record_confirmed':{'alt_confirm':'Confirmed.',
'confirm_text':'This record assignment has been confirmed.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Repeal!',
'repeal_text':'Repeal record assignment'},
'record_repealed':{'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Rejected!',
'repeal_text':'Repeal this record assignment.'}},
show_reset_button=True):
'''
Generate the big tables for the person overview page
@param form_id: name of the form
@type form_id: string
@param person_id: Person ID
@type person_id: int
@param bibrecids: List of records to display
@type bibrecids: list
@param verbiage_dict: language for the elements
@type verbiage_dict: dict
@param buttons_verbiage_dict: language for the buttons
@type buttons_verbiage_dict: dict
'''
no_papers_html = ['<div style="text-align:left;margin-top:1em;"><strong>']
no_papers_html.append('%s' % self._(verbiage_dict['no_doc_string']) )
no_papers_html.append('</strong></div>')
if not bibrecids or not person_id:
return "\n".join(no_papers_html)
pp_html = []
h = pp_html.append
h('<form id="%s" action="/person/action" method="post">'
% (form_id))
h('<div class="aid_reclist_selector">') #+self._(' On all pages: '))
h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a> | ')
h('<a rel="nofollow" id="toggle_claimed_rows" href="javascript:toggle_claimed_rows();" '
'alt="hide">' + self._('Hide successful claims') + '</a>')
h('</div>')
h('<div class="aid_reclist_buttons">')
h(('<img src="%s/img/aid_90low_right.png" alt="∟" />')
% (CFG_SITE_URL))
h('<input type="hidden" name="pid" value="%s" />' % (person_id))
h('<input type="submit" name="confirm" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_confirm']) )
h('<input type="submit" name="repeal" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_repeal']) )
h('<input type="submit" name="to_other_person" value="%s" class="aid_btn_blue" />' % self._(verbiage_dict['b_to_others']) )
#if show_reset_button:
# h('<input type="submit" name="reset" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_forget'])
h(" </div>")
h('<table class="paperstable" cellpadding="3" width="100%">')
h("<thead>")
h(" <tr>")
h(' <th> </th>')
h(' <th>' + self._('Paper Short Info') + '</th>')
h(' <th>' + self._('Author Name') + '</th>')
h(' <th>' + self._('Affiliation') + '</th>')
h(' <th>' + self._('Date') + '</th>')
h(' <th>' + self._('Experiment') + '</th>')
h(' <th>' + self._('Actions') + '</th>')
h(' </tr>')
h('</thead>')
h('<tbody>')
for idx, paper in enumerate(bibrecids):
h(' <tr style="padding-top: 6px; padding-bottom: 6px;">')
h(' <td><input type="checkbox" name="selection" '
'value="%s" /> </td>' % (paper['bibref']))
rec_info = format_record(int(paper['recid']), "ha")
rec_info = str(idx + 1) + '. ' + rec_info
h(" <td>%s</td>" % (rec_info))
h(" <td>%s</td>" % (paper['authorname']))
aff = ""
if paper['authoraffiliation']:
aff = paper['authoraffiliation']
else:
aff = self._("Not assigned")
h(" <td>%s</td>" % (aff))
if paper['paperdate']:
pdate = paper['paperdate']
else:
pdate = 'N.A.'
h(" <td>%s</td>" % pdate)
if paper['paperexperiment']:
pdate = paper['paperexperiment']
else:
pdate = 'N.A.'
h(" <td>%s</td>" % pdate)
paper_status = self._("No status information found.")
if paper['flag'] == 2:
paper_status = self.tmpl_author_confirmed(paper['bibref'], person_id,
verbiage_dict=buttons_verbiage_dict['record_confirmed'],
show_reset_button=show_reset_button)
elif paper['flag'] == -2:
paper_status = self.tmpl_author_repealed(paper['bibref'], person_id,
verbiage_dict=buttons_verbiage_dict['record_repealed'])
else:
paper_status = self.tmpl_author_undecided(paper['bibref'], person_id,
verbiage_dict=buttons_verbiage_dict['record_undecided'],
show_reset_button=show_reset_button)
h(' <td><div id="bibref%s" style="float:left"><!--%s!-->%s </div>'
% (paper['bibref'], paper['flag'], paper_status))
if 'rt_status' in paper and paper['rt_status']:
h('<img src="%s/img/aid_operator.png" title="%s" '
'alt="actions pending" style="float:right" '
'height="24" width="24" />'
% (CFG_SITE_URL, self._("Operator review of user actions pending")))
h(' </td>')
h(" </tr>")
h(" </tbody>")
h("</table>")
h('<div class="aid_reclist_selector">') #+self._(' On all pages: '))
h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a> | ')
h('<a rel="nofollow" id="toggle_claimed_rows" href="javascript:toggle_claimed_rows();" '
'alt="hide">' + self._('Hide successful claims') + '</a>')
h('</div>')
h('<div class="aid_reclist_buttons">')
h(('<img src="%s/img/aid_90low_right.png" alt="∟" />')
% (CFG_SITE_URL))
h('<input type="hidden" name="pid" value="%s" />' % (person_id))
h('<input type="submit" name="confirm" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_confirm'])
h('<input type="submit" name="repeal" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_repeal'])
h('<input type="submit" name="to_other_person" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_to_others'])
#if show_reset_button:
# h('<input type="submit" name="reset" value="%s" class="aid_btn_blue" />' % verbiage_dict['b_forget'])
h(" </div>")
h("</form>")
return "\n".join(pp_html)
def __tmpl_reviews_table(self, person_id, bibrecids, admin=False):
'''
Generate the table for potential reviews.
@param person_id: Person ID
@type person_id: int
@param bibrecids: List of records to display
@type bibrecids: list
@param admin: Show admin functions
@type admin: boolean
'''
no_papers_html = ['<div style="text-align:left;margin-top:1em;"><strong>']
no_papers_html.append(self._('Sorry, there are currently no records to be found in this category.'))
no_papers_html.append('</strong></div>')
if not bibrecids or not person_id:
return "\n".join(no_papers_html)
pp_html = []
h = pp_html.append
h('<form id="review" action="/person/batchprocess" method="post">')
h('<table class="reviewstable" cellpadding="3" width="100%">')
h(' <thead>')
h(' <tr>')
h(' <th> </th>')
h(' <th>' + self._('Paper Short Info') + '</th>')
h(' <th>' + self._('Actions') + '</th>')
h(' </tr>')
h(' </thead>')
h(' <tbody>')
for paper in bibrecids:
h(' <tr>')
h(' <td><input type="checkbox" name="selected_bibrecs" '
'value="%s" /> </td>' % (paper))
rec_info = format_record(int(paper[0]), "ha")
if not admin:
rec_info = rec_info.replace("person/search?q=", "author/")
h(" <td>%s</td>" % (rec_info))
            h((' <td><a rel="nofollow" href="/person/batchprocess?selected_bibrecs=%s&mfind_bibref=claim">'
               % (paper)) + self._('Review Transaction') + '</a></td>')
h(" </tr>")
h(" </tbody>")
h("</table>")
h('<div style="text-align:left;"> ' + self._('On all pages') + ': ')
h('<a rel="nofollow" rel="group_1" href="#select_all">' + self._('Select All') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#select_none">' + self._('Select None') + '</a> | ')
h('<a rel="nofollow" rel="group_1" href="#invert_selection">' + self._('Invert Selection') + '</a>')
h('</div>')
h('<div style="vertical-align:middle;">')
h('∟ ' + self._('With selected do') + ': ')
h('<input type="hidden" name="pid" value="%s" />' % (person_id))
h('<input type="hidden" name="mfind_bibref" value="claim" />')
h('<input type="submit" name="submit" value="Review selected transactions" />')
h(" </div>")
h('</form>')
return "\n".join(pp_html)
def tmpl_admin_person_info_box(self, ln, person_id= -1, names=[]):
'''
Generate the box showing names
@param ln: the language to use
@type ln: string
@param person_id: Person ID
@type person_id: int
@param names: List of names to display
@type names: list
'''
html = []
h = html.append
if not ln:
pass
#class="ui-tabs ui-widget ui-widget-content ui-corner-all">
h('<div id="aid_person_names"')
h('<p><strong>' + self._('Names variants') + ':</strong></p>')
h("<p>")
h('<!--<span class="aid_lowlight_text">Person ID: <span id="pid%s">%s</span></span><br />!-->'
% (person_id, person_id))
for name in names:
# h(("%s "+self._('as appeared on')+" %s"+self._(' records')+"<br />")
# % (name[0], name[1]))
h(("%s (%s); ")
% (name[0], name[1]))
h("</p>")
h("</div>")
return "\n".join(html)
def tmpl_admin_tabs(self, ln=CFG_SITE_LANG, person_id= -1,
rejected_papers=[],
rest_of_papers=[],
review_needed=[],
rt_tickets=[],
open_rt_tickets=[],
show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
show_reset_button=True,
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'],
verbiage_dict={'confirmed':'Records', 'repealed':'Not this person\'s records',
'review':'Records in need of review',
'tickets':'Open Tickets', 'data':'Data',
'confirmed_ns':'Papers of this Person',
'repealed_ns':'Papers _not_ of this Person',
'review_ns':'Papers in need of review',
'tickets_ns':'Tickets for this Person',
'data_ns':'Additional Data for this Person'},
buttons_verbiage_dict={'mass_buttons':{'no_doc_string':'Sorry, there are currently no documents to be found in this category.',
'b_confirm':'Confirm',
'b_repeal':'Repeal',
'b_to_others':'Assign to other person',
'b_forget':'Forget decision'},
'record_undecided':{'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_repeal':'Rejected!',
'repeal_text':'This record has been repealed.'},
'record_confirmed':{'alt_confirm':'Confirmed.',
'confirm_text':'This record assignment has been confirmed.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Repeal!',
'repeal_text':'Repeal record assignment'},
'record_repealed':{'alt_confirm':'Confirm!',
'confirm_text':'Confirm record assignment.',
'alt_forget':'Forget decision!',
'forget_text':'Forget assignment decision',
'alt_repeal':'Rejected!',
'repeal_text':'Repeal this record assignment.'}}):
'''
Generate the tabs for the person overview page
@param ln: the language to use
@type ln: string
@param person_id: Person ID
@type person_id: int
@param rejected_papers: list of repealed papers
@type rejected_papers: list
@param rest_of_papers: list of attributed of undecided papers
@type rest_of_papers: list
@param review_needed: list of papers that need a review (choose name)
@type review_needed:list
@param rt_tickets: list of tickets for this Person
@type rt_tickets: list
@param open_rt_tickets: list of open request tickets
@type open_rt_tickets: list
@param show_tabs: list of tabs to display
@type show_tabs: list of strings
@param ticket_links: list of links to display
@type ticket_links: list of strings
@param verbiage_dict: language for the elements
@type verbiage_dict: dict
@param buttons_verbiage_dict: language for the buttons
@type buttons_verbiage_dict: dict
'''
html = []
h = html.append
h('<div id="aid_tabbing">')
h(' <ul>')
if 'records' in show_tabs:
r = verbiage_dict['confirmed']
h(' <li><a rel="nofollow" href="#tabRecords"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(rest_of_papers)}))
if 'repealed' in show_tabs:
r = verbiage_dict['repealed']
h(' <li><a rel="nofollow" href="#tabNotRecords"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(rejected_papers)}))
if 'review' in show_tabs:
r = verbiage_dict['review']
h(' <li><a rel="nofollow" href="#tabReviewNeeded"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(review_needed)}))
if 'tickets' in show_tabs:
r = verbiage_dict['tickets']
h(' <li><a rel="nofollow" href="#tabTickets"><span>%(r)s (%(l)s)</span></a></li>' %
({'r':r, 'l':len(open_rt_tickets)}))
if 'data' in show_tabs:
r = verbiage_dict['data']
h(' <li><a rel="nofollow" href="#tabData"><span>%s</span></a></li>' % r)
h(' </ul>')
if 'records' in show_tabs:
h(' <div id="tabRecords">')
r = verbiage_dict['confirmed_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self.__tmpl_admin_records_table("massfunctions",
person_id, rest_of_papers,
verbiage_dict=buttons_verbiage_dict['mass_buttons'],
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button))
h(" </div>")
if 'repealed' in show_tabs:
h(' <div id="tabNotRecords">')
r = verbiage_dict['repealed_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self._('These records have been marked as not being from this person.'))
            h('<br />' + self._('They will be reconsidered in the next run of the author ')
              + self._('disambiguation algorithm and might disappear from this listing.'))
h(self.__tmpl_admin_records_table("rmassfunctions",
person_id, rejected_papers,
verbiage_dict=buttons_verbiage_dict['mass_buttons'],
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button))
h(" </div>")
if 'review' in show_tabs:
h(' <div id="tabReviewNeeded">')
r = verbiage_dict['review_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
h(self.__tmpl_reviews_table(person_id, review_needed, True))
h(' </div>')
if 'tickets' in show_tabs:
h(' <div id="tabTickets">')
r = verbiage_dict['tickets']
h('<noscript><h5>%s</h5></noscript>' % r)
r = verbiage_dict['tickets_ns']
h('<p>%s:</p>' % r)
if rt_tickets:
pass
# open_rt_tickets = [a for a in open_rt_tickets if a[1] == rt_tickets]
for t in open_rt_tickets:
name = self._('Not provided')
surname = self._('Not provided')
uidip = self._('Not available')
comments = self._('No comments')
email = self._('Not provided')
date = self._('Not Available')
actions = []
for info in t[0]:
if info[0] == 'firstname':
name = info[1]
elif info[0] == 'lastname':
surname = info[1]
elif info[0] == 'uid-ip':
uidip = info[1]
elif info[0] == 'comments':
comments = info[1]
elif info[0] == 'email':
email = info[1]
elif info[0] == 'date':
date = info[1]
elif info[0] in ['confirm', 'repeal']:
actions.append(info)
if 'delete' in ticket_links:
h(('<strong>Ticket number: %(tnum)s </strong> <a rel="nofollow" id="cancel" href=%(url)s/person/action?cancel_rt_ticket=True&selection=%(tnum)s&pid=%(pid)s>' + self._(' Delete this ticket') + ' </a>')
% ({'tnum':t[1], 'url':CFG_SITE_URL, 'pid':str(person_id)}))
if 'commit' in ticket_links:
h((' or <a rel="nofollow" id="commit" href=%(url)s/person/action?commit_rt_ticket=True&selection=%(tnum)s&pid=%(pid)s>' + self._(' Commit this entire ticket') + ' </a> <br>')
% ({'tnum':t[1], 'url':CFG_SITE_URL, 'pid':str(person_id)}))
h('<dd>')
h('Open from: %s, %s <br>' % (surname, name))
h('Date: %s <br>' % date)
h('identified by: %s <br>' % uidip)
h('email: %s <br>' % email)
h('comments: %s <br>' % comments)
h('Suggested actions: <br>')
h('<dd>')
for a in actions:
bibref, bibrec = a[1].split(',')
pname = get_bibrefrec_name_string(bibref)
title = ""
try:
title = get_fieldvalues(int(bibrec), "245__a")[0]
except IndexError:
title = self._("No title available")
title = escape_html(title)
if 'commit_entry' in ticket_links:
h('<a rel="nofollow" id="action" href="%(url)s/person/action?%(action)s=True&pid=%(pid)s&selection=%(bib)s&rt_id=%(rt)s">%(action)s - %(name)s on %(title)s </a>'
% ({'action': a[0], 'url': CFG_SITE_URL,
'pid': str(person_id), 'bib':a[1],
'name': pname, 'title': title, 'rt': t[1]}))
else:
h('%(action)s - %(name)s on %(title)s'
% ({'action': a[0], 'name': pname, 'title': title}))
if 'del_entry' in ticket_links:
h(' - <a rel="nofollow" id="action" href="%(url)s/person/action?cancel_rt_ticket=True&pid=%(pid)s&selection=%(bib)s&rt_id=%(rt)s&rt_action=%(action)s"> Delete this entry </a>'
% ({'action': a[0], 'url': CFG_SITE_URL,
'pid': str(person_id), 'bib': a[1], 'rt': t[1]}))
h(' - <a rel="nofollow" id="show_paper" target="_blank" href="%(url)s/record/%(record)s"> View record <br>' % ({'url':CFG_SITE_URL, 'record':str(bibrec)}))
h('</dd>')
h('</dd><br>')
# h(str(open_rt_tickets))
h(" </div>")
if 'data' in show_tabs:
h(' <div id="tabData">')
r = verbiage_dict['data_ns']
h('<noscript><h5>%s</h5></noscript>' % r)
canonical_name = str(get_canonical_id_from_person_id(person_id))
if '.' in str(canonical_name) and not isinstance(canonical_name, int):
canonical_name = canonical_name[0:canonical_name.rindex('.')]
h('<div><div> <strong> Person id </strong> <br> %s <br>' % person_id)
h('<strong> <br> Canonical name setup </strong>')
h('<div style="margin-top: 15px;"> Current canonical name: %s <form method="GET" action="%s/person/action" rel="nofollow">' % (canonical_name, CFG_SITE_URL))
h('<input type="hidden" name="set_canonical_name" value="True" />')
h('<input name="canonical_name" id="canonical_name" type="text" style="border:1px solid #333; width:500px;" value="%s" /> ' % canonical_name)
h('<input type="hidden" name="pid" value="%s" />' % person_id)
h('<input type="submit" value="set canonical name" class="aid_btn_blue" />')
        h('<br>NOTE: a number is appended automatically to the name displayed above. It cannot be set manually, so as to ensure the uniqueness of IDs.')
        h('To change a number greater than one, change all the other names first; updating this one will then do the trick. </div>')
h('</form> </div></div>')
userid = get_uid_from_personid(person_id)
h('<div> <br>')
h('<strong> Internal IDs </strong> <br>')
if userid:
email = get_email(int(userid))
h('UserID: INSPIRE user %s is associated with this profile with email: %s' % (str(userid), str(email)))
else:
h('UserID: There is no INSPIRE user associated to this profile!')
h('<br></div>')
external_ids = get_personiID_external_ids(person_id)
h('<div> <br>')
h('<strong> External IDs </strong> <br>')
h('<form method="GET" action="%s/person/action" rel="nofollow">' % (CFG_SITE_URL) )
h('<input type="hidden" name="add_missing_external_ids" value="True">')
h('<input type="hidden" name="pid" value="%s">' % person_id)
h('<br> <input type="submit" value="add missing ids" class="aid_btn_blue"> </form>')
h('<form method="GET" action="%s/person/action" rel="nofollow">' % (CFG_SITE_URL) )
h('<input type="hidden" name="rewrite_all_external_ids" value="True">')
h('<input type="hidden" name="pid" value="%s">' % person_id)
h('<br> <input type="submit" value="rewrite all ids" class="aid_btn_blue"> </form> <br>')
if external_ids:
h('<form method="GET" action="%s/person/action" rel="nofollow">' % (CFG_SITE_URL) )
h(' <input type="hidden" name="delete_external_ids" value="True">')
h(' <input type="hidden" name="pid" value="%s">' % person_id)
for idx in external_ids:
try:
sys = [s for s in EXTERNAL_SYSTEMS_LIST if EXTERNAL_SYSTEMS_LIST[s] == idx][0]
except (IndexError):
sys = ''
for k in external_ids[idx]:
h('<br> <input type="checkbox" name="existing_ext_ids" value="%s||%s"> <strong> %s: </strong> %s' % (idx, k, sys, k))
h(' <br> <br> <input type="submit" value="delete selected ids" class="aid_btn_blue"> <br> </form>')
else:
h('UserID: There are no external users associated to this profile!')
h('<br> <br>')
h('<form method="GET" action="%s/person/action" rel="nofollow">' % (CFG_SITE_URL) )
h(' <input type="hidden" name="add_external_id" value="True">')
h(' <input type="hidden" name="pid" value="%s">' % person_id)
h(' <select name="ext_system">')
h(' <option value="" selected>-- ' + self._('Choose system') + ' --</option>')
for el in EXTERNAL_SYSTEMS_LIST:
h(' <option value="%s"> %s </option>' % (EXTERNAL_SYSTEMS_LIST[el], el))
h(' </select>')
h(' <input type="text" name="ext_id" id="ext_id" style="border:1px solid #333; width:350px;">')
h(' <input type="submit" value="add external id" class="aid_btn_blue">')
# h('<br>NOTE: please note that if you add an external id it will replace the previous one (if any).')
h('<br> </form> </div>')
h('</div> </div>')
h('</div>')
return "\n".join(html)
def tmpl_bibref_check(self, bibrefs_auto_assigned, bibrefs_to_confirm):
'''
Generate overview to let user chose the name on the paper that
resembles the person in question.
@param bibrefs_auto_assigned: list of auto-assigned papers
@type bibrefs_auto_assigned: list
@param bibrefs_to_confirm: list of unclear papers and names
@type bibrefs_to_confirm: list
'''
html = []
h = html.append
h('<form id="review" action="/person/action" method="post">')
h('<p><strong>' + self._("Make sure we match the right names!")
+ '</strong></p>')
h('<p>' + self._('Please select an author on each of the records that will be assigned.') + '<br/>')
h(self._('Papers without a name selected will be ignored in the process.'))
h('</p>')
for person in bibrefs_to_confirm:
if not "bibrecs" in bibrefs_to_confirm[person]:
continue
person_name = bibrefs_to_confirm[person]["person_name"]
if person_name.isspace():
h((self._('Claim for person with id') + ': %s. ') % person)
h(self._('This seems to be an empty profile without names associated to it yet'))
h(self._('(the names will be automatically gathered when the first paper is claimed to this profile).'))
else:
h((self._("Select name for") + " %s") % (person_name))
pid = person
for recid in bibrefs_to_confirm[person]["bibrecs"]:
h('<div id="aid_moreinfo">')
try:
fv = get_fieldvalues(int(recid), "245__a")[0]
except (ValueError, IndexError, TypeError):
fv = self._('Error retrieving record title')
fv = escape_html(fv)
h(self._("Paper title: ") + fv)
h('<select name="bibrecgroup%s">' % (recid))
h('<option value="" selected>-- Choose author name --</option>')
for bibref in bibrefs_to_confirm[person]["bibrecs"][recid]:
h('<option value="%s||%s">%s</option>'
% (pid, bibref[0], bibref[1]))
h('</select>')
h("</div>")
if bibrefs_auto_assigned:
h(self._('The following names have been automatically chosen:'))
for person in bibrefs_auto_assigned:
if not "bibrecs" in bibrefs_auto_assigned[person]:
continue
h((self._("For") + " %s:") % bibrefs_auto_assigned[person]["person_name"])
pid = person
for recid in bibrefs_auto_assigned[person]["bibrecs"]:
try:
fv = get_fieldvalues(int(recid), "245__a")[0]
except (ValueError, IndexError, TypeError):
fv = self._('Error retrieving record title')
fv = escape_html(fv)
h('<div id="aid_moreinfo">')
h(('%s' + self._(' -- With name: ')) % (fv) )
#, bibrefs_auto_assigned[person]["bibrecs"][recid][0][1]))
# asbibref = "%s||%s" % (person, bibrefs_auto_assigned[person]["bibrecs"][recid][0][0])
pbibref = bibrefs_auto_assigned[person]["bibrecs"][recid][0][0]
h('<select name="bibrecgroup%s">' % (recid))
h('<option value="" selected>-- ' + self._('Ignore') + ' --</option>')
for bibref in bibrefs_auto_assigned[person]["bibrecs"][recid]:
selector = ""
if bibref[0] == pbibref:
selector = ' selected="selected"'
h('<option value="%s||%s"%s>%s</option>'
% (pid, bibref[0], selector, bibref[1]))
h('</select>')
# h('<input type="hidden" name="bibrecgroup%s" value="%s" />'
# % (recid, asbibref))
h('</div>')
h('<div style="text-align:center;">')
h(' <input type="submit" class="aid_btn_green" name="bibref_check_submit" value="Accept" />')
h(' <input type="submit" class="aid_btn_blue" name="cancel_stage" value="Cancel" />')
h("</div>")
h('</form>')
return "\n".join(html)
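    # Shape sketch of the inputs expected by tmpl_bibref_check above, with
    # hypothetical values: both dicts map a person id to a dict holding the
    # person's name and, per record id, the candidate (bibref, name) pairs.
    #
    # bibrefs_to_confirm = {
    #     7: {'person_name': 'Doe, J.',
    #         'bibrecs': {123: [('100:17,123', 'Doe, John'),
    #                           ('700:4,123', 'Doe, J.')]}},
    # }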
def tmpl_invenio_search_box(self):
'''
        Generate a little search box for missing papers. Links to the main
        Invenio search on the start page.
'''
html = []
h = html.append
h('<div style="margin-top: 15px;"> <strong>Search for missing papers:</strong> <form method="GET" action="%s/search">' % CFG_SITE_URL)
h('<input name="p" id="p" type="text" style="border:1px solid #333; width:500px;" /> ')
h('<input type="submit" name="action_search" value="search" '
'class="aid_btn_blue" />')
h('</form> </div>')
return "\n".join(html)
def tmpl_person_menu(self):
'''
Generate the menu bar
'''
html = []
h = html.append
h('<div id="aid_menu">')
h(' <ul>')
h(' <li>' + self._('Navigation:') + '</li>')
h((' <li><a rel="nofollow" href="%s/person/search">' + self._('Run paper attribution for another author') + '</a></li>') % CFG_SITE_URL)
h(' <!--<li><a rel="nofollow" href="#">' + self._('Person Interface FAQ') + '</a></li>!-->')
h(' </ul>')
h('</div>')
return "\n".join(html)
def tmpl_person_menu_admin(self):
'''
Generate the menu bar
'''
html = []
h = html.append
h('<div id="aid_menu">')
h(' <ul>')
h(' <li>' + self._('Navigation:') + '</li>')
h((' <li><a rel="nofollow" href="%s/person/search">' + self._('Person Search') + '</a></li>') % CFG_SITE_URL)
h((' <li><a rel="nofollow" href="%s/person/tickets_admin">' + self._('Open tickets') + '</a></li>') % CFG_SITE_URL)
h(' <!--<li><a rel="nofollow" href="#">' + self._('Person Interface FAQ') + '</a></li>!-->')
h(' </ul>')
h('</div>')
return "\n".join(html)
def tmpl_ticket_final_review(self, req, mark_yours=[], mark_not_yours=[],
mark_theirs=[], mark_not_theirs=[]):
'''
Generate final review page. Displaying transactions if they
need confirmation.
@param req: Apache request object
@type req: Apache request object
@param mark_yours: papers marked as 'yours'
@type mark_yours: list
@param mark_not_yours: papers marked as 'not yours'
@type mark_not_yours: list
@param mark_theirs: papers marked as being someone else's
@type mark_theirs: list
@param mark_not_theirs: papers marked as NOT being someone else's
@type mark_not_theirs: list
'''
def html_icon_legend():
html = []
h = html.append
h('<div id="legend">')
h("<p>")
h(self._("Symbols legend: "))
h("</p>")
h('<span style="margin-left:25px; vertical-align:middle;">')
h('<img src="%s/img/aid_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Everything is shiny, captain!")))
h(self._('The result of this request will be visible immediately'))
h('</span><br />')
h('<span style="margin-left:25px; vertical-align:middle;">')
h('<img src="%s/img/aid_warning_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Confirmation needed to continue")))
h(self._('The result of this request will be visible immediately but we need your confirmation to do so for this paper has been manually claimed before'))
h('</span><br />')
h('<span style="margin-left:25px; vertical-align:middle;">')
h('<img src="%s/img/aid_denied.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("This will create a change request for the operators")))
h(self._("The result of this request will be visible upon confirmation through an operator"))
h("</span>")
h("</div>")
return "\n".join(html)
def mk_ticket_row(ticket):
recid = -1
rectitle = ""
recauthor = "No Name Found."
personname = "No Name Found."
try:
recid = ticket['bibref'].split(",")[1]
except (ValueError, KeyError, IndexError):
return ""
try:
rectitle = get_fieldvalues(int(recid), "245__a")[0]
except (ValueError, IndexError, TypeError):
rectitle = self._('Error retrieving record title')
rectitle = escape_html(rectitle)
if "authorname_rec" in ticket:
recauthor = ticket['authorname_rec']
if "person_name" in ticket:
personname = ticket['person_name']
html = []
h = html.append
# h("Debug: " + str(ticket) + "<br />")
h('<td width="25"> </td>')
h('<td>')
h(rectitle)
h('</td>')
h('<td>')
h((personname + " (" + self._("Selected name on paper") + ": %s)") % recauthor)
h('</td>')
h('<td>')
if ticket['status'] == "granted":
h('<img src="%s/img/aid_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Everything is shiny, captain!")))
elif ticket['status'] == "warning_granted":
h('<img src="%s/img/aid_warning_granted.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("Verification needed to continue")))
else:
h('<img src="%s/img/aid_denied.png" '
'alt="%s" width="30" height="30" />'
% (CFG_SITE_URL, self._("This will create a request for the operators")))
h('</td>')
h('<td>')
h('<a rel="nofollow" href="%s/person/action?checkout_remove_transaction=%s ">'
'Cancel'
'</a>' % (CFG_SITE_URL, ticket['bibref']))
h('</td>')
return "\n".join(html)
pinfo = session["personinfo"]
ulevel = pinfo["ulevel"]
html = []
h = html.append
# h(html_icon_legend())
if "checkout_faulty_fields" in pinfo and pinfo["checkout_faulty_fields"]:
h(self.tmpl_error_box('sorry', 'check_entries'))
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "tickets" in pinfo["checkout_faulty_fields"]):
h(self.tmpl_error_box('error', 'provide_transaction'))
# h('<div id="aid_checkout_teaser">' +
# self._('Almost done! Please use the button "Confirm these changes" '
# 'at the end of the page to send this request to an operator '
# 'for review!') + '</div>')
h('<div id="aid_person_names" '
'class="ui-tabs ui-widget ui-widget-content ui-corner-all"'
'style="padding:10px;">')
h("<h4>" + self._('Please provide your information') + "</h4>")
h('<form id="final_review" action="%s/person/action" method="post">'
% (CFG_SITE_URL))
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_first_name" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" + self._('Please provide your first name') + "</p>")
h("<p>")
if "user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]:
h((self._("Your first name:") + " %s") % pinfo["user_first_name"])
else:
h(self._('Your first name:') + ' <input type="text" name="user_first_name" value="%s" />'
% pinfo["user_first_name"])
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_last_name" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" + self._('Please provide your last name') + "</p>")
h("</p><p>")
if "user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]:
h((self._("Your last name:") + " %s") % pinfo["user_last_name"])
else:
h(self._('Your last name:') + ' <input type="text" name="user_last_name" value="%s" />'
% pinfo["user_last_name"])
h("</p>")
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_email" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" + self._('Please provide your eMail address') + "</p>")
if ("checkout_faulty_fields" in pinfo
and pinfo["checkout_faulty_fields"]
and "user_email_taken" in pinfo["checkout_faulty_fields"]):
h("<p class='aid_error_line'>" +
self._('This eMail address is reserved by a user. Please log in or provide an alternative eMail address')
+ "</p>")
h("<p>")
if "user_email_sys" in pinfo and pinfo["user_email_sys"]:
h((self._("Your eMail:") + " %s") % pinfo["user_email"])
else:
h((self._('Your eMail:') + ' <input type="text" name="user_email" value="%s" />')
% pinfo["user_email"])
h("</p><p>")
h(self._("You may leave a comment (optional)") + ":<br>")
h('<textarea name="user_comments">')
if "user_ticket_comments" in pinfo:
h(pinfo["user_ticket_comments"])
h("</textarea>")
h("</p>")
h("<p> </p>")
h('<div style="text-align: center;">')
h((' <input type="submit" name="checkout_continue_claiming" class="aid_btn_green" value="%s" />')
% self._("Continue claiming*"))
h((' <input type="submit" name="checkout_submit" class="aid_btn_green" value="%s" />')
% self._("Confirm these changes**"))
h('<span style="margin-left:150px;">')
h((' <input type="submit" name="cancel" class="aid_btn_red" value="%s" />')
% self._("!Delete the entire request!"))
h('</span>')
h('</div>')
h("</form>")
h('</div>')
h('<div id="aid_person_names" '
'class="ui-tabs ui-widget ui-widget-content ui-corner-all"'
'style="padding:10px;">')
h('<table width="100%" border="0" cellspacing="0" cellpadding="4">')
if not ulevel == "guest":
h('<tr>')
h("<td colspan='5'><h4>" + self._('Mark as your documents') + "</h4></td>")
h('</tr>')
if mark_yours:
for idx, ticket in enumerate(mark_yours):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">Nothing staged as yours</td>')
h("</tr>")
h('<tr>')
h("<td colspan='5'><h4>" + self._("Mark as _not_ your documents") + "</h4></td>")
h('</tr>')
if mark_not_yours:
for idx, ticket in enumerate(mark_not_yours):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">' + self._('Nothing staged as not yours') + '</td>')
h("</tr>")
h('<tr>')
h("<td colspan='5'><h4>" + self._('Mark as their documents') + "</h4></td>")
h('</tr>')
if mark_theirs:
for idx, ticket in enumerate(mark_theirs):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">' + self._('Nothing staged in this category') + '</td>')
h("</tr>")
h('<tr>')
h("<td colspan='5'><h4>" + self._('Mark as _not_ their documents') + "</h4></td>")
h('</tr>')
if mark_not_theirs:
for idx, ticket in enumerate(mark_not_theirs):
h('<tr id="aid_result%s">' % ((idx + 1) % 2))
h(mk_ticket_row(ticket))
h('</tr>')
else:
h('<tr>')
h('<td width="25"> </td>')
h('<td colspan="4">' + self._('Nothing staged in this category') + '</td>')
h("</tr>")
h('</table>')
h("</div>")
h("<p>")
h(self._(" * You can come back to this page later. Nothing will be lost. <br />"))
h(self._(" ** Performs all requested changes. Changes subject to permission restrictions "
"will be submitted to an operator for manual review."))
h("</p>")
h(html_icon_legend())
return "\n".join(html)
def tmpl_author_search(self, query, results,
search_ticket=None, author_pages_mode=True,
fallback_mode=False, fallback_title='',
fallback_message='', new_person_link=False):
'''
Generates the search for Person entities.
@param query: the query a user issued to the search
@type query: string
@param results: list of results
@type results: list
@param search_ticket: search ticket object to inform about pending
claiming procedure
@type search_ticket: dict
'''
linktarget = "person"
if author_pages_mode:
linktarget = "author"
if not query:
query = ""
html = []
h = html.append
h('<form id="searchform" action="/person/search" method="GET">')
        h('Find author clusters by name, e.g. <i>Ellis, J</i>: <br>')
        h('<input placeholder="Search for a name, e.g. Ellis, J" type="text" name="q" style="border:1px solid #333; width:500px;" '
'maxlength="250" value="%s" class="focus" />' % query)
h('<input type="submit" value="Search" />')
h('</form>')
if fallback_mode:
if fallback_title:
h('<div id="header">%s</div>' % fallback_title)
if fallback_message:
h('%s' % fallback_message)
if not results and not query:
h('</div>')
return "\n".join(html)
h("<p> </p>")
if query and not results:
authemail = CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL
h('<strong>' + self._("We do not have a publication list for '%(x_name)s'." +
" Try using a less specific author name, or check" +
" back in a few days as attributions are updated " +
"frequently. Or you can send us feedback, at " +
"<a rel='nofollow' href=\"mailto:%(x_mail)s\">%(x_email)s</a>.</strong>", x_name=query, x_mail=authemail, x_email=authemail))
h('</div>')
if new_person_link:
link = "%s/person/action?confirm=True&pid=%s" % (CFG_SITE_URL, '-3')
if search_ticket:
for r in search_ticket['bibrefs']:
link = link + '&selection=%s' % str(r)
h('<div>')
h('<a rel="nofollow" href="%s">' % (link))
h(self._("Create a new Person for your search"))
h('</a>')
h('</div>')
return "\n".join(html)
# base_color = 100
# row_color = 0
for index, result in enumerate(results):
# if len(results) > base_color:
# row_color += 1
# else:
# row_color = base_color - (base_color - index *
# (base_color / len(results)))
pid = result[0]
names = result[1]
papers = result[2]
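            # result layout (inferred from the code below): (pid, names, papers[, total_paper_count]);
            # the optional count is handled by the IndexError fallback just below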
try:
total_papers = result[3]
if total_papers > 1:
papers_string = '(%s Papers)' % str(total_papers)
elif total_papers == 1:
if (len(papers) == 1 and
len(papers[0]) == 1 and
papers[0][0] == 'Not retrieved to increase performances.'):
papers_string = ''
else:
papers_string = '(1 Paper)'
else:
papers_string = '(No papers)'
except IndexError:
papers_string = ''
h('<div id="aid_result%s">' % (index % 2))
h('<div style="padding-bottom:5px;">')
# h('<span style="color:rgb(%d,%d,%d);">%s. </span>'
# % (row_color, row_color, row_color, index + 1))
h('<span>%s. </span>' % (index + 1))
# for nindex, name in enumerate(names):
# color = row_color + nindex * 35
# color = min(color, base_color)
# h('<span style="color:rgb(%d,%d,%d);">%s; </span>'
# % (color, color, color, name[0]))
for name in names:
h('<span style="margin-right:20px;">%s </span>'
% (name[0]))
h('</div>')
h('<em style="padding-left:1.5em;">')
if index < bconfig.PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT:
h(('<a rel="nofollow" href="#" id="aid_moreinfolink" class="mpid%s">'
'<img src="../img/aid_plus_16.png" '
'alt = "toggle additional information." '
'width="11" height="11"/> '
+ self._('Recent Papers') +
'</a></em>')
% (pid))
else:
h("</em>")
if search_ticket:
link = "%s/person/action?confirm=True&pid=%s" % (CFG_SITE_URL, pid)
for r in search_ticket['bibrefs']:
link = link + '&selection=%s' % str(r)
h(('<span style="margin-left: 120px;">'
'<em><a rel="nofollow" href="%s" id="confirmlink">'
'<strong>' + self._('YES!') + '</strong>'
+ self._(' Attribute Papers To ') +
'%s %s </a></em></span>')
% (link, get_person_redirect_link(pid), papers_string))
else:
h(('<span style="margin-left: 40px;">'
'<em><a rel="nofollow" href="%s/%s/%s" id="aid_moreinfolink">'
+ self._('Publication List ') + '(%s) %s </a></em></span>')
% (CFG_SITE_URL, linktarget,
get_person_redirect_link(pid),
get_person_redirect_link(pid), papers_string))
h('<div class="more-mpid%s" id="aid_moreinfo">' % (pid))
if papers and index < bconfig.PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT:
h((self._('Showing the') + ' %d ' + self._('most recent documents:')) % len(papers))
h("<ul>")
for paper in papers:
h("<li>%s</li>"
% (format_record(int(paper[0]), "ha")))
h("</ul>")
elif not papers:
h("<p>" + self._('Sorry, there are no documents known for this person') + "</p>")
elif index >= bconfig.PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT:
h("<p>" + self._('Information not shown to increase performances. Please refine your search.') + "</p>")
h(('<span style="margin-left: 40px;">'
'<em><a rel="nofollow" href="%s/%s/%s" target="_blank" id="aid_moreinfolink">'
+ self._('Publication List ') + '(%s)</a> (in a new window or tab)</em></span>')
% (CFG_SITE_URL, linktarget,
get_person_redirect_link(pid),
get_person_redirect_link(pid)))
h('</div>')
h('</div>')
if new_person_link:
link = "%s/person/action?confirm=True&pid=%s" % (CFG_SITE_URL, '-3')
if search_ticket:
for r in search_ticket['bibrefs']:
link = link + '&selection=%s' % str(r)
h('<div>')
h('<a rel="nofollow" href="%s">' % (link))
h(self._("Create a new Person for your search"))
h('</a>')
h('</div>')
return "\n".join(html)
def tmpl_welcome_start(self):
'''
Shadows the behaviour of tmpl_search_pagestart
'''
return '<div class="pagebody"><div class="pagebodystripemiddle">'
def tmpl_welcome_arxiv(self):
'''
SSO landing/welcome page.
'''
html = []
h = html.append
        h('<p><b>Congratulations! You have now successfully connected to INSPIRE via arXiv.org!</b></p>')
h('<p>Right now, you can verify your'
' publication records, which will help us to produce better publication lists and'
' citation statistics.'
'</p>')
        h('<p>We are currently importing your publication list from arXiv.org. '
'When we\'re done, you\'ll see a link to verify your'
' publications below; please claim the papers that are yours '
' and remove the ones that are not. This information will be automatically processed'
' or be sent to our operator for approval if needed, usually within 24'
' hours.'
'</p>')
h('If you have '
'any questions or encounter any problems please contact us here: '
'<a rel="nofollow" href="mailto:%s">%s</a></p>'
% (CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL))
return "\n".join(html)
def tmpl_welcome(self):
'''
SSO landing/welcome page.
'''
html = []
h = html.append
        h('<p><b>Congratulations! You have successfully logged in!</b></p>')
h('<p>We are currently creating your publication list. When we\'re done, you\'ll see a link to correct your '
'publications below.</p>')
h('<p>When the link appears we invite you to confirm the papers that are '
'yours and to reject the ones that you are not author of. If you have '
'any questions or encounter any problems please contact us here: '
'<a rel="nofollow" href="mailto:%s">%s</a></p>'
% (CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL))
return "\n".join(html)
def tmpl_claim_profile(self):
'''
claim profile
'''
html = []
h = html.append
h('<p>Unfortunately it was not possible to automatically match your arXiv account to an INSPIRE person profile. Please choose the correct person profile from the list below.')
h('If your profile is not in the list or none of them represents you correctly, please select the one which fits you best or choose '
'to create a new one; keep in mind that no matter what your choice is, you will be able to correct your publication list until it contains all of your publications.'
          ' If you have any questions, please do not hesitate to contact us at <a rel="nofollow" href="mailto:%s">%s</a></p>' % (CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL,
CFG_BIBAUTHORID_AUTHOR_TICKET_ADMIN_EMAIL))
return "\n".join(html)
def tmpl_profile_option(self, top5_list):
'''
show profile option
'''
html = []
h = html.append
h('<table border="0"> <tr>')
for pid in top5_list:
pid = int(pid)
canonical_id = get_canonical_id_from_personid(pid)
full_name = get_person_names_from_id(pid)
            name_length = 0
            most_common_name = ""
            for name in full_name:
                # keep the longest name; name_length must be updated here, otherwise
                # the loop would merely keep the last non-empty name
                if len(name[0]) > name_length:
                    name_length = len(name[0])
                    most_common_name = name[0]
if len(full_name) > 0:
name_string = most_common_name
else:
name_string = "[No name available] "
if len(canonical_id) > 0:
canonical_name_string = "(" + canonical_id[0][0] + ")"
canonical_id = canonical_id[0][0]
else:
canonical_name_string = "(" + pid + ")"
canonical_id = pid
h('<td>')
h('%s ' % (name_string))
h('<a href="%s/author/%s" target="_blank"> %s </a>' % (CFG_SITE_URL, canonical_id, canonical_name_string))
h('</td>')
h('<td>')
h('<INPUT TYPE="BUTTON" VALUE="This is my profile" ONCLICK="window.location.href=\'welcome?chosen_profile=%s\'">' % (str(pid)))
h('</td>')
h('</tr>')
h('</table>')
        h('<br />')
if top5_list:
            h('If none of the above is your profile, it seems that you cannot be matched to any of the existing accounts.<br />Would you like to create one?')
h('<INPUT TYPE="BUTTON" VALUE="Create an account" ONCLICK="window.location.href=\'welcome?chosen_profile=%s\'">' % (str(-1)))
else:
            h('It seems that you cannot be matched to any of the existing accounts.<br />Would you like to create one?')
h('<INPUT TYPE="BUTTON" VALUE="Create an account" ONCLICK="window.location.href=\'welcome?chosen_profile=%s\'">' % (str(-1)))
return "\n".join(html)
def tmpl_profile_not_available(self):
'''
        show profile-not-available message
'''
html = []
h = html.append
h('<p> Unfortunately the profile that you previously chose is no longer available. A new empty profile has been created. You will be able to correct '
'your publication list until it contains all of your publications.</p>')
return "\n".join(html)
    def tmpl_profile_assigned_by_user(self):
html = []
h = html.append
        h('<p> Congratulations, you have successfully claimed the chosen profile.</p>')
return "\n".join(html)
def tmpl_claim_stub(self, person='-1'):
'''
claim stub page
'''
html = []
h = html.append
h(' <ul><li><a rel="nofollow" href=%s> Login through arXiv.org </a> <small>' % bconfig.BIBAUTHORID_CFG_INSPIRE_LOGIN)
h(' - Use this option if you have an arXiv account and have claimed your papers in arXiv.')
h('(If you login through arXiv.org, INSPIRE will immediately verify you as an author and process your claimed papers.) </small><br><br>')
h(' <li><a rel="nofollow" href=%s/person/%s?open_claim=True> Continue as a guest </a> <small>' % (CFG_SITE_URL, person))
h(' - Use this option if you DON\'T have an arXiv account, or you have not claimed any paper in arXiv.')
h('(If you login as a guest, INSPIRE will need to confirm you as an author before processing your claimed papers.) </small><br><br>')
h('If you login through arXiv.org we can verify that you are the author of these papers and accept your claims rapidly, '
'as well as adding additional claims from arXiv. <br>If you choose not to login via arXiv your changes will '
          'be publicly visible only after our editors check and confirm them, usually within a few days.<br> '
'Either way, claims made on behalf of another author will go through our staff and may take longer to display. '
'This applies as well to papers which have been previously claimed, by yourself or someone else.')
return "\n".join(html)
def tmpl_welcome_link(self):
'''
Creates the link for the actual user action.
'''
return '<a rel="nofollow" href=action?checkout=True><b>' + \
self._('Correct my publication lists!') + \
'</b></a>'
def tmpl_welcome_personid_association(self, pid):
"""
"""
canon_name = get_canonical_id_from_personid(pid)
head = "<br>"
if canon_name:
body = ("Your arXiv.org account is associated "
"with person %s." % canon_name[0][0])
else:
body = ("Warning: your arXiv.org account is associated with an empty profile. "
"This can happen if it is the first time you log in and you do not have any "
"paper directly claimed in arXiv.org."
" In this case, you are welcome to search and claim your papers to your"
" new profile manually, or please contact us to get help.")
body += ("<br>You are very welcome to contact us shall you need any help or explanation"
" about the management of"
" your profile page"
" in INSPIRE and it's connections with arXiv.org: "
'''<a href="mailto:[email protected]?subject=Help on arXiv.org SSO login and paper claiming"> [email protected] </a>''')
tail = "<br>"
return head + body + tail
def tmpl_welcome_arXiv_papers(self, paps):
'''
Creates the list of arXiv papers
'''
plist = "<br><br>"
if paps:
plist = plist + "We have got and we are about to automatically claim for You the following papers from arXiv.org: <br>"
for p in paps:
plist = plist + " " + str(p) + "<br>"
else:
plist = "We have got no papers from arXiv.org which we could claim automatically for You. <br>"
return plist
def tmpl_welcome_end(self):
'''
Shadows the behaviour of tmpl_search_pageend
'''
return '</div></div>'
def tmpl_tickets_admin(self, tickets=[]):
'''
Open tickets short overview for operators.
'''
html = []
h = html.append
if len(tickets) > 0:
h('List of open tickets: <br><br>')
for t in tickets:
h('<a rel="nofollow" href=%(cname)s#tabTickets> %(longname)s - (%(cname)s - PersonID: %(pid)s): %(num)s open tickets. </a><br>'
% ({'cname':str(t[1]), 'longname':str(t[0]), 'pid':str(t[2]), 'num':str(t[3])}))
else:
h('There are currently no open tickets.')
return "\n".join(html)
# pylint: enable=C0301
|
PXke/invenio
|
invenio/legacy/bibauthorid/templates.py
|
Python
|
gpl-2.0
| 91,185
|
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2006-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
"""This module is used to parse a supervision graph Salome (XML) and convert it into
YACS calculation schema
This parsing is done with SalomeLoader class and its method load.
"""
import sys,os
try:
import cElementTree as ElementTree
except ImportError:
import ElementTree
#from sets import Set
Set=set
import graph
import pilot
import SALOMERuntime
class UnknownKind(Exception):pass
#global variables
debug=0
typeMap={}
objref=None
_containers={}
currentProc=None
def typeName(name):
"""Replace :: in type name by /"""
return "/".join(name.split("::"))
streamTypes={
'0':"Unknown",
'1':"CALCIUM_integer",
'3':"CALCIUM_real",
}
class SalomeLoader:
"""This class parses a Salome graph (version 3.2.x) and converts it into YACS schema.
    The loadxml method parses an XML file and returns a list of SalomeProc objects.
    The load method calls loadxml and creates a YACS object of class Proc.
"""
def loadxml(self,filename):
"""
Parse a XML file from Salome SUPERV and return a list of SalomeProc objects.
"""
tree = ElementTree.ElementTree(file=filename)
root = tree.getroot()
if debug:print "root.tag:",root.tag,root
procs=[]
if root.tag == "dataflow":
#only one dataflow
dataflow=root
if debug:print dataflow
proc=SalomeProc(dataflow)
procs.append(proc)
else:
#one or more dataflows. The graph contains macros.
#All macros are defined at the same level in the XML file.
for dataflow in root.findall("dataflow"):
if debug:print dataflow
proc=SalomeProc(dataflow)
if debug:print "dataflow name:",proc.name
procs.append(proc)
return procs
def load(self,filename):
"""Parse a SUPERV XML file (method loadxml) and return a YACS Proc object.
"""
global typeMap,_containers,objref,currentProc
typeMap.clear()
objref=None
_containers.clear()
currentProc=None
procs=self.loadxml(filename)
#Split the master proc from the possible macros.
proc=procs.pop(0)
#proc.display()
#Put macros in macro_dict
macro_dict={}
for p in procs:
if debug:print "proc_name:",p.name,"coupled_node:",p.coupled_node
macro_dict[p.name]=p
if debug:print filename
yacsproc=ProcNode(proc,macro_dict,filename)
return yacsproc.createNode()
class Container:
"""Class that defines a Salome Container"""
def __init__(self,mach,name):
self.mach=mach
self.name=name
self.components={}
def getName(self):
return self.mach+"/"+self.name
def getContainer(name):
if not name:
name="localhost/FactoryServer"
elif "/" not in name:
#no machine name: use localhost
name="localhost/"+name
return _containers.get(name)
def addContainer(name):
if not name:
mach="localhost"
name="FactoryServer"
elif "/" not in name:
#no machine name: use localhost for mach
mach="localhost"
else:
mach,name=name.split("/")
c=Container(mach,name)
_containers[mach+"/"+name]=c
return c
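# e.g. getContainer("FactoryServer") looks up "localhost/FactoryServer", while
# addContainer("remote/MyServer") registers Container(mach="remote", name="MyServer")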
class Service:
"""Class for Service properties"""
class Parameter:
"""Class for Parameter properties"""
class Link:
"""Class for Link properties"""
class Data:
"""Class for Data properties"""
class Node:
"""Base class for all nodes """
label="Node: "
def __init__(self):
self.links=[] # list to store inputs as links
# a link has two attributes : from_node, the starting node
# to_node, the end node
self.datas=[]
self.inStreamLinks=[] #list of dataStream links connected to this node (in)
self.outStreamLinks=[] #list of dataStream links connected to this node (out)
self.node=None
def createNode(self):
raise NotImplementedError
def getInputPort(self,p):
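        # Salome flattens nested port names with "__"; convert back to the YACS
        # dotted form, e.g. "a__b" -> "a.b" (a plain name like "x" is unchanged)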
return self.node.getInputPort(".".join(p.split("__")))
def getOutputPort(self,p):
if not self.node:
self.createNode()
return self.node.getOutputPort(".".join(p.split("__")))
def getInputDataStreamPort(self,p):
return self.node.getInputDataStreamPort(p)
def getOutputDataStreamPort(self,p):
return self.node.getOutputDataStreamPort(p)
def initPort(self,l):
if l.type == 7:
#double (CORBA::tk_double)
try:
self.getInputPort(l.tonodeparam).edInitDbl(l.value)
except:
reason="Problem in initialization, not expected type (double): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
elif l.type == 3:
#int (CORBA::tk_long)
try:
self.getInputPort(l.tonodeparam).edInitInt(l.value)
except:
reason="Problem in initialization, not expected type (int): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
elif l.type == 14:
#objref (CORBA::tk_objref)
try:
self.getInputPort(l.tonodeparam).edInitString(l.value)
except:
reason="Problem in initialization, not expected type (objref): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
elif l.type == 18:
#string (CORBA::tk_string)
try:
self.getInputPort(l.tonodeparam).edInitString(l.value)
except:
reason="Problem in initialization, not expected type (string): %s %s" % (l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
else:
reason="Problem in initialization, not expected type (%s): %s %s" % (l.type,l.tonodeparam,l.value)
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
class InlineNode(Node):
"""Inline Node salome : python function in self.codes[0]"""
def __init__(self):
Node.__init__(self)
self.codes=[]
def createNode(self):
r = pilot.getRuntime()
if self.fnames[0] == "?":
n=r.createScriptNode("",self.name)
else:
n=r.createFuncNode("",self.name)
n.setFname(self.fnames[0])
n.setScript(self.codes[0])
self.node=n
for para in self.service.inParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
n.edAddInputPort(para.name,typeMap[para.type])
for para in self.service.outParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
n.edAddOutputPort(para.name,typeMap[para.type])
for d in self.datas:
self.initPort(d)
return n
class ComputeNode(Node):
"""Compute Node Salome execute a component service"""
def createNode(self):
if self.node:
return self.node
r = pilot.getRuntime()
if self.container.components.has_key(self.sComponent):
#a node for this component already exists
compo_node=self.container.components[self.sComponent]
#It's a node associated with another node of the same component instance
            #It is not certain that the YACS node has been created yet
master_node=compo_node.createNode()
n=master_node.createNode(self.name)
else:
#there is no node for this component. This node is first
self.container.components[self.sComponent]=self
#There is no component instance for this node
n=r.createCompoNode("",self.name)
n.setRef(self.sComponent)
n.setMethod(self.service.name)
self.node=n
#set the container for the node
if self.container:
n.getComponent().setContainer(currentProc.containerMap[self.container.getName()])
#add dataflow ports in out
for para in self.service.inParameters:
if not typeMap.has_key(para.type):
#Create the missing type and adds it into types table
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
n.edAddInputPort(para.name,typeMap[para.type])
for para in self.service.outParameters:
if not typeMap.has_key(para.type):
#Create the missing type and adds it into types table
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
pout=n.edAddOutputPort(para.name,typeMap[para.type])
#add datastream ports in and out
for para in self.inStreams:
if debug:print para.name,para.type,para.dependency,para.schema, para.interpolation,
if debug:print para.extrapolation
pin=n.edAddInputDataStreamPort(para.name,typeMap[streamTypes[para.type]])
for para in self.outStreams:
if debug:print para.name,para.type,para.dependency,para.values
pout=n.edAddOutputDataStreamPort(para.name,typeMap[streamTypes[para.type]])
for d in self.datas:
self.initPort(d)
return n
class ComposedNode(Node):
"""Composed Node Salome (base class)"""
def reduceLoop(self):
"""Transform a Salome graph with loops on one level
       into a hierarchical graph.
The initial graph is in self.G. It is transformed in place.
"""
G=self.G
if debug:graph.display(G)
#invert the graph
I=graph.invert(G)
#graph.display(I)
#Get all loops and their internal nodes
loops={}
for n in G:
if n.kind == 4:
#Beginning of loop
loops[n]=graph.reachable(G,n)&graph.reachable(I,n.endloop)
n.inner_nodes=loops[n]
n.G=graph.InducedSubgraph(loops[n],G)
if debug:print "all loops"
if debug:print loops
#Get most external loops
outer_loops=loops.keys()
for l in loops:
for ll in outer_loops:
if loops[l] < loops[ll]:
#internal loop
outer_loops.remove(l)
ll.set_inner(l)
break
#In the end all remaining loops in outer_loops are the most external
if debug:print outer_loops
#We remove all internal nodes of most external loops
for l in outer_loops:
#Remove internal nodes
for n in loops[l]:
del G[n]
#Remove endloop node
suiv=G[l.endloop]
del G[l.endloop]
#Replace neighbours of loop by those of endloop
G[l]= suiv
            #Try to transform incoming and outgoing links of endloop into incoming and
            #outgoing links of internal nodes. Probably not complete.
inputs={}
for link in l.endloop.links:
if debug:print link.from_node,link.to_node,link.from_param,link.to_param
inputs[link.to_param]=link.from_node,link.from_param
for s in suiv:
for link in s.links:
if link.from_node == l.endloop:
link.from_node,link.from_param=inputs[link.from_param]
if debug:print link.from_node,link.to_node,link.from_param,link.to_param
if debug:graph.display(G)
#Apply the reduction treatment to most external loops (recurse)
for l in outer_loops:
l.reduceLoop()
def connect_macros(self,macro_dict):
"""This method connects the salome macros in macro_dict to the master YACS Proc.
"""
if debug:print "connect_macros",self.node,macro_dict
for node in self.G:
if isinstance(node,MacroNode):
#node is a macro, connect its definition to self.
#p is the Salome macro (class SalomeProc)
#node is the Salome MacroNode that has the subgraph p
#node.node is the YACS Bloc equivalent to node
p=macro_dict[node.coupled_node]
bloc=node.node
if debug:print "macronode:",node.name,node.coupled_node,p
#Create a hierarchical graph from the salome graph
G=p.create_graph()
node.G=G
for n in G:
#create an equivalent YACS node from each salome node
nod=n.createNode()
bloc.edAddChild(nod)
#Connect macros to node
node.connect_macros(macro_dict)
#add control links
for n in G:
for v in G[n]:
bloc.edAddCFLink(n.node,v.node)
#add dataflow links and initializations
for n in G:
#dataflow links
for l in n.links:
bloc.edAddLink(l.from_node.getOutputPort(l.from_param),
l.to_node.getInputPort(l.to_param))
#datastream links
for l in n.outStreamLinks:
pout=l.from_node.getOutputDataStreamPort(l.from_param)
pin=l.to_node.getInputDataStreamPort(l.to_param)
bloc.edAddLink(pout,pin)
                    #initializations (delegated to Node.initPort, which implements
                    #exactly this per-type dispatch and error logging)
                    for l in n.datas:
                        n.initPort(l)
class LoopNode(ComposedNode):
"""Objet qui simule le comportement d'une boucle Salome."""
def __init__(self):
ComposedNode.__init__(self)
self.inner_loops=[]
#inner_nodes contains internal nodes as in Salome (on one level with endloop nodes)
self.inner_nodes=[]
def set_node(self,node):
self.node=node
def set_inner(self,loop):
for i in self.inner_loops:
if loop.inner_nodes < i.inner_nodes:
#the loop is contained in i
i.set_inner(loop)
break
self.inner_loops.append(loop)
def createNode(self):
"""Create the equivalent YACS loop and store it in attribute node
A Salome loop has n input ports and output ports with exactly same names.
The head of loop has 3 functions : init, next, more which have almost same
interface. init and next have same interface : on input, input loop parameters
on output, output loop parameters (same as input). more has one more output parameter
in first place. This parameter says if the loop must go on or not.
The endloop has a function with the same interface as next.
To transform this node, create a YACS Bloc. In this bloc put a node for the init function
and a While node. In the while put all internal nodes plus 2 nodes for the next and more
functions.
"""
r = pilot.getRuntime()
bloop=r.createBloc(self.name)
#init node
init=r.createFuncNode("","init")
#print self.codes[0]
init.setScript(self.codes[0])
init.setFname(self.fnames[0])
for para in self.service.inParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
init.edAddInputPort(para.name,typeMap[para.type])
for para in self.service.outParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
init.edAddOutputPort(para.name,typeMap[para.type])
bloop.edAddChild(init)
self.init=init
wh=r.createWhileLoop(self.name)
bloop.edAddChild(wh)
blnode=r.createBloc(self.name)
wh.edSetNode(blnode)
cport=wh.edGetConditionPort()
cport.edInitBool(True)
#next node
next=r.createFuncNode("","next")
#print self.codes[2]
next.setScript(self.codes[2])
next.setFname(self.fnames[2])
for para in self.service.inParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
next.edAddInputPort(para.name,typeMap[para.type])
for para in self.service.outParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
next.edAddOutputPort(para.name,typeMap[para.type])
blnode.edAddChild(next)
self.next=next
#more node
more=r.createFuncNode("","more")
#print self.codes[1]
more.setScript(self.codes[1])
more.setFname(self.fnames[1])
for para in self.service.inParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
more.edAddInputPort(para.name,typeMap[para.type])
more.edAddOutputPort("DoLoop",typeMap["int"])
for para in self.service.outParameters:
if not typeMap.has_key(para.type):
#create the missing type and add it in type map
typeMap[para.type]= currentProc.createInterfaceTc("",para.type,[objref])
if not currentProc.typeMap.has_key(para.type):
currentProc.typeMap[para.type]=typeMap[para.type]
more.edAddOutputPort(para.name,typeMap[para.type])
blnode.edAddChild(more)
self.more=more
for para in self.service.outParameters:
bloop.edAddDFLink(init.getOutputPort(para.name),next.getInputPort(para.name))
for para in self.service.outParameters:
blnode.edAddDFLink(next.getOutputPort(para.name),more.getInputPort(para.name))
wh.edAddLink(more.getOutputPort("DoLoop"),wh.getInputPort("condition"))
for para in self.service.outParameters:
wh.edAddLink(more.getOutputPort(para.name),next.getInputPort(para.name))
self.node=bloop
for n in self.G:
node=n.createNode()
blnode.edAddChild(node)
for n in self.G:
for v in self.G[n]:
blnode.edAddCFLink(n.node,v.node)
for n in self.G:
for l in n.links:
try:
blnode.edAddDFLink(l.from_node.getOutputPort(l.from_param),
l.to_node.getInputPort(l.to_param))
except:
reason="Error while connecting output port: "+l.from_param+" from node: "+l.from_node.name
reason=reason+" to input port: "+l.to_param+" from node: "+l.to_node.name
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
return bloop
def getInputPort(self,p):
return self.init.getInputPort(p)
def getOutputPort(self,p):
return self.more.getOutputPort(p)
class Bloc(ComposedNode):
""" Composed node containing a set of connected nodes
"""
label="Bloc: "
def __init__(self):
Node.__init__(self)
self.nodes=[]
def addLink(self,node1,node2):
if node1 not in self.nodes:self.nodes.append(node1)
if node2 not in self.nodes:self.nodes.append(node2)
class MacroNode(Bloc):
"""Objet that represents a Salome Macro
"""
def createNode(self):
"""Create a YACS node (Bloc) equivalent to a Salome Macro """
r = pilot.getRuntime()
macro=r.createBloc(self.name)
self.node=macro
return macro
def is_loop(n):
"""Return true if n is a head loop node"""
return isinstance(n,LoopNode)
class ProcNode(ComposedNode):
"""Salome proc with its macros
The Salome proc is stored in attribute proc
The Salome macros are stored in attribute macro_dict ({})
"""
def __init__(self,proc,macro_dict,filename):
ComposedNode.__init__(self)
self.proc=proc
self.macro_dict=macro_dict
self.filename=filename
def createNode(self):
"""Create the YACS node (Proc) equivalent a Salome proc"""
global currentProc,objref
r = pilot.getRuntime()
#create_graph gives a hierarchical graph equivalent to the Salome proc
G=self.proc.create_graph()
self.G=G
#Create the YACS proc with its elements (types, nodes, containers)
p=r.createProc("pr")
self.node=p
currentProc=p
p.filename=self.filename
typeMap["double"]=p.typeMap["double"]
typeMap["float"]=p.typeMap["double"]
typeMap["int"]=p.typeMap["int"]
typeMap["short"]=p.typeMap["int"]
typeMap["long"]=p.typeMap["int"]
typeMap["string"]=p.typeMap["string"]
typeMap["char"]=p.typeMap["string"]
typeMap["boolean"]=p.typeMap["bool"]
typeMap["bool"]=p.typeMap["bool"]
objref=p.createInterfaceTc("IDL:omg.org/CORBA/Object:1.0","Object",[])
typeMap["objref"]=objref
typeMap["Unknown"]=p.createInterfaceTc("","Unknown",[])
typeMap["GEOM_Object"]=p.createInterfaceTc("IDL:GEOM/GEOM_Object:1.0","GEOM_Object",[objref])
typeMap["GEOM_Shape"]=typeMap["GEOM_Object"]
typeMap["CALCIUM_integer"]=p.createInterfaceTc("IDL:Ports/Calcium_Ports/Calcium_Integer_Port:1.0","CALCIUM_integer",[])
typeMap["CALCIUM_real"]=p.createInterfaceTc("IDL:Ports/Calcium_Ports/Calcium_Real_Port:1.0","CALCIUM_real",[])
typeMap["CALCIUM_double"]=p.createInterfaceTc("IDL:Ports/Calcium_Ports/Calcium_Double_Port:1.0","CALCIUM_double",[])
typeMap["CALCIUM_string"]=p.createInterfaceTc("IDL:Ports/Calcium_Ports/Calcium_String_Port:1.0","CALCIUM_string",[])
typeMap["CALCIUM_boolean"]=p.createInterfaceTc("IDL:Ports/Calcium_Ports/Calcium_Logical_Port:1.0","CALCIUM_boolean",[])
typeMap["SuperVisionTest::Adder"]=p.createInterfaceTc("","SuperVisionTest/Adder",[objref])
typeMap["Adder"]=typeMap["SuperVisionTest::Adder"]
currentProc.typeMap["Object"]=typeMap["objref"]
currentProc.typeMap["Unknown"]=typeMap["Unknown"]
currentProc.typeMap["GEOM_Object"]=typeMap["GEOM_Object"]
currentProc.typeMap["GEOM_Shape"]=typeMap["GEOM_Shape"]
currentProc.typeMap["CALCIUM_integer"]=typeMap["CALCIUM_integer"]
currentProc.typeMap["CALCIUM_real"]=typeMap["CALCIUM_real"]
#create all containers
for name,container in _containers.items():
cont=r.createContainer()
cont.setName(name)
cont.setProperty("hostname",container.mach)
cont.setProperty("container_name",container.name)
currentProc.containerMap[name]=cont
for n in G:
#each node in G creates an equivalent YACS node.
node=n.createNode()
p.edAddChild(node)
#Connect Salome macros to nodes of proc p.
self.connect_macros(self.macro_dict)
#add control links
for n in G:
for v in G[n]:
p.edAddCFLink(n.node,v.node)
#add dataflow links and initializations
for n in G:
#dataflow links
for l in n.links:
try:
p.edAddLink(l.from_node.getOutputPort(l.from_param),
l.to_node.getInputPort(l.to_param))
except:
reason="Error while connecting output port: "+l.from_param+" from node: "+l.from_node.name
reason=reason+" to input port: "+l.to_param+" from node: "+l.to_node.name
currentProc.getLogger("parser").error(reason,currentProc.filename,-1)
#datastream links
for l in n.outStreamLinks:
pout=l.from_node.getOutputDataStreamPort(l.from_param)
pin=l.to_node.getInputDataStreamPort(l.to_param)
p.edAddLink(pout,pin)
            #initializations (delegated to Node.initPort, which implements
            #exactly this per-type dispatch and error logging)
            for l in n.datas:
                n.initPort(l)
return p
class SalomeProc(ComposedNode):
"""Salome proc with all its dataflow, datastream and control links
The object is built by parsing an XML file.
"""
def __init__(self,dataflow):
self.name="name"
self.parse(dataflow)
#self.links : list of dataflow links (Link objects)
#self.nodes : list of graph nodes
#self.node_dict : nodes dict ({name:node})
#self.datas : list of graph datas
#each node has 2 lists of datastream links (inStreams, outStreams)
def parse(self,dataflow):
if debug:print "All XML nodes"
for node in dataflow:
if debug:print node.tag,node
#Parse dataflow info-list
self.dataflow_info=self.parseService(dataflow.find("info-list/node/service"))
if debug:print self.dataflow_info
if debug:print self.dataflow_info.inParameters
if debug:print self.dataflow_info.outParameters
if debug:
for para in self.dataflow_info.inParameters:
print "inParam:",para.name,para.name.split("__",1)
self.name=dataflow.findtext("info-list/node/node-name")
self.coupled_node=dataflow.findtext("info-list/node/coupled-node")
if debug:print "All XML nodes dataflow/node-list"
nodes=[]
node_dict={}
#Parse all nodes
for n in dataflow.findall('node-list/node'):
#n is a node-list node
kind=n.findtext("kind")
comp=n.findtext("component-name")
name=n.findtext("node-name")
coupled_node=n.findtext("coupled-node")
interface=n.findtext("interface-name")
container=n.findtext("container")
#kind=1 : dataflow ?
#kind=2 : ?
#kind=9 : datastream graph ?
#kind=6 : ??
#kind=8 : ??
if kind == "0":
#It's a service
node=ComputeNode()
node.kind=0
node.sComponent = comp
node.interface=interface
node.container= getContainer(container)
if not node.container:
node.container=addContainer(container)
if debug:print "\tcontainer",node.container
elif kind == "3":
#It's a python function
node=InlineNode()
node.kind=3
codes=[]
fnames=[]
for pyfunc in n.findall("PyFunction-list/PyFunction"):
fnames.append(pyfunc.findtext("FuncName"))
codes.append(self.parsePyFunction(pyfunc))
node.fnames=fnames
node.codes=codes
elif kind == "4":
#It's a loop : make a LoopNode
#python functions (next, more, init) are found in codes
node=LoopNode()
node.kind=4
codes=[]
fnames=[]
for pyfunc in n.findall("PyFunction-list/PyFunction"):
fnames.append(pyfunc.findtext("FuncName"))
codes.append(self.parsePyFunction(pyfunc))
node.fnames=fnames
node.codes=codes
elif kind == "5":
#End of loop : make an InlineNode
node=InlineNode()
node.kind=5
codes=[]
fnames=[]
for pyfunc in n.findall("PyFunction-list/PyFunction"):
fnames.append(pyfunc.findtext("FuncName"))
codes.append(self.parsePyFunction(pyfunc))
node.fnames=fnames
node.codes=codes
elif kind == "10":
# It's a Macro node : make a MacroNode
node=MacroNode()
node.kind=10
else:
raise UnknownKind,kind
node.name=name
node.service=None
node.coupled_node=coupled_node
#Put nodes in a dict to ease search
node_dict[node.name]=node
if debug:print "\tnode-name",node.name
if debug:print "\tkind",node.kind,node.__class__.__name__
s=n.find("service")
if s:
node.service=self.parseService(s)
#Parse datastream ports
if debug:print "DataStream ports"
inStreams=[]
for indata in n.findall("DataStream-list/inParameter"):
inStreams.append(self.parseInData(indata))
node.inStreams=inStreams
outStreams=[]
outStreams_dict={}
for outdata in n.findall("DataStream-list/outParameter"):
p=self.parseOutData(outdata)
outStreams.append(p)
outStreams_dict[p.name]=p
node.outStreams=outStreams
node.outStreams_dict=outStreams_dict
if debug:print "\t++++++++++++++++++++++++++++++++++++++++++++"
nodes.append(node)
self.nodes=nodes
self.node_dict=node_dict
#Nodes parsing is finished.
#Parse dataflow and datastream links.
"""
<link>
<fromnode-name>Node_A_1</fromnode-name>
<fromserviceparameter-name>a_1</fromserviceparameter-name>
<tonode-name>Node_B_1</tonode-name>
<toserviceparameter-name>b_1</toserviceparameter-name>
<coord-list/>
</link>
"""
if debug:print "All XML nodes dataflow/link-list"
links=[]
if debug:print "\t++++++++++++++++++++++++++++++++++++++++++++"
for link in dataflow.findall('link-list/link'):
l=Link()
l.from_name=link.findtext("fromnode-name")
l.to_name=link.findtext("tonode-name")
l.from_param=link.findtext("fromserviceparameter-name")
l.to_param=link.findtext("toserviceparameter-name")
links.append(l)
if debug:print "\tfromnode-name",l.from_name
if debug:print "\tfromserviceparameter-name",l.from_param
if debug:print "\ttonode-name",l.to_name
if debug:print "\ttoserviceparameter-name",l.to_param
if debug:print "\t++++++++++++++++++++++++++++++++++++++++++++"
self.links=links
if debug:print "All XML nodes dataflow/data-list"
datas=[]
for data in dataflow.findall('data-list/data'):
d=self.parseData(data)
datas.append(d)
if debug:print "\ttonode-name",d.tonode
if debug:print "\ttoserviceparameter-name",d.tonodeparam
if debug:print "\tparameter-value",d.value
if debug:print "\tparameter-type",d.type
if debug:print "\t++++++++++++++++++++++++++++++++++++++++++++"
self.datas=datas
def parseService(self,s):
service=Service()
service.name=s.findtext("service-name")
if debug:print "\tservice-name",service.name
inParameters=[]
for inParam in s.findall("inParameter-list/inParameter"):
p=Parameter()
p.name=inParam.findtext("inParameter-name")
p.type=typeName(inParam.findtext("inParameter-type"))
if debug:print "\tinParameter-name",p.name
if debug:print "\tinParameter-type",p.type
inParameters.append(p)
service.inParameters=inParameters
if debug:print "\t++++++++++++++++++++++++++++++++++++++++++++"
outParameters=[]
for outParam in s.findall("outParameter-list/outParameter"):
p=Parameter()
p.name=outParam.findtext("outParameter-name")
p.type=typeName(outParam.findtext("outParameter-type"))
if debug:print "\toutParameter-name",p.name
if debug:print "\toutParameter-type",p.type
outParameters.append(p)
service.outParameters=outParameters
if debug:print "\t++++++++++++++++++++++++++++++++++++++++++++"
return service
def parseData(self,d):
da=Data()
da.tonode=d.findtext("tonode-name")
da.tonodeparam=d.findtext("toserviceparameter-name")
da.value=d.findtext("data-value/value")
da.type=eval(d.findtext("data-value/value-type"))
if da.type < 9:
da.value=eval(da.value)
return da
def parsePyFunction(self,pyfunc):
if debug:print pyfunc.tag,":",pyfunc
if debug:print "\tFuncName",pyfunc.findtext("FuncName")
text=""
for cdata in pyfunc.findall("PyFunc"):
if text:text=text+'\n'
if cdata.text != '?':
text=text+ cdata.text
return text
"""<inParameter-type>1</inParameter-type>
<inParameter-name>istream</inParameter-name>
<inParameter-dependency>2</inParameter-dependency>
<inParameter-schema>0</inParameter-schema>
<inParameter-interpolation>0</inParameter-interpolation>
<inParameter-extrapolation>0</inParameter-extrapolation>
</inParameter>
<outParameter>
<outParameter-type>1</outParameter-type>
<outParameter-name>ostream</outParameter-name>
<outParameter-dependency>2</outParameter-dependency>
<outParameter-values>0</outParameter-values>
</outParameter>
"""
def parseInData(self,d):
if debug:print d.tag,":",d
p=Parameter()
p.name=d.findtext("inParameter-name")
p.type=typeName(d.findtext("inParameter-type"))
p.dependency=d.findtext("inParameter-dependency")
p.schema=d.findtext("inParameter-schema")
p.interpolation=d.findtext("inParameter-interpolation")
p.extrapolation=d.findtext("inParameter-extrapolation")
if debug:print "\tinParameter-name",p.name
return p
def parseOutData(self,d):
if debug:print d.tag,":",d
p=Parameter()
p.name=d.findtext("outParameter-name")
p.type=typeName(d.findtext("outParameter-type"))
p.dependency=d.findtext("outParameter-dependency")
p.values=d.findtext("outParameter-values")
if debug:print "\toutParameter-name",p.name
return p
def create_graph(self):
#a graph is a dict {node:neighbours}
#neighbours is a Set of neighbour nodes (of course)
#for v in graph (python >= 2.3): iterate through graph nodes
#for v in graph[node] iterate through node neighbours
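        #e.g. G = {A: Set([B]), B: Set([C]), C: Set()} encodes the chain A -> B -> C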
G={}
#create all nodes without neighbours
for n in self.nodes:
G[n]=Set()
#calculate neighbours with links
for link in self.links:
from_node=self.node_dict[link.from_name]
if link.from_param == "Gate" or link.to_param == "Gate":
#control link salome : add to_name node to neighbours
if debug:print "add control link",link.from_name,link.to_name
G[self.node_dict[link.from_name]].add(self.node_dict[link.to_name])
elif from_node.outStreams_dict.has_key(link.from_param):
# datastream link :
# 1- add link in link list
# 2- add in link references on from_node and to_node
if debug:print "add stream link",link.from_name,link.to_name
self.node_dict[link.to_name].inStreamLinks.append(link)
self.node_dict[link.from_name].outStreamLinks.append(link)
link.from_node=self.node_dict[link.from_name]
link.to_node=self.node_dict[link.to_name]
else:
# other salome link
# if link from Loop node to EndOfLoop node, we ignore it
# all others are kept
from_node=self.node_dict[link.from_name]
to_node=self.node_dict[link.to_name]
if isinstance(to_node,LoopNode):
                    # If it's the link from EndOfLoop to Loop, we ignore it
if to_node.coupled_node == from_node.name:
if debug:print "backlink loop:",from_node,to_node
#ignored
continue
if debug:print "add dataflow link",link.from_name,link.to_name
G[self.node_dict[link.from_name]].add(self.node_dict[link.to_name])
if link.from_param != "DoLoop" and link.to_param != "DoLoop":
#Links on DoLoop are used by Salome supervisor. We ignore them.
#Add in the link references on nodes (from_node and to_node)
#Add this link into the list of links of to_node node.
self.node_dict[link.to_name].links.append(link)
link.from_node=self.node_dict[link.from_name]
link.to_node=self.node_dict[link.to_name]
#In a Salome graph with loops, head node and end node are connected
#with 2 opposite links
#Store the endloop in attribute endloop of head node.
if link.from_param == "DoLoop" and link.to_param == "DoLoop" \
and is_loop(self.node_dict[link.from_name]) \
and isinstance(self.node_dict[link.to_name],InlineNode):
#Store the end loop inline node in attribute endloop
#self.node_dict[link.to_name] is the end node of the head loop node self.node_dict[link.from_name]
if debug:print "add loop",link.from_name,link.to_name
self.node_dict[link.from_name].endloop=self.node_dict[link.to_name]
self.node_dict[link.to_name].loop=self.node_dict[link.from_name]
for data in self.datas:
if debug:print "datas",data
self.node_dict[data.tonode].datas.append(data)
self.G=G
#Transform the graph in place
        # Transform one-level loops into a hierarchical graph
self.reduceLoop()
        #Return the hierarchical graph that can be transformed into YACS objects.
return G
def display(self,suivi="sync"):
"""Display Salome proc with graphviz (dot file)"""
#to display : dot -Tpng salome.dot |display
f=file("salome.dot", 'w')
self.write_dot(f)
f.close()
cmd="dot -Tpng salome.dot |display" + (suivi == "async" and "&" or "")
os.system(cmd)
def write_dot(self,stream):
"""Dump Salome proc into stream with dot format"""
stream.write('digraph %s {\nnode [ style="filled" ]\n' % self.name)
for node in self.nodes:
label = "%s:%s"% (node.name,node.__class__.__name__)
color='green'
stream.write(' %s [fillcolor="%s" label=< %s >];\n' % (
id(node), color, label
))
for link in self.links:
from_node=self.node_dict[link.from_name]
to_node=self.node_dict[link.to_name]
stream.write(' %s -> %s;\n' % (id(from_node), id(to_node)))
stream.write("}\n")
def main():
import traceback
usage ="""Usage: %s salomeFile convertedFile
where salomeFile is the name of the input schema file (old Salome syntax)
and convertedFile is the name of the output schema file (new YACS syntax)
"""
try:
salomeFile=sys.argv[1]
convertedFile=sys.argv[2]
except :
print usage%(sys.argv[0])
sys.exit(3)
SALOMERuntime.RuntimeSALOME_setRuntime()
loader=SalomeLoader()
try:
p= loader.load(salomeFile)
s= pilot.SchemaSave(p)
s.save(convertedFile)
except:
traceback.print_exc(file=sys.stdout)
f=open(convertedFile,'w')
f.write("<proc></proc>\n")
sys.exit(2)
logger=p.getLogger("parser")
if not logger.isEmpty():
print logger.getStr()
sys.exit(1)
if __name__ == "__main__":
main()
|
FedoraScientific/salome-yacs
|
src/salomeloader/salomeloader.py
|
Python
|
gpl-2.0
| 43,462
|
from provisioning.images_repository import ImagesRepository
import uuid
class Images(object):
def __init__(self):
self.images_repository = ImagesRepository()
def get_all_images(self):
return self.images_repository.get_all_images()
def get_image(self, name):
return self.images_repository.get_image(name)
def create_image(self, image):
image['uuid'] = str(uuid.uuid4())
return self.images_repository.create_image(image)
def delete_image(self, name):
return self.images_repository.delete_image(name)
def update_image(self, name, image):
return self.images_repository.update_image(name, image)
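# A minimal usage sketch (assumptions: ImagesRepository stores plain dicts and
# 'name' is the lookup key; the image fields below are hypothetical):
def _example_images_usage():
    images = Images()
    image = {"name": "ubuntu-14.04", "os": "linux"}
    images.create_image(image)      # create_image() stamps image['uuid']
    print image["uuid"]
    images.update_image("ubuntu-14.04", {"name": "ubuntu-14.04", "arch": "x86_64"})
    images.delete_image("ubuntu-14.04")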
|
janutechnology/VirtShell
|
virtshell_server/virtshell_server/provisioning/images.py
|
Python
|
gpl-2.0
| 676
|
## Nao functions version 1.41
## change log:
## 1.02: Added class "Region"
## 1.02: Resolution stuff.
## 1.03: Detect() now returns an object of the class "Region()"
## 1.04: Added Aldebaran's face detection NaoFaceLocation().
## 1.05: Added the gesture() function and EyeLED() function.
## 1.06: Now able to look for the correct haarcascade file within the pythonpath
## 1.07: Changed Track() function to better support different frame rates
## 1.08: Added ALTrack function
## 1.09: Added second gesture in Gesture()
## 1.10: Added InitPose
## 1.11: Added Move
## 1.12: Added Crouch
## 1.13: Removed Gesture(), instead use the gesture lib. Changed comments for Move()
## 1.14: Added Play() function for playing sound files
## 1.15: Added Record() function
## 1.16: Added WalkTo function
## 1.17: Added PlaySine function
## 1.18: Added function FindFace()
## 1.19: Added RunMovement() (19-09-2011 - Turin)
## 1.20: Added Stiffen() for stiffening the joints
## 1.21: Added RunLed() for running led scripts
## 1.22: GetAvailableLEDPatterns() and GetAvailableGestures() added.
## 1.23: speechProxy added
## 1.24: File existence check added in RunMovement, RunLed, RunSpeech
## 1.25: Fixed remove = remove.reverse() returning None error
## 1.26: Added InitSpeech() and DetectSpeech()
## 1.27: GetAvailableDialogs() added.
## 1.28: Added LoadDialog()
## 1.29: Changed searchpaths of RunLED, RunMovement and RunSpeech to include /led, /gestures and /tts subfolders, respectively.
## 1.30: Added possibility of sending port number to InitProxy
## 1.31: Added better error handling in several functions and made posting of text optional.
## 1.32: RunLED changed to read files with ; as delimiter and to deal with multi-line led-files
## 1.33: LoadDialog() reads files with ; as delimiter
## 1.34: Added functions MoveHead() to move nao's head and GetYaw() to request the yaw of nao's head
## 1.35: Added functions SetTTSVolume() and GetTTSVolume() for checking and controlling the volume of the Text to Speech
## 1.36: Added functions SetMusicVolume() and GetMusicVolume() for checking and controlling the volume of the Music
## 1.37: Updated FindFace to include arbitrary offset and gain. Default changed to up -0.2.
## 1.38: Speed of GetImage() improved. Removed dependency on Python Image Library
## 1.39: Removed "from naoqi import xxx" statements.
## 1.40: Added ALRobotPosture proxy, GoToPosture and proper InitPose() and Crouch(); InitProxy rewritten
## 1.41: Added Landmark detection, Sound localization and Sound detection
import numpy as np
import cv2
from time import time
from time import sleep
#import Image
import random
import math
import sys
import os
import csv
import naoqi
from collections import deque
__naoqi_version__='2.1'
__nao_module_name__ ="Nao Library"
__version__='2.0'
gftt_list = list() # initialize good features to track for opencv
fast = 0 # initialize face detection state for opencv
time_q = deque([1,1,1,1,1,1,1,1,1,1])
old_time = time()
time_old_track = time()
#font = cv2.InitFont(cv2.FONT_HERSHEY_TRIPLEX, 0.5, 0.5, 0.0, 1)
## Find the *.xml file for face detection.
list_path = sys.path
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/haarcascade_frontalface_alt2.xml"):
break
#cascade_front = cv.Load(list_path[i]+"/haarcascade_frontalface_alt2.xml")
interpol_time=0.3
start_mov_t = time()
weights = list()
existence = list()
id_pose = None
alface_subscribed = False
xtargetold = 0
ytargetold = 0
class ResolutionCamera:
def __init__(self):
self.low = 0
self.medium = 1
self.high = 2
self.very_high=3
self.res_160x120 = 0 #kQQVGA
self.res_320x240 = 1 #kQVGA
self.res_640x480 = 2 #kVGA
self.res_1280x960 = 3 #k4VGA
self.resolutionar = [160,120],[320,240],[640,480],[1280,960]
self.framerate=30
resolution = ResolutionCamera()
class Region:
def __init__(self):
self.x = 0
self.y = 0
self.width = 0
self.height = 0
def Say(text, POST=True):
global tts
#print text
try:
#volume=GetTTSVolume()
#SetTTSVolume(0.99)
if POST:
tts.post.say(text)
else:
tts.say(text)
#SetTTSVolume(volume)
except NameError:
print 'ALTextToSpeech proxy undefined. Are you running a simulated naoqi?'
def HeadTouch():
head_touch = memoryProxy.getData("Device/SubDeviceList/Head/Touch/Front/Sensor/Value", 0)
return head_touch
#################################################################################
## Use this function, InitProxy, to initialise the proxies. Pass Nao's IP
## as an argument; an optional list selects which proxies to create.
#################################################################################
def ConnectProxy(proxy_name, IP, PORT):
theProxy=None
try:
theProxy = naoqi.ALProxy(proxy_name, IP, PORT)
sleep(0.01)
except RuntimeError as e:
print "Error when creating ", proxy_name ," proxy:"
print str(e)
return theProxy
def InitProxy(IP="marvin.local", proxy=[0], PORT = 9559):
"""proxy: (list) 1->TTS, 2->audio, 3->motion, 4->memory, 5->face, 6->video, 7->LED's, 8->Track, 9->Speech, 10->Audioplayer, 11->VisionToolbox"""
global audioProxy
global motionProxy
global memoryProxy
global cameraProxy
global faceProxy
global ledProxy
global tts
global trackfaceProxy
global playProxy
global videoProxy
global asr
    global speechProxy # same as asr for backward compatibility
    global sonarProxy
    global postureProxy
    global landmarkProxy
    global soundProxy
    global soundsourceProxy
    global ALModuleList
    global proxyDict
ALModuleList=["ALTextToSpeech","ALAudioDevice","ALMotion","ALMemory","ALFaceDetection","ALVideoDevice","ALLeds","ALFaceTracker","ALSpeechRecognition","ALAudioPlayer","ALVideoRecorder","ALSonar","ALRobotPosture","ALLandMarkDetection","ALSoundDetection","ALAudioSourceLocalization"]
proxyDict={}
#proxyList=[None]*(len(ALModuleList))
# check if list is empty
if len(proxy)==0:
proxy=range(1, len(ALModuleList)+1)
else:
#if not check whether it contains a 0
if 0 in proxy:
proxy=range(1, len(ALModuleList)+1)
for i in proxy:
proxyDict[ALModuleList[i-1]]=ConnectProxy(ALModuleList[i-1],IP, PORT)
#define globals
tts=proxyDict["ALTextToSpeech"]
audioProxy=proxyDict["ALAudioDevice"]
motionProxy=proxyDict["ALMotion"]
memoryProxy=proxyDict["ALMemory"]
faceProxy=proxyDict["ALFaceDetection"]
cameraProxy=proxyDict["ALVideoDevice"]
ledProxy=proxyDict["ALLeds"]
trackfaceProxy=proxyDict["ALFaceTracker"]
asr=proxyDict["ALSpeechRecognition"]
speechProxy=asr # for backward compatibility
playProxy=proxyDict["ALAudioPlayer"]
videoProxy=proxyDict["ALVideoRecorder"]
sonarProxy=proxyDict["ALSonar"]
postureProxy=proxyDict["ALRobotPosture"]
landmarkProxy=proxyDict["ALLandMarkDetection"]
soundProxy=proxyDict["ALSoundDetection"]
soundsourceProxy=proxyDict["ALAudioSourceLocalization"]
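#################################################################################
## A minimal connection sketch (the IP below is hypothetical). The default
## proxy=[0] connects every module listed in ALModuleList.
#################################################################################
def _example_init_proxy():
    InitProxy("192.168.1.12")       # default connects all proxies
    Say("Connected", POST=False)    # uses the global tts proxy
    CloseProxy()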
def InitSonar(flag=1):
#period = 100
#precision = 0.1
if flag:
#sonarProxy.subscribe("test4", period , precision )
sonarProxy.subscribe("test4" )
else:
try:
sonarProxy.unsubscribe("test4" )
flag=0
except:
print "Sonar already unsubscribed"
flag=0
return flag
#################################################################################
## Use this function, CloseProxy, to close the proxies. Pass the list of
## proxy numbers to close (0 or an empty list closes them all).
#################################################################################
def CloseProxy(proxy=[0]):
"""proxy: (list) 1->TTS, 2->audio, 3->motion, 4->memory, 5->face, 6->video, 7->LED's, 8->Track, 9->Speech, 10->Audioplayer, 11->VisionToolbox"""
global ALModuleList
global proxyDict
# check if list is empty
if len(proxy)==0:
proxy=range(1, len(ALModuleList)+1)
else:
#if not check whether it contains a 0
if 0 in proxy:
proxy=range(1, len(ALModuleList)+1)
for i in proxy:
try:
proxyDict[ALModuleList[i-1]].exit()
sleep(0.1)
#print "Proxy ALTextToSpeech established"
except RuntimeError as e:
print "Error when deleting ", ALModuleList[i-1], " TextToSpeech proxy:"
print str(e)
#redefine globals (one or more are set to None)
tts=proxyDict["ALTextToSpeech"]
audioProxy=proxyDict["ALAudioDevice"]
motionProxy=proxyDict["ALMotion"]
memoryProxy=proxyDict["ALMemory"]
faceProxy=proxyDict["ALFaceDetection"]
cameraProxy=proxyDict["ALVideoDevice"]
ledProxy=proxyDict["ALLeds"]
trackfaceProxy=proxyDict["ALFaceTracker"]
asr=proxyDict["ALSpeechRecognition"]
speechProxy=asr # for backward compatibility
playProxy=proxyDict["ALAudioPlayer"]
videoProxy=proxyDict["ALVideoRecorder"]
sonarProxy=proxyDict["ALSonar"]
postureProxy=proxyDict["ALRobotPosture"]
landmarkProxy=proxyDict["ALLandMarkDetection"]
################################################################################
## nao.ALFacePosition() subscribes to faceProxy and returns location of face.
## It uses the embedded functions of Aldebaran face detection. If you want to
## change the period, you will have to first unsubscribe using switch = False.
## It returns [face_location,detected]. detected = whether a face has been seen.
################################################################################
def ALFacePosition(switch = True, period = 100):
global alface_subscribed
if alface_subscribed == False:
faceProxy.subscribe("Test_Face", period, 0.0)
alface_subscribed = True
location_face = memoryProxy.getData("FaceDetected")
if switch == False:
faceProxy.unsubscribe("Test_Face")
        alface_subscribed = False
#print " location face: " , location_face
if location_face==None:
location_face=[]
if len(location_face) >= 2: # Changed with respect to old naoqi versions
return [-location_face[1][0][0][1],location_face[1][0][0][2]], True
else:
return [], False
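################################################################################
## Example polling sketch: read the face position a few times, then stop the
## subscription by passing switch=False on the last call.
################################################################################
def _example_face_position():
    for i in range(10):
        location, detected = ALFacePosition()
        if detected:
            print "face at (yaw, pitch):", location
        sleep(0.2)
    ALFacePosition(switch=False)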
###############################################################################
## EyeLED() can change the color of the LEDs. The color parameter sets
## the color in RGB values.
## The standard color is off, [0,0,0]. The interpolation time defines the time
## in seconds it will take to fully switch to the new color.
###############################################################################
def EyeLED(color=[0,0,0],interpol_time = 0, POST=True):
sGroup = "FaceLeds"
try:
if POST:
ledProxy.post.fadeRGB(sGroup, 256*256*color[0] + 256*color[1] + color[2],interpol_time)
else:
ledProxy.fadeRGB(sGroup, 256*256*color[0] + 256*color[1] + color[2],interpol_time)
except NameError:
print 'ALLeds proxy undefined.'
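###############################################################################
## Example sketch: fade the eyes to red over half a second, then switch them
## off again (the default color [0,0,0] is off).
###############################################################################
def _example_eye_led():
    EyeLED([255, 0, 0], 0.5, POST=False)   # blocking fade to full red
    sleep(1.0)
    EyeLED()                               # back to off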
###############################################################################
## This function returns the available gestures located in the gesture dir.
###############################################################################
def GetAvailableGestures():
"""Returns available gestures in a list"""
list_path = sys.path
found = 0
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/gestures"):
found = 1
break
if found == 0:
print "Could not find /gestures directory!"
raise IOError
return None
remove = []
list_gestures = os.listdir(list_path[i]+"/gestures")
for i in range(len(list_gestures)):
list_gestures[i] = "/gestures/"+list_gestures[i]
if not list_gestures[i].endswith(".py") and not list_gestures[i].endswith(".ges"):
remove.append(i)
## remove non py files
remove.reverse()
for i in range(len(remove)):
list_gestures.pop(remove[i])
return list_gestures
###############################################################################
## This function returns the available LED patterns located in the led dir.
###############################################################################
def GetAvailableLEDPatterns():
"""Returns available gestures in a list"""
list_path = sys.path
found = 0
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/led"):
found = 1
break
if found == 0:
print "Could not find /led directory!"
raise IOError
return None
list_led = os.listdir(list_path[i]+"/led")
remove = []
for i in range(len(list_led)):
list_led[i] = "/led/"+list_led[i]
if not list_led[i].endswith(".csv") and not list_led[i].endswith(".led"):
remove.append(i)
## remove non csv files
remove.reverse()
for i in remove:
list_led.pop(i)
return list_led
###############################################################################
## This function returns the available dialogs located in the dialogs dir.
###############################################################################
def GetAvailableDialogs():
"""Returns available dialogs in a list"""
list_path = sys.path
found = 0
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/dialogs"):
found = 1
break
if found == 0:
print "Could not find /dialogs directory!"
raise IOError
return None
list_dlg = os.listdir(list_path[i]+"/dialogs")
remove = []
for i in range(len(list_dlg)):
list_dlg[i] = "/dialogs/"+list_dlg[i]
if not list_dlg[i].endswith(".csv") and not list_dlg[i].endswith(".dlg"):
remove.append(i)
## remove non csv files
remove.reverse()
for i in remove:
list_dlg.pop(i)
return list_dlg
#########################################################################
## Loads a dialog csv file and converts its logic and questions/messages
## to dictionaires for use in a smach state machine
#########################################################################
def LoadDialog(file_name):
""" Give the filename of the dialog in the /dialogs folder. Extension should be .csv or .dlg."""
list_path = sys.path
filefound=False
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/dialogs/"+file_name):
filefound=True
break
if not filefound:
print "Dialog file "+str(file_name)+" not found in PYTHONPATH"
return
file_load = open(list_path[i]+"/dialogs/"+file_name)
#read all rows of CSV file (assumes delimiter is ';')
csv_reader = csv.reader(file_load, delimiter=';')
return csv_reader
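#########################################################################
## Example sketch: iterate over the rows of a dialog file. The column layout
## is whatever the .dlg/.csv file defines; 'demo.dlg' is a hypothetical name.
#########################################################################
def _example_load_dialog():
    reader = LoadDialog("demo.dlg")
    if reader is not None:
        for row in reader:
            print row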
################################################################################
## nao.InitVideo() initialises the cv image and sets the variables on Nao.
## It allows you to specify the resolution. But first execute nao.InitProxy()
################################################################################
def InitVideo(resolution):
global nameId
global cameraProxy
global cv_im
# AL_kTopCamera = 0
# AL_kQVGA = 1 # 320x240
AL_kBGRColorSpace = 13
fps=10
resolutionar = [160,120],[320,240],[640,480],[1280,960]
try:
nameId = cameraProxy.subscribe("python_GVM2"+str(random.random()*10), resolution, AL_kBGRColorSpace, fps) #0, 0, 10
except NameError:
print 'ALVideoDevice proxy undefined. Are you running a simulated naoqi?'
return None
try:
# cv_im = cv.CreateImageHeader((resolutionar[resolution][0],
# resolutionar[resolution][1]),
# cv.IPL_DEPTH_8U, 1)
cv_im=np.zeros((resolutionar[resolution][0],
resolutionar[resolution][1], 3), np.uint8)
except:
print "Cannot create image header"
return None
#################################################################################
## nao.GetImage() gets the image from Nao. You will first need to execute
## nao.InitVideo()
#################################################################################
def GetImage():
global img
global nameId
global cv_im
gotimage = False
count = 0
while not gotimage and count < 10:
try:
img =cameraProxy.getImageRemote(nameId)
#pi=Image.frombuffer("L",(img[0],img[1]),img[6]) # original version leading to warnings about future incompatibilities
#pi=Image.frombuffer("L",(img[0],img[1]),img[6],"raw", "L", 0, -1) # -1 is upside down orientation, 1 upright orientation
#pi=Image.fromstring("L",(img[0],img[1]),img[6])
gotimage =True
except NameError:
print 'ALVideoDevice proxy undefined. Are you running a simulated naoqi?'
break
except:
count = count + 1
print "problems with video buffer!! Did you initialize nao.InitVideo() the video first?"
#cv.SetData(cv_im, pi.tostring()) # conversion using PIL not necessary, pass img[6] directly to cv_im
#cv.Flip(cv_im,cv_im,0) # not needed when using from string
#key = cv.WaitKey(10) # only useful after a cv.ShowImage("test",cv_im)
cv_im = np.reshape(map(ord,img[6]),(img[1],img[0],img[2]))
return cv_im
################################################################################
## NOTE!! THIS FUNCTION STILL NEEDS TO BE CLEANED UP
## nao.Detect(frame) looks for a face within the "frame".
## it outputs a opencv image with a box around the face, the centre coordinates in approx. radians
## and whether a face is detected
################################################################################
def Detect(frame, draw = True):
global face1_x
global face1_y
global face1_width
global face1_center
global old_face1_x
global old_face1_y
global old_face1_width
global fast
global windowsz
global cascade_front
roiscale = 2
windowscale = 10
face1_center = (0,0)
if fast>0:
if fast == 3:
#The cvrectangle defines the ROI that is used for face detection
#it depends on the previous location of the face and increases in
#size if no face is detected
cvrectangle = [face1_x-(face1_width/(roiscale*2)),
face1_y-(face1_width/(roiscale*2)),
face1_width+(face1_width/roiscale),
face1_width+(face1_width/roiscale)]
windowsz = face1_width-(face1_width/windowscale)
old_face1_x = face1_x # windowsize should be kept as big as possible
old_face1_y = face1_y # a larger windowsz means faster detection
old_face1_width = face1_width
if fast == 2:
cvrectangle = [old_face1_x-(old_face1_width/(roiscale)),
old_face1_y-(old_face1_width/(roiscale)),
old_face1_width+(old_face1_width/(roiscale*0.5)),
old_face1_width+(old_face1_width/(roiscale*0.5))]
windowsz = old_face1_width-(old_face1_width/(windowscale/2))
if fast == 1:
cvrectangle = [old_face1_x-(old_face1_width/(roiscale*0.5)),
old_face1_y-(old_face1_width/(roiscale*0.5)),
old_face1_width+(old_face1_width/(roiscale*0.25)),
old_face1_width+(old_face1_width/(roiscale*0.25))]
windowsz = old_face1_width-(old_face1_width/(windowscale/4))
for i in range (0,2): #Make sure the window under consideration is not
if cvrectangle[i]<0: #outside the camera region. If so, user edge
cvrectangle[i] = 0
if i == 0 and (cvrectangle[i]+cvrectangle[i+2]) > frame.width:
cvrectangle[i+2]= frame.width - cvrectangle[i]
if i == 1 and (cvrectangle[i]+cvrectangle[i+2]) > frame.height:
cvrectangle[i+2]= frame.height - cvrectangle[i]
if draw == True:
cv.Rectangle(frame, (cvrectangle[0], cvrectangle[1]),
(cvrectangle[0]+cvrectangle[2],
cvrectangle[1]+cvrectangle[3]),cv.RGB(0,255,0))
cv.SetImageROI(frame,tuple(cvrectangle))
else:
windowsz = 20
cv.ResetImageROI(frame)
faces = cv.HaarDetectObjects(frame, cascade_front, cv.CreateMemStorage(0),1.2, 6, 1,(windowsz,windowsz))
cv.ResetImageROI(frame)
try:
if fast > 0:
face1_x = faces[0][0][0]+cvrectangle[0] #These results are from the ROI
face1_y = faces[0][0][1]+cvrectangle[1] #instead of from the entire image
else:
face1_x = faces[0][0][0]
face1_y = faces[0][0][1]
face1_width = faces[0][0][2]
face1_height = faces[0][0][3]
face1_center = (face1_x + (face1_width/2),face1_y + (face1_height/2))
region = Region()
region.x = face1_x
region.y = face1_y
region.width = face1_width
region.height = face1_height
if draw == True:
cv.Rectangle(frame, (face1_x, face1_y),
(face1_x+face1_width,face1_y+face1_height),
cv.RGB(255,255,255))
cv.Circle(frame, face1_center, 2, cv.RGB(255, 0, 0))
fast = 3
except:
fast = fast-1
region = Region()
if fast == 3:
facedetected = True
else:
facedetected = False
face_loc = list(face1_center)
convrad = 0.55/(frame.width/2)
face_loc[0] = (face_loc[0] - (frame.width/2))*convrad
face_loc[1] = (face_loc[1] - (frame.height/2))*convrad
return frame, face_loc, facedetected, region
################################################################################
## Function Framerate(frame) adds the framerate to the provided
## opencv image "frame"
################################################################################
def Framerate(frame):
global time_q
global old_time
font=cv2.FONT_HERSHEY_TRIPLEX
time_q.append(round(time()-old_time,3))
time_q.popleft()
old_time = time()
avg_time = round(sum(time_q)/float(10),5)
    cv2.putText(frame,
                str(avg_time),
                (15,15),
                font,
                0.5,          # fontScale is a required cv2.putText argument
                (255,0,0))    # blue in BGR order; cv2 has no RGB() helper
return frame
################################################################################
## Initializes the track function it stiffens the joints, gathers the IDPose
################################################################################
def InitTrack():
global xtargetold
global ytargetold
xtargetold = 0
ytargetold = 0
# Stiffening the head joints
motionProxy.stiffnessInterpolation('HeadYaw', 1.0, 1.0)
motionProxy.stiffnessInterpolation('HeadPitch', 1.0, 1.0)
interpol_time = 0.5
names = ["HeadYaw","HeadPitch"]
################################################################################
## Releasing stiffness of the head joints
################################################################################
def EndTrack():
motionProxy.stiffnessInterpolation('HeadYaw', 0.0, 1.0)
motionProxy.stiffnessInterpolation('HeadPitch', 0.0, 1.0)
################################################################################
## If the tracking function is initialised you can let nao follow a point in
## the camera stream the boolean "detected" specifies whether the target
## was detected. "frametime" is the time between frames.
################################################################################
def Track(target_loc, detected, speed = 5, min_move = 0.04):
"""
target_loc = the location Nao's head should move to in radians
detected = is the head detected, If False target_loc is not used and speed of movement gradually decreases
(optional) speed = the speed of the movement
(optional) min_move = the minimal angle of difference between the target_loc and current location for movements to occur.
"""
global xtargetold
global ytargetold
global time_old_track
global id_pose
global interpol_time
global start_mov_t
interpol_time = 1.0/speed
xtarget = target_loc[0]
ytarget = target_loc[1]
try:
frametime = time() - time_old_track
time_old_track = time()
except:
print "Not able to determine frame rate. Guessing..."
frametime = 0.15
if detected == False:
xtarget = xtargetold-xtargetold*(frametime)
ytarget = ytargetold-ytargetold*(frametime)
xtargetold = xtarget
ytargetold = ytarget
if ((xtarget > min_move or xtarget < -min_move) or (ytarget > min_move or ytarget < -min_move)):
names = ["HeadYaw","HeadPitch"]
try:
id_pose
except NameError:
id_pose = None
if id_pose != None:
#motionProxy.stop(id_pose)
pass
try:
id_pose = motionProxy.post.angleInterpolation(names, [-xtarget/2.5,ytarget/2.5] , interpol_time, False)
except RuntimeError,e:
print "Kan hoofd niet draaien"
start_mov_t = time()
################################################################################
## Is used to see if Nao's head is moving.
################################################################################
def MovingHead():
time_mov = time()-start_mov_t
if time_mov > 2*interpol_time:
return False
    else:
        return True
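################################################################################
## Example tracking sketch: poll the Aldebaran face detector and keep the head
## pointed at the face. Assumes InitProxy() has already been called.
################################################################################
def _example_track_face():
    InitTrack()
    for i in range(100):
        location, detected = ALFacePosition()
        if not detected:
            location = [0, 0]          # Track() still needs a valid list
        Track(location, detected)
        sleep(0.1)
    ALFacePosition(switch=False)       # drop the face subscription
    EndTrack()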
###############################################################################
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def FindObject(frame):
global old_frame
global gftt_list
global weights
global existence
if not MovingHead():
try:
mask = FrameMask(old_frame, frame)
except:
old_frame = cv.CloneImage(frame)
gftt_list = list()
return None, None, False
else:
old_frame = cv.CloneImage(frame)
gftt_list = list()
return None, None, False
if mask == None:
gftt_list = list()
print "2"
return None, None, False
## Find Good Features to track
if len(gftt_list) < 300:
#gftt_list.append((GoodFeaturesToTrack(old_frame, mask),1))
gftt_new, weights_new, existence_new = GoodFeaturesToTrack(old_frame, mask)
if gftt_new != None:
gftt_list= gftt_list + gftt_new
weights = weights + weights_new
existence = existence + existence_new
gftt_list_new, weights, existence = OpticalFlow(frame,old_frame,gftt_list, weights, existence)
weights, existence = UpdatePointWeights(gftt_list_new, gftt_list, weights, existence)
gftt_list = gftt_list_new
gftt_list, weights, existence = DropPoints(gftt_list, weights, existence)
gftt_img = DrawPoints(frame,gftt_list)
if len(gftt_list)>30:
loc_obj = list()
loc_obj = AvgPoint(gftt_list,1)
cv.Circle(gftt_img,loc_obj,4,255,4,8,0)
convrad = 0.55/(frame.width/2)
loc_obj = list(loc_obj)
loc_obj[0]=(loc_obj[0] - (frame.width/2))*convrad
loc_obj[1]=(loc_obj[1] - (frame.height/2))*convrad
else:
        loc_obj = [None, None]  # a list, so it can be reset below while the head moves
cv.ShowImage("Good Features",gftt_img)
cv.ShowImage("Difference", mask)
cv.Copy(frame, old_frame)
if MovingHead():
print "Object Location = 0"
loc_obj[0] = 0
loc_obj[1] = 0
gftt_list = list()
old_frame = 0
return loc_obj[0], loc_obj[1], True
###############################################################################
## Subfunction used by "FindObjects()". Returns a difference image
###############################################################################
def FrameMask(old_frame, frame):
if MovingHead():
return None
mask = cv.CloneImage(old_frame)
cv.AbsDiff(old_frame, frame, mask)
cv.Threshold(mask,mask, 15, 255, cv.CV_THRESH_BINARY)
return mask
###############################################################################
## Subfunction used in "FindObjects()" it is used to find the good features to
## to track. Good Features are features in the image that are corners between
## light and darker areas.
###############################################################################
def GoodFeaturesToTrack(image, mask):
list_gftt = list()
weights = list()
existence = list()
initpoint = 0
eig_image = cv.CreateMat(image.height ,image.width, cv.CV_32FC1)
temp_image = cv.CreateMat(image.height, image.width, cv.CV_32FC1)
gfttar = cv.GoodFeaturesToTrack(image, eig_image, temp_image, 25, 0.01, 5.0, mask, 3, 0, 0.04)
gfttar = cv.FindCornerSubPix(image,
gfttar,
(10,10),
(-1, -1),
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,20,0.03))
for i in range (0,len(gfttar)):
weights.append(1)
existence.append(1)
if len(gfttar) == 0:
return None, None, None
return gfttar, weights, existence
###############################################################################
## Subfunction used in "FindObjects()". It plots points gftt_list as circles in
## "image".
###############################################################################
def DrawPoints(image,gftt_list):
gftt_image = cv.CloneImage(image)
try:
for i in range(0,len(gftt_list)):
cv.Circle(gftt_image,gftt_list[i],2,255,1,8,0)
except:
pass
return gftt_image
###############################################################################
## Subfunction used in "FindObjects()". It calculates the new location of
## previous points
###############################################################################
def OpticalFlow(imagenew,imageold,gfttar,weights=0,existence=0):
pyrold = cv.CreateImage((imagenew.width,imagenew.height),
cv.IPL_DEPTH_32F,
1)
pyrnew = cv.CreateImage((imagenew.width,imagenew.height),
cv.IPL_DEPTH_32F,
1)
(gfttarnew,status,track_error)= cv.CalcOpticalFlowPyrLK(imageold,
imagenew,
pyrold,
pyrnew,
gfttar,
(10,10),
5,
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,20,0.03),
0)
#UpdatePointWeights(list_gftt_new,list_gftt)
#DropPoints(gf
return gfttarnew, weights, existence
################################################################################
# Track Face
################################################################################
def ALTrack(switch=1):
"""Turn head tracking on or off. Or get status = 2"""
if switch == 1:
InitTrack()
trackfaceProxy.startTracker()
elif switch == 0:
trackfaceProxy.stopTracker()
#EndTrack()
else:
return trackfaceProxy.isActive()
def OpticalFlowForOrientation(imagenew,imageold,gfttar,weights=0,existence=0):
pyrold = cv.CreateImage((imagenew.width,imagenew.height),
cv.IPL_DEPTH_32F,
1)
pyrnew = cv.CreateImage((imagenew.width,imagenew.height),
cv.IPL_DEPTH_32F,
1)
(gfttarnew,status,track_error)= cv.CalcOpticalFlowPyrLK(imageold,
imagenew,
pyrold,
pyrnew,
gfttar,
(10,10),
5,
(cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,20,0.03),
0)
for i in range (0,len(status)):
if status[i] == 0:
gfttar.pop(i)
gfttarnew.pop(i)
return gfttarnew, gfttar
def UpdatePointWeights(newpoints, oldpoints, weights, existence):
#remove points that do not move--------------------
minmove = 1.5 #minimal movement for the point not to disappear
#Calculate the vector length of the different points
difference = DifferencePoints(newpoints, oldpoints)
#fill the weights lists with appropriate values
for i in range (len(weights),len(newpoints)):
weights.append(1)
existence.append(1)
    for i in range(0,len(newpoints)-1):
        weights[i]=weights[i] + (difference[i]-minmove)
        existence[i] = existence[i] + 1
        if weights[i] > 15:
            weights[i] = 15
return (weights, existence)
## is used in UpdatePointWeights
def DifferencePoints(newpoints,oldpoints):
difference2 = list()
## if type(newpoints) != list:
## numpy.asarray(newpoints)
##
## if type(oldpoints) !=list:
## numpy.asarray(oldpoints)
for i in range(0,len(oldpoints)-1):
xcoor = math.sqrt(math.pow(newpoints[i][0]-oldpoints[i][0],2))
ycoor = math.sqrt(math.pow(newpoints[i][1]-oldpoints[i][1],2))
diff = math.sqrt(math.pow(xcoor,2)+math.pow(ycoor,2))
difference2.append(diff)
return difference2
def DropPoints(points, weights, existence):
i=0
if MovingHead():
print "In movement!!!!"
return (list(),list(),list())
while i < len(weights)-1:
if weights[i] < 0 or existence[i] > 15:
weights.pop(i)
points.pop(i)
existence.pop(i)
else:
i = i+1
return (points,weights, existence)
def AvgPoint(gfttar,meanormedian):
# 0=median, 1=mean
if meanormedian == 0:
x = list()
y = list()
for i in range (0, len(gfttar)):
x.append(gfttar[i][0])
y.append(gfttar[i][1])
y.sort()
x.sort()
indx = len(x)/2
indy = len(y)/2
return (x[indx],y[indy])
else:
x = 0
y = 0
for i in range (0, len(gfttar)):
x = x + gfttar[i][0]
y = y + gfttar[i][1]
x = x/len(gfttar)
y = y/len(gfttar)
return (x, y)
##############################################################################
## Go to one of the predefined postures
#############################################################################
def GoToPosture(thePosture, speed=0.5):
# "StandInit"
# "SitRelax"
# "StandZero"
# "LyingBelly"
# "Sit"
# "LyingBack"
# "Stand"
# "Crouch"
postureProxy.goToPosture(thePosture, speed)
##############################################################################
## Puts Nao into its init pose. Only use when standing or in crouch position.
#############################################################################
def InitPose(time_pos=0.5, speed=0.8):
"""Nao will move to initpose."""
motionProxy.setWalkTargetVelocity(0, 0, 0, 1)
sleep(0.1)
# set stiffness
motionProxy.stiffnessInterpolation('Body',1.0, time_pos)
GoToPosture("Stand", speed)
#sleep(0.5)
#IP.initPose(motionProxy)
# numJoints = len(motionProxy.getJointNames('Body'))
# allAngles = [0.0,0.0, # head
# 1.39, 0.34, -1.39, -1.04, 0.0, 0.0, # left arm
# 0.0, 0.0, -0.43, 0.69, -0.34, 0.0, # left leg
# 0.0, 0.0, -0.43, 0.69, -0.34, 0.0, # right leg
# 1.39, -0.34, 1.39, 1.04, 0.0, 0.0] # right arm
# #printnumJoints
# if (numJoints == 26):
# angles = allAngles
# elif (numJoints == 22): # no hands (e.g. simulator)
# angles = allAngles[0:6] + allAngles[8:24]
# else:
# print "error in Init Pose"
# try:
# motionProxy.post.angleInterpolation('Body', angles, 1.5, True);
# except RuntimeError,e:
# print "An error has been caught"
# print e
def Stiffen(stiffness = True, int_time=1):
"""Make Nao stiffen its joints (Can be True or False)"""
motionProxy.stiffnessInterpolation('Body',int(stiffness), int_time)
def StiffenUpperBody(stiffness = True, int_time=0.1):
    """Make Nao stiffen its upper-body joints (Can be True or False)"""
    # stiffnessInterpolation(name, level, time) matches the three-argument
    # intent here; ALMotion.setStiffnesses() takes no time parameter.
    joints = ['HeadPitch','HeadYaw',
              'LElbowRoll','LElbowYaw','LHand',
              'LShoulderPitch','LShoulderRoll','LWristYaw',
              'RElbowRoll','RElbowYaw','RHand',
              'RShoulderPitch','RShoulderRoll','RWristYaw']
    for joint in joints:
        motionProxy.stiffnessInterpolation(joint, int(stiffness), int_time)
################################################################################
## Nao crouches and loosens its joints.
###############################################################################
def Crouch(speed=0.8):
motionProxy.setWalkTargetVelocity(0, 0, 0, 1)
sleep(0.1)
GoToPosture("Crouch", speed)
motionProxy.stiffnessInterpolation('Body',0, 0.5)
# def Crouch():
# """Make Nao to crouch pose."""
# # get the robot config
# robotConfig = motionProxy.getRobotConfig()
# #for i in range(len(robotConfig[0])):
# # print robotConfig[0][i], ": ", robotConfig[1][i]
# # "Model Type" : "naoH25", "naoH21", "naoT14" or "naoT2".
# # "Head Version" : "VERSION_32" or "VERSION_33" or "VERSION_40".
# # "Body Version" : "VERSION_32" or "VERSION_33" or "VERSION_40".
# # "Laser" : True or False.
# # "Legs" : True or False.
# # "Arms" : True or False.
# # "Extended Arms": True or False.
# # "Hands" : True or False.
# # "Arm Version" : "VERSION_32" or "VERSION_33" or "VERSION_40".
# # Number of Legs : 0 or 2
# # Number of Arms : 0 or 2
# # Number of Hands: 0 or 2
# if robotConfig[1][0]=="naoH25" or robotConfig[1][0]=="naoH21":
# pass
# else:
# print "Wrong robot type: cannot crouch without arms and legs"
# return
# if robotConfig[1][8]=="VERSION_32":
# allAngles = [0.0,0.0, # head
# 1.545, 0.33, -1.57, -0.486, 0.0, 0.0, # left arm
# -0.3, 0.057, -0.744, 2.192, -1.122, -0.035, # left leg
# -0.3, 0.057, -0.744, 2.192, -1.122, -0.035, # right leg
# 1.545, -0.33, 1.57, 0.486, 0.0, 0.0] # right arm
# elif robotConfig[1][8]=="VERSION_33":
# #Modified for robot version V33
# allAngles = [0.0,0.0, # head
# 1.545, 0.2, -1.56, -0.5, 0.0, 0.0, # left arm
# -0.319, 0.037, -0.695, 2.11, -1.189, -0.026, # left leg
# -0.319, 0.037, -0.695, 2.11, -1.189, -0.026, # right leg
# 1.545, -0.2, 1.56, 0.5, 0.0, 0.0] # right arm
# else:
# #Modified for robot version V4.0
# allAngles = [0.0,0.0, # head
# 1.53, 0.15, -1.56, -0.5, 0.0, 0.0, # left arm
# -0.30, 0.05, -0.75, 2.11, -1.19, -0.04, # left leg
# -0.30, 0.05, -0.75, 2.11, -1.19, -0.04, # right leg
# 1.53, -0.15, 1.56, 0.5, 0.0, 0.0] # right arm
# numJoints = len(motionProxy.getJointNames('Body'))
# if (numJoints == 26):
# angles = allAngles
# elif (numJoints == 22): # no hands (e.g. simulator)
# angles = allAngles[0:6] + allAngles[8:24]
# else:
# print "error in numJoints"
# try:
# motionProxy.angleInterpolation('Body', angles, 1.5, True);
# except RuntimeError,e:
# print "An error has been caught"
# print e
# motionProxy.stiffnessInterpolation('Body',0, 0.5)
##################################################################################
## Allows Nao to move in a certain direction with a certain speed.
################################################################################
def Move(dx=0, dy=0, dtheta=0, freq=1):
""""
dx = forward speed, dtheta = rotational speed,
dy = sidewards speed, freq = step frequency.
Allows Nao to move in a certain direction
with a certain speed.
"""
motionProxy.setWalkTargetVelocity(dx, dy, dtheta, freq)
def ReadSonar():
SonarLeft = "Device/SubDeviceList/US/Left/Sensor/Value"
SonarRight = "Device/SubDeviceList/US/Right/Sensor/Value"
SL=memoryProxy.getData(SonarLeft,0) # read sonar left value from memory
SR=memoryProxy.getData(SonarRight ,0) # read sonar right value from memory
return SL, SR
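##################################################################################
## Example sketch: subscribe to the sonars, take one reading, then unsubscribe.
################################################################################
def _example_read_sonar():
    InitSonar(1)
    sleep(0.5)                 # give the extractor time to fill ALMemory
    left, right = ReadSonar()
    print "sonar left/right (m):", left, right
    InitSonar(0)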
##################################################################################
## Allows Nao to move dx meters forward, dy meters sideways with a final orientation of dtheta
################################################################################
def Walk(dx=0,dy=0,dtheta=0,post=False):
""""
dx = forward meters, dtheta = final angle,
dy = sidewards meters
Allows Nao to move in a certain direction.
"""
if post==False:
motionProxy.walkTo(dx, dy, dtheta)
else:
motionProxy.post.walkTo(dx, dy, dtheta)
##################################################################################
## Moves Nao's head yaw and pitch by the provided values yaw_val and pitch_val
################################################################################
def MoveHead(yaw_val=0, pitch_val=0, isAbsolute=True, post=True, timeLists= [[1],[1]]):
names = ["HeadYaw", "HeadPitch"]
angleLists = [[yaw_val], [pitch_val]]
if post==False:
motionProxy.angleInterpolation(names, angleLists, timeLists, isAbsolute)
else:
motionProxy.post.angleInterpolation(names, angleLists, timeLists, isAbsolute)
def GetYaw():
names = "HeadYaw"
useSensors = True
HeadYaw = motionProxy.getAngles(names, useSensors)
return HeadYaw
def GetPitch():
names = "HeadPitch"
useSensors = True
HeadPitch = motionProxy.getAngles(names, useSensors)
return HeadPitch
def GetYawPitch():
names = ["HeadYaw", "HeadPitch"]
useSensors = True
HeadYawPitch = motionProxy.getAngles(names, useSensors)
return HeadYawPitch
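##################################################################################
## Example sketch: nod the head 0.2 rad down (blocking), print the measured
## angles, then return to the zero position.
################################################################################
def _example_move_head():
    MoveHead(0.0, 0.2, post=False)
    print "yaw/pitch:", GetYawPitch()
    MoveHead(0.0, 0.0, post=False)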
##################################################################################
## Allows Nao to play a sinusoidal wave of frequency in Hertz p1, Volume gain 0-100 p2, Stereo Pan set to either {-1,0,+1} p3 , duration in seconds
################################################################################
def PlaySine(p1,p2,p3,duration):
global audioProxy
try:
audioProxy.playSine(p1,p2,p3,duration)
except NameError:
print 'ALAudioDevice proxy undefined. Are you running a simulated naoqi?'
###################
#stop music
###########################
def StopPlay():
playProxy.stopAll()
######################################################
# Use the Play() function to play a wav or mp3 file on Nao.
# The files should be uploaded via ftp. Go to ftp://username:password@nao's_ip
# and upload them to its home directory, which is /home/nao/ .
# id_music
######################################################
def Play(file_name):
"""Plays a audio file on Nao, it runs the file from the /home/nao/ directory"""
file_name = "/home/nao/"+file_name
id_music=playProxy.post.playFile(file_name)
return id_music
###########
# Pause
########################
def Pause(id_music):
playProxy.post.pause(id_music)
########################
#playFileFromPosition
##############################
def playFileFromPosition(file_name, position):
file_name = "/home/nao/"+file_name
id_music=playProxy.post.playFileFromPosition(file_name, position)
return id_music
##########################
#Set Volume TTS
##################################
def SetTTSVolume(volume):
tts.setVolume(volume)
##########################
#Get Volume TTS
##################################
def GetTTSVolume():
vol=tts.getVolume()
return vol
##########################
#Set Volume Music
##################################
def SetMusicVolume(id_music,volume):
playProxy.setVolume(id_music,volume)
##########################
#Get Volume Music
##################################
def GetMusicVolume():
vol=playProxy.getMasterVolume()
return vol
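######################################################
# Example sketch: play a file previously uploaded to /home/nao/, halve its
# volume and pause it after two seconds ('song.mp3' is a hypothetical name;
# the volume scale is assumed to match the 0.0-1.0 range of the TTS volume).
######################################################
def _example_play_music():
    music_id = Play("song.mp3")
    SetMusicVolume(music_id, 0.5)
    sleep(2.0)
    Pause(music_id)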
###############################################################################
## This will record a video file in the /home/nao/naoqi/share/naoqi/vision
## directory on Nao
###############################################################################
def Record(file_name, fps = 3.0, resolution = 0):
"""
    file_name without extension. fps should be higher than 3.0.
    resolution should be between 0 and 2.
    Saved in /home/nao/naoqi/share/naoqi/vision
    """
    # 'vidp' was undefined here; the ALVideoRecorder proxy created by
    # InitProxy() lives in the global videoProxy.
    videoProxy.startVideoRecord_adv(file_name,fps,"MJPG",resolution,-1)
###############################################################################
## This function will look for a face. If it doesn't find it False is returned.
###############################################################################
def FindFace(gain=1.0, offset=[0.0, -0.2]):
""" It looks for a face and if it finds it returns boolean True """
location, detected = ALFacePosition()
if detected:
return True
## - -
## 0 0 0 0 0
## 4 2 0 2 4
## ###########
##-0.4#+-+-+-+#e#
## #|#####|#|#
##-0.2#+#+-+#+#+#
## #|#|#|#|#|#
## 0#+#+#s#+#+#
## #|#|###|#|#
## 0.2#+#+-+-+#+#
## #|#######|#
## 0.4#+-+-+-+-+#
## ###########
# offset=0.0
# offset=0.0
# gain=1.0
max_yaw=2 # range [-119, 119] degrees
min_yaw=-2
max_pitch=0.26 # conservative but safe, real range is [-38.5, 29.5] /180*Pi
min_pitch=-0.6
yaw = [0.0, 0.0,-0.2,-0.2,0.0,0.2, 0.2, 0.2, 0.0,-0.2,-0.4,-0.4,-0.4,-0.2,0.0,0.2,0.4, 0.4, 0.4, 0.2, 0.0]
pitch = [0.0,-0.2,-0.2, 0.0,0.0,0.0,-0.2,-0.4,-0.4,-0.4,-0.4,-0.2, 0.0, 0.0,0.0,0.0,0.0,-0.2,-0.4,-0.4,-0.4]
# pitch = [0,-0.2,-0.2,0.0,0.2,0.2,0.2,0.0,-0.2,-0.4,-0.4,-0.4,-0.4,-0.2,
# 0.0,0.2,0.4,0.4,0.4,0.4,0.4,0.2,0.0,-0.2,-0.4,0.0]
# pitch = [0,-0.2,-0.2,0.0,0.0,0.0,0.2,0.0,-0.2,-0.4,-0.4,-0.4,-0.4,-0.2,
# 0.0,0.2,0.4,0.4,0.4,0.4,0.4,0.2,0.0,-0.2,0.0]
for i in range(0,len(yaw)):
names = ["HeadYaw", "HeadPitch"]
the_yaw=yaw[i]*gain+offset[0]
the_pitch=pitch[i]*gain+offset[1]
if the_yaw>max_yaw:
the_yaw=max_yaw
if the_yaw<min_yaw:
the_yaw=min_yaw
if the_pitch>max_pitch:
the_pitch=max_pitch
if the_pitch<min_pitch:
the_pitch=min_pitch
angleLists = [[the_yaw], [the_pitch]]
timeLists = [[1],[1]]
isAbsolute = True
motionProxy.angleInterpolation(names, angleLists, timeLists, isAbsolute)
sleep(0.25)
location, detected = ALFacePosition()
if detected:
return True
return False
#######################################################################
## This function executes movements exported from Choregraphe
## and saved in a *.py file. Make sure to initialize the motion proxy.
#######################################################################
def RunMovement(file_name, post = True, to_start_position = True):
""" Give up the filename containing the movement. Needs motion proxy."""
list_path = sys.path
filefound = False
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/gestures/"+file_name):
file_name=list_path[i]+"/gestures/"+file_name
filefound=True
break
if os.path.exists(list_path[i]+"/"+file_name):
file_name=list_path[i]+"/"+file_name
filefound=True
break
if not filefound:
print "Movement or gesture "+str(file_name)+" not found in PYTHONPATH"
return
file_load = open(file_name)
lines = file_load.readlines()
for i in range(0,len(lines)):
if lines[i].startswith("try:"):
break
exec lines[i]
if to_start_position:
last_key = motionProxy.getAngles(names, True)
high_time = 0.0
for i in range(0,len(times)):
cur_time = times[i][len(times[i])-1]
if cur_time > high_time:
high_time = cur_time
for i in range(0, len(times)):
times[i].append(high_time+0.1)
times[i].append(high_time+2)
keys[i].append(keys[i][len(keys[i])-1])
keys[i].append([last_key[i],[ 3, -0.55556, 0.00000], [ 3, 0.55556, 0.00000]])
if post:
motionProxy.post.angleInterpolationBezier(names, times, keys)
else:
motionProxy.angleInterpolationBezier(names, times, keys)
###########################################################################
## This function runs a speech script saved as a *.csv file. Column 1
## contains the time in seconds, Column 2 contains the TTS input. This
## function requires a TTS proxy.
###########################################################################
def RunSpeech(file_name):
""" file_name is the name containing the speech script."""
list_path = sys.path
    filefound=False
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/tts/"+file_name):
file_name=list_path[i]+"/tts/"+file_name
filefound=True
break
if os.path.exists(list_path[i]+"/"+file_name):
file_name=list_path[i]+"/"+file_name
filefound=True
break
if not filefound:
print "Speech file "+str(file_name)+" not found in PYTHONPATH"
return
try:
script_reader = csv.reader(open(file_name, 'rb'))
except:
print "Speech script does not exist!!!"
return
cur_line = script_reader.next()
start_time = time()
while True:
try:
cur_line = script_reader.next()
except:
break
while float(cur_line[0])> (time()-start_time):
sleep(0.1)
Say(cur_line[1])
########################################################################
## Uses a led CSV file to read out the proper eye pattern variables.
## Allows you to set LED Group, RGB, and Duration
## Frequency is currently ignored
## CSV file format:
## Row 1 = Header (ignored)
## Row 2-n = LED Group; Red; Green; Blue; Frequency; Duration
## Duration = Fade Time past to ALLeds.FadeListRGB
## CSV file delimiter is ';' (Used by Excel)
#########################################################################
def RunLED(file_name, post = True):
""" Uses a led CSV file to read out the proper eye pattern variables."""
#open CSV file
list_path = sys.path
filefound=False
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/led/"+file_name):
file_name=list_path[i]+"/led/"+file_name
filefound=True
break
if os.path.exists(list_path[i]+"/"+file_name):
file_name=list_path[i]+"/"+file_name
filefound=True
break
if not filefound:
print "LED file "+str(file_name)+" not found in PYTHONPATH"
return
file_load = open(file_name, 'rb')
#read all rows of CSV file (assumes delimiter is ';')
csv_reader = csv.reader(file_load, delimiter=';')
#read header row and ignore it
csv_reader.next()
#initialize LEDs to off
ledProxy.post.off('FaceLeds')
#print 'ledProxy.post.off(', 'FaceLeds', ')'
#read first LED command and initialize fadeListRGB parameters
parameters = csv_reader.next()
name = parameters[0]
rgbList = [256*256*int(parameters[1])+256*int(parameters[2])+int(parameters[3])]
timeList = [float(parameters[5])]
#while CSV file not empty
while True:
try:
parameters = csv_reader.next()
except:
break
#if LED Group different than last row
if (name != parameters[0]):
#send current fadeListRGB parameters to Nao
ledProxy.post.fadeListRGB(name, rgbList, timeList)
#print 'ledProxy.post.fadeListRGB(', name, rgbList, timeList, ')'
#intialize fadeListRGB parameters
name = parameters[0]
rgbList = []
timeList = []
#add this row to fadeListRGB parameters
rgbList.append(256*256*int(parameters[1])+256*int(parameters[2])+int(parameters[3]))
timeList.append(float(parameters[5]))
#all done - send current fadeListRGB parameters to Nao
ledProxy.post.fadeListRGB(name, rgbList, timeList)
#print 'ledProxy.post.fadeListRGB(', name, rgbList, timeList, ')'
return file_load
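#########################################################################
## Example sketch: write a two-step red blink pattern and run it. This
## assumes a writable 'led' folder on one of the sys.path entries that
## RunLED() walks; 'blink.led' is a hypothetical file name.
#########################################################################
def _example_run_led():
    if not os.path.exists("led"):
        os.mkdir("led")       # assumes '.' (or the script dir) is on sys.path
    f = open("led/blink.led", "w")
    f.write("Group;Red;Green;Blue;Frequency;Duration\n")   # header is ignored
    f.write("FaceLeds;255;0;0;0;0.5\n")                    # fade to red
    f.write("FaceLeds;0;0;0;0;0.5\n")                      # fade to off
    f.close()
    RunLED("blink.led")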
def GetAvailableModules():
dir_file = []
list_path = sys.path
filefound = False
for i in range (0,len(list_path)):
if os.path.exists(list_path[i]+"/modules"):
filefound = True
break
if not filefound:
print "Could not find /modules directory!"
raise IOError
return None
list_dir = os.listdir(list_path[i]+"/modules")
for directory in list_dir:
if not directory.startswith('.'):
list_subdir = os.listdir(list_path[i]+"/modules/"+directory)
module_files = ["/modules/",directory+"/"]
for file_name in list_subdir:
if not file_name.startswith("."):
module_files.append(file_name)
#module_files.append([directory,file_name])
dir_file.append(module_files)
return dir_file
def InitSpeech(wordList=["yes","no","hello NAO","goodbye NAO"],the_language="English",wordSpotting=False):
global speechProxy
global memoryProxy
    #Creating a proxy on the module
    #Before calling the ASR commands, you need to create a proxy on the ASR
    #module:
    # asr = ALProxy("ALSpeechRecognition",myIP,9559) #IP = address of your robot
    #asr=speechProxy
    #Before starting the ASR engine, you must set the language of the speech
    #recognition system. The list of the installed languages can be obtained
    #through the getAvailableLanguages method.
    asr.setLanguage(the_language)
    #Note that this does not affect the language of speech synthesis.
    #We will assume that it must be the same:
    tts.setLanguage(the_language)
    # To set the words that should be recognized, use the
    # setWordListAsVocabulary method.
    asr.setVocabulary(wordList,wordSpotting)
    #Note:
    #The following feature (the usage of the "loadVocabulary()" function) is
    #not available for Chinese and Japanese.
    #If you prefer not to use the setWordListAsVocabulary function, you can
    #directly define a vocabulary in a .lxd file and load it with the
    #loadVocabulary method as described below:
    # Example: load the vocabulary defined in the file /home/nao/MyVocabulary.lxd
    #   asr.loadVocabulary("/home/nao/MyVocabulary.lxd")
    #Defining your vocabulary in a .lxd file and loading it with the
    #loadVocabulary function allows you to add alternative phonetic
    #transcriptions that refine the automatic transcriptions of the ASR, and
    #can improve performance. For example, if you want the speech recognition
    #to be robust to the different pronunciations of the word "hello", you
    #can define your vocabulary file as follows:
    # !Language=English
    # #!FSG <words>=alt("yes" "no" "hello" "goodbye" )alt; <start>=alt(<words>)alt;
    # #!Transcriptions hello h@l@U hello h@ll@U
    #The phonetic alphabet used to write these phonetizations is described
    #here: http://www.phon.ucl.ac.uk/home/sampa/.
    #Collecting the recognized word in the memory
    #If a word has been recognized, the result is placed in the
    #"WordRecognized" key of ALMemory.
    #As a result, you can read it by accessing this key in the ALMemory module.
    # Clear event WordRecognized in ALMemory module.
    memoryProxy.insertData("WordRecognized",[])
def DetectSpeech():
    global memoryProxy
    result=[]    # initialized so the function is safe if getData() raises
    try:
        result=memoryProxy.getData("WordRecognized")
        if result:
            memoryProxy.insertData("WordRecognized",[])
    except RuntimeError,e:
        # catch exception
        print "error getting data", e
    return result
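###########################################################################
## Example sketch: configure a small vocabulary and poll for recognitions.
## Depending on the naoqi version, the engine may additionally need to be
## started with asr.subscribe(...) before words are recognized.
###########################################################################
def _example_speech():
    InitSpeech(["yes", "no"], "English")
    for i in range(20):
        result = DetectSpeech()
        if result:
            print "heard:", result
        sleep(0.5)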
def InitLandMark(period = 500):
# Subscribe to the ALLandMarkDetection extractor
landmarkProxy.subscribe("Test_Mark", period, 0.0 )
def DetectLandMark():
    # Get data from landmark detection (assuming landmark detection has been activated).
global memoryProxy
data = memoryProxy.getData("LandmarkDetected")
if data==None:
data=[] # otherwise the next statement fails ...
if len(data)==0:
detected=False
timestamp=time()
markerInfo=[]
else:
detected=True
#timestamp=data[0][0]+1E-6*data[0][1] #this works but only if a landmark is detected
timestamp=time()
markerInfo=[]
for p in data[1]:
markerInfo.append([p[1][0], #markerID
p[0][1], #alpha - x location in camera angle
p[0][2], #beta - y location
p[0][3], #sizeX
p[0][4], #sizeY
p[0][5] #orientation about vertical w.r. Nao's head
])
return detected, timestamp, markerInfo
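###########################################################################
## Example sketch: subscribe, poll for naomarks for a few seconds, then
## report what was seen.
###########################################################################
def _example_landmark():
    InitLandMark()
    for i in range(10):
        detected, stamp, marks = DetectLandMark()
        if detected:
            for m in marks:
                print "mark", m[0], "at alpha/beta:", m[1], m[2]
        sleep(0.5)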
def InitSoundDetection(switch=1):
# Subscribe to the ALSoundDetection
global soundProxy
soundProxy.setParameter("Sensitivity", 0.3)
if switch==1:
try:
soundProxy.subscribe(__nao_module_name__ )
except:
print "Could not subscribe to ALSoundDetection"
else:
try:
soundProxy.unsubscribe(__nao_module_name__ )
except:
print "Could not unsubscribe from ALSoundDetection"
def DetectSound():
    # Get data from sound detection (assuming sound detection has been activated).
data = memoryProxy.getData("SoundDetected")
##The SoundDetected key is organized as follows:
##
##[[index_1, type_1, confidence_1, time_1],
## ...,
##[index_n, type_n, confidence_n, time_n]]
##
##n is the number of sounds detected in the last audio buffer,
##index is the index (in samples) of either the sound start (if type is equal to 1) or the sound end (if type is equal to 0),
##time is the detection time in micro seconds
##confidence gives an estimate of the probability [0;1] that the sound detected by the module corresponds to a real sound.
if data==None:
data=[] # otherwise the next statement fails ...
if len(data)==0:
detected=False
timestamp=time()
soundInfo=[]
else:
detected=True
timestamp=time()
soundInfo=[]
for snd in data:
soundInfo.append([ snd[0], #index of sound start/end
snd[1], #type: 1=start, 0=end
snd[2] #confidence: probability that there was a sound
])
memoryProxy.insertData("SoundDetected",[]) #clear memory
return detected, timestamp, soundInfo
def InitSoundLocalization(switch=1):
    # Subscribe to ALAudioSourceLocalization; the proxy is created by
    # InitProxy() and stored in the global soundsourceProxy.
    global soundsourceProxy
    if switch==1:
        try:
            soundsourceProxy.subscribe(__nao_module_name__ )
        except:
            print "Could not subscribe to ALAudioSourceLocalization"
    else:
        try:
            soundsourceProxy.unsubscribe(__nao_module_name__ )
        except:
            print "Could not unsubscribe from ALAudioSourceLocalization"
def DetectSoundLocation():
    # Get data from sound localization (assuming it has been activated).
global memoryProxy
data = memoryProxy.getData("ALSoundLocalization/SoundLocated")
##The SoundDetected key is organized as follows:
##
##[ [time(sec), time(usec)],
##
## [azimuth(rad), elevation(rad), confidence],
##
## [Head Position[6D]]
##]
if data==None:
data=[] # otherwise the next statement fails ...
if len(data)==0:
detected=False
timestamp=time()
soundInfo=[]
else:
detected=True
#timestamp=data[0][0]+1E-6*data[0][1] #this works but only if a sound is located
timestamp=time()
soundInfo=[]
for snd in data:
soundInfo.append([ snd[1][0], #azimuth angle
                               snd[1][1], #elevation angle
snd[1][2], #confidence: probability that there was a sound
snd[2]]) #Headposition 6D
memoryProxy.insertData("ALSoundLocalization/SoundLocated",[]) #clear memory
return detected, timestamp, soundInfo
if __name__ == "__main__":
print GetAvailableModules(), "\n"
print GetAvailableGestures(), "\n"
print GetAvailableLEDPatterns(), "\n"
print GetAvailableDialogs(), "\n"
|
Rctue/nao-lib
|
nao_temp.py
|
Python
|
gpl-2.0
| 64,405
|
"""
Virtualization test utility functions.
:copyright: 2008-2009 Red Hat Inc.
"""
import time
import string
import random
import socket
import os
import signal
import re
import logging
import commands
import fcntl
import sys
import inspect
import tarfile
import shutil
import getpass
from autotest.client import utils, os_dep
from autotest.client.shared import error, logging_config
from autotest.client.shared import git
import data_dir
try:
from staging import utils_koji
except ImportError:
from autotest.client.shared import utils_koji
import platform
ARCH = platform.machine()
class UnsupportedCPU(error.TestError):
pass
# TODO: remove this import when log_last_traceback is moved to autotest
import traceback
# TODO: this function is being moved into autotest. For compatibility
# reasons keep it here too but new code should use the one from base_utils.
def log_last_traceback(msg=None, log=logging.error):
"""
@warning: This function is being moved into autotest and your code should
use autotest.client.shared.base_utils function instead.
Writes last traceback into specified log.
:param msg: Override the default message. ["Original traceback"]
:param log: Where to log the traceback [logging.error]
"""
if not log:
log = logging.error
if msg:
log(msg)
exc_type, exc_value, exc_traceback = sys.exc_info()
if not exc_traceback:
log('Requested log_last_traceback but no exception was raised.')
return
log("Original " +
"".join(traceback.format_exception(exc_type, exc_value,
exc_traceback)))
def lock_file(filename, mode=fcntl.LOCK_EX):
f = open(filename, "w")
fcntl.lockf(f, mode)
return f
def unlock_file(f):
fcntl.lockf(f, fcntl.LOCK_UN)
f.close()
# Utility functions for dealing with external processes
def unique(llist):
"""
Return a list of the elements in list, but without duplicates.
    :param llist: List with values.
    :return: List with non-duplicate elements.
"""
n = len(llist)
if n == 0:
return []
u = {}
try:
for x in llist:
u[x] = 1
except TypeError:
return None
else:
return u.keys()
def find_command(cmd):
"""
Try to find a command in the PATH, paranoid version.
:param cmd: Command to be found.
:raise: ValueError in case the command was not found.
"""
common_bin_paths = ["/usr/libexec", "/usr/local/sbin", "/usr/local/bin",
"/usr/sbin", "/usr/bin", "/sbin", "/bin"]
try:
path_paths = os.environ['PATH'].split(":")
    except KeyError:
path_paths = []
path_paths = unique(common_bin_paths + path_paths)
for dir_path in path_paths:
cmd_path = os.path.join(dir_path, cmd)
if os.path.isfile(cmd_path):
return os.path.abspath(cmd_path)
raise ValueError('Missing command: %s' % cmd)
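# A short usage sketch (the command name is illustrative):
def _example_find_command():
    try:
        qemu_img = find_command("qemu-img")
        logging.info("using %s", qemu_img)
    except ValueError as details:
        logging.error(details)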
def pid_exists(pid):
"""
Return True if a given PID exists.
:param pid: Process ID number.
"""
try:
os.kill(pid, 0)
return True
except Exception:
return False
def safe_kill(pid, signal):
"""
Attempt to send a signal to a given process that may or may not exist.
:param signal: Signal number.
"""
try:
os.kill(pid, signal)
return True
except Exception:
return False
def kill_process_tree(pid, sig=signal.SIGKILL):
"""Signal a process and all of its children.
If the process does not exist -- return.
:param pid: The pid of the process to signal.
:param sig: The signal to send to the processes.
"""
if not safe_kill(pid, signal.SIGSTOP):
return
children = commands.getoutput("ps --ppid=%d -o pid=" % pid).split()
for child in children:
kill_process_tree(int(child), sig)
safe_kill(pid, sig)
safe_kill(pid, signal.SIGCONT)
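# Illustrative note on the SIGSTOP/SIGCONT bracket above: the parent is
# frozen first so it cannot spawn new children between the moment its
# children are listed and the moment they are signalled.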
# The following are utility functions related to ports.
def is_port_free(port, address):
"""
Return True if the given port is available for use.
:param port: Port number
"""
try:
s = socket.socket()
#s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if address == "localhost":
s.bind(("localhost", port))
free = True
else:
s.connect((address, port))
free = False
except socket.error:
if address == "localhost":
free = False
else:
free = True
s.close()
return free
def find_free_port(start_port, end_port, address="localhost"):
"""
Return a host free port in the range [start_port, end_port].
:param start_port: First port that will be checked.
:param end_port: Port immediately after the last one that will be checked.
"""
for i in range(start_port, end_port):
if is_port_free(i, address):
return i
return None
def find_free_ports(start_port, end_port, count, address="localhost"):
"""
Return count of host free ports in the range [start_port, end_port].
    :param count: Number of free ports to find in the range.
:param start_port: First port that will be checked.
:param end_port: Port immediately after the last one that will be checked.
"""
ports = []
i = start_port
while i < end_port and count > 0:
if is_port_free(i, address):
ports.append(i)
count -= 1
i += 1
return ports
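# Usage sketch (illustrative): reserve up to two free local ports, e.g.
# for a pair of VNC servers; fewer are returned if the range is exhausted:
#   vnc_ports = find_free_ports(5900, 6000, 2)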
# An easy way to log lines to files when the logging system can't be used
_open_log_files = {}
_log_file_dir = "/tmp"
def log_line(filename, line):
"""
Write a line to a file. '\n' is appended to the line.
:param filename: Path of file to write to, either absolute or relative to
the dir set by set_log_file_dir().
:param line: Line to write.
"""
global _open_log_files, _log_file_dir
path = get_path(_log_file_dir, filename)
if path not in _open_log_files:
# First, let's close the log files opened in old directories
close_log_file(filename)
# Then, let's open the new file
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
_open_log_files[path] = open(path, "w")
timestr = time.strftime("%Y-%m-%d %H:%M:%S")
_open_log_files[path].write("%s: %s\n" % (timestr, line))
_open_log_files[path].flush()
def set_log_file_dir(directory):
"""
Set the base directory for log files created by log_line().
:param dir: Directory for log files.
"""
global _log_file_dir
_log_file_dir = directory
def close_log_file(filename):
global _open_log_files, _log_file_dir
remove = []
for k in _open_log_files:
if os.path.basename(k) == filename:
f = _open_log_files[k]
f.close()
remove.append(k)
if remove:
for key_to_remove in remove:
_open_log_files.pop(key_to_remove)
# The following are miscellaneous utility functions.
def get_path(base_path, user_path):
"""
Translate a user specified path to a real path.
If user_path is relative, append it to base_path.
If user_path is absolute, return it as is.
:param base_path: The base path of relative user specified paths.
:param user_path: The user specified path.
"""
if os.path.isabs(user_path):
return user_path
else:
return os.path.join(base_path, user_path)
def generate_random_string(length, ignore_str=string.punctuation,
convert_str=""):
"""
Return a random string using alphanumeric characters.
:param length: Length of the string that will be generated.
    :param ignore_str: Characters that will not be included in the generated string.
:param convert_str: Characters that need to be escaped (prepend "\\").
:return: The generated random string.
"""
r = random.SystemRandom()
sr = ""
chars = string.letters + string.digits + string.punctuation
if not ignore_str:
ignore_str = ""
for i in ignore_str:
chars = chars.replace(i, "")
while length > 0:
tmp = r.choice(chars)
if convert_str and (tmp in convert_str):
tmp = "\\%s" % tmp
sr += tmp
length -= 1
return sr
def generate_random_id():
"""
Return a random string suitable for use as a qemu id.
"""
return "id" + generate_random_string(6)
def generate_tmp_file_name(file_name, ext=None, directory='/tmp/'):
"""
Returns a temporary file name. The file is not created.
"""
while True:
file_name = (file_name + '-' + time.strftime("%Y%m%d-%H%M%S-") +
generate_random_string(4))
if ext:
file_name += '.' + ext
file_name = os.path.join(directory, file_name)
if not os.path.exists(file_name):
break
return file_name
def format_str_for_message(sr):
"""
    Format sr so that it can be appended to a message.
    If sr consists of one line, prefix it with a space.
    If sr consists of multiple lines, prefix it with a newline.
    :param sr: string that will be formatted.
    """
    lines = sr.splitlines()
num_lines = len(lines)
sr = "\n".join(lines)
if num_lines == 0:
return ""
elif num_lines == 1:
return " " + sr
else:
return "\n" + sr
def wait_for(func, timeout, first=0.0, step=1.0, text=None):
"""
If func() evaluates to True before timeout expires, return the
value of func(). Otherwise return None.
@brief: Wait until func() evaluates to True.
:param timeout: Timeout in seconds
:param first: Time to sleep before first attempt
    :param step: Time to sleep between attempts in seconds
:param text: Text to print while waiting, for debug purposes
"""
start_time = time.time()
end_time = time.time() + timeout
time.sleep(first)
while time.time() < end_time:
if text:
logging.debug("%s (%f secs)", text, (time.time() - start_time))
output = func()
if output:
return output
time.sleep(step)
return None
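# Usage sketch (illustrative): poll a condition with a timeout, e.g. wait
# up to 60 seconds for a hypothetical guest-ready marker file to appear:
#   ready = wait_for(lambda: os.path.exists("/tmp/guest.ready"), 60,
#                    first=1.0, step=2.0, text="waiting for guest")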
def get_hash_from_file(hash_path, dvd_basename):
"""
    Get a hash for a given DVD image from a hash file
(Hash files are usually named MD5SUM or SHA1SUM and are located inside the
download directories of the DVDs)
:param hash_path: Local path to a hash file.
    :param dvd_basename: Basename of a DVD image
"""
hash_file = open(hash_path, 'r')
for line in hash_file.readlines():
if dvd_basename in line:
return line.split()[0]
def run_tests(parser, job):
"""
Runs the sequence of KVM tests based on the list of dictionaries
generated by the configuration system, handling dependencies.
:param parser: Config parser object.
:param job: Autotest job object.
    :return: True if all tests passed, False if any of them failed.
"""
last_index = -1
for i, d in enumerate(parser.get_dicts()):
logging.info("Test %4d: %s" % (i + 1, d["shortname"]))
last_index += 1
status_dict = {}
failed = False
    # Add a parameter deciding whether to set up the host env in the test case.
    # For some special tests we only set up the host in the first and last cases.
    # When we need to set up the host env we use host_setup_flag as follows:
# 0(00): do nothing
# 1(01): setup env
# 2(10): cleanup env
# 3(11): setup and cleanup env
index = 0
setup_flag = 1
cleanup_flag = 2
for param_dict in parser.get_dicts():
if param_dict.get("host_setup_flag", None) is not None:
flag = int(param_dict["host_setup_flag"])
if index == 0:
param_dict["host_setup_flag"] = flag | setup_flag
elif index == last_index:
param_dict["host_setup_flag"] = flag | cleanup_flag
else:
param_dict["host_setup_flag"] = flag
else:
if index == 0:
param_dict["host_setup_flag"] = setup_flag
elif index == last_index:
param_dict["host_setup_flag"] = cleanup_flag
index += 1
# Add kvm module status
sysfs_dir = param_dict.get("sysfs_dir", "/sys")
param_dict["kvm_default"] = get_module_params(sysfs_dir, 'kvm')
if param_dict.get("skip") == "yes":
continue
dependencies_satisfied = True
for dep in param_dict.get("dep"):
for test_name in status_dict.keys():
if not dep in test_name:
continue
                # The only really non-fatal state is WARN; all the
                # others make it unsafe to proceed with dependency
                # execution
if status_dict[test_name] not in ['GOOD', 'WARN']:
dependencies_satisfied = False
break
test_iterations = int(param_dict.get("iterations", 1))
test_tag = param_dict.get(
"vm_type") + "." + param_dict.get("shortname")
if dependencies_satisfied:
# Setting up profilers during test execution.
profilers = param_dict.get("profilers", "").split()
for profiler in profilers:
job.profilers.add(profiler, **param_dict)
# We need only one execution, profiled, hence we're passing
# the profile_only parameter to job.run_test().
profile_only = bool(profilers) or None
test_timeout = int(param_dict.get("test_timeout", 14400))
current_status = job.run_test_detail("virt",
params=param_dict,
tag=test_tag,
iterations=test_iterations,
profile_only=profile_only,
timeout=test_timeout)
for profiler in profilers:
job.profilers.delete(profiler)
else:
# We will force the test to fail as TestNA during preprocessing
param_dict['dependency_failed'] = 'yes'
current_status = job.run_test_detail("virt",
params=param_dict,
tag=test_tag,
iterations=test_iterations)
if not current_status:
failed = True
status_dict[param_dict.get("name")] = current_status
return not failed
def display_attributes(instance):
"""
Inspects a given class instance attributes and displays them, convenient
for debugging.
"""
logging.debug("Attributes set:")
for member in inspect.getmembers(instance):
name, value = member
attribute = getattr(instance, name)
if not (name.startswith("__") or callable(attribute) or not value):
logging.debug(" %s: %s", name, value)
def get_full_pci_id(pci_id):
"""
Get full PCI ID of pci_id.
:param pci_id: PCI ID of a device.
"""
cmd = "lspci -D | awk '/%s/ {print $1}'" % pci_id
status, full_id = commands.getstatusoutput(cmd)
if status != 0:
return None
return full_id
def get_vendor_from_pci_id(pci_id):
"""
Check out the device vendor ID according to pci_id.
:param pci_id: PCI ID of a device.
"""
cmd = "lspci -n | awk '/%s/ {print $3}'" % pci_id
return re.sub(":", " ", commands.getoutput(cmd))
class Flag(str):
"""
    Class for easy merging of cpu flags.
"""
aliases = {}
def __new__(cls, flag):
if flag in Flag.aliases:
flag = Flag.aliases[flag]
return str.__new__(cls, flag)
def __eq__(self, other):
s = set(self.split("|"))
o = set(other.split("|"))
if s & o:
return True
else:
return False
def __str__(self):
return self.split("|")[0]
def __repr__(self):
return self.split("|")[0]
def __hash__(self, *args, **kwargs):
return 0
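# Flag equality is set intersection over the '|'-separated spellings, so a
# flag read from /proc/cpuinfo matches whichever alias qemu reports.
# Illustrative examples:
#   Flag('sse4.1|sse4_1') == 'sse4_1'  -> True  (alias sets intersect)
#   Flag('sse4.1|sse4_1') == 'ssse3'   -> False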
kvm_map_flags_to_test = {
Flag('avx'): set(['avx']),
Flag('sse3|pni'): set(['sse3']),
Flag('ssse3'): set(['ssse3']),
Flag('sse4.1|sse4_1|sse4.2|sse4_2'): set(['sse4']),
Flag('aes'): set(['aes', 'pclmul']),
Flag('pclmuldq'): set(['pclmul']),
Flag('pclmulqdq'): set(['pclmul']),
Flag('rdrand'): set(['rdrand']),
Flag('sse4a'): set(['sse4a']),
Flag('fma4'): set(['fma4']),
Flag('xop'): set(['xop']),
}
kvm_map_flags_aliases = {
'sse4_1': 'sse4.1',
'sse4_2': 'sse4.2',
'pclmuldq': 'pclmulqdq',
'sse3': 'pni',
'ffxsr': 'fxsr_opt',
'xd': 'nx',
'i64': 'lm',
'psn': 'pn',
'clfsh': 'clflush',
'dts': 'ds',
'htt': 'ht',
'CMPXCHG8B': 'cx8',
'Page1GB': 'pdpe1gb',
'LahfSahf': 'lahf_lm',
'ExtApicSpace': 'extapic',
'AltMovCr8': 'cr8_legacy',
'cr8legacy': 'cr8_legacy'
}
def kvm_flags_to_stresstests(flags):
"""
    Convert [cpu flags] to [tests]
    :param flags: list of cpu flags
    :return: Tests as a comma-prefixed string.
"""
tests = set([])
for f in flags:
tests |= kvm_map_flags_to_test[f]
param = ""
for f in tests:
param += "," + f
return param
def get_cpu_flags():
"""
Returns a list of the CPU flags
"""
flags_re = re.compile(r'^flags\s*:(.*)')
for line in open('/proc/cpuinfo').readlines():
match = flags_re.match(line)
if match:
return match.groups()[0].split()
return []
def get_cpu_vendor(cpu_flags=[], verbose=True):
"""
    Returns the name of the CPU vendor: intel, amd, ibm or unknown
"""
if not cpu_flags:
cpu_flags = get_cpu_flags()
if 'vmx' in cpu_flags:
vendor = 'intel'
elif 'svm' in cpu_flags:
vendor = 'amd'
elif ARCH == 'ppc64':
vendor = 'ibm'
else:
vendor = 'unknown'
if verbose:
logging.debug("Detected CPU vendor as '%s'", vendor)
return vendor
def get_support_machine_type(qemu_binary="/usr/libexec/qemu-kvm"):
"""
    Get the machine types the host supports; return a list of machine types
"""
o = utils.system_output("%s -M ?" % qemu_binary)
s = re.findall("(\S*)\s*RHEL\s", o)
c = re.findall("(RHEL.*PC)", o)
return (s, c)
def get_cpu_model():
"""
Get cpu model from host cpuinfo
"""
def _make_up_pattern(flags):
"""
Update the check pattern to a certain order and format
"""
pattern_list = re.split(",", flags.strip())
pattern_list.sort()
pattern = r"(\b%s\b)" % pattern_list[0]
for i in pattern_list[1:]:
pattern += r".+(\b%s\b)" % i
return pattern
cpu_types = {"amd": ["Opteron_G5", "Opteron_G4", "Opteron_G3",
"Opteron_G2", "Opteron_G1"],
"intel": ["Haswell", "SandyBridge", "Westmere",
"Nehalem", "Penryn", "Conroe"]}
cpu_type_re = {"Opteron_G5":
"f16c,fma,tbm",
"Opteron_G4":
"avx,xsave,aes,sse4.2|sse4_2,sse4.1|sse4_1,cx16,ssse3,sse4a",
"Opteron_G3": "cx16,sse4a",
"Opteron_G2": "cx16",
"Opteron_G1": "",
"Haswell":
"fsgsbase,bmi1,hle,avx2,smep,bmi2,erms,invpcid,rtm",
"SandyBridge":
"avx,xsave,aes,sse4_2|sse4.2,sse4.1|sse4_1,cx16,ssse3",
"Westmere": "aes,sse4.2|sse4_2,sse4.1|sse4_1,cx16,ssse3",
"Nehalem": "sse4.2|sse4_2,sse4.1|sse4_1,cx16,ssse3",
"Penryn": "sse4.1|sse4_1,cx16,ssse3",
"Conroe": "ssse3"}
flags = get_cpu_flags()
flags.sort()
cpu_flags = " ".join(flags)
vendor = get_cpu_vendor(flags)
cpu_model = ""
if cpu_flags:
for cpu_type in cpu_types.get(vendor):
pattern = _make_up_pattern(cpu_type_re.get(cpu_type))
if re.findall(pattern, cpu_flags):
cpu_model = cpu_type
break
else:
logging.warn("Can not get cpu flags from cpuinfo")
if cpu_model:
cpu_type_list = cpu_types.get(vendor)
cpu_support_model = cpu_type_list[cpu_type_list.index(cpu_model):]
cpu_model = ",".join(cpu_support_model)
return cpu_model
def get_archive_tarball_name(source_dir, tarball_name, compression):
'''
Get the name for a tarball file, based on source, name and compression
'''
if tarball_name is None:
tarball_name = os.path.basename(source_dir)
if not tarball_name.endswith('.tar'):
tarball_name = '%s.tar' % tarball_name
if compression and not tarball_name.endswith('.%s' % compression):
tarball_name = '%s.%s' % (tarball_name, compression)
return tarball_name
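# Worked example (illustrative): with source_dir='/var/log/libvirt',
# tarball_name=None and compression='bz2', the name is derived from the
# source basename, yielding 'libvirt.tar.bz2'.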
def archive_as_tarball(source_dir, dest_dir, tarball_name=None,
compression='bz2', verbose=True):
'''
Saves the given source directory to the given destination as a tarball
    If the name of the archive is omitted, it will be taken from the
    source_dir. If it is an absolute path, dest_dir will be ignored. But
    if both the destination directory and tarball name are given, and the
    latter is not an absolute path, they will be combined.
For archiving directory '/tmp' in '/net/server/backup' as file
'tmp.tar.bz2', simply use:
>>> utils_misc.archive_as_tarball('/tmp', '/net/server/backup')
    To save the file with a different name, say 'host1-tmp.tar.bz2'
and save it under '/net/server/backup', use:
>>> utils_misc.archive_as_tarball('/tmp', '/net/server/backup',
'host1-tmp')
To save with gzip compression instead (resulting in the file
'/net/server/backup/host1-tmp.tar.gz'), use:
>>> utils_misc.archive_as_tarball('/tmp', '/net/server/backup',
'host1-tmp', 'gz')
'''
tarball_name = get_archive_tarball_name(source_dir,
tarball_name,
compression)
if not os.path.isabs(tarball_name):
tarball_path = os.path.join(dest_dir, tarball_name)
else:
tarball_path = tarball_name
if verbose:
logging.debug('Archiving %s as %s' % (source_dir,
tarball_path))
os.chdir(os.path.dirname(source_dir))
tarball = tarfile.TarFile(name=tarball_path, mode='w')
tarball = tarball.open(name=tarball_path, mode='w:%s' % compression)
tarball.add(os.path.basename(source_dir))
tarball.close()
def parallel(targets):
"""
Run multiple functions in parallel.
:param targets: A sequence of tuples or functions. If it's a sequence of
tuples, each tuple will be interpreted as (target, args, kwargs) or
(target, args) or (target,) depending on its length. If it's a
sequence of functions, the functions will be called without
arguments.
:return: A list of the values returned by the functions called.
"""
threads = []
for target in targets:
if isinstance(target, tuple) or isinstance(target, list):
t = utils.InterruptedThread(*target)
else:
t = utils.InterruptedThread(target)
threads.append(t)
t.start()
return [t.join() for t in threads]
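# Usage sketch (illustrative, with hypothetical callables): run one call
# with arguments and one without, concurrently, collecting both results:
#   results = parallel([(download, ("http://example.com/a.iso",)), cleanup])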
class VirtLoggingConfig(logging_config.LoggingConfig):
"""
Used with the sole purpose of providing convenient logging setup
for the KVM test auxiliary programs.
"""
def configure_logging(self, results_dir=None, verbose=False):
super(VirtLoggingConfig, self).configure_logging(use_console=True,
verbose=verbose)
def umount(src, mount_point, fstype):
"""
Umount the src mounted in mount_point.
    :param src: mount source
    :param mount_point: mount point
    :param fstype: file system type
"""
mount_string = "%s %s %s" % (src, mount_point, fstype)
if is_mounted(src, mount_point, fstype):
umount_cmd = "umount %s" % mount_point
try:
utils.system(umount_cmd)
return True
except error.CmdError:
return False
else:
logging.debug("%s is not mounted under %s", src, mount_point)
return True
def mount(src, mount_point, fstype, perm="rw"):
"""
Mount the src into mount_point of the host.
    :param src: mount source
    :param mount_point: mount point
    :param fstype: file system type
    :param perm: mount permission
"""
umount(src, mount_point, fstype)
mount_string = "%s %s %s %s" % (src, mount_point, fstype, perm)
if is_mounted(src, mount_point, fstype, perm):
logging.debug("%s is already mounted in %s with %s",
src, mount_point, perm)
return True
mount_cmd = "mount -t %s %s %s -o %s" % (fstype, src, mount_point, perm)
try:
utils.system(mount_cmd)
except error.CmdError:
return False
return is_mounted(src, mount_point, fstype, perm)
def is_mounted(src, mount_point, fstype, perm=""):
"""
Check mount status from /etc/mtab
:param src: mount source
:type src: string
:param mount_point: mount point
:type mount_point: string
:param fstype: file system type
:type fstype: string
:param perm: mount permission
:type perm: string
:return: if the src is mounted as expect
:rtype: Boolean
"""
mount_point = os.path.realpath(mount_point)
src = os.path.realpath(src)
mount_string = "%s %s %s %s" % (src, mount_point, fstype, perm)
if mount_string.strip() in file("/etc/mtab").read():
logging.debug("%s is successfully mounted", src)
return True
else:
logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s",
file("/etc/mtab").read())
return False
def install_host_kernel(job, params):
"""
Install a host kernel, given the appropriate params.
:param job: Job object.
:param params: Dict with host kernel install params.
"""
install_type = params.get('host_kernel_install_type')
if install_type == 'rpm':
logging.info('Installing host kernel through rpm')
rpm_url = params.get('host_kernel_rpm_url')
k_basename = os.path.basename(rpm_url)
dst = os.path.join("/var/tmp", k_basename)
k = utils.get_file(rpm_url, dst)
host_kernel = job.kernel(k)
host_kernel.install(install_vmlinux=False)
utils.write_keyval(job.resultdir,
{'software_version_kernel': k_basename})
host_kernel.boot()
elif install_type in ['koji', 'brew']:
logging.info('Installing host kernel through koji/brew')
koji_cmd = params.get('host_kernel_koji_cmd')
koji_build = params.get('host_kernel_koji_build')
koji_tag = params.get('host_kernel_koji_tag')
k_deps = utils_koji.KojiPkgSpec(tag=koji_tag, build=koji_build,
package='kernel',
subpackages=['kernel-devel', 'kernel-firmware'])
k = utils_koji.KojiPkgSpec(tag=koji_tag, build=koji_build,
package='kernel', subpackages=['kernel'])
c = utils_koji.KojiClient(koji_cmd)
logging.info('Fetching kernel dependencies (-devel, -firmware)')
c.get_pkgs(k_deps, job.tmpdir)
logging.info('Installing kernel dependencies (-devel, -firmware) '
'through %s', install_type)
k_deps_rpm_file_names = [os.path.join(job.tmpdir, rpm_file_name) for
rpm_file_name in c.get_pkg_rpm_file_names(k_deps)]
utils.run('rpm -U --force %s' % " ".join(k_deps_rpm_file_names))
c.get_pkgs(k, job.tmpdir)
k_rpm = os.path.join(job.tmpdir,
c.get_pkg_rpm_file_names(k)[0])
host_kernel = job.kernel(k_rpm)
host_kernel.install(install_vmlinux=False)
utils.write_keyval(job.resultdir,
{'software_version_kernel':
" ".join(c.get_pkg_rpm_file_names(k_deps))})
host_kernel.boot()
elif install_type == 'git':
logging.info('Chose to install host kernel through git, proceeding')
repo = params.get('host_kernel_git_repo')
repo_base = params.get('host_kernel_git_repo_base', None)
branch = params.get('host_kernel_git_branch')
commit = params.get('host_kernel_git_commit')
patch_list = params.get('host_kernel_patch_list')
if patch_list:
patch_list = patch_list.split()
kernel_config = params.get('host_kernel_config', None)
repodir = os.path.join("/tmp", 'kernel_src')
r = git.GitRepoHelper(uri=repo, branch=branch, destination_dir=repodir,
commit=commit, base_uri=repo_base)
r.execute()
host_kernel = job.kernel(r.destination_dir)
if patch_list:
host_kernel.patch(patch_list)
if kernel_config:
host_kernel.config(kernel_config)
host_kernel.build()
host_kernel.install()
git_repo_version = '%s:%s:%s' % (r.uri, r.branch, r.get_top_commit())
utils.write_keyval(job.resultdir,
{'software_version_kernel': git_repo_version})
host_kernel.boot()
else:
logging.info('Chose %s, using the current kernel for the host',
install_type)
k_version = utils.system_output('uname -r', ignore_status=True)
utils.write_keyval(job.resultdir,
{'software_version_kernel': k_version})
def install_cpuflags_util_on_vm(test, vm, dst_dir, extra_flags=None):
"""
Install stress to vm.
:param vm: virtual machine.
:param dst_dir: Installation path.
:param extra_flags: Extraflags for gcc compiler.
"""
if not extra_flags:
extra_flags = ""
cpuflags_src = os.path.join(test.virtdir, "deps", "test_cpu_flags")
cpuflags_dst = os.path.join(dst_dir, "test_cpu_flags")
session = vm.wait_for_login()
session.cmd("rm -rf %s" %
(cpuflags_dst))
session.cmd("sync")
vm.copy_files_to(cpuflags_src, dst_dir)
session.cmd("sync")
session.cmd("cd %s; make EXTRA_FLAGS='%s';" %
(cpuflags_dst, extra_flags))
session.cmd("sync")
session.close()
def install_disktest_on_vm(test, vm, src_dir, dst_dir):
"""
    Install disktest on the vm.
:param vm: virtual machine.
:param src_dir: Source path.
    :param dst_dir: Installation path.
"""
disktest_src = src_dir
disktest_dst = os.path.join(dst_dir, "disktest")
session = vm.wait_for_login()
session.cmd("rm -rf %s" % (disktest_dst))
session.cmd("mkdir -p %s" % (disktest_dst))
session.cmd("sync")
vm.copy_files_to(disktest_src, disktest_dst)
session.cmd("sync")
session.cmd("cd %s; make;" %
(os.path.join(disktest_dst, "src")))
session.cmd("sync")
session.close()
def qemu_has_option(option, qemu_path="/usr/bin/qemu-kvm"):
"""
Helper function for command line option wrappers
    :param option: Option to check.
    :param qemu_path: Path to qemu-kvm.
"""
hlp = commands.getoutput("%s -help" % qemu_path)
return bool(re.search(r"^-%s(\s|$)" % option, hlp, re.MULTILINE))
def bitlist_to_string(data):
"""
Transform from bit list to ASCII string.
:param data: Bit list to be transformed
"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
return ''.join([chr(c) for c in result])
def string_to_bitlist(data):
"""
Transform from ASCII string to bit list.
:param data: String to be transformed
"""
data = [ord(c) for c in data]
result = []
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result.append(1)
else:
result.append(0)
i -= 1
return result
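# Round-trip example (illustrative): the two helpers above are inverses
# for 8-bit ASCII data:
#   string_to_bitlist("A") -> [0, 1, 0, 0, 0, 0, 0, 1]
#   bitlist_to_string([0, 1, 0, 0, 0, 0, 0, 1]) -> "A"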
def get_module_params(sys_path, module_name):
"""
Get the kvm module params
:param sys_path: sysfs path for modules info
:param module_name: module to check
"""
dir_params = os.path.join(sys_path, "module", module_name, "parameters")
module_params = {}
if os.path.isdir(dir_params):
for filename in os.listdir(dir_params):
full_dir = os.path.join(dir_params, filename)
tmp = open(full_dir, 'r').read().strip()
module_params[full_dir] = tmp
else:
return None
return module_params
def create_x509_dir(path, cacert_subj, server_subj, passphrase,
secure=False, bits=1024, days=1095):
"""
    Creates a directory with freshly generated:
    ca-cert.pem, ca-key.pem, server-cert.pem, server-key.pem
    :param path: path to the directory which will be created
    :param cacert_subj: ca-cert.pem subject
    :param server_subj: server-key.csr subject
    :param passphrase: passphrase to ca-key.pem
    :param secure: defines whether the server-key.pem will use a passphrase
    :param bits: bit length of keys (default 1024)
    :param days: cert expiration (default 1095)
:raise ValueError: openssl not found or rc != 0
:raise OSError: if os.makedirs() fails
"""
ssl_cmd = os_dep.command("openssl")
path = path + os.path.sep # Add separator to the path
shutil.rmtree(path, ignore_errors=True)
os.makedirs(path)
server_key = "server-key.pem.secure"
if secure:
server_key = "server-key.pem"
cmd_set = [
('%s genrsa -des3 -passout pass:%s -out %sca-key.pem %d' %
(ssl_cmd, passphrase, path, bits)),
('%s req -new -x509 -days %d -key %sca-key.pem -passin pass:%s -out '
'%sca-cert.pem -subj "%s"' %
(ssl_cmd, days, path, passphrase, path, cacert_subj)),
('%s genrsa -out %s %d' % (ssl_cmd, path + server_key, bits)),
('%s req -new -key %s -out %s/server-key.csr -subj "%s"' %
(ssl_cmd, path + server_key, path, server_subj)),
('%s x509 -req -passin pass:%s -days %d -in %sserver-key.csr -CA '
'%sca-cert.pem -CAkey %sca-key.pem -set_serial 01 -out %sserver-cert.pem' %
(ssl_cmd, passphrase, days, path, path, path, path))
]
if not secure:
cmd_set.append('%s rsa -in %s -out %sserver-key.pem' %
(ssl_cmd, path + server_key, path))
for cmd in cmd_set:
utils.run(cmd)
logging.info(cmd)
def convert_ipv4_to_ipv6(ipv4):
"""
Translates a passed in string of an ipv4 address to an ipv6 address.
:param ipv4: a string of an ipv4 address
"""
converted_ip = "::ffff:"
split_ipaddress = ipv4.split('.')
try:
socket.inet_aton(ipv4)
except socket.error:
raise ValueError("ipv4 to be converted is invalid")
if (len(split_ipaddress) != 4):
raise ValueError("ipv4 address is not in dotted quad format")
for index, string in enumerate(split_ipaddress):
if index != 1:
test = str(hex(int(string)).split('x')[1])
if len(test) == 1:
final = "0"
final += test
test = final
else:
test = str(hex(int(string)).split('x')[1])
if len(test) == 1:
final = "0"
final += test + ":"
test = final
else:
test += ":"
converted_ip += test
return converted_ip
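# Worked example (illustrative): "192.168.0.1" maps to the IPv4-mapped
# IPv6 form "::ffff:c0a8:0001" (192 -> c0, 168 -> a8, 0 -> 00, 1 -> 01).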
def get_thread_cpu(thread):
"""
    Get the cpus used by the light weight process (thread).
    :param thread: thread checked
    :type thread: string
    :return: A list of all cpus the thread used
:rtype: list
"""
cmd = "ps -o cpuid,lwp -eL | grep -w %s$" % thread
cpu_thread = utils.system_output(cmd)
if not cpu_thread:
return []
return list(set([_.strip().split()[0] for _ in cpu_thread.splitlines()]))
def get_pid_cpu(pid):
"""
    Get the cpus used by the process.
    :param pid: process id
    :type pid: string
    :return: A list of all cpus the process used
:rtype: list
"""
cmd = "ps -o cpuid -L -p %s" % pid
cpu_pid = utils.system_output(cmd)
if not cpu_pid:
return []
return list(set([_.strip() for _ in cpu_pid.splitlines()]))
def get_node_count():
"""
Get the number of nodes of current host.
:return: the number of nodes
    :rtype: int
"""
cmd = utils.run("numactl --hardware")
return int(re.findall("available: (\d+) nodes", cmd.stdout)[0])
def cpu_str_to_list(origin_str):
"""
Convert the cpu string to a list. The string may include comma and
hyphen.
:param origin_str: the cpu info string read from system
:type origin_str: string
:return: A list of the cpu ids
:rtype: list
"""
if isinstance(origin_str, str):
cpu_list = []
for cpu in origin_str.strip().split(","):
if "-" in cpu:
start, end = cpu.split("-")
for cpu_id in range(int(start), int(end) + 1):
cpu_list.append(cpu_id)
else:
try:
cpu_id = int(cpu)
cpu_list.append(cpu_id)
except ValueError:
logging.error("Illegimate string in cpu "
"informations: %s" % cpu)
cpu_list = []
break
cpu_list.sort()
return cpu_list
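# Worked example (illustrative): a sysfs-style cpu list such as "0-2,7"
# expands to [0, 1, 2, 7]; a malformed entry logs an error and yields [].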
class NumaInfo(object):
"""
    Numa topology for the host. Also provides functions to check the memory
    status of the nodes.
"""
def __init__(self):
self.numa_sys_path = "/sys/devices/system/node"
self.all_nodes = self.get_all_nodes()
self.online_nodes = self.get_online_nodes()
self.nodes = {}
self.distances = {}
for node_id in self.online_nodes:
self.nodes[node_id] = NumaNode(node_id + 1)
self.distances[node_id] = self.get_node_distance(node_id)
def get_all_nodes(self):
"""
Get all node ids in host.
:return: All node ids in host
:rtype: list
"""
all_nodes = get_path(self.numa_sys_path, "possible")
all_nodes_file = open(all_nodes, "r")
nodes_info = all_nodes_file.read()
all_nodes_file.close()
return cpu_str_to_list(nodes_info)
def get_online_nodes(self):
"""
Get node ids online in host
:return: The ids of node which is online
:rtype: list
"""
online_nodes = get_path(self.numa_sys_path, "online")
online_nodes_file = open(online_nodes, "r")
nodes_info = online_nodes_file.read()
online_nodes_file.close()
return cpu_str_to_list(nodes_info)
def get_node_distance(self, node_id):
"""
        Get the distance from the given node to other nodes, including itself.
        :param node_id: Node that you want to check
        :type node_id: string
        :return: A list of distances for the node, in node-id order
:rtype: list
"""
cmd = utils.run("numactl --hardware")
node_distances = cmd.stdout.split("node distances:")[-1].strip()
node_distance = node_distances.splitlines()[node_id + 1]
if "%s:" % node_id not in node_distance:
logging.warn("Get wrong unexpect information from numctl")
numa_sys_path = self.numa_sys_path
distance_path = get_path(numa_sys_path,
"node%s/distance" % node_id)
if not os.path.isfile(distance_path):
logging.error("Can not get distance information for"
" node %s" % node_id)
return []
node_distance_file = open(distance_path, 'r')
node_distance = node_distance_file.read()
node_distance_file.close()
else:
node_distance = node_distance.split(":")[-1]
return node_distance.strip().split()
def read_from_node_meminfo(self, node_id, key):
"""
        Get a specific value of a given node from the meminfo file
:param node_id: The node you want to check
:type node_id: string
:param key: The value you want to check such as MemTotal etc.
:type key: string
:return: The value in KB
:rtype: string
"""
memory_path = os.path.join(self.numa_sys_path,
"node%s/meminfo" % node_id)
memory_file = open(memory_path, "r")
memory_info = memory_file.read()
memory_file.close()
return re.findall("%s:\s+(\d+)" % key, memory_info)[0]
class NumaNode(object):
"""
Numa node to control processes and shared memory.
"""
def __init__(self, i=-1):
self.num = get_node_count()
if i < 0:
self.cpus = self.get_node_cpus(int(self.num) + i).split()
self.node_id = self.num + i
else:
self.cpus = self.get_node_cpus(i - 1).split()
self.node_id = i - 1
self.dict = {}
for i in self.cpus:
self.dict[i] = "free"
def get_node_cpus(self, i):
"""
Get cpus of a specific node
:param i: Index of the CPU inside the node.
"""
cmd = utils.run("numactl --hardware")
cpus = re.findall("node %s cpus: (.*)" % i, cmd.stdout)
if cpus:
cpus = cpus[0]
else:
break_flag = False
cpulist_path = "/sys/devices/system/node/node%s/cpulist" % i
try:
cpulist_file = open(cpulist_path, 'r')
cpus = cpulist_file.read()
cpulist_file.close()
except IOError:
logging.warn("Can not find the cpu list information from both"
"numactl and sysfs. Please check your system.")
break_flag = True
if not break_flag:
# Try to expand the numbers with '-' to a string of numbers
                # separated by blanks. The number of '-' in the list depends
# on the physical architecture of the hardware.
try:
convert_list = re.findall("\d+-\d+", cpus)
for cstr in convert_list:
_ = " "
start = min(int(cstr.split("-")[0]),
int(cstr.split("-")[1]))
end = max(int(cstr.split("-")[0]),
int(cstr.split("-")[1]))
for n in range(start, end + 1, 1):
_ += "%s " % str(n)
cpus = re.sub(cstr, _, cpus)
except (IndexError, ValueError):
logging.warn("The format of cpu list is not the same as"
" expected.")
break_flag = False
if break_flag:
cpus = ""
return cpus
def free_cpu(self, i):
"""
        Release the pin on one cpu.
        :param i: Index of the cpu.
"""
self.dict[i] = "free"
def _flush_pin(self):
"""
Flush pin dict, remove the record of exited process.
"""
cmd = utils.run("ps -eLf | awk '{print $4}'")
all_pids = cmd.stdout
for i in self.cpus:
if self.dict[i] != "free" and self.dict[i] not in all_pids:
self.free_cpu(i)
@error.context_aware
def pin_cpu(self, process):
"""
Pin one process to a single cpu.
:param process: Process ID.
"""
self._flush_pin()
error.context("Pinning process %s to the CPU" % process)
for i in self.cpus:
if self.dict[i] == "free":
self.dict[i] = str(process)
cmd = "taskset -p %s %s" % (hex(2 ** int(i)), process)
logging.debug("NumaNode (%s): " % i + cmd)
utils.run(cmd)
return i
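    # Illustrative note: the taskset mask hex(2 ** int(i)) sets only bit i,
    # e.g. cpu 3 -> mask 0x8, pinning the process to that single cpu.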
def show(self):
"""
Display the record dict in a convenient way.
"""
logging.info("Numa Node record dict:")
for i in self.cpus:
logging.info(" %s: %s" % (i, self.dict[i]))
def get_host_cpu_models():
"""
Get cpu model from host cpuinfo
"""
def _cpu_flags_sort(cpu_flags):
"""
        Sort the cpu flags from the host into a consistent order and format
"""
flag_list = re.split("\s+", cpu_flags.strip())
flag_list.sort()
cpu_flags = " ".join(flag_list)
return cpu_flags
def _make_up_pattern(flags):
"""
Update the check pattern to a certain order and format
"""
pattern_list = re.split(",", flags.strip())
pattern_list.sort()
pattern = r"(\b%s\b)" % pattern_list[0]
for i in pattern_list[1:]:
pattern += r".+(\b%s\b)" % i
return pattern
if ARCH == 'ppc64':
return ['POWER7']
vendor_re = "vendor_id\s+:\s+(\w+)"
cpu_flags_re = "flags\s+:\s+([\w\s]+)\n"
cpu_types = {"AuthenticAMD": ["Opteron_G5", "Opteron_G4", "Opteron_G3",
"Opteron_G2", "Opteron_G1"],
"GenuineIntel": ["SandyBridge", "Westmere", "Nehalem",
"Penryn", "Conroe"]}
cpu_type_re = {"Opteron_G5": "f16c,fma,tbm",
"Opteron_G4":
"avx,xsave,aes,sse4.2|sse4_2,sse4.1|sse4_1,cx16,ssse3,sse4a",
"Opteron_G3": "cx16,sse4a",
"Opteron_G2": "cx16",
"Opteron_G1": "",
"SandyBridge":
"avx,xsave,aes,sse4_2|sse4.2,sse4.1|sse4_1,cx16,ssse3",
"Westmere": "aes,sse4.2|sse4_2,sse4.1|sse4_1,cx16,ssse3",
"Nehalem": "sse4.2|sse4_2,sse4.1|sse4_1,cx16,ssse3",
"Penryn": "sse4.1|sse4_1,cx16,ssse3",
"Conroe": "ssse3"}
fd = open("/proc/cpuinfo")
cpu_info = fd.read()
fd.close()
vendor = re.findall(vendor_re, cpu_info)[0]
cpu_flags = re.findall(cpu_flags_re, cpu_info)
cpu_model = None
cpu_support_model = []
if cpu_flags:
cpu_flags = _cpu_flags_sort(cpu_flags[0])
for cpu_type in cpu_types.get(vendor):
pattern = _make_up_pattern(cpu_type_re.get(cpu_type))
if re.findall(pattern, cpu_flags):
cpu_model = cpu_type
break
else:
logging.warn("Can not Get cpu flags from cpuinfo")
if cpu_model:
cpu_type_list = cpu_types.get(vendor)
cpu_support_model = cpu_type_list[cpu_type_list.index(cpu_model):]
return cpu_support_model
def extract_qemu_cpu_models(qemu_cpu_help_text):
"""
Get all cpu models from qemu -cpu help text.
:param qemu_cpu_help_text: text produced by <qemu> -cpu '?'
:return: list of cpu models
"""
def check_model_list(pattern):
cpu_re = re.compile(pattern)
qemu_cpu_model_list = cpu_re.findall(qemu_cpu_help_text)
if qemu_cpu_model_list:
return qemu_cpu_model_list
else:
return None
x86_pattern_list = "x86\s+\[?([a-zA-Z0-9_-]+)\]?.*\n"
ppc64_pattern_list = "PowerPC\s+\[?([a-zA-Z0-9_-]+\.?[0-9]?)\]?.*\n"
for pattern_list in [x86_pattern_list, ppc64_pattern_list]:
model_list = check_model_list(pattern_list)
if model_list is not None:
return model_list
e_msg = ("CPU models reported by qemu -cpu ? not supported by virt-tests. "
"Please work with us to add support for it")
logging.error(e_msg)
for line in qemu_cpu_help_text.splitlines():
logging.error(line)
raise UnsupportedCPU(e_msg)
def get_qemu_cpu_models(qemu_binary):
"""Get listing of CPU models supported by QEMU
Get list of CPU models by parsing the output of <qemu> -cpu '?'
"""
cmd = qemu_binary + " -cpu '?'"
result = utils.run(cmd)
return extract_qemu_cpu_models(result.stdout)
def get_qemu_binary(params):
"""
Get the path to the qemu binary currently in use.
"""
# Update LD_LIBRARY_PATH for built libraries (libspice-server)
qemu_binary_path = get_path(os.path.join(data_dir.get_root_dir(),
params.get("vm_type")),
params.get("qemu_binary", "qemu"))
library_path = os.path.join(
data_dir.get_root_dir(), params.get('vm_type'), 'install_root', 'lib')
if os.path.isdir(library_path):
library_path = os.path.abspath(library_path)
qemu_binary = "LD_LIBRARY_PATH=%s %s" % (
library_path, qemu_binary_path)
else:
qemu_binary = qemu_binary_path
return qemu_binary
def get_qemu_img_binary(params):
"""
Get the path to the qemu-img binary currently in use.
"""
return get_path(os.path.join(data_dir.get_root_dir(),
params.get("vm_type")),
params.get("qemu_img_binary", "qemu"))
def get_qemu_io_binary(params):
"""
    Get the path to the qemu-io binary currently in use.
"""
return get_path(os.path.join(data_dir.get_root_dir(),
params.get("vm_type")),
params.get("qemu_io_binary", "qemu"))
def get_qemu_best_cpu_model(params):
"""
Try to find out the best CPU model available for qemu.
This function can't be in qemu_vm, because it is used in env_process,
where there's no vm object available yet, and env content is synchronized
in multi host testing.
1) Get host CPU model
2) Verify if host CPU model is in the list of supported qemu cpu models
3) If so, return host CPU model
4) If not, return the default cpu model set in params, if none defined,
return 'qemu64'.
"""
host_cpu_models = get_host_cpu_models()
qemu_binary = get_qemu_binary(params)
qemu_cpu_models = get_qemu_cpu_models(qemu_binary)
# Let's try to find a suitable model on the qemu list
for host_cpu_model in host_cpu_models:
if host_cpu_model in qemu_cpu_models:
return host_cpu_model
# If no host cpu model can be found on qemu_cpu_models, choose the default
return params.get("default_cpu_model", "qemu64")
def check_if_vm_vcpu_match(vcpu_desire, vm):
"""
This checks whether the VM vCPU quantity matches
the value desired.
"""
vcpu_actual = vm.get_cpu_count()
if vcpu_desire != vcpu_actual:
logging.debug("CPU quantity mismatched !!! guest said it got %s "
"but we assigned %s" % (vcpu_actual, vcpu_desire))
return False
logging.info("CPU quantity matched: %s" % vcpu_actual)
return True
class ForAll(list):
def __getattr__(self, name):
def wrapper(*args, **kargs):
return map(lambda o: o.__getattribute__(name)(*args, **kargs), self)
return wrapper
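# Usage sketch (illustrative): broadcast a method call to every element,
# e.g. ForAll([vm1, vm2]).destroy() calls destroy() on each vm and returns
# the results as a list.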
class ForAllP(list):
"""
Parallel version of ForAll
"""
def __getattr__(self, name):
def wrapper(*args, **kargs):
threads = []
for o in self:
threads.append(
utils.InterruptedThread(o.__getattribute__(name),
args=args, kwargs=kargs))
for t in threads:
t.start()
return map(lambda t: t.join(), threads)
return wrapper
class ForAllPSE(list):
"""
    Parallel version of ForAll that suppresses exceptions.
"""
def __getattr__(self, name):
def wrapper(*args, **kargs):
threads = []
for o in self:
threads.append(
utils.InterruptedThread(o.__getattribute__(name),
args=args, kwargs=kargs))
for t in threads:
t.start()
result = []
for t in threads:
ret = {}
try:
ret["return"] = t.join()
except Exception:
ret["exception"] = sys.exc_info()
ret["args"] = args
ret["kargs"] = kargs
result.append(ret)
return result
return wrapper
def get_pid_path(program_name, pid_files_dir=None):
if not pid_files_dir:
base_dir = os.path.dirname(__file__)
pid_path = os.path.abspath(os.path.join(base_dir, "..", "..",
"%s.pid" % program_name))
else:
pid_path = os.path.join(pid_files_dir, "%s.pid" % program_name)
return pid_path
def write_pid(program_name, pid_files_dir=None):
"""
Try to drop <program_name>.pid in the main autotest directory.
    :param program_name: prefix for file name
"""
pidfile = open(get_pid_path(program_name, pid_files_dir), "w")
try:
pidfile.write("%s\n" % os.getpid())
finally:
pidfile.close()
def delete_pid_file_if_exists(program_name, pid_files_dir=None):
"""
Tries to remove <program_name>.pid from the main autotest directory.
"""
pidfile_path = get_pid_path(program_name, pid_files_dir)
try:
os.remove(pidfile_path)
except OSError:
if not os.path.exists(pidfile_path):
return
raise
def get_pid_from_file(program_name, pid_files_dir=None):
"""
Reads the pid from <program_name>.pid in the autotest directory.
    :param program_name: the name of the program
:return: the pid if the file exists, None otherwise.
"""
pidfile_path = get_pid_path(program_name, pid_files_dir)
if not os.path.exists(pidfile_path):
return None
pidfile = open(get_pid_path(program_name, pid_files_dir), 'r')
try:
try:
pid = int(pidfile.readline())
except IOError:
if not os.path.exists(pidfile_path):
return None
raise
finally:
pidfile.close()
return pid
def program_is_alive(program_name, pid_files_dir=None):
"""
Checks if the process is alive and not in Zombie state.
    :param program_name: the name of the program
:return: True if still alive, False otherwise
"""
pid = get_pid_from_file(program_name, pid_files_dir)
if pid is None:
return False
return utils.pid_is_alive(pid)
def signal_program(program_name, sig=signal.SIGTERM, pid_files_dir=None):
"""
Sends a signal to the process listed in <program_name>.pid
    :param program_name: the name of the program
    :param sig: signal to send
"""
pid = get_pid_from_file(program_name, pid_files_dir)
if pid:
utils.signal_pid(pid, sig)
def normalize_data_size(value_str, order_magnitude="M", factor="1024"):
"""
Normalize a data size in one order of magnitude to another (MB to GB,
for example).
    :param value_str: a string including the data and unit
:param order_magnitude: the magnitude order of result
:param factor: the factor between two relative order of magnitude.
Normally could be 1024 or 1000
"""
def _get_magnitude_index(magnitude_list, magnitude_value):
for i in magnitude_list:
order_magnitude = re.findall("[\s\d](%s)" % i,
str(magnitude_value), re.I)
if order_magnitude:
return magnitude_list.index(order_magnitude[0].upper())
return -1
magnitude_list = ['B', 'K', 'M', 'G', 'T']
try:
data = float(re.findall("[\d\.]+", value_str)[0])
except IndexError:
logging.error("Incorrect data size format. Please check %s"
" has both data and unit." % value_str)
return ""
magnitude_index = _get_magnitude_index(magnitude_list, value_str)
order_magnitude_index = _get_magnitude_index(magnitude_list,
" %s" % order_magnitude)
if magnitude_index < 0 or order_magnitude_index < 0:
logging.error("Unknown input order of magnitude. Please check your"
"value '%s' and desired order of magnitude"
" '%s'." % (value_str, order_magnitude))
return ""
if magnitude_index > order_magnitude_index:
multiple = float(factor)
else:
multiple = float(factor) ** -1
for _ in range(abs(magnitude_index - order_magnitude_index)):
data *= multiple
return str(data)
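# Worked examples (illustrative):
#   normalize_data_size("1M", "K")          -> "1024.0"  (1 MB = 1024 KB)
#   normalize_data_size("2048K", "M")       -> "2.0"
#   normalize_data_size("1M", "K", "1000")  -> "1000.0"  (decimal factor)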
def verify_running_as_root():
"""
Verifies whether we're running under UID 0 (root).
:raise: error.TestNAError
"""
if os.getuid() != 0:
raise error.TestNAError("This test requires root privileges "
"(currently running with user %s)" %
getpass.getuser())
def selinux_enforcing():
"""
Returns True if SELinux is in enforcing mode, False if permissive/disabled
"""
cmdresult = utils.run('getenforce', ignore_status=True, verbose=False)
mobj = re.search('Enforcing', cmdresult.stdout)
return mobj is not None
def get_winutils_vol(session, label="WIN_UTILS"):
"""
    Return the volume ID of the winutils CDROM; the ISO file should be
    created via the command:
    mkisofs -V $label -o winutils.iso
    :param session: session object
    :param label: volume label of WIN_UTILS.iso
    :return: volume ID
"""
cmd = "wmic logicaldisk where (VolumeName='%s') get DeviceID" % label
output = session.cmd(cmd, timeout=120)
device = re.search(r'(\w):', output, re.M)
if not device:
return ""
return device.group(1)
def valued_option_dict(options, split_pattern, start_count=0, dict_split=None):
"""
Divide the valued options into key and value
    :param options: the valued options read from the cfg
    :param split_pattern: pattern used to split options
    :param dict_split: pattern used to split sub options and insert into dict
    :param start_count: the index at which to start inserting into option_dict
    :return: dict of options and their values
"""
option_dict = {}
    if options.strip():
pat = re.compile(split_pattern)
option_list = pat.split(options.lstrip(split_pattern))
logging.debug("option_list is %s", option_list)
for match in option_list[start_count:]:
match_list = match.split(dict_split)
if len(match_list) == 2:
key = match_list[0]
value = match_list[1]
            if key not in option_dict:
option_dict[key] = value
else:
logging.debug("key %s in option_dict", key)
option_dict[key] = option_dict[key].split()
option_dict[key].append(value)
return option_dict
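# Worked example (illustrative):
#   valued_option_dict("a=1,b=2", ",", dict_split="=")
#   -> {'a': '1', 'b': '2'}
# A repeated key collects its values into a list instead.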
|
spcui/virt-test
|
virttest/utils_misc.py
|
Python
|
gpl-2.0
| 59,810
|
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminRadioSelect, AdminRadioFieldRenderer
from edc_constants.constants import YES, NO
from lab_requisition.forms import RequisitionFormMixin
from microbiome.apps.mb.choices import STUDY_SITES
from microbiome.apps.mb_infant.models import InfantStoolCollection
from ..models import InfantRequisition
class InfantRequisitionForm(RequisitionFormMixin):
study_site = forms.ChoiceField(
label='Study site',
choices=STUDY_SITES,
initial=settings.DEFAULT_STUDY_SITE,
help_text="",
widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))
def __init__(self, *args, **kwargs):
super(InfantRequisitionForm, self).__init__(*args, **kwargs)
self.fields['item_type'].initial = 'tube'
def clean(self):
cleaned_data = super(InfantRequisitionForm, self).clean()
self.validate_requisition_and_drawn_datetime()
self.validate_sample_swabs()
self.validate_dna_pcr_and_cytokines()
self.validate_stool_sample_collection()
self.validate_requisition_and_infant_visit()
return cleaned_data
def validate_requisition_and_drawn_datetime(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('drawn_datetime'):
if cleaned_data.get('drawn_datetime').date() < cleaned_data.get('requisition_datetime').date():
raise forms.ValidationError(
                    'Requisition date cannot be in the future of the specimen date. Specimen draw date is '
                    'indicated as {}, whilst requisition is indicated as {}. Please correct.'.format(
cleaned_data.get('drawn_datetime').date(),
cleaned_data.get('requisition_datetime').date()))
def validate_sample_swabs(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('panel').name == 'Rectal swab (Storage)':
if cleaned_data.get('item_type') != 'swab':
raise forms.ValidationError('Panel {} is a swab therefore collection type is swab. Please correct.'
.format(cleaned_data.get('panel').name))
def validate_dna_pcr_and_cytokines(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('panel').name in ['DNA PCR', 'Inflammatory Cytokines']:
if cleaned_data.get('item_type') not in ['dbs', 'tube']:
raise forms.ValidationError('Panel {} collection type can only be dbs or tube. '
'Please correct.'.format(cleaned_data.get('panel').name))
def validate_stool_sample_collection(self):
cleaned_data = self.cleaned_data
sample_collection = InfantStoolCollection.objects.filter(infant_visit=cleaned_data.get('infant_visit'))
if sample_collection:
sample_collection = InfantStoolCollection.objects.get(infant_visit=cleaned_data.get('infant_visit'))
if sample_collection.sample_obtained == YES:
if (cleaned_data.get("panel").name == 'Stool storage' and cleaned_data.get("is_drawn") == NO):
raise forms.ValidationError("Stool Sample Collected. Stool Requisition is_drawn"
" cannot be NO.")
def validate_requisition_and_infant_visit(self):
cleaned_data = self.cleaned_data
if (cleaned_data.get('infant_visit').is_present == YES and
cleaned_data.get('reason_not_drawn') == 'absent'):
raise forms.ValidationError(
'Reason not drawn cannot be absent. On the visit report you said infant is present.'
' Please Correct.')
class Meta:
model = InfantRequisition
|
botswana-harvard/microbiome
|
microbiome/apps/mb_lab/forms/infant_requisition_form.py
|
Python
|
gpl-2.0
| 3,819
|
from p1_support import load_level, show_level
from math import sqrt
from heapq import heappush, heappop
import operator
VERBOSE = False
def debug(*args):
if (VERBOSE):
print ''.join([str(arg) for arg in args])
def dijkstras_shortest_path(src, dst, graph, adj):
dist = {}
prev = {}
dist[src] = 0
prev[src] = None # parent of the source node
queue = []
# Python heapq (heap, item) : item can be a tuple or single value
# If tuple is used, the first element will be used as key (key, data)
heappush(queue, (dist[src], src))
while queue:
pathCost, node = heappop(queue)
if node == dst:
break
adjacent = adj(graph, node)
# Extract (position, cost) from list of adjacent states
for neighbor, cost in adjacent:
totalCost = pathCost + cost
#print totalCost
if neighbor not in dist or totalCost < dist[neighbor]:
dist[neighbor] = totalCost
prev[neighbor] = node # parent of [ neighbor ] is node
heappush(queue, ( totalCost, neighbor))
path = []
# Path found build it, else return empty path
if node == dst:
# Traverse up the parent tree
while node: # while there is a parent (prev[src] = None)
path.append(node)
node = prev[node] # update to the parent
# Path is from dst to src, reverse it
path.reverse()
if path:
debug("Path: ", path)
debug("Path cost: ", pathCost)
return path
def navigation_edges(level, cell):
# Valid movement deltas
deltas = {
'LEFT_DOWN': (-1, -1),
'LEFT': (-1, 0),
'LEFT_UP': (-1, 1),
'DOWN': (0, -1),
'UP': (0, 1),
'RIGHT_DOWN': (1, -1),
'RIGHT': (1, 0),
'RIGHT_UP': (1, 1)
};
validMoves = []
for delta in deltas.values():
# Calculate new position
position = (cell[0]+delta[0], cell[1]+delta[1])
if position in level['spaces']:
# Calculate edge cost
cost = sqrt(delta[0] ** 2 + delta[1] ** 2)
# Valid move is a tuple (nextState, edgeCost)
validMoves.append((position, cost))
return validMoves
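# Illustrative note: straight moves cost 1.0 and diagonal moves cost
# sqrt(2) here, i.e. the Euclidean length of the movement delta, which
# keeps diagonal shortcuts honest in the shortest-path search.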
def test_route(filename, src_waypoint, dst_waypoint):
level = load_level(filename)
if VERBOSE:
print("Level layout:")
show_level(level)
src = level['waypoints'][src_waypoint]
dst = level['waypoints'][dst_waypoint]
path = dijkstras_shortest_path(src, dst, level, navigation_edges)
if path:
show_level(level, path)
else:
print "No path possible!"
# Show the level if the user hasn't already seen it
if not VERBOSE:
show_level(level, [])
if __name__ == '__main__':
import sys
# Use command line options
from optparse import OptionParser
parser = OptionParser(usage="usage: %prog [options] level_file src_waypoint dst_waypoint")
parser.add_option("-v", "--verbose", dest="verbose", help="use verbose logging", action="store_true", default=False)
(options, args) = parser.parse_args()
# Make sure the appropriate number of arguments was supplied
if (len(args) != 3):
print "Unexpected argument count."
parser.print_help()
else:
VERBOSE = options.verbose
filename, src_waypoint, dst_waypoint = args
test_route(filename, src_waypoint, dst_waypoint)
|
jtsommers/dijkd
|
p1.py
|
Python
|
gpl-2.0
| 3,077
|
import addressbook_pb2
person = addressbook_pb2.Person()
person.id = 1234
person.name = "John Doe"
person.email = "[email protected]"
p1 = person.phones.add()
p1.number = "555-5321"
p2 = person.phones.add()
p2.number = "555-5322"
p2.type = addressbook_pb2.Person.WORK
#print(person.IsInitialized())
#print(person)
print(person.SerializeToString())
#print('\n'.join(dir(person)))
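# A minimal round-trip sketch (illustrative addition, not in the original
# script): the serialized bytes parse back into an equal message.
data = person.SerializeToString()
person_copy = addressbook_pb2.Person()
person_copy.ParseFromString(data)
assert person_copy == person  # protobuf messages support == comparison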
|
dilworm/pytest
|
protobuf/main.py
|
Python
|
gpl-2.0
| 380
|
# Key list dialog.
#
# Copyright (C) 2008 Red Hat, Inc. All rights reserved.
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program; if
# not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Red Hat Author: Miloslav Trmac <[email protected]>
from gettext import gettext as _
import audit
import gtk
import gobject
from dialog_base import DialogBase
from key_dialog import KeyDialog
import util
__all__ = ('KeyListDialog',)
class KeyListDialog(DialogBase):
'''Key list dialog.'''
_glade_widget_names = ('keys_add', 'keys_delete', 'keys_down', 'keys_edit',
'keys_list', 'keys_up')
def __init__(self, parent):
DialogBase.__init__(self, 'key_list_dialog', parent)
self.keys_store = gtk.ListStore(gobject.TYPE_STRING)
self.keys_list.set_model(self.keys_store)
c = gtk.TreeViewColumn(_('Key'), gtk.CellRendererText(), text = 0)
self.keys_list.append_column(c)
self.keys_list.connect('row-activated', self.__keys_edit_clicked)
self.keys_selection = self.keys_list.get_selection()
util.connect_and_run(self.keys_selection, 'changed',
self.__keys_selection_changed)
self.keys_up.connect('clicked', self.__keys_up_clicked)
self.keys_down.connect('clicked', self.__keys_down_clicked)
self.keys_delete.connect('clicked', self.__keys_delete_clicked)
self.keys_add.connect('clicked', self.__keys_add_clicked)
self.keys_edit.connect('clicked', self.__keys_edit_clicked)
def run(self, keys):
'''Show the dialog to modify the keys list.'''
self._load_keys(keys)
res = self.window.run()
while res == gtk.RESPONSE_OK and not self._validate_values():
res = self.window.run()
if res == gtk.RESPONSE_OK:
self._save_keys(keys)
return res
def _load_keys(self, keys):
'''Modify dialog controls to reflect keys.'''
self.keys_store.clear()
for key in keys:
self.keys_store.append((key,))
def _save_keys(self, keys):
'''Modify keys to reflect dialog state.'''
del keys[:]
it = self.keys_store.get_iter_first()
while it is not None:
keys.append(self.keys_store.get_value(it, 0))
it = self.keys_store.iter_next(it)
def _validate_get_failure(self):
keys = []
it = self.keys_store.get_iter_first()
while it is not None:
keys.append(self.keys_store.get_value(it, 0))
it = self.keys_store.iter_next(it)
if len('\x01'.join(keys)) > audit.AUDIT_MAX_KEY_LEN:
return (_('Total key length is too long'), None, self.keys_list)
return None
def __keys_selection_changed(self, *_):
(model, it) = self.keys_selection.get_selected()
util.set_sensitive_all(it is not None,
self.keys_delete, self.keys_edit)
self.keys_up.set_sensitive(it is not None and
model.get_path(it) !=
model.get_path(model.get_iter_first()))
self.keys_down.set_sensitive(it is not None and
model.iter_next(it) is not None)
def __keys_up_clicked(self, *_):
util.tree_model_move_up(self.keys_selection)
self.__keys_selection_changed()
def __keys_down_clicked(self, *_):
util.tree_model_move_down(self.keys_selection)
self.__keys_selection_changed()
def __keys_delete_clicked(self, *_):
util.tree_model_delete(self.keys_selection)
def __keys_add_clicked(self, *_):
dlg = KeyDialog(self.window)
(res, key) = dlg.run('')
dlg.destroy()
if res == gtk.RESPONSE_OK:
(model, it) = self.keys_selection.get_selected()
it = model.insert_after(it)
model.set_value(it, 0, key)
self.keys_selection.select_iter(it)
def __keys_edit_clicked(self, *_):
(model, it) = self.keys_selection.get_selected()
if it is None:
return
key = model.get_value(it, 0)
dlg = KeyDialog(self.window)
(res, key) = dlg.run(key)
dlg.destroy()
if res == gtk.RESPONSE_OK:
model.set_value(it, 0, key)
|
ystk/debian-audit
|
system-config-audit/src/key_list_dialog.py
|
Python
|
gpl-2.0
| 5,086
|
import unittest
from graph_theory.spfa import spfa
class GraphTheoryTests(unittest.TestCase):
def setUp(self):
source = 0
num_nodes = 5
neighbour_list = [[1], # 0
[2], # 1
[3], # 2
[4, 1], # 3
[1], # 4
]
weights = {(0,1): 20,
(1,2) : 1,
(2,3) : 2,
(3,4) : -2,
(4, 1): -1,
(3, 1): -4,
}
self.example_graph = (source, num_nodes, weights, neighbour_list)
self.example_graph_cycle = [1,2,3]
    def is_cyclically_equal(self, list1, list2):
if len(list1) != len(list2):
return False
n = len(list1)
for shift in range(n):
if list1 == list2[shift:] + list2[:shift]:
return True
return False
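    # Illustrative note: [1, 2, 3] and [2, 3, 1] compare equal here (same
    # cycle, different starting node), while [1, 3, 2] does not.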
def test_negative_cycle(self):
_, negative_cycle = spfa(*self.example_graph)
# Careful, double negation ahead
assert(negative_cycle is not None)
        assert(self.is_cyclically_equal(negative_cycle, self.example_graph_cycle))
|
yu-peng/cdru
|
graph_theory/tests.py
|
Python
|
gpl-3.0
| 1,214
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-02 05:05
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_name', models.CharField(max_length=32)),
('project_description', models.CharField(max_length=256)),
('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created')),
('commandline', models.CharField(default='', max_length=256)),
('public', models.BooleanField(default=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': (('can_run', '[DjangoSourceControl]: Can run projects'), ('can_add', '[DjangoSourceControl]: Can add projects')),
},
),
migrations.CreateModel(
name='ProjectFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('projectfile_name', models.CharField(max_length=32)),
('projectfile_description', models.CharField(max_length=256)),
('startup', models.BooleanField(default=False)),
('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created')),
('public', models.BooleanField(default=True)),
('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='projectfiles', to='djangosourcecontrol.Project')),
],
),
migrations.CreateModel(
name='ProjectFileVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created')),
('text', models.TextField()),
('projectfile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projectfileversions', to='djangosourcecontrol.ProjectFile')),
],
),
]
|
kull2222/DjangoSourceControl
|
dsc/djangosourcecontrol/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 2,755
|
import numpy
from .embedded_sequence import embed_seq
def ap_entropy(X, M, R):
"""Computer approximate entropy (ApEN) of series X, specified by M and R.
    Suppose the given time series is X = [x(1), x(2), ... , x(N)]. We first build
embedding matrix Em, of dimension (N-M+1)-by-M, such that the i-th row of
Em is x(i),x(i+1), ... , x(i+M-1). Hence, the embedding lag and dimension
are 1 and M-1 respectively. Such a matrix can be built by calling pyeeg
function as Em = embed_seq(X, 1, M). Then we build matrix Emp, whose only
    difference with Em is that the length of each embedding sequence is M + 1.
Denote the i-th and j-th row of Em as Em[i] and Em[j]. Their k-th elements
are Em[i][k] and Em[j][k] respectively. The distance between Em[i] and
Em[j] is defined as 1) the maximum difference of their corresponding scalar
components, thus, max(Em[i]-Em[j]), or 2) Euclidean distance. We say two
1-D vectors Em[i] and Em[j] *match* in *tolerance* R, if the distance
between them is no greater than R, thus, max(Em[i]-Em[j]) <= R. Mostly, the
value of R is defined as 20% - 30% of standard deviation of X.
Pick Em[i] as a template, for all j such that 0 < j < N - M + 1, we can
check whether Em[j] matches with Em[i]. Denote the number of Em[j],
which is in the range of Em[i], as k[i], which is the i-th element of the
vector k. The probability that a random row in Em matches Em[i] is
    k[i] / (N - M + 1), denoted as Cm[i].
    We repeat the same process on Emp and obtain Cmp[i], but here 0 < i < N - M
since the length of each sequence in Emp is M + 1.
The probability that any two embedding sequences in Em match is then
    sum(Cm) / (N - M + 1). We define Phi_m = sum(log(Cm)) / (N - M + 1) and
    Phi_mp = sum(log(Cmp)) / (N - M).
And the ApEn is defined as Phi_m - Phi_mp.
Notes
-----
Please be aware that self-match is also counted in ApEn.
References
----------
Costa M, Goldberger AL, Peng CK, Multiscale entropy analysis of biological
signals, Physical Review E, 71:021906, 2005
See also
--------
samp_entropy: sample entropy of a time series
"""
N = len(X)
Em = embed_seq(X, 1, M)
A = numpy.tile(Em, (len(Em), 1, 1))
B = numpy.transpose(A, [1, 0, 2])
D = numpy.abs(A - B) # D[i,j,k] = |Em[i][k] - Em[j][k]|
InRange = numpy.max(D, axis=2) <= R
# Probability that random M-sequences are in range
Cm = InRange.mean(axis=0)
# M+1-sequences in range if M-sequences are in range & last values are close
Dp = numpy.abs(
numpy.tile(X[M:], (N - M, 1)) - numpy.tile(X[M:], (N - M, 1)).T
)
Cmp = numpy.logical_and(Dp <= R, InRange[:-1, :-1]).mean(axis=0)
Phi_m, Phi_mp = numpy.sum(numpy.log(Cm)), numpy.sum(numpy.log(Cmp))
Ap_En = (Phi_m - Phi_mp) / (N - M)
return Ap_En
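# A minimal usage sketch (illustrative, not part of the original module).
# R is commonly chosen as 20%-30% of the standard deviation of X, as the
# docstring above notes; the import path assumes the package layout
# pyeeg/approximate_entropy.py.
#
#     >>> import numpy
#     >>> from pyeeg.approximate_entropy import ap_entropy
#     >>> X = numpy.sin(numpy.linspace(0, 8 * numpy.pi, 200))
#     >>> ap_entropy(X, 2, 0.2 * numpy.std(X))  # doctest: +SKIP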
|
jbergantine/pyeeg
|
pyeeg/approximate_entropy.py
|
Python
|
gpl-3.0
| 2,931
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import timeit
import unittest
from haystack.mappings import folder
from haystack.mappings.base import AMemoryMapping
from haystack.mappings.base import MemoryHandler
from haystack.mappings.file import LocalMemoryMapping
import haystack.reverse.enumerators
import haystack.reverse.matchers
from haystack.reverse import searchers
from test.testfiles import zeus_856_svchost_exe
from . import test_pattern
log = logging.getLogger('test_pointerfinder')
class TestPointer(test_pattern.SignatureTests):
def setUp(self):
super(TestPointer, self).setUp()
self.mmap, self.values = self._make_mmap_with_values(self.seq)
self.name = 'test_dump_1'
self.feedback = searchers.NoFeedback()
def _make_mmap_with_values(self, intervals, struct_offset=None):
"""
Make a memory map, with a fake structure of pointer pattern inside.
        Return the mapping and the pointer values written into it.
        :param intervals: sequence of intervals between consecutive pointers
        :param struct_offset: offset of the fake structure in the mapping
        :return: (mmap, values)
"""
# template of a memory map metadata
self._mstart = 0x0c00000
self._mlength = 4096 # end at (0x0c01000)
# could be 8, it doesn't really matter
self.word_size = self.target.get_word_size()
if struct_offset is not None:
self._struct_offset = struct_offset
else:
self._struct_offset = self.word_size*12 # 12, or any other aligned
mmap,values = self._make_mmap(0x0c00000, 4096, self._struct_offset,
intervals, self.word_size)
# add a reference to mmap in mmap2
ammap2 = AMemoryMapping(0xff7dc000, 0xff7dc000+0x1000, '-rwx', 0, 0, 0, 0, 'test_mmap2')
ammap2.set_ctypes(self.target.get_target_ctypes())
mmap2 = LocalMemoryMapping.fromBytebuffer(ammap2, mmap.get_byte_buffer())
self._memory_handler = MemoryHandler([mmap, mmap2], self.target, 'test')
self.mmap2 = mmap2
return mmap, values
class TestPointerSearcher(TestPointer):
def test_iter(self):
matcher = haystack.reverse.matchers.PointerSearcher(self._memory_handler)
self.pointerSearcher = searchers.WordAlignedSearcher(self.mmap, matcher, self.feedback, self.word_size)
iters = [value for value in self.pointerSearcher]
values = self.pointerSearcher.search()
self.assertEqual(iters, values)
self.assertEqual(self.values, values)
self.assertEqual(self.values, iters)
class TestPointerEnumerator(TestPointer):
def test_iter(self):
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
values = [value for offset, value in self.pointerEnum]
offsets = [offset for offset, value in self.pointerEnum]
values_2 = [value for offset, value in self.pointerEnum.search()]
offsets_2 = [offset for offset, value in self.pointerEnum.search()]
self.assertEqual(values, values_2)
self.assertEqual(offsets, offsets_2)
self.assertEqual(self.values, values)
self.assertEqual(self.values, values_2)
nsig = [self._mstart + self._struct_offset]
nsig.extend(self.seq)
indices = [i for i in self._accumulate(nsig)]
self.assertEqual(indices, offsets)
self.assertEqual(indices, offsets_2)
def test_iter_advanced(self):
"""test that pointers to other mappings are detected"""
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
self.pointerEnum1 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap, matcher, self.feedback, self.word_size)
offsets1, values1 = zip(*self.pointerEnum1.search())
self.pointerEnum2 = haystack.reverse.enumerators.WordAlignedEnumerator(self.mmap2, matcher, self.feedback, self.word_size)
offsets2, values2 = zip(*self.pointerEnum2.search())
self.assertEqual(values1, values2)
self.assertEqual(len(values1), len(self.seq)+1)
class TestPointerEnumeratorReal(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._memory_handler = folder.load(zeus_856_svchost_exe.dumpname)
#cls._memory_handler = folder.load(putty_1_win7.dumpname)
cls._utils = cls._memory_handler.get_target_platform().get_target_ctypes_utils()
return
@classmethod
def tearDownClass(cls):
cls._utils = None
cls._memory_handler.reset_mappings()
cls._memory_handler = None
return
def setUp(self):
self._heap_finder = self._memory_handler.get_heap_finder()
return
def tearDown(self):
self._heap_finder = None
return
def _stats(self, heap_addrs):
# get the weight per mapping
mapdict = {}
for m in self._memory_handler.get_mappings():
mapdict[m.start] = 0
for addr in heap_addrs:
m = self._memory_handler.get_mapping_for_address(addr)
mapdict[m.start] += 1
        res = [(v, k) for k, v in mapdict.items()]
res.sort()
res.reverse()
print('Most used mappings:')
for cnt,s in res:
if cnt == 0:
continue
m = self._memory_handler.get_mapping_for_address(s)
print(cnt, m)
def test_pointer_enumerators(self):
"""
Search pointers values in one HEAP
:return:
"""
# prep the workers
dumpfilename = self._memory_handler.get_name()
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
walker = walkers[0]
heap_addr = walker.get_heap_address()
heap = walker.get_heap_mapping()
# create the enumerator on the whole mapping
enumerator1 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts1 = timeit.timeit(enumerator1.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum = enumerator1.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum = enumerator1.search()
ts1 = 0.0
heap_addrs1, heap_values1 = zip(*heap_enum)
print('WordAlignedEnumerator: %d pointers, timeit %0.2f' % (len(heap_addrs1), ts1))
self._stats(heap_addrs1)
def test_pointer_enumerators_allocated(self):
"""
Search pointers values in allocated chunks from one HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
heap_walker = walkers[0]
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
if False:
###
ts2 = timeit.timeit(enumerator2.search, number=3)
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
# ... do something ...
heap_enum2 = enumerator2.search()
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print(s.getvalue())
###
else:
heap_enum2 = enumerator2.search()
ts2 = 0.0
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('AllocatedWordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
self._stats(heap_addrs2)
def test_pointer_enumerators_all(self):
"""
Search pointers values in all HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
all_heaps_addrs = []
for walker in walkers:
#if heap.start != 0x03360000:
# continue
heap = walker.get_heap_mapping()
log.debug('heap is %s', heap)
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.WordAlignedEnumerator(heap, matcher, feedback, word_size)
# collect the pointers
heap_enum2 = enumerator2.search()
ts2 = 0.0
if len(heap_enum2) == 0:
logging.debug('Heap %s has no pointers in allocated blocks', heap)
else:
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('WordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
all_heaps_addrs.extend(heap_addrs2)
##
if False:
print("Pointers:")
for k,v in heap_enum2:
print(hex(k), hex(v))
self._stats(all_heaps_addrs)
def test_pointer_enumerators_allocated_all(self):
"""
Search pointers values in allocated chunks from all HEAP
:return:
"""
# prep the workers
word_size = self._memory_handler.get_target_platform().get_word_size()
feedback = searchers.NoFeedback()
matcher = haystack.reverse.matchers.PointerEnumerator(self._memory_handler)
finder = self._memory_handler.get_heap_finder()
walkers = finder.list_heap_walkers()
all_heaps_addrs = []
for heap_walker in walkers:
#if heap.start != 0x03360000:
# continue
heap = heap_walker.get_heap_mapping()
log.debug('heap is %s', heap)
# create the enumerator on the allocated chunks mapping
enumerator2 = haystack.reverse.enumerators.AllocatedWordAlignedEnumerator(heap_walker, matcher, feedback, word_size)
# collect the pointers
heap_enum2 = enumerator2.search()
ts2 = 0.0
if len(heap_enum2) == 0:
logging.debug('Heap %s has no pointers in allocated blocks', heap)
else:
heap_addrs2, heap_values2 = zip(*heap_enum2)
logging.debug('AllocatedWordAlignedEnumerator: %d pointers, timeit %0.2f', len(heap_addrs2), ts2)
all_heaps_addrs.extend(heap_addrs2)
##
if False:
print("Pointers:")
for k,v in heap_enum2:
print(hex(k), hex(v))
print("Allocations:")
for addr, size in heap_walker.get_user_allocations():
print(hex(addr), '->', hex(addr+size), '(%x)'%size)
print("Free chunks:")
for addr, size in heap_walker.get_free_chunks():
print(hex(addr), '->', hex(addr+size), '(%x)'%size)
self._stats(all_heaps_addrs)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
# logging.getLogger("test_pointerfinder").setLevel(logging.DEBUG)
unittest.main()
|
trolldbois/python-haystack-reverse
|
test/haystack/reverse/test_pointerfinder.py
|
Python
|
gpl-3.0
| 12,279
|
'''
Script to add period-of-record daily observation counts to the station
observation netCDF file.
'''
import os
from twx.utils import TwxConfig, ymdL
from twx.db import add_obs_cnt
if __name__ == '__main__':
twx_cfg = TwxConfig(os.getenv('TOPOWX_INI'))
for elem in twx_cfg.obs_main_elems:
print ("Adding monthly observation counts for %s from %d to %d... " %
(elem, ymdL(twx_cfg.obs_start_date), ymdL(twx_cfg.obs_end_date)))
add_obs_cnt(twx_cfg.fpath_stndata_nc_all, elem,
twx_cfg.obs_start_date, twx_cfg.obs_end_date,
twx_cfg.stn_agg_chunk)
print ("Adding monthly observation counts for %s from %d to %d... " %
(elem, ymdL(twx_cfg.interp_start_date), ymdL(twx_cfg.interp_end_date)))
add_obs_cnt(twx_cfg.fpath_stndata_nc_all, elem,
twx_cfg.interp_start_date, twx_cfg.interp_end_date,
twx_cfg.stn_agg_chunk)
|
jaredwo/topowx
|
scripts/step05_calc_obs_cnts.py
|
Python
|
gpl-3.0
| 1,001
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/spacecat/AutonomousFlight/simulation/simulation_ws/install/include;/usr/include/eigen3".split(';') if "/home/spacecat/AutonomousFlight/simulation/simulation_ws/install/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;mav_msgs;roscpp;sensor_msgs;glog_catkin".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llee_position_controller;-lroll_pitch_yawrate_thrust_controller".split(';') if "-llee_position_controller;-lroll_pitch_yawrate_thrust_controller" != "" else []
PROJECT_NAME = "rotors_control"
PROJECT_SPACE_DIR = "/home/spacecat/AutonomousFlight/simulation/simulation_ws/install"
PROJECT_VERSION = "1.0.0"
|
chickonice/AutonomousFlight
|
simulation/simulation_ws/build/rotors_simulator/rotors_control/catkin_generated/pkg.installspace.context.pc.py
|
Python
|
gpl-3.0
| 777
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# TODO:
# Ability to set CPU/Memory reservations
try:
import json
except ImportError:
import simplejson as json
HAS_PYSPHERE = False
try:
from pysphere import VIServer, VIProperty, MORTypes
from pysphere.resources import VimService_services as VI
from pysphere.vi_task import VITask
from pysphere import VIException, VIApiException, FaultTypes
HAS_PYSPHERE = True
except ImportError:
pass
import re
import ssl
DOCUMENTATION = '''
---
module: vsphere_guest
short_description: Create/delete/manage a guest VM through VMware vSphere.
description:
- Create/delete/reconfigure a guest VM through VMware vSphere. This module has a dependency on pysphere >= 1.7
version_added: "1.6"
options:
vcenter_hostname:
description:
- The hostname of the vcenter server the module will connect to, to create the guest.
required: true
default: null
aliases: []
validate_certs:
description:
- Validate SSL certs. Note, if running on python without SSLContext
support (typically, python < 2.7.9) you will have to set this to C(no)
as pysphere does not support validating certificates on older python.
Prior to 2.1, this module would always validate on python >= 2.7.9 and
never validate on python <= 2.7.8.
required: false
default: yes
choices: ['yes', 'no']
version_added: 2.1
guest:
description:
- The virtual server name you wish to manage.
required: true
username:
description:
- Username to connect to vcenter as.
required: true
default: null
password:
description:
- Password of the user to connect to vcenter as.
required: true
default: null
resource_pool:
description:
- The name of the resource_pool to create the VM in.
required: false
default: None
cluster:
description:
- The name of the cluster to create the VM in. By default this is derived from the host you tell the module to build the guest on.
required: false
default: None
esxi:
description:
- Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
required: false
default: null
state:
description:
- Indicate desired state of the vm. 'reconfigured' only applies changes to 'memory_mb' and 'num_cpus' in vm_hardware parameter, and only when hot-plugging is enabled for the guest.
default: present
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
default: no
choices: ['yes', 'no']
template_src:
version_added: "1.9"
description:
- Name of the source template to deploy from
default: None
snapshot_to_clone:
description:
- A string that when specified, will create a linked clone copy of the VM. Snapshot must already be taken in vCenter.
version_added: "2.0"
required: false
default: none
power_on_after_clone:
description:
- Specifies if the VM should be powered on after the clone.
required: false
default: yes
choices: ['yes', 'no']
vm_disk:
description:
- A key, value list of disks and their sizes and which datastore to keep it in.
required: false
default: null
vm_hardware:
description:
- A key, value list of VM config settings. Must include ['memory_mb', 'num_cpus', 'osid', 'scsi'].
required: false
default: null
vm_nic:
description:
- A key, value list of nics, their types and what network to put them on.
required: false
default: null
vm_extra_config:
description:
- A key, value pair of any extra values you want set or changed in the vmx file of the VM. Useful to set advanced options on the VM.
required: false
default: null
vm_hw_version:
description:
- Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
required: false
default: null
version_added: "1.7"
vmware_guest_facts:
description:
- Gather facts from vCenter on a particular VM
required: false
default: null
force:
description:
- Boolean. Allows you to run commands which may alter the running state of a guest. Also used to reconfigure and destroy.
default: "no"
choices: [ "yes", "no" ]
notes:
- This module should run from a system that can access vSphere directly.
Either by using local_action, or using delegate_to.
author: "Richard Hoop (@rhoop) <[email protected]>"
requirements:
- "python >= 2.6"
- pysphere
'''
EXAMPLES = '''
# Create a new VM on an ESX server
# Returns changed = False when the VM already exists
# Returns changed = True and adds ansible_facts from the new VM
# State will set the power status of a guest upon creation. Use powered_on to create and boot.
# Options ['state', 'vm_extra_config', 'vm_disk', 'vm_nic', 'vm_hardware', 'esxi'] are required together
# Note: vm_floppy support added in 2.0
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: powered_on
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
folder: MyFolder
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
# VMs can be put into folders. The value given here is either the full path
# to the folder (e.g. production/customerA/lamp) or just the last component
# of the path (e.g. lamp):
folder: production/customerA/lamp
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
nic2:
type: vmxnet3
network: dvSwitch Network
network_type: dvs
vm_hardware:
memory_mb: 2048
num_cpus: 2
osid: centos64Guest
scsi: paravirtual
vm_cdrom:
type: "iso"
iso_path: "DatastoreName/cd-image.iso"
vm_floppy:
type: "image"
image_path: "DatastoreName/floppy-image.flp"
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Reconfigure the CPU and Memory on the newly created VM
# Will return the changes made
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: reconfigured
vm_extra_config:
vcpu.hotadd: yes
mem.hotadd: yes
notes: This is a test VM
vm_disk:
disk1:
size_gb: 10
type: thin
datastore: storage001
vm_nic:
nic1:
type: vmxnet3
network: VM Network
network_type: standard
vm_hardware:
memory_mb: 4096
num_cpus: 4
osid: centos64Guest
scsi: paravirtual
esxi:
datacenter: MyDatacenter
hostname: esx001.mydomain.local
# Deploy a guest from a template
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
from_template: yes
template_src: centosTemplate
cluster: MainCluster
resource_pool: "/Resources"
vm_extra_config:
folder: MyFolder
# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
vmware_guest_facts: yes
# Typical output of a vsphere_facts run on a guest
# If VMware tools is not installed, ipaddresses will return None
- hw_eth0:
- addresstype: "assigned"
label: "Network adapter 1"
macaddress: "00:22:33:33:44:55"
macaddress_dash: "00-22-33-33-44-55"
ipaddresses: ['192.0.2.100', '2001:DB8:56ff:feac:4d8a']
summary: "VM Network"
hw_guest_full_name: "newvm001"
hw_guest_id: "rhel6_64Guest"
hw_memtotal_mb: 2048
hw_name: "centos64Guest"
hw_power_status: "POWERED ON",
hw_processor_count: 2
hw_product_uuid: "ef50bac8-2845-40ff-81d9-675315501dac"
hw_power_status will be one of the following values:
- POWERED ON
- POWERED OFF
- SUSPENDED
- POWERING ON
- POWERING OFF
- SUSPENDING
- RESETTING
- BLOCKED ON MSG
- REVERTING TO SNAPSHOT
- UNKNOWN
as seen in the VMPowerState-Class of PySphere: http://git.io/vlwOq
# Remove a vm from vSphere
# The VM must be powered_off or you need to use force to force a shutdown
- vsphere_guest:
vcenter_hostname: vcenter.mydomain.local
username: myuser
password: mypass
guest: newvm001
state: absent
force: yes
'''
def add_scsi_controller(module, s, config, devices, type="paravirtual", bus_num=0, disk_ctrl_key=1):
# add a scsi controller
scsi_ctrl_spec = config.new_deviceChange()
scsi_ctrl_spec.set_element_operation('add')
if type == "lsi":
# For RHEL5
scsi_ctrl = VI.ns0.VirtualLsiLogicController_Def("scsi_ctrl").pyclass()
elif type == "paravirtual":
# For RHEL6
scsi_ctrl = VI.ns0.ParaVirtualSCSIController_Def("scsi_ctrl").pyclass()
elif type == "lsi_sas":
scsi_ctrl = VI.ns0.VirtualLsiLogicSASController_Def(
"scsi_ctrl").pyclass()
elif type == "bus_logic":
scsi_ctrl = VI.ns0.VirtualBusLogicController_Def("scsi_ctrl").pyclass()
else:
s.disconnect()
module.fail_json(
msg="Error adding scsi controller to vm spec. No scsi controller"
" type of: %s" % (type))
scsi_ctrl.set_element_busNumber(int(bus_num))
scsi_ctrl.set_element_key(int(disk_ctrl_key))
scsi_ctrl.set_element_sharedBus("noSharing")
scsi_ctrl_spec.set_element_device(scsi_ctrl)
# Add the scsi controller to the VM spec.
devices.append(scsi_ctrl_spec)
return disk_ctrl_key
def add_disk(module, s, config_target, config, devices, datastore, type="thin", size=200000, disk_ctrl_key=1, disk_number=0, key=0):
# add a vmdk disk
# Verify the datastore exists
datastore_name, ds = find_datastore(module, s, datastore, config_target)
# create a new disk - file based - for the vm
disk_spec = config.new_deviceChange()
disk_spec.set_element_fileOperation("create")
disk_spec.set_element_operation("add")
disk_ctlr = VI.ns0.VirtualDisk_Def("disk_ctlr").pyclass()
disk_backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def(
"disk_backing").pyclass()
disk_backing.set_element_fileName(datastore_name)
disk_backing.set_element_diskMode("persistent")
if type != "thick":
disk_backing.set_element_thinProvisioned(1)
disk_ctlr.set_element_key(key)
disk_ctlr.set_element_controllerKey(int(disk_ctrl_key))
disk_ctlr.set_element_unitNumber(int(disk_number))
disk_ctlr.set_element_backing(disk_backing)
disk_ctlr.set_element_capacityInKB(int(size))
disk_spec.set_element_device(disk_ctlr)
devices.append(disk_spec)
def add_cdrom(module, s, config_target, config, devices, default_devs, type="client", vm_cd_iso_path=None):
# Add a cd-rom
# Make sure the datastore exists.
if vm_cd_iso_path:
iso_location = vm_cd_iso_path.split('/', 1)
datastore, ds = find_datastore(
module, s, iso_location[0], config_target)
iso_path = iso_location[1]
# find ide controller
ide_ctlr = None
for dev in default_devs:
if dev.typecode.type[1] == "VirtualIDEController":
ide_ctlr = dev
# add a cdrom based on a physical device
if ide_ctlr:
cd_spec = config.new_deviceChange()
cd_spec.set_element_operation('add')
cd_ctrl = VI.ns0.VirtualCdrom_Def("cd_ctrl").pyclass()
if type == "iso":
iso = VI.ns0.VirtualCdromIsoBackingInfo_Def("iso").pyclass()
ds_ref = iso.new_datastore(ds)
ds_ref.set_attribute_type(ds.get_attribute_type())
iso.set_element_datastore(ds_ref)
iso.set_element_fileName("%s %s" % (datastore, iso_path))
cd_ctrl.set_element_backing(iso)
cd_ctrl.set_element_key(20)
cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
cd_ctrl.set_element_unitNumber(0)
cd_spec.set_element_device(cd_ctrl)
elif type == "client":
client = VI.ns0.VirtualCdromRemoteAtapiBackingInfo_Def(
"client").pyclass()
client.set_element_deviceName("")
cd_ctrl.set_element_backing(client)
cd_ctrl.set_element_key(20)
cd_ctrl.set_element_controllerKey(ide_ctlr.get_element_key())
cd_ctrl.set_element_unitNumber(0)
cd_spec.set_element_device(cd_ctrl)
else:
s.disconnect()
module.fail_json(
msg="Error adding cdrom of type %s to vm spec. "
" cdrom type can either be iso or client" % (type))
devices.append(cd_spec)
def add_floppy(module, s, config_target, config, devices, default_devs, type="image", vm_floppy_image_path=None):
# Add a floppy
# Make sure the datastore exists.
if vm_floppy_image_path:
image_location = vm_floppy_image_path.split('/', 1)
datastore, ds = find_datastore(
module, s, image_location[0], config_target)
image_path = image_location[1]
floppy_spec = config.new_deviceChange()
floppy_spec.set_element_operation('add')
floppy_ctrl = VI.ns0.VirtualFloppy_Def("floppy_ctrl").pyclass()
if type == "image":
image = VI.ns0.VirtualFloppyImageBackingInfo_Def("image").pyclass()
ds_ref = image.new_datastore(ds)
ds_ref.set_attribute_type(ds.get_attribute_type())
image.set_element_datastore(ds_ref)
image.set_element_fileName("%s %s" % (datastore, image_path))
floppy_ctrl.set_element_backing(image)
floppy_ctrl.set_element_key(3)
floppy_spec.set_element_device(floppy_ctrl)
elif type == "client":
client = VI.ns0.VirtualFloppyRemoteDeviceBackingInfo_Def(
"client").pyclass()
client.set_element_deviceName("/dev/fd0")
floppy_ctrl.set_element_backing(client)
floppy_ctrl.set_element_key(3)
floppy_spec.set_element_device(floppy_ctrl)
else:
s.disconnect()
module.fail_json(
msg="Error adding floppy of type %s to vm spec. "
" floppy type can either be image or client" % (type))
devices.append(floppy_spec)
def add_nic(module, s, nfmor, config, devices, nic_type="vmxnet3", network_name="VM Network", network_type="standard"):
# add a NIC
# Different network card types are: "VirtualE1000",
# "VirtualE1000e","VirtualPCNet32", "VirtualVmxnet", "VirtualNmxnet2",
# "VirtualVmxnet3"
nic_spec = config.new_deviceChange()
nic_spec.set_element_operation("add")
if nic_type == "e1000":
nic_ctlr = VI.ns0.VirtualE1000_Def("nic_ctlr").pyclass()
elif nic_type == "e1000e":
nic_ctlr = VI.ns0.VirtualE1000e_Def("nic_ctlr").pyclass()
elif nic_type == "pcnet32":
nic_ctlr = VI.ns0.VirtualPCNet32_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet":
nic_ctlr = VI.ns0.VirtualVmxnet_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet2":
nic_ctlr = VI.ns0.VirtualVmxnet2_Def("nic_ctlr").pyclass()
elif nic_type == "vmxnet3":
nic_ctlr = VI.ns0.VirtualVmxnet3_Def("nic_ctlr").pyclass()
else:
s.disconnect()
module.fail_json(
msg="Error adding nic to vm spec. No nic type of: %s" %
(nic_type))
if network_type == "standard":
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(network_name)
elif network_type == "dvs":
# Get the portgroup key
portgroupKey = find_portgroup_key(module, s, nfmor, network_name)
# Get the dvswitch uuid
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, portgroupKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(portgroupKey)
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
else:
s.disconnect()
module.fail_json(
msg="Error adding nic backing to vm spec. No network type of:"
" %s" % (network_type))
nic_ctlr.set_element_addressType("generated")
nic_ctlr.set_element_backing(nic_backing)
nic_ctlr.set_element_key(4)
nic_spec.set_element_device(nic_ctlr)
devices.append(nic_spec)
def find_datastore(module, s, datastore, config_target):
# Verify the datastore exists and put it in brackets if it does.
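    # If no datastore name is given, the first datastore found is used.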
ds = None
if config_target:
for d in config_target.Datastore:
if (d.Datastore.Accessible and
(datastore and d.Datastore.Name == datastore)
or (not datastore)):
ds = d.Datastore.Datastore
datastore = d.Datastore.Name
break
else:
        for ds_mor, ds_name in s.get_datastores().items():
ds_props = VIProperty(s, ds_mor)
if (ds_props.summary.accessible and (datastore and ds_name == datastore)
or (not datastore)):
ds = ds_mor
datastore = ds_name
if not ds:
s.disconnect()
module.fail_json(msg="Datastore: %s does not appear to exist" %
(datastore))
datastore_name = "[%s]" % datastore
return datastore_name, ds
def find_portgroup_key(module, s, nfmor, network_name):
    # Find a portgroup's key given the portgroup name.
# Grab all the distributed virtual portgroup's names and key's.
dvpg_mors = s._retrieve_properties_traversal(
property_names=['name', 'key'],
from_node=nfmor, obj_type='DistributedVirtualPortgroup')
# Get the correct portgroup managed object.
dvpg_mor = None
for dvpg in dvpg_mors:
if dvpg_mor:
break
for p in dvpg.PropSet:
if p.Name == "name" and p.Val == network_name:
dvpg_mor = dvpg
if dvpg_mor:
break
# If dvpg_mor is empty we didn't find the named portgroup.
if dvpg_mor is None:
s.disconnect()
module.fail_json(
msg="Could not find the distributed virtual portgroup named"
" %s" % network_name)
# Get the portgroup key
portgroupKey = None
for p in dvpg_mor.PropSet:
if p.Name == "key":
portgroupKey = p.Val
return portgroupKey
def find_dvswitch_uuid(module, s, nfmor, portgroupKey):
# Find a dvswitch's uuid given a portgroup key.
# Function searches all dvswitches in the datacenter to find the switch
# that has the portgroup key.
# Grab the dvswitch uuid and portgroup properties
dvswitch_mors = s._retrieve_properties_traversal(
property_names=['uuid', 'portgroup'],
from_node=nfmor, obj_type='DistributedVirtualSwitch')
dvswitch_mor = None
# Get the dvswitches managed object
for dvswitch in dvswitch_mors:
if dvswitch_mor:
break
for p in dvswitch.PropSet:
if p.Name == "portgroup":
pg_mors = p.Val.ManagedObjectReference
for pg_mor in pg_mors:
if dvswitch_mor:
break
key_mor = s._get_object_properties(
pg_mor, property_names=['key'])
for key in key_mor.PropSet:
if key.Val == portgroupKey:
dvswitch_mor = dvswitch
# Get the switches uuid
dvswitch_uuid = None
for p in dvswitch_mor.PropSet:
if p.Name == "uuid":
dvswitch_uuid = p.Val
return dvswitch_uuid
def spec_singleton(spec, request, vm):
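    # Lazily create a single reconfigure spec bound to the VM's managed
    # object reference; later calls simply return the existing spec.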
if not spec:
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
return spec
def vmdisk_id(vm, current_datastore_name):
id_list = []
for vm_disk in vm._disks:
if current_datastore_name in vm_disk['descriptor']:
id_list.append(vm_disk['device']['key'])
return id_list
def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone, vm_extra_config):
vmTemplate = vsphere_client.get_vm_by_name(template_src)
vmTarget = None
if esxi:
datacenter = esxi['datacenter']
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
# hostFolder managed reference
hfmor = dcprops.hostFolder._obj
        # Grab the computeResource name and host properties
crmors = vsphere_client._retrieve_properties_traversal(
property_names=['name', 'host'],
from_node=hfmor,
obj_type='ComputeResource')
# Grab the host managed object reference of the esxi_hostname
try:
hostmor = [k for k,
v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
# Grab the computeResource managed object reference of the host we are
# creating the VM on.
crmor = None
for cr in crmors:
if crmor:
break
for p in cr.PropSet:
if p.Name == "host":
for h in p.Val.get_element_ManagedObjectReference():
if h == hostmor:
crmor = cr.Obj
break
if crmor:
break
crprops = VIProperty(vsphere_client, crmor)
rpmor = crprops.resourcePool._obj
elif resource_pool:
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
else:
module.fail_json(msg="You need to specify either esxi:[datacenter,hostname] or [cluster,resource_pool]")
try:
vmTarget = vsphere_client.get_vm_by_name(guest)
except Exception:
pass
if not vmTemplate.is_powered_off():
module.fail_json(
msg="Source %s must be powered off" % template_src
)
try:
if not vmTarget:
cloneArgs = dict(resourcepool=rpmor, power_on=power_on_after_clone)
if snapshot_to_clone is not None:
#check if snapshot_to_clone is specified, Create a Linked Clone instead of a full clone.
cloneArgs["linked"] = True
cloneArgs["snapshot"] = snapshot_to_clone
if vm_extra_config.get("folder") is not None:
# if a folder is specified, clone the VM into it
cloneArgs["folder"] = vm_extra_config.get("folder")
vmTemplate.clone(guest, **cloneArgs)
changed = True
else:
changed = False
vsphere_client.disconnect()
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(
msg="Could not clone selected machine: %s" % e
)
# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
# was used.
def update_disks(vsphere_client, vm, module, vm_disk, changes):
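    # Add any disk declared in vm_disk that is not already present on the VM
    # (disks are matched by the number in the device label); existing disks
    # are left untouched.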
request = VI.ReconfigVM_TaskRequestMsg()
changed = False
for cnf_disk in vm_disk:
disk_id = re.sub("disk", "", cnf_disk)
found = False
for dev_key in vm._devices:
if vm._devices[dev_key]['type'] == 'VirtualDisk':
hdd_id = vm._devices[dev_key]['label'].split()[2]
if disk_id == hdd_id:
found = True
continue
if not found:
it = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
spec = request.new_spec()
dc = spec.new_deviceChange()
dc.Operation = "add"
dc.FileOperation = "create"
hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
hd.Key = -100
hd.UnitNumber = int(disk_id)
hd.CapacityInKB = int(vm_disk[cnf_disk]['size_gb']) * 1024 * 1024
hd.ControllerKey = 1000
# module.fail_json(msg="peos : %s" % vm_disk[cnf_disk])
backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
backing.FileName = "[%s]" % vm_disk[cnf_disk]['datastore']
backing.DiskMode = "persistent"
backing.Split = False
backing.WriteThrough = False
backing.ThinProvisioned = False
backing.EagerlyScrub = False
hd.Backing = backing
dc.Device = hd
spec.DeviceChange = [dc]
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
# Wait for the task to finish
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS,
task.STATE_ERROR])
if status == task.STATE_SUCCESS:
changed = True
changes[cnf_disk] = vm_disk[cnf_disk]
elif status == task.STATE_ERROR:
module.fail_json(
msg="Error reconfiguring vm: %s, [%s]" % (
task.get_error_message(),
vm_disk[cnf_disk]))
return changed, changes
def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
spec = None
changed = False
changes = {}
request = None
shutdown = False
poweron = vm.is_powered_on()
memoryHotAddEnabled = bool(vm.properties.config.memoryHotAddEnabled)
cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)
changed, changes = update_disks(vsphere_client, vm,
module, vm_disk, changes)
request = VI.ReconfigVM_TaskRequestMsg()
# Change Memory
if 'memory_mb' in vm_hardware:
if int(vm_hardware['memory_mb']) != vm.properties.config.hardware.memoryMB:
spec = spec_singleton(spec, request, vm)
if vm.is_powered_on():
if force:
# No hot add but force
if not memoryHotAddEnabled:
shutdown = True
elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
shutdown = True
else:
# Fail on no hot add and no force
if not memoryHotAddEnabled:
module.fail_json(
msg="memoryHotAdd is not enabled. force is "
"required for shutdown")
# Fail on no force and memory shrink
elif int(vm_hardware['memory_mb']) < vm.properties.config.hardware.memoryMB:
module.fail_json(
msg="Cannot lower memory on a live VM. force is "
"required for shutdown")
# set the new RAM size
spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
changes['memory'] = vm_hardware['memory_mb']
# ===( Reconfigure Network )====#
if vm_nic:
changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name)
    # ====( Config CPU )====#
if 'num_cpus' in vm_hardware:
if int(vm_hardware['num_cpus']) != vm.properties.config.hardware.numCPU:
spec = spec_singleton(spec, request, vm)
if vm.is_powered_on():
if force:
# No hot add but force
if not cpuHotAddEnabled:
shutdown = True
elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
if not cpuHotRemoveEnabled:
shutdown = True
else:
# Fail on no hot add and no force
if not cpuHotAddEnabled:
module.fail_json(
msg="cpuHotAdd is not enabled. force is "
"required for shutdown")
# Fail on no force and cpu shrink without hot remove
elif int(vm_hardware['num_cpus']) < vm.properties.config.hardware.numCPU:
if not cpuHotRemoveEnabled:
module.fail_json(
msg="Cannot lower CPU on a live VM without "
"cpuHotRemove. force is required for shutdown")
spec.set_element_numCPUs(int(vm_hardware['num_cpus']))
changes['cpu'] = vm_hardware['num_cpus']
if len(changes):
if shutdown and vm.is_powered_on():
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e)
)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
# Wait for the task to finish
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
changed = True
elif status == task.STATE_ERROR:
module.fail_json(
msg="Error reconfiguring vm: %s" % task.get_error_message())
if vm.is_powered_off() and poweron:
try:
vm.power_on(sync_run=True)
except Exception, e:
module.fail_json(
msg='Failed to power on vm %s : %s' % (guest, e)
)
vsphere_client.disconnect()
if changed:
module.exit_json(changed=True, changes=changes)
module.exit_json(changed=False)
def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None):
s = vsphere_client
nics = {}
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
nic_changes = []
datacenter = esxi['datacenter']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
nfmor = dcprops.networkFolder._obj
for k,v in vm_nic.iteritems():
nicNum = k[len(k) -1]
if vm_nic[k]['network_type'] == 'dvs':
portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
todvs = True
elif vm_nic[k]['network_type'] == 'standard':
todvs = False
# Detect cards that need to be changed and network type (and act accordingly)
for dev in vm.properties.config.hardware.device:
if dev._type in ["VirtualE1000", "VirtualE1000e",
"VirtualPCNet32", "VirtualVmxnet",
"VirtualNmxnet2", "VirtualVmxnet3"]:
devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
if devNum == nicNum:
fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
if todvs and fromdvs:
if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
nics[k] = (dev, portgroupKey, 1)
elif fromdvs and not todvs:
nics[k] = (dev, '', 2)
elif not fromdvs and todvs:
nics[k] = (dev, portgroupKey, 3)
elif not fromdvs and not todvs:
if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
nics[k] = (dev, '', 2)
else:
pass
else:
module.exit_json()
if len(nics) > 0:
for nic, obj in nics.iteritems():
"""
1,2 and 3 are used to mark which action should be taken
1 = from a distributed switch to a distributed switch
2 = to a standard switch
3 = to a distributed switch
"""
dev = obj[0]
pgKey = obj[1]
dvsKey = obj[2]
if dvsKey == 1:
dev.backing.port._obj.set_element_portgroupKey(pgKey)
dev.backing.port._obj.set_element_portKey('')
if dvsKey == 3:
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(pgKey)
nic_backing_port.set_element_portKey('')
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
dev._obj.set_element_backing(nic_backing)
if dvsKey == 2:
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(vm_nic[nic]['network'])
dev._obj.set_element_backing(nic_backing)
for nic, obj in nics.iteritems():
dev = obj[0]
spec = request.new_spec()
nic_change = spec.new_deviceChange()
nic_change.set_element_device(dev._obj)
nic_change.set_element_operation("edit")
nic_changes.append(nic_change)
spec.set_element_deviceChange(nic_changes)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
return(True)
elif status == task.STATE_ERROR:
module.fail_json(msg="Could not change network %s" % task.get_error_message())
elif len(nics) == 0:
return(False)
def _build_folder_tree(nodes, parent):
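    # Recursively turn the flat node list into a nested dict keyed by folder
    # name, where each node carries its children in a 'subfolders' dict.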
tree = {}
for node in nodes:
if node['parent'] == parent:
tree[node['name']] = dict.copy(node)
tree[node['name']]['subfolders'] = _build_folder_tree(nodes, node['id'])
del tree[node['name']]['parent']
return tree
def _find_path_in_tree(tree, path):
for name, o in tree.iteritems():
if name == path[0]:
if len(path) == 1:
return o
else:
return _find_path_in_tree(o['subfolders'], path[1:])
return None
def _get_folderid_for_path(vsphere_client, datacenter, path):
content = vsphere_client._retrieve_properties_traversal(property_names=['name', 'parent'], obj_type=MORTypes.Folder)
if not content: return {}
node_list = [
{
'id': o.Obj,
'name': o.PropSet[0].Val,
'parent': (o.PropSet[1].Val if len(o.PropSet) > 1 else None)
} for o in content
]
tree = _build_folder_tree(node_list, datacenter)
tree = _find_path_in_tree(tree, ['vm'])['subfolders']
folder = _find_path_in_tree(tree, path.split('/'))
return folder['id'] if folder else None
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state):
datacenter = esxi['datacenter']
esxi_hostname = esxi['hostname']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
# hostFolder managed reference
hfmor = dcprops.hostFolder._obj
# virtualmachineFolder managed object reference
if vm_extra_config.get('folder'):
# try to find the folder by its full path, e.g. 'production/customerA/lamp'
vmfmor = _get_folderid_for_path(vsphere_client, dcmor, vm_extra_config.get('folder'))
# try the legacy behaviour of just matching the folder name, so 'lamp' alone matches 'production/customerA/lamp'
if vmfmor is None:
for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems():
if name == vm_extra_config['folder']:
vmfmor = mor
# if neither of strategies worked, bail out
if vmfmor is None:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder'])
else:
vmfmor = dcprops.vmFolder._obj
# networkFolder managed object reference
nfmor = dcprops.networkFolder._obj
    # Grab the computeResource name and host properties
crmors = vsphere_client._retrieve_properties_traversal(
property_names=['name', 'host'],
from_node=hfmor,
obj_type='ComputeResource')
# Grab the host managed object reference of the esxi_hostname
try:
hostmor = [k for k,
v in vsphere_client.get_hosts().items() if v == esxi_hostname][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find esx host named: %s" % esxi_hostname)
    # Grab the computeResource managed object reference of the host we are
# creating the VM on.
crmor = None
for cr in crmors:
if crmor:
break
for p in cr.PropSet:
if p.Name == "host":
for h in p.Val.get_element_ManagedObjectReference():
if h == hostmor:
crmor = cr.Obj
break
if crmor:
break
crprops = VIProperty(vsphere_client, crmor)
# Get resource pool managed reference
# Requires that a cluster name be specified.
if resource_pool:
try:
cluster = [k for k,
v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Cluster named: %s" %
cluster_name)
try:
rpmor = [k for k, v in vsphere_client.get_resource_pools(
from_mor=cluster).items()
if v == resource_pool][0]
except IndexError, e:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find Resource Pool named: %s" %
resource_pool)
else:
rpmor = crprops.resourcePool._obj
# CREATE VM CONFIGURATION
# get config target
request = VI.QueryConfigTargetRequestMsg()
_this = request.new__this(crprops.environmentBrowser._obj)
_this.set_attribute_type(
crprops.environmentBrowser._obj.get_attribute_type())
request.set_element__this(_this)
h = request.new_host(hostmor)
h.set_attribute_type(hostmor.get_attribute_type())
request.set_element_host(h)
config_target = vsphere_client._proxy.QueryConfigTarget(request)._returnval
# get default devices
request = VI.QueryConfigOptionRequestMsg()
_this = request.new__this(crprops.environmentBrowser._obj)
_this.set_attribute_type(
crprops.environmentBrowser._obj.get_attribute_type())
request.set_element__this(_this)
h = request.new_host(hostmor)
h.set_attribute_type(hostmor.get_attribute_type())
request.set_element_host(h)
config_option = vsphere_client._proxy.QueryConfigOption(request)._returnval
default_devs = config_option.DefaultDevice
# add parameters to the create vm task
create_vm_request = VI.CreateVM_TaskRequestMsg()
config = create_vm_request.new_config()
if vm_hw_version:
config.set_element_version(vm_hw_version)
vmfiles = config.new_files()
datastore_name, ds = find_datastore(
module, vsphere_client, vm_disk['disk1']['datastore'], config_target)
vmfiles.set_element_vmPathName(datastore_name)
config.set_element_files(vmfiles)
config.set_element_name(guest)
if 'notes' in vm_extra_config:
config.set_element_annotation(vm_extra_config['notes'])
config.set_element_memoryMB(int(vm_hardware['memory_mb']))
config.set_element_numCPUs(int(vm_hardware['num_cpus']))
config.set_element_guestId(vm_hardware['osid'])
devices = []
# Attach all the hardware we want to the VM spec.
# Add a scsi controller to the VM spec.
disk_ctrl_key = add_scsi_controller(
module, vsphere_client, config, devices, vm_hardware['scsi'])
if vm_disk:
disk_num = 0
disk_key = 0
for disk in sorted(vm_disk.iterkeys()):
try:
datastore = vm_disk[disk]['datastore']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. datastore needs to be"
" specified." % disk)
try:
disksize = int(vm_disk[disk]['size_gb'])
                # Convert the disk size to kilobytes
disksize = disksize * 1024 * 1024
except (KeyError, ValueError):
vsphere_client.disconnect()
module.fail_json(msg="Error on %s definition. size needs to be specified as an integer." % disk)
try:
disktype = vm_disk[disk]['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. type needs to be"
" specified." % disk)
# Add the disk to the VM spec.
add_disk(
module, vsphere_client, config_target, config,
devices, datastore, disktype, disksize, disk_ctrl_key,
disk_num, disk_key)
disk_num = disk_num + 1
disk_key = disk_key + 1
if 'vm_cdrom' in vm_hardware:
cdrom_iso_path = None
cdrom_type = None
try:
cdrom_type = vm_hardware['vm_cdrom']['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. cdrom type needs to be"
" specified." % vm_hardware['vm_cdrom'])
if cdrom_type == 'iso':
try:
cdrom_iso_path = vm_hardware['vm_cdrom']['iso_path']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. cdrom iso_path needs"
" to be specified." % vm_hardware['vm_cdrom'])
# Add a CD-ROM device to the VM.
add_cdrom(module, vsphere_client, config_target, config, devices,
default_devs, cdrom_type, cdrom_iso_path)
if 'vm_floppy' in vm_hardware:
floppy_image_path = None
floppy_type = None
try:
floppy_type = vm_hardware['vm_floppy']['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. floppy type needs to be"
" specified." % vm_hardware['vm_floppy'])
if floppy_type == 'image':
try:
floppy_image_path = vm_hardware['vm_floppy']['image_path']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. floppy image_path needs"
" to be specified." % vm_hardware['vm_floppy'])
# Add a floppy to the VM.
add_floppy(module, vsphere_client, config_target, config, devices,
default_devs, floppy_type, floppy_image_path)
if vm_nic:
for nic in sorted(vm_nic.iterkeys()):
try:
nictype = vm_nic[nic]['type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. type needs to be "
" specified." % nic)
try:
network = vm_nic[nic]['network']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. network needs to be "
" specified." % nic)
try:
network_type = vm_nic[nic]['network_type']
except KeyError:
vsphere_client.disconnect()
module.fail_json(
msg="Error on %s definition. network_type needs to be "
" specified." % nic)
# Add the nic to the VM spec.
add_nic(module, vsphere_client, nfmor, config, devices,
nictype, network, network_type)
config.set_element_deviceChange(devices)
create_vm_request.set_element_config(config)
folder_mor = create_vm_request.new__this(vmfmor)
folder_mor.set_attribute_type(vmfmor.get_attribute_type())
create_vm_request.set_element__this(folder_mor)
rp_mor = create_vm_request.new_pool(rpmor)
rp_mor.set_attribute_type(rpmor.get_attribute_type())
create_vm_request.set_element_pool(rp_mor)
host_mor = create_vm_request.new_host(hostmor)
host_mor.set_attribute_type(hostmor.get_attribute_type())
create_vm_request.set_element_host(host_mor)
# CREATE THE VM
taskmor = vsphere_client._proxy.CreateVM_Task(create_vm_request)._returnval
task = VITask(taskmor, vsphere_client)
task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if task.get_state() == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error creating vm: %s" %
task.get_error_message())
else:
# We always need to get the vm because we are going to gather facts
vm = vsphere_client.get_vm_by_name(guest)
# VM was created. If there is any extra config options specified, set
# them here , disconnect from vcenter, then exit.
if vm_extra_config:
vm.set_extra_config(vm_extra_config)
# Power on the VM if it was requested
power_state(vm, state, True)
vmfacts=gather_facts(vm)
vsphere_client.disconnect()
module.exit_json(
ansible_facts=vmfacts,
changed=True,
changes="Created VM %s" % guest)
def delete_vm(vsphere_client, module, guest, vm, force):
try:
if vm.is_powered_on():
if force:
try:
vm.power_off(sync_run=True)
vm.get_status()
except Exception, e:
module.fail_json(
msg='Failed to shutdown vm %s: %s' % (guest, e))
else:
module.fail_json(
                    msg='You must either shut the vm down first or '
'use force ')
# Invoke Destroy_Task
request = VI.Destroy_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
ret = vsphere_client._proxy.Destroy_Task(request)._returnval
task = VITask(ret, vsphere_client)
# Wait for the task to finish
status = task.wait_for_state(
[task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_ERROR:
vsphere_client.disconnect()
module.fail_json(msg="Error removing vm: %s %s" %
task.get_error_message())
module.exit_json(changed=True, changes="VM %s deleted" % guest)
except Exception, e:
module.fail_json(
msg='Failed to delete vm %s : %s' % (guest, e))
def power_state(vm, state, force):
"""
Correctly set the power status for a VM determined by the current and
requested states. When force is True, transitions out of transient or
suspended states are allowed.
"""
power_status = vm.get_status()
check_status = ' '.join(state.split("_")).upper()
# Need Force
if not force and power_status in [
'SUSPENDED', 'POWERING ON',
'RESETTING', 'BLOCKED ON MSG'
]:
return "VM is in %s power state. Force is required!" % power_status
# State is already true
if power_status == check_status:
return False
else:
try:
if state == 'powered_off':
vm.power_off(sync_run=True)
elif state == 'powered_on':
vm.power_on(sync_run=True)
elif state == 'restarted':
if power_status in ('POWERED ON', 'POWERING ON', 'RESETTING'):
vm.reset(sync_run=False)
else:
return "Cannot restart VM in the current state %s" \
% power_status
return True
except Exception, e:
return e
return False
def gather_facts(vm):
"""
Gather facts for VM directly from vsphere.
"""
vm.get_properties()
facts = {
'module_hw': True,
'hw_name': vm.properties.name,
'hw_power_status': vm.get_status(),
'hw_guest_full_name': vm.properties.config.guestFullName,
'hw_guest_id': vm.properties.config.guestId,
'hw_product_uuid': vm.properties.config.uuid,
'hw_processor_count': vm.properties.config.hardware.numCPU,
'hw_memtotal_mb': vm.properties.config.hardware.memoryMB,
'hw_interfaces':[],
}
netInfo = vm.get_property('net')
netDict = {}
if netInfo:
for net in netInfo:
netDict[net['mac_address']] = net['ip_addresses']
ifidx = 0
for entry in vm.properties.config.hardware.device:
if not hasattr(entry, 'macAddress'):
continue
factname = 'hw_eth' + str(ifidx)
facts[factname] = {
'addresstype': entry.addressType,
'label': entry.deviceInfo.label,
'macaddress': entry.macAddress,
'ipaddresses': netDict.get(entry.macAddress, None),
'macaddress_dash': entry.macAddress.replace(':', '-'),
'summary': entry.deviceInfo.summary,
}
facts['hw_interfaces'].append('eth'+str(ifidx))
ifidx += 1
return facts
class DefaultVMConfig(object):
"""
Shallow and deep dict comparison for interfaces
"""
def __init__(self, check_dict, interface_dict):
self.check_dict, self.interface_dict = check_dict, interface_dict
self.set_current, self.set_past = set(
check_dict.keys()), set(interface_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
self.recursive_missing = None
def shallow_diff(self):
return self.set_past - self.intersect
def recursive_diff(self):
if not self.recursive_missing:
self.recursive_missing = []
for key, value in self.interface_dict.items():
if isinstance(value, dict):
for k, v in value.items():
if k in self.check_dict[key]:
if not isinstance(self.check_dict[key][k], v):
try:
if v == int:
self.check_dict[key][k] = int(self.check_dict[key][k])
elif v == basestring:
self.check_dict[key][k] = str(self.check_dict[key][k])
else:
raise ValueError
except ValueError:
self.recursive_missing.append((k, v))
else:
self.recursive_missing.append((k, v))
return self.recursive_missing
def config_check(name, passed, default, module):
"""
Checks that the dict passed for VM configuration matches the required
interface declared at the top of __main__
"""
diff = DefaultVMConfig(passed, default)
if len(diff.shallow_diff()):
module.fail_json(
msg="Missing required key/pair [%s]. %s must contain %s" %
(', '.join(diff.shallow_diff()), name, default))
if diff.recursive_diff():
module.fail_json(
msg="Config mismatch for %s on %s" %
(name, diff.recursive_diff()))
return True
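# --- Illustrative sketch (added; not part of the original module) ---
# How config_check() is meant to be used. The dicts below are hypothetical;
# the real prototypes are declared at the top of main().
#
# proto = {'disk1': {'datastore': basestring, 'size_gb': int, 'type': basestring}}
# passed = {'disk1': {'datastore': 'store1', 'size_gb': '20', 'type': 'thin'}}
# config_check('vm_disk', passed, proto, module)
#
# shallow_diff() reports prototype keys missing from the passed dict, while
# recursive_diff() coerces leaf values ('20' -> 20) and reports any value
# that cannot be converted to its declared type.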
def main():
vm = None
proto_vm_hardware = {
'memory_mb': int,
'num_cpus': int,
'scsi': basestring,
'osid': basestring
}
proto_vm_disk = {
'disk1': {
'datastore': basestring,
'size_gb': int,
'type': basestring
}
}
proto_vm_nic = {
'nic1': {
'type': basestring,
'network': basestring,
'network_type': basestring
}
}
proto_esxi = {
'datacenter': basestring,
'hostname': basestring
}
module = AnsibleModule(
argument_spec=dict(
vcenter_hostname=dict(required=True, type='str'),
username=dict(required=True, type='str'),
password=dict(required=True, type='str', no_log=True),
state=dict(
required=False,
choices=[
'powered_on',
'powered_off',
'present',
'absent',
'restarted',
'reconfigured'
],
default='present'),
vmware_guest_facts=dict(required=False, type='bool'),
from_template=dict(required=False, type='bool'),
template_src=dict(required=False, type='str'),
snapshot_to_clone=dict(required=False, default=None, type='str'),
guest=dict(required=True, type='str'),
vm_disk=dict(required=False, type='dict', default={}),
vm_nic=dict(required=False, type='dict', default={}),
vm_hardware=dict(required=False, type='dict', default={}),
vm_extra_config=dict(required=False, type='dict', default={}),
vm_hw_version=dict(required=False, default=None, type='str'),
resource_pool=dict(required=False, default=None, type='str'),
cluster=dict(required=False, default=None, type='str'),
force=dict(required=False, type='bool', default=False),
esxi=dict(required=False, type='dict', default={}),
validate_certs=dict(required=False, type='bool', default=True),
power_on_after_clone=dict(required=False, type='bool', default=True)
),
supports_check_mode=False,
mutually_exclusive=[['state', 'vmware_guest_facts'],['state', 'from_template']],
required_together=[
['state', 'force'],
[
'state',
'vm_disk',
'vm_nic',
'vm_hardware',
'esxi'
],
['from_template', 'template_src'],
],
)
if not HAS_PYSPHERE:
module.fail_json(msg='pysphere module required')
vcenter_hostname = module.params['vcenter_hostname']
username = module.params['username']
password = module.params['password']
vmware_guest_facts = module.params['vmware_guest_facts']
state = module.params['state']
guest = module.params['guest']
force = module.params['force']
vm_disk = module.params['vm_disk']
vm_nic = module.params['vm_nic']
vm_hardware = module.params['vm_hardware']
vm_extra_config = module.params['vm_extra_config']
vm_hw_version = module.params['vm_hw_version']
esxi = module.params['esxi']
resource_pool = module.params['resource_pool']
cluster = module.params['cluster']
template_src = module.params['template_src']
from_template = module.params['from_template']
snapshot_to_clone = module.params['snapshot_to_clone']
power_on_after_clone = module.params['power_on_after_clone']
validate_certs = module.params['validate_certs']
# CONNECT TO THE SERVER
viserver = VIServer()
if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
module.fail_json(msg='pysphere does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
try:
viserver.connect(vcenter_hostname, username, password)
except ssl.SSLError as sslerr:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in sslerr.strerror:
if not validate_certs:
default_context = ssl._create_default_https_context
ssl._create_default_https_context = ssl._create_unverified_context
viserver.connect(vcenter_hostname, username, password)
else:
module.fail_json(msg='Unable to validate the certificate of the vcenter host %s' % vcenter_hostname)
else:
raise
except VIApiException, err:
module.fail_json(msg="Cannot connect to %s: %s" %
(vcenter_hostname, err))
# Check if the VM exists before continuing
try:
vm = viserver.get_vm_by_name(guest)
except Exception:
pass
if vm:
# Run for facts only
if vmware_guest_facts:
try:
module.exit_json(ansible_facts=gather_facts(vm))
except Exception, e:
module.fail_json(
msg="Fact gather failed with exception %s" % e)
# Power Changes
elif state in ['powered_on', 'powered_off', 'restarted']:
state_result = power_state(vm, state, force)
# Failure
if isinstance(state_result, basestring):
module.fail_json(msg=state_result)
else:
module.exit_json(changed=state_result)
# Just check that it exists
elif state == 'present':
module.exit_json(changed=False)
# Reconfigure the existing VM with the given parameters
elif state == 'reconfigured':
reconfigure_vm(
vsphere_client=viserver,
vm=vm,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
state=state,
force=force
)
elif state == 'absent':
delete_vm(
vsphere_client=viserver,
module=module,
guest=guest,
vm=vm,
force=force)
# VM doesn't exist
else:
# Fail for fact gather task
if vmware_guest_facts:
module.fail_json(
msg="No such VM %s. Fact gathering requires an existing vm"
% guest)
elif from_template:
deploy_template(
vsphere_client=viserver,
esxi=esxi,
resource_pool=resource_pool,
guest=guest,
template_src=template_src,
module=module,
cluster_name=cluster,
snapshot_to_clone=snapshot_to_clone,
power_on_after_clone=power_on_after_clone,
vm_extra_config=vm_extra_config
)
if state in ['restarted', 'reconfigured']:
module.fail_json(
msg="No such VM %s. States ["
"restarted, reconfigured] required an existing VM" % guest)
elif state == 'absent':
module.exit_json(changed=False, msg="vm %s not present" % guest)
# Create the VM
elif state in ['present', 'powered_off', 'powered_on']:
# Check the guest_config
config_check("vm_disk", vm_disk, proto_vm_disk, module)
config_check("vm_nic", vm_nic, proto_vm_nic, module)
config_check("vm_hardware", vm_hardware, proto_vm_hardware, module)
config_check("esxi", esxi, proto_esxi, module)
create_vm(
vsphere_client=viserver,
module=module,
esxi=esxi,
resource_pool=resource_pool,
cluster_name=cluster,
guest=guest,
vm_extra_config=vm_extra_config,
vm_hardware=vm_hardware,
vm_disk=vm_disk,
vm_nic=vm_nic,
vm_hw_version=vm_hw_version,
state=state
)
viserver.disconnect()
module.exit_json(
changed=False,
vcenter=vcenter_hostname)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
jjshoe/ansible-modules-core
|
cloud/vmware/vsphere_guest.py
|
Python
|
gpl-3.0
| 65,239
|
# Encas Sales Management Server
# Copyright 2013 - Hugo Caille
#
# This file is part of Encas.
#
# Encas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Encas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Encas. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps
from flask import jsonify
from sqlalchemy.exc import OperationalError
class ApiError(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return repr(self.reason)
def serialize(self):
return {'error' : True, 'reason' : self.reason}
class MissingFieldsError(Exception):
def __init__(self, fields):
self.fields = fields
self.fields.sort()
def reason(self):
# Build a comma-separated, period-terminated list of the missing fields.
return "Missing fields: " + ", ".join(str(field) for field in self.fields) + "."
def __str__(self):
return self.reason()
def serialize(self):
return {'error' : True, 'reason' : self.reason()}
def errorhandler(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
data = func(*args, **kwargs)
result = {'error' : False}
if data is not None:
result['data'] = data
return jsonify(result)
except MissingFieldsError as e:
return jsonify(e.serialize())
except ApiError as e:
return jsonify(e.serialize())
except OperationalError as e:
return jsonify({'error' : True, 'reason' : "Cannot access database"})
except ValueError:
return jsonify({'error' : True, 'reason' : "Invalid input"})
return wrapper
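# --- Illustrative sketch (added; not part of the original module) ---
# A minimal example, assuming a Flask app, of wrapping an API view with
# @errorhandler so success and failure share one JSON envelope. The route
# and lookup below are hypothetical.
#
# from flask import Flask
# app = Flask(__name__)
#
# @app.route('/api/account/<int:account_id>')
# @errorhandler
# def get_account(account_id):
#     if account_id <= 0:
#         raise ApiError("Unknown account")  # -> {"error": true, "reason": "Unknown account"}
#     return {'account': account_id}         # -> {"error": false, "data": {"account": ...}}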
|
hugoatease/encas
|
errors.py
|
Python
|
gpl-3.0
| 2,359
|
# -*- coding: utf-8 -*-
from django import forms
from store.models import Achievement, AchievementLink, Upgrade
class RedeemForm(forms.Form):
code = forms.CharField(label='Bonus Code', min_length=Achievement.key_length(), max_length=Achievement.key_length())
class OrderForm(forms.Form):
upgrade = forms.ModelChoiceField(queryset = Upgrade.objects.all())
|
dstelter/ctfstore
|
store/forms.py
|
Python
|
gpl-3.0
| 365
|
import pandas
WEATHER_FNAME = 'weather.dat'
FOOTBALL_FNAME = 'football.dat'
def read_weather(fname=WEATHER_FNAME):
"""Read the weather file into a DataFrame and return it.
Pandas has many input routines (all prefixed with "read")
- http://pandas.pydata.org/pandas-docs/stable/io.html
Examining the weather.dat file we see that it has 17 columns. This file
might look like it's white space delimited but no! This, my friends, is
a fixed width file. Although Pandas allows arbitrary regular expressions
for delimiter values (for example we could use "\s+" for one or more white
spaces) there are some columns that have no values and this would break.
For example, the column HDDay has no values until the 9th row. Using "one
or more white spaces" as the delimiter would make 53.8 the value for HDDay
in the first row.
The function that we want is pandas.read_fwf (for fixed width file). It
turns out that pandas.read_fwf is *almost* smart enough to automatically
determine the widths of the columns. In the end we need to specify them
to get the last columns read correctly.
"""
# things I tried that don't work
# 1) df = pandas.read_csv(fname)
# 2) df = pandas.read_csv(fname, delimiter=' ')
# 3) df = pandas.read_csv(fname, delimiter='\s+')
# 4) df = pandas.read_fwf(fname)
df = pandas.read_fwf(
fname, widths=[4, 6, 6, 6, 7, 6, 5, 6, 6, 6, 5, 4, 4, 4, 4, 4, 6])
# we still have a row on top full of NaN because there was a blank line
# just below the header. we could use dropna(axis=0, how='all') but that
# would also drop any rows that happen to be empty in the middle of the
# data. instead we can simply use drop(0) which is the label of the row.
# also note that almost every pandas operation returns a new object and
# doesn't operate in place so we assign the results to df.
df = df.drop(0)
return df
if __name__ == '__main__':
# I usually use a naming convention that appends "_df" to DataFrames
weather_df = read_weather()
# Pandas guesses the types for each column. "object" is a native python
# string and what Pandas defaults to when it can't guess.
print(weather_df.dtypes)
print()
# you can index columns by passing a string or a list of strings
# into the square bracket operator
print(weather_df['WxType'])
print()
print(weather_df[['HDDay', 'AvSLP']])
print()
# "loc" and "iloc" are ways to index into the DataFrame
|
galtay/data_sci_ale
|
code_kata_04/kata_04.py
|
Python
|
gpl-3.0
| 2,540
|
import random
class Livingbeing():
def __init__(self, name, health, armor, mindset, emotion, skills, inventory):
self.name = name
self.health = health
self.armor = armor
self.mindset = mindset
self.emotion = emotion
self.skills = skills
self.inventory = inventory
self.hunger = random.randrange(1, 5)
self.thirst = 0
def takeDamage(self, dmgAmount):
if dmgAmount > self.armor:
self.health = self.health - (dmgAmount - self.armor)
if self.health <= 0:
# Exit only when health is exhausted, not after every damaging hit.
quit()
|
MrZigler/UnderdogMilitia
|
charactors.py
|
Python
|
gpl-3.0
| 628
|
# Generated by Django 2.1.5 on 2019-10-01 19:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0005_auto_20191001_1559'),
]
operations = [
migrations.AlterField(
model_name='dataset',
name='dts_type',
field=models.CharField(choices=[('0', 'User Comment'), ('1', 'Validation History'), ('2', 'Reported Issue')], default='0', help_text='Differentiate user comments from automatic validation or defect comments.', max_length=1, verbose_name='Type'),
),
]
|
linea-it/dri
|
api/comment/migrations/0006_auto_20191001_1943.py
|
Python
|
gpl-3.0
| 596
|
# pylint: disable=missing-module-docstring, missing-class-docstring
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hc_dpd', '0011_align_with_hc_20220301'),
]
operations = [
migrations.AlterField(
model_name='company',
name='company_name',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AlterField(
model_name='company',
name='street_name',
field=models.CharField(blank=True, max_length=80, null=True),
),
]
|
studybuffalo/studybuffalo
|
study_buffalo/hc_dpd/migrations/0012_align_with_hc_20220301_2.py
|
Python
|
gpl-3.0
| 620
|
from scipy.stats import chi2_contingency
import numpy as np
obs = np.array([[10, 10, 20], [20, 20, 20]])
chi2_contingency(obs)
g, p, dof, expctd = chi2_contingency(obs, lambda_="log-likelihood")
g, p
obs = np.array(
[[[[12, 17],
[11, 16]],
[[11, 12],
[15, 16]]],
[[[23, 15],
[30, 22]],
[[14, 17],
[15, 16]]]])
print(chi2_contingency(obs))
|
davidam/python-examples
|
statistics/chi2contigency.py
|
Python
|
gpl-3.0
| 391
|
# Importing Modules from PyQt5
from PyQt5.QtWidgets import QSizePolicy, QPushButton, QFrame, QWidget, QStackedWidget
from PyQt5.QtGui import QColor
# Importing Modules from the App
from Gui import Table, Plot, Funcs, Budget
from Settings import StyleSheets as St
def smallerNumber(number1, number2):
if number1 < number2:
return number1
else:
return number2
def fill_a_list(List, filler, length):
List = List + [filler for i in range(length)]
return List
class App(QWidget):
# The Main Window... This Widget will be the main window.
# Other widgets such as the TablePage and PlotPage will be called from here in a StackedWidget
def __init__(self):
super(App, self).__init__()
self.setWindowTitle('Finances App 2') # Set the title of the app
self.setGeometry(500, 500, 1600, 880) # Set the Geometry of the Window
### Setting the Colour of the app background
p = self.palette()
b_col = QColor(St.background_colour)
p.setColor(self.backgroundRole(), b_col)
self.setPalette(p)
self.initUI()
def initUI(self):
self.TableStackItem = Table.TablePage()
self.PlotStackItem = Plot.App_Bit()
self.BudgetStackItem = Budget.SettingsPage()
sidebar_frame = self.sideBar()
self.FullStack = QStackedWidget(self)
self.FullStack.addWidget(self.TableStackItem)
self.FullStack.addWidget(self.PlotStackItem)
self.FullStack.addWidget(self.BudgetStackItem)
self.onTabButton()
Funcs.AllInOneLayout(self,[sidebar_frame,self.FullStack],Stretches=[1,10],VH="H")
self.show()
def sideBar(self):
sidebar_frame = QFrame()
sidebar_frame.setMinimumWidth(110)
#sidebar_frame.setStyleSheet(St.StyleSheets['Sidebar'])
button_titles = ['Data\nTables','Plotting','Budget']
button_titles = fill_a_list(button_titles, '', St.number_of_buttons_on_sidebar-len(button_titles))
self.buttons = []
but_funcs = [self.onTabButton, self.onPlotButton, self.onBudgetButton ]
but_funcs = fill_a_list(but_funcs, self.emptyFunc, St.number_of_buttons_on_sidebar-len(but_funcs))
for i in range(St.number_of_buttons_on_sidebar):
button = QPushButton(button_titles[i])
button.setStyleSheet(St.StyleSheets['Button%i'%i])
button.clicked.connect(but_funcs[i])
button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
button.setCheckable(True)
self.buttons.append(button)
Funcs.AllInOneLayout(sidebar_frame, self.buttons, VH='V')# add the buttons to sidebar_frame vertically, aligning them at the top.
#frame_layout.setSizeLayout(QSizePolicy.Expanding, QSizePolicy.Expanding)
return sidebar_frame
# These buttons change which widget we can see in the stacked widget
def onTabButton(self):
self.TableStackItem.setFocus()
self.FullStack.setCurrentIndex(0)
def onPlotButton(self):
self.PlotStackItem.setFocus()
self.FullStack.setCurrentIndex(1)
def onBudgetButton(self):
self.BudgetStackItem.setFocus()
self.FullStack.setCurrentIndex(2)
def emptyFunc(self):
return 0
|
95ellismle/FinancesApp2
|
Gui/App.py
|
Python
|
gpl-3.0
| 3,438
|
import os
import imp
from tinydb import TinyDB
from paths import DB_DIR
def scan_plugins_dir(plugins_dir='plugins'):
"""Scan the given dir for files matching the spec for plugin files"""
for plugin_file in os.listdir(plugins_dir):
plugin_path = os.path.join(plugins_dir, plugin_file)
if (not plugin_file.startswith('_') and
plugin_file.endswith('.py') and
os.path.isfile(plugin_path)):
yield plugin_file, plugin_path
def load_plugin(filename, path):
return imp.load_source(filename, path)
def list_plugins():
for plugin_file, plugin_path in scan_plugins_dir():
yield load_plugin(plugin_file, plugin_path)
def new_database(name):
full_path = os.path.join(DB_DIR, f'{name}.json')
return TinyDB(full_path)
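# --- Illustrative sketch (added; not part of the original module) ---
# How the helpers above fit together: every top-level .py file in ./plugins
# whose name does not start with '_' is imported as a module. __name__ is a
# standard module attribute; anything plugin-specific beyond that would be a
# convention of the host application.
#
# for plugin in list_plugins():
#     print('loaded plugin:', plugin.__name__)
#
# db = new_database('example')  # opens DB_DIR/example.json as a TinyDB store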
|
sentriz/steely
|
steely/utils.py
|
Python
|
gpl-3.0
| 800
|
#!/usr/bin/env python
import rospy
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtGui import QHBoxLayout, QGroupBox, QTextEdit, QDoubleSpinBox, QColor
# generic helper to generate quickly QDoubleSpinBox
def generate_q_double_spin_box(default_val, range_min, range_max, decimals, single_step):
spin_box = QDoubleSpinBox()
spin_box.setValue(default_val)
spin_box.setRange(range_min, range_max)
spin_box.setDecimals(decimals)
spin_box.setSingleStep(single_step)
#spin_box.valueChanged[unicode].connect(self.callback_spin_box)
return spin_box
# adds a layout with frame and text to parent widget
def add_layout_with_frame(parent, layout, text = ""):
box_layout = QHBoxLayout()
box_layout.addLayout(layout)
group_box = QGroupBox()
group_box.setStyleSheet("QGroupBox { border: 1px solid gray; border-radius: 4px; margin-top: 0.5em; } QGroupBox::title { subcontrol-origin: margin; left: 10px; padding: 0 3px 0 3px; }")
group_box.setTitle(text)
group_box.setLayout(box_layout)
parent.addWidget(group_box)
# adds a widget with frame and text to parent widget
def add_widget_with_frame(parent, widget, text = ""):
box_layout = QHBoxLayout()
box_layout.addWidget(widget)
group_box = QGroupBox()
group_box.setStyleSheet("QGroupBox { border: 1px solid gray; border-radius: 4px; margin-top: 0.5em; } QGroupBox::title { subcontrol-origin: margin; left: 10px; padding: 0 3px 0 3px; }")
group_box.setTitle(text)
group_box.setLayout(box_layout)
parent.addWidget(group_box)
# outputs message with given color at a QTextEdit
def output_message(text_edit, msg, color):
text_edit.setTextColor(color)
text_edit.append(msg)
# outputs error_status msg at QTextEdit field
def output_status(text_edit, error_status):
if (error_status.error != 0):
output_message(text_edit, error_status.error_msg, Qt.red)
if (error_status.warning != 0):
output_message(text_edit, error_status.warning_msg, QColor(255, 165, 0))
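# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes a running QApplication, an existing parent layout, and a QTextEdit;
# the label text and values below are made up.
#
# spin = generate_q_double_spin_box(0.5, 0.0, 1.0, 2, 0.05)
# add_widget_with_frame(parent_layout, spin, "Step duration [s]")
# output_message(text_edit, "planner ready", Qt.green)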
|
TRECVT/vigir_footstep_planning_basics
|
vigir_footstep_planning_lib/src/vigir_footstep_planning_lib/qt_helper.py
|
Python
|
gpl-3.0
| 2,044
|
import asyncio
from mandelbrot.transport import *
class MockTransport(Transport):
def mock_create_item(self, path, item):
raise NotImplementedError()
@asyncio.coroutine
def create_item(self, path, item):
return self.mock_create_item(path, item)
def mock_replace_item(self, path, item):
raise NotImplementedError()
@asyncio.coroutine
def replace_item(self, path, item):
return self.mock_replace_item(path, item)
def mock_delete_item(self, path):
raise NotImplementedError()
@asyncio.coroutine
def delete_item(self, path):
return self.mock_delete_item(path)
def mock_get_item(self, path, filters):
raise NotImplementedError()
@asyncio.coroutine
def get_item(self, path, filters):
return self.mock_get_item(path, filters)
def mock_patch_item(self, path, fields, constraints):
raise NotImplementedError()
@asyncio.coroutine
def patch_item(self, path, fields, constraints):
return self.mock_patch_item(path, fields, constraints)
def mock_get_collection(self, path, matchers, count, last):
raise NotImplementedError()
@asyncio.coroutine
def get_collection(self, path, matchers, count, last):
return self.mock_get_collection(path, matchers, count, last)
def mock_delete_collection(self, path, params):
raise NotImplementedError()
@asyncio.coroutine
def delete_collection(self, path, params):
return self.mock_delete_collection(path, params)
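# --- Illustrative sketch (added; not part of the original module) ---
# Intended usage in a hypothetical test: subclass MockTransport and override
# only the mock_* hook the unit under test should hit. Hooks left alone raise
# NotImplementedError, so unexpected transport calls fail loudly.
#
# class GetItemTransport(MockTransport):
#     def mock_get_item(self, path, filters):
#         assert path == 'v2/systems/example'
#         return {'systemId': 'example'}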
|
msfrank/mandelbrot
|
test/mock_transport.py
|
Python
|
gpl-3.0
| 1,546
|
# encoding: utf-8
import ckan.plugins
import ckanext.multilingual.plugin as multilingual_plugin
import ckan.lib.helpers
import ckan.lib.create_test_data
import ckan.logic.action.update
import ckan.model as model
import ckan.tests.legacy
import ckan.tests.legacy.html_check
import routes
import paste.fixture
import pylons.test
_create_test_data = ckan.lib.create_test_data
class TestDatasetTermTranslation(ckan.tests.legacy.html_check.HtmlCheckMethods):
'Test the translation of datasets by the multilingual_dataset plugin.'
@classmethod
def setup(cls):
cls.app = paste.fixture.TestApp(pylons.test.pylonsapp)
ckan.plugins.load('multilingual_dataset')
ckan.plugins.load('multilingual_group')
ckan.plugins.load('multilingual_tag')
ckan.tests.legacy.setup_test_search_index()
_create_test_data.CreateTestData.create_translations_test_data()
cls.sysadmin_user = model.User.get('testsysadmin')
cls.org = {'name': 'test_org',
'title': 'russian',
'description': 'Roger likes these books.'}
ckan.tests.legacy.call_action_api(cls.app, 'organization_create',
apikey=cls.sysadmin_user.apikey,
**cls.org)
dataset = {'name': 'test_org_dataset',
'title': 'A Novel By Tolstoy',
'owner_org': cls.org['name']}
ckan.tests.legacy.call_action_api(cls.app, 'package_create',
apikey=cls.sysadmin_user.apikey,
**dataset)
# Add translation terms that match a couple of group names and package
# names. Group names and package names should _not_ get translated even
# if there are terms matching them, because they are used to form URLs.
for term in ('roger', 'david', 'annakarenina', 'warandpeace'):
for lang_code in ('en', 'de', 'fr'):
data_dict = {'term': term,
'term_translation': 'this should not be rendered',
'lang_code': lang_code}
context = {'model': ckan.model,
'session': ckan.model.Session,
'user': 'testsysadmin'}
ckan.logic.action.update.term_translation_update(
context, data_dict)
@classmethod
def teardown(cls):
ckan.plugins.unload('multilingual_dataset')
ckan.plugins.unload('multilingual_group')
ckan.plugins.unload('multilingual_tag')
ckan.model.repo.rebuild_db()
ckan.lib.search.clear_all()
def test_user_read_translation(self):
'''Test the translation of datasets on user view pages by the
multilingual_dataset plugin.
'''
# It is testsysadmin who created the dataset, so testsysadmin whom
# we'd expect to see the datasets for.
for user_name in ('testsysadmin',):
offset = routes.url_for(
controller='user', action='read', id=user_name)
for (lang_code, translations) in (
('de', _create_test_data.german_translations),
('fr', _create_test_data.french_translations),
('en', _create_test_data.english_translations),
('pl', {})):
response = self.app.get(
offset,
status=200,
extra_environ={'CKAN_LANG': lang_code,
'CKAN_CURRENT_URL': offset})
terms = ('A Novel By Tolstoy',)
for term in terms:
if term in translations:
assert translations[term] in response, response
elif term in _create_test_data.english_translations:
assert (_create_test_data.english_translations[term]
in response)
else:
assert term in response
assert 'this should not be rendered' not in response
def test_org_read_translation(self):
for (lang_code, translations) in (
('de', _create_test_data.german_translations),
('fr', _create_test_data.french_translations),
('en', _create_test_data.english_translations),
('pl', {})):
offset = '/{0}/organization/{1}'.format(
lang_code, self.org['name'])
response = self.app.get(offset, status=200)
terms = ('A Novel By Tolstoy',
'russian',
'Roger likes these books.')
for term in terms:
if term in translations:
assert translations[term] in response
elif term in _create_test_data.english_translations:
assert (_create_test_data.english_translations[term]
in response)
else:
assert term in response
assert 'this should not be rendered' not in response
def test_org_index_translation(self):
for (lang_code, translations) in (
('de', _create_test_data.german_translations),
('fr', _create_test_data.french_translations),
('en', _create_test_data.english_translations),
('pl', {})):
offset = '/{0}/organization'.format(lang_code)
response = self.app.get(offset, status=200)
for term in ('russian', 'Roger likes these books.'):
if term in translations:
assert translations[term] in response
elif term in _create_test_data.english_translations:
assert (_create_test_data.english_translations[term]
in response)
else:
assert term in response, response
assert ('/{0}/organization/{1}'.format(lang_code, self.org['name'])
in response)
assert 'this should not be rendered' not in response
class TestDatasetSearchIndex():
@classmethod
def setup_class(cls):
ckan.plugins.load('multilingual_dataset')
ckan.plugins.load('multilingual_group')
data_dicts = [
{'term': 'moo',
'term_translation': 'french_moo',
'lang_code': 'fr'},
{'term': 'moo',
'term_translation': 'this should not be rendered',
'lang_code': 'fsdas'},
{'term': 'an interesting note',
'term_translation': 'french note',
'lang_code': 'fr'},
{'term': 'moon',
'term_translation': 'french moon',
'lang_code': 'fr'},
{'term': 'boon',
'term_translation': 'french boon',
'lang_code': 'fr'},
{'term': 'boon',
'term_translation': 'italian boon',
'lang_code': 'it'},
{'term': 'david',
'term_translation': 'french david',
'lang_code': 'fr'},
{'term': 'david',
'term_translation': 'italian david',
'lang_code': 'it'}
]
context = {
'model': ckan.model,
'session': ckan.model.Session,
'user': 'testsysadmin',
'ignore_auth': True,
}
for data_dict in data_dicts:
ckan.logic.action.update.term_translation_update(
context, data_dict)
@classmethod
def teardown(cls):
ckan.plugins.unload('multilingual_dataset')
ckan.plugins.unload('multilingual_group')
def test_translate_terms(self):
sample_index_data = {
'download_url': u'moo',
'notes': u'an interesting note',
'tags': [u'moon', 'boon'],
'title': u'david',
}
result = multilingual_plugin.MultilingualDataset().before_index(
sample_index_data)
assert result == {
'text_pl': '',
'text_de': '',
'text_ro': '',
'title': u'david',
'notes': u'an interesting note',
'tags': [u'moon', 'boon'],
'title_en': u'david',
'download_url': u'moo',
'text_it': u'italian boon',
'text_es': '',
'text_en': u'an interesting note moon boon moo',
'text_nl': '',
'title_it': u'italian david',
'text_pt': '',
'title_fr': u'french david',
'text_fr': u'french note french boon french_moo french moon'
}, result
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckanext/multilingual/tests/test_multilingual_plugin.py
|
Python
|
gpl-3.0
| 8,793
|
from root2hdf5.data_types.base import BaseData
import numpy as np
import logging
class SegmentData(BaseData):
logger = logging.getLogger('root2hdf5.data_types.segment')
tree_name = 'image2d_segment_hires_crop_tree'
def __init__(self, _file, output_file):
super(SegmentData, self).__init__(_file)
from larcv import larcv
self.array_converter = larcv.as_ndarray
self.logger.info("Setting Up Segment Data Stream")
self.dataset = output_file.create_dataset("image2d/segment",
(10,3,576,576),
maxshape=(None,3,576,576),
chunks=(10,3,576,576),
dtype='f',compression="gzip")
self.dataset.attrs['name'] = 'image2d_segment_hires_crop_tree'
self.dataset.attrs['index0_name'] = 'eventN'
self.dataset.attrs['index1_name'] = 'layerN'
self.dataset.attrs['index3_name'] = 'pixelX'
self.dataset.attrs['index4_name'] = 'pixelY'
self.buffer = np.ndarray((10,3,576,576), dtype='H')
self.buffer_index=0
def process_branch(self, branch):
for layer in range(3):
layerimage = self.array_converter(branch.at(layer))
layerimage.resize(576,576)
self.buffer[self.buffer_index, layer] = layerimage
self.buffer_index+=1
if self.event_index %10==0:
self.buffer_index=0
self.dataset.resize( (self.event_index+10,3,576,576) )
self.dataset[self.event_index:self.event_index+10,:,:,:] = self.buffer
|
HEP-DL/root2hdf5
|
root2hdf5/plugins/larcv/segment.py
|
Python
|
gpl-3.0
| 1,556
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.views.generic import ListView, DetailView, CreateView
# Import added so the view resolves; assumes Cluster is defined in this app's models.
from .models import Cluster
class ClusterListView(ListView):
model = Cluster
|
Ranjandas/firewallmanager
|
clusters/views.py
|
Python
|
gpl-3.0
| 223
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe, erpnext
from frappe import _, scrub
from frappe.utils import cint, flt, round_based_on_smallest_currency_fraction
from erpnext.controllers.accounts_controller import validate_conversion_rate, \
validate_taxes_and_charges, validate_inclusive_tax
from erpnext.stock.get_item_details import _get_item_tax_template
class calculate_taxes_and_totals(object):
def __init__(self, doc):
self.doc = doc
self.calculate()
def calculate(self):
if not len(self.doc.get("items")):
return
self.discount_amount_applied = False
self._calculate()
if self.doc.meta.get_field("discount_amount"):
self.set_discount_amount()
self.apply_discount_amount()
if self.doc.doctype in ["Sales Invoice", "Purchase Invoice"]:
self.calculate_total_advance()
if self.doc.meta.get_field("other_charges_calculation"):
self.set_item_wise_tax_breakup()
def _calculate(self):
self.validate_conversion_rate()
self.calculate_item_values()
self.validate_item_tax_template()
self.initialize_taxes()
self.determine_exclusive_rate()
self.calculate_net_total()
self.calculate_taxes()
self.manipulate_grand_total_for_inclusive_tax()
self.calculate_totals()
self._cleanup()
self.calculate_total_net_weight()
def validate_item_tax_template(self):
for item in self.doc.get('items'):
if item.item_code and item.get('item_tax_template'):
item_doc = frappe.get_cached_doc("Item", item.item_code)
args = {
'tax_category': self.doc.get('tax_category'),
'posting_date': self.doc.get('posting_date'),
'bill_date': self.doc.get('bill_date'),
'transaction_date': self.doc.get('transaction_date')
}
item_group = item_doc.item_group
item_group_taxes = []
while item_group:
item_group_doc = frappe.get_cached_doc('Item Group', item_group)
item_group_taxes += item_group_doc.taxes or []
item_group = item_group_doc.parent_item_group
item_taxes = item_doc.taxes or []
if not item_group_taxes and (not item_taxes):
# No validation if no taxes in item or item group
continue
taxes = _get_item_tax_template(args, item_taxes + item_group_taxes, for_validate=True)
if item.item_tax_template not in taxes:
frappe.throw(_("Row {0}: Invalid Item Tax Template for item {1}").format(
item.idx, frappe.bold(item.item_code)
))
def validate_conversion_rate(self):
# validate conversion rate
company_currency = erpnext.get_company_currency(self.doc.company)
if not self.doc.currency or self.doc.currency == company_currency:
self.doc.currency = company_currency
self.doc.conversion_rate = 1.0
else:
validate_conversion_rate(self.doc.currency, self.doc.conversion_rate,
self.doc.meta.get_label("conversion_rate"), self.doc.company)
self.doc.conversion_rate = flt(self.doc.conversion_rate)
def calculate_item_values(self):
if not self.discount_amount_applied:
for item in self.doc.get("items"):
self.doc.round_floats_in(item)
if item.discount_percentage == 100:
item.rate = 0.0
elif item.price_list_rate:
if not item.rate or (item.pricing_rules and item.discount_percentage > 0):
item.rate = flt(item.price_list_rate *
(1.0 - (item.discount_percentage / 100.0)), item.precision("rate"))
item.discount_amount = item.price_list_rate * (item.discount_percentage / 100.0)
elif item.discount_amount and item.pricing_rules:
item.rate = item.price_list_rate - item.discount_amount
if item.doctype in ['Quotation Item', 'Sales Order Item', 'Delivery Note Item', 'Sales Invoice Item']:
item.rate_with_margin, item.base_rate_with_margin = self.calculate_margin(item)
if flt(item.rate_with_margin) > 0:
item.rate = flt(item.rate_with_margin * (1.0 - (item.discount_percentage / 100.0)), item.precision("rate"))
item.discount_amount = item.rate_with_margin - item.rate
elif flt(item.price_list_rate) > 0:
item.discount_amount = item.price_list_rate - item.rate
elif flt(item.price_list_rate) > 0 and not item.discount_amount:
item.discount_amount = item.price_list_rate - item.rate
item.net_rate = item.rate
if not item.qty and self.doc.get("is_return"):
item.amount = flt(-1 * item.rate, item.precision("amount"))
else:
item.amount = flt(item.rate * item.qty, item.precision("amount"))
item.net_amount = item.amount
self._set_in_company_currency(item, ["price_list_rate", "rate", "net_rate", "amount", "net_amount"])
item.item_tax_amount = 0.0
def _set_in_company_currency(self, doc, fields):
"""set values in base currency"""
for f in fields:
val = flt(flt(doc.get(f), doc.precision(f)) * self.doc.conversion_rate, doc.precision("base_" + f))
doc.set("base_" + f, val)
def initialize_taxes(self):
for tax in self.doc.get("taxes"):
if not self.discount_amount_applied:
validate_taxes_and_charges(tax)
validate_inclusive_tax(tax, self.doc)
tax.item_wise_tax_detail = {}
tax_fields = ["total", "tax_amount_after_discount_amount",
"tax_amount_for_current_item", "grand_total_for_current_item",
"tax_fraction_for_current_item", "grand_total_fraction_for_current_item"]
if tax.charge_type != "Actual" and \
not (self.discount_amount_applied and self.doc.apply_discount_on=="Grand Total"):
tax_fields.append("tax_amount")
for fieldname in tax_fields:
tax.set(fieldname, 0.0)
self.doc.round_floats_in(tax)
def determine_exclusive_rate(self):
if not any((cint(tax.included_in_print_rate) for tax in self.doc.get("taxes"))):
return
for item in self.doc.get("items"):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
cumulated_tax_fraction = 0
for i, tax in enumerate(self.doc.get("taxes")):
tax.tax_fraction_for_current_item = self.get_current_tax_fraction(tax, item_tax_map)
if i==0:
tax.grand_total_fraction_for_current_item = 1 + tax.tax_fraction_for_current_item
else:
tax.grand_total_fraction_for_current_item = \
self.doc.get("taxes")[i-1].grand_total_fraction_for_current_item \
+ tax.tax_fraction_for_current_item
cumulated_tax_fraction += tax.tax_fraction_for_current_item
if cumulated_tax_fraction and not self.discount_amount_applied and item.qty:
item.net_amount = flt(item.amount / (1 + cumulated_tax_fraction))
item.net_rate = flt(item.net_amount / item.qty, item.precision("net_rate"))
item.discount_percentage = flt(item.discount_percentage,
item.precision("discount_percentage"))
self._set_in_company_currency(item, ["net_rate", "net_amount"])
def _load_item_tax_rate(self, item_tax_rate):
return json.loads(item_tax_rate) if item_tax_rate else {}
def get_current_tax_fraction(self, tax, item_tax_map):
"""
Get tax fraction for calculating tax exclusive amount
from tax inclusive amount
"""
current_tax_fraction = 0
if cint(tax.included_in_print_rate):
tax_rate = self._get_tax_rate(tax, item_tax_map)
if tax.charge_type == "On Net Total":
current_tax_fraction = tax_rate / 100.0
elif tax.charge_type == "On Previous Row Amount":
current_tax_fraction = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].tax_fraction_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_fraction = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].grand_total_fraction_for_current_item
if getattr(tax, "add_deduct_tax", None):
current_tax_fraction *= -1.0 if (tax.add_deduct_tax == "Deduct") else 1.0
return current_tax_fraction
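# Worked example (added note, not in the original source): with a single
# "On Net Total" tax of 10% marked included_in_print_rate, the fraction
# above is 0.10, so determine_exclusive_rate() turns an inclusive amount
# of 110 into net_amount = 110 / (1 + 0.10) = 100.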
def _get_tax_rate(self, tax, item_tax_map):
if tax.account_head in item_tax_map:
return flt(item_tax_map.get(tax.account_head), self.doc.precision("rate", tax))
else:
return tax.rate
def calculate_net_total(self):
self.doc.total_qty = self.doc.total = self.doc.base_total = self.doc.net_total = self.doc.base_net_total = 0.0
for item in self.doc.get("items"):
self.doc.total += item.amount
self.doc.total_qty += item.qty
self.doc.base_total += item.base_amount
self.doc.net_total += item.net_amount
self.doc.base_net_total += item.base_net_amount
self.doc.round_floats_in(self.doc, ["total", "base_total", "net_total", "base_net_total"])
if self.doc.doctype == 'Sales Invoice' and self.doc.is_pos:
self.doc.pos_total_qty = self.doc.total_qty
def calculate_taxes(self):
self.doc.rounding_adjustment = 0
# maintain actual tax rate based on idx
actual_tax_dict = dict([[tax.idx, flt(tax.tax_amount, tax.precision("tax_amount"))]
for tax in self.doc.get("taxes") if tax.charge_type == "Actual"])
for n, item in enumerate(self.doc.get("items")):
item_tax_map = self._load_item_tax_rate(item.item_tax_rate)
for i, tax in enumerate(self.doc.get("taxes")):
# tax_amount represents the amount of tax for the current step
current_tax_amount = self.get_current_tax_amount(item, tax, item_tax_map)
# Adjust divisional loss to the last item
if tax.charge_type == "Actual":
actual_tax_dict[tax.idx] -= current_tax_amount
if n == len(self.doc.get("items")) - 1:
current_tax_amount += actual_tax_dict[tax.idx]
# accumulate tax amount into tax.tax_amount
if tax.charge_type != "Actual" and \
not (self.discount_amount_applied and self.doc.apply_discount_on=="Grand Total"):
tax.tax_amount += current_tax_amount
# store tax_amount for current item as it will be used for
# charge type = 'On Previous Row Amount'
tax.tax_amount_for_current_item = current_tax_amount
# set tax after discount
tax.tax_amount_after_discount_amount += current_tax_amount
current_tax_amount = self.get_tax_amount_if_for_valuation_or_deduction(current_tax_amount, tax)
# note: grand_total_for_current_item contains the contribution of
# item's amount, previously applied tax and the current tax on that item
if i==0:
tax.grand_total_for_current_item = flt(item.net_amount + current_tax_amount)
else:
tax.grand_total_for_current_item = \
flt(self.doc.get("taxes")[i-1].grand_total_for_current_item + current_tax_amount)
# set precision in the last item iteration
if n == len(self.doc.get("items")) - 1:
self.round_off_totals(tax)
self.set_cumulative_total(i, tax)
self._set_in_company_currency(tax,
["total", "tax_amount", "tax_amount_after_discount_amount"])
# adjust Discount Amount loss in last tax iteration
if i == (len(self.doc.get("taxes")) - 1) and self.discount_amount_applied \
and self.doc.discount_amount and self.doc.apply_discount_on == "Grand Total":
self.doc.rounding_adjustment = flt(self.doc.grand_total
- flt(self.doc.discount_amount) - tax.total,
self.doc.precision("rounding_adjustment"))
def get_tax_amount_if_for_valuation_or_deduction(self, tax_amount, tax):
# if just for valuation, do not add the tax amount in total
# if tax/charges is for deduction, multiply by -1
if getattr(tax, "category", None):
tax_amount = 0.0 if (tax.category == "Valuation") else tax_amount
if self.doc.doctype in ["Purchase Order", "Purchase Invoice", "Purchase Receipt", "Supplier Quotation"]:
tax_amount *= -1.0 if (tax.add_deduct_tax == "Deduct") else 1.0
return tax_amount
def set_cumulative_total(self, row_idx, tax):
tax_amount = tax.tax_amount_after_discount_amount
tax_amount = self.get_tax_amount_if_for_valuation_or_deduction(tax_amount, tax)
if row_idx == 0:
tax.total = flt(self.doc.net_total + tax_amount, tax.precision("total"))
else:
tax.total = flt(self.doc.get("taxes")[row_idx-1].total + tax_amount, tax.precision("total"))
def get_current_tax_amount(self, item, tax, item_tax_map):
tax_rate = self._get_tax_rate(tax, item_tax_map)
current_tax_amount = 0.0
if tax.charge_type == "Actual":
# distribute the tax amount proportionally to each item row
actual = flt(tax.tax_amount, tax.precision("tax_amount"))
current_tax_amount = item.net_amount*actual / self.doc.net_total if self.doc.net_total else 0.0
elif tax.charge_type == "On Net Total":
current_tax_amount = (tax_rate / 100.0) * item.net_amount
elif tax.charge_type == "On Previous Row Amount":
current_tax_amount = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].tax_amount_for_current_item
elif tax.charge_type == "On Previous Row Total":
current_tax_amount = (tax_rate / 100.0) * \
self.doc.get("taxes")[cint(tax.row_id) - 1].grand_total_for_current_item
elif tax.charge_type == "On Item Quantity":
current_tax_amount = tax_rate * item.stock_qty
self.set_item_wise_tax(item, tax, tax_rate, current_tax_amount)
return current_tax_amount
def set_item_wise_tax(self, item, tax, tax_rate, current_tax_amount):
# store tax breakup for each item
key = item.item_code or item.item_name
item_wise_tax_amount = current_tax_amount*self.doc.conversion_rate
if tax.item_wise_tax_detail.get(key):
item_wise_tax_amount += tax.item_wise_tax_detail[key][1]
tax.item_wise_tax_detail[key] = [tax_rate,flt(item_wise_tax_amount)]
def round_off_totals(self, tax):
tax.tax_amount = flt(tax.tax_amount, tax.precision("tax_amount"))
tax.tax_amount_after_discount_amount = flt(tax.tax_amount_after_discount_amount,
tax.precision("tax_amount"))
def manipulate_grand_total_for_inclusive_tax(self):
# if fully inclusive taxes and diff
if self.doc.get("taxes") and any([cint(t.included_in_print_rate) for t in self.doc.get("taxes")]):
last_tax = self.doc.get("taxes")[-1]
non_inclusive_tax_amount = sum([flt(d.tax_amount_after_discount_amount)
for d in self.doc.get("taxes") if not d.included_in_print_rate])
diff = self.doc.total + non_inclusive_tax_amount \
- flt(last_tax.total, last_tax.precision("total"))
# If discount amount applied, deduct the discount amount
# because self.doc.total is always without discount, but last_tax.total is after discount
if self.discount_amount_applied and self.doc.discount_amount:
diff -= flt(self.doc.discount_amount)
diff = flt(diff, self.doc.precision("rounding_adjustment"))
if diff and abs(diff) <= (5.0 / 10**last_tax.precision("tax_amount")):
self.doc.rounding_adjustment = diff
def calculate_totals(self):
self.doc.grand_total = flt(self.doc.get("taxes")[-1].total) + flt(self.doc.rounding_adjustment) \
if self.doc.get("taxes") else flt(self.doc.net_total)
self.doc.total_taxes_and_charges = flt(self.doc.grand_total - self.doc.net_total
- flt(self.doc.rounding_adjustment), self.doc.precision("total_taxes_and_charges"))
self._set_in_company_currency(self.doc, ["total_taxes_and_charges", "rounding_adjustment"])
if self.doc.doctype in ["Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
self.doc.base_grand_total = flt(self.doc.grand_total * self.doc.conversion_rate, self.doc.precision("base_grand_total")) \
if self.doc.total_taxes_and_charges else self.doc.base_net_total
else:
self.doc.taxes_and_charges_added = self.doc.taxes_and_charges_deducted = 0.0
for tax in self.doc.get("taxes"):
if tax.category in ["Valuation and Total", "Total"]:
if tax.add_deduct_tax == "Add":
self.doc.taxes_and_charges_added += flt(tax.tax_amount_after_discount_amount)
else:
self.doc.taxes_and_charges_deducted += flt(tax.tax_amount_after_discount_amount)
self.doc.round_floats_in(self.doc, ["taxes_and_charges_added", "taxes_and_charges_deducted"])
self.doc.base_grand_total = flt(self.doc.grand_total * self.doc.conversion_rate) \
if (self.doc.taxes_and_charges_added or self.doc.taxes_and_charges_deducted) \
else self.doc.base_net_total
self._set_in_company_currency(self.doc,
["taxes_and_charges_added", "taxes_and_charges_deducted"])
self.doc.round_floats_in(self.doc, ["grand_total", "base_grand_total"])
self.set_rounded_total()
def calculate_total_net_weight(self):
if self.doc.meta.get_field('total_net_weight'):
self.doc.total_net_weight = 0.0
for d in self.doc.items:
if d.total_weight:
self.doc.total_net_weight += d.total_weight
def set_rounded_total(self):
if self.doc.meta.get_field("rounded_total"):
if self.doc.is_rounded_total_disabled():
self.doc.rounded_total = self.doc.base_rounded_total = 0
return
self.doc.rounded_total = round_based_on_smallest_currency_fraction(self.doc.grand_total,
self.doc.currency, self.doc.precision("rounded_total"))
# if included_in_print_rate is set, we would have already calculated rounding adjustment
self.doc.rounding_adjustment += flt(self.doc.rounded_total - self.doc.grand_total,
self.doc.precision("rounding_adjustment"))
self._set_in_company_currency(self.doc, ["rounding_adjustment", "rounded_total"])
def _cleanup(self):
for tax in self.doc.get("taxes"):
tax.item_wise_tax_detail = json.dumps(tax.item_wise_tax_detail, separators=(',', ':'))
def set_discount_amount(self):
if self.doc.additional_discount_percentage:
self.doc.discount_amount = flt(flt(self.doc.get(scrub(self.doc.apply_discount_on)))
* self.doc.additional_discount_percentage / 100, self.doc.precision("discount_amount"))
def apply_discount_amount(self):
if self.doc.discount_amount:
if not self.doc.apply_discount_on:
frappe.throw(_("Please select Apply Discount On"))
self.doc.base_discount_amount = flt(self.doc.discount_amount * self.doc.conversion_rate,
self.doc.precision("base_discount_amount"))
total_for_discount_amount = self.get_total_for_discount_amount()
taxes = self.doc.get("taxes")
net_total = 0
if total_for_discount_amount:
# calculate item amount after Discount Amount
for i, item in enumerate(self.doc.get("items")):
distributed_amount = flt(self.doc.discount_amount) * \
item.net_amount / total_for_discount_amount
item.net_amount = flt(item.net_amount - distributed_amount, item.precision("net_amount"))
net_total += item.net_amount
# discount amount rounding loss adjustment if no taxes
if (self.doc.apply_discount_on == "Net Total" or not taxes or total_for_discount_amount==self.doc.net_total) \
and i == len(self.doc.get("items")) - 1:
discount_amount_loss = flt(self.doc.net_total - net_total - self.doc.discount_amount,
self.doc.precision("net_total"))
item.net_amount = flt(item.net_amount + discount_amount_loss,
item.precision("net_amount"))
item.net_rate = flt(item.net_amount / item.qty, item.precision("net_rate")) if item.qty else 0
self._set_in_company_currency(item, ["net_rate", "net_amount"])
self.discount_amount_applied = True
self._calculate()
else:
self.doc.base_discount_amount = 0
def get_total_for_discount_amount(self):
if self.doc.apply_discount_on == "Net Total":
return self.doc.net_total
else:
actual_taxes_dict = {}
for tax in self.doc.get("taxes"):
if tax.charge_type == "Actual":
tax_amount = self.get_tax_amount_if_for_valuation_or_deduction(tax.tax_amount, tax)
actual_taxes_dict.setdefault(tax.idx, tax_amount)
elif tax.row_id in actual_taxes_dict:
actual_tax_amount = flt(actual_taxes_dict.get(tax.row_id, 0)) * flt(tax.rate) / 100
actual_taxes_dict.setdefault(tax.idx, actual_tax_amount)
return flt(self.doc.grand_total - sum(actual_taxes_dict.values()),
self.doc.precision("grand_total"))
def calculate_total_advance(self):
if self.doc.docstatus < 2:
total_allocated_amount = sum([flt(adv.allocated_amount, adv.precision("allocated_amount"))
for adv in self.doc.get("advances")])
self.doc.total_advance = flt(total_allocated_amount, self.doc.precision("total_advance"))
grand_total = self.doc.rounded_total or self.doc.grand_total
if self.doc.party_account_currency == self.doc.currency:
invoice_total = flt(grand_total - flt(self.doc.write_off_amount),
self.doc.precision("grand_total"))
else:
base_write_off_amount = flt(flt(self.doc.write_off_amount) * self.doc.conversion_rate,
self.doc.precision("base_write_off_amount"))
invoice_total = flt(grand_total * self.doc.conversion_rate,
self.doc.precision("grand_total")) - base_write_off_amount
if invoice_total > 0 and self.doc.total_advance > invoice_total:
frappe.throw(_("Advance amount cannot be greater than {0} {1}")
.format(self.doc.party_account_currency, invoice_total))
if self.doc.docstatus == 0:
self.calculate_outstanding_amount()
def calculate_outstanding_amount(self):
# NOTE:
# write_off_amount is only for POS Invoice
# total_advance is only for non POS Invoice
if self.doc.doctype == "Sales Invoice":
self.calculate_paid_amount()
if self.doc.is_return and self.doc.return_against: return
self.doc.round_floats_in(self.doc, ["grand_total", "total_advance", "write_off_amount"])
self._set_in_company_currency(self.doc, ['write_off_amount'])
if self.doc.doctype in ["Sales Invoice", "Purchase Invoice"]:
grand_total = self.doc.rounded_total or self.doc.grand_total
if self.doc.party_account_currency == self.doc.currency:
total_amount_to_pay = flt(grand_total - self.doc.total_advance
- flt(self.doc.write_off_amount), self.doc.precision("grand_total"))
else:
total_amount_to_pay = flt(flt(grand_total *
self.doc.conversion_rate, self.doc.precision("grand_total")) - self.doc.total_advance
- flt(self.doc.base_write_off_amount), self.doc.precision("grand_total"))
self.doc.round_floats_in(self.doc, ["paid_amount"])
change_amount = 0
if self.doc.doctype == "Sales Invoice":
self.calculate_write_off_amount()
self.calculate_change_amount()
change_amount = self.doc.change_amount \
if self.doc.party_account_currency == self.doc.currency else self.doc.base_change_amount
paid_amount = self.doc.paid_amount \
if self.doc.party_account_currency == self.doc.currency else self.doc.base_paid_amount
self.doc.outstanding_amount = flt(total_amount_to_pay - flt(paid_amount) + flt(change_amount),
self.doc.precision("outstanding_amount"))
def calculate_paid_amount(self):
paid_amount = base_paid_amount = 0.0
if self.doc.is_pos:
for payment in self.doc.get('payments'):
payment.amount = flt(payment.amount)
payment.base_amount = payment.amount * flt(self.doc.conversion_rate)
paid_amount += payment.amount
base_paid_amount += payment.base_amount
elif not self.doc.is_return:
self.doc.set('payments', [])
if self.doc.redeem_loyalty_points and self.doc.loyalty_amount:
base_paid_amount += self.doc.loyalty_amount
paid_amount += (self.doc.loyalty_amount / flt(self.doc.conversion_rate))
self.doc.paid_amount = flt(paid_amount, self.doc.precision("paid_amount"))
self.doc.base_paid_amount = flt(base_paid_amount, self.doc.precision("base_paid_amount"))
def calculate_change_amount(self):
self.doc.change_amount = 0.0
self.doc.base_change_amount = 0.0
if self.doc.doctype == "Sales Invoice" \
and self.doc.paid_amount > self.doc.grand_total and not self.doc.is_return \
and any([d.type == "Cash" for d in self.doc.payments]):
grand_total = self.doc.rounded_total or self.doc.grand_total
base_grand_total = self.doc.base_rounded_total or self.doc.base_grand_total
self.doc.change_amount = flt(self.doc.paid_amount - grand_total +
self.doc.write_off_amount, self.doc.precision("change_amount"))
self.doc.base_change_amount = flt(self.doc.base_paid_amount - base_grand_total +
self.doc.base_write_off_amount, self.doc.precision("base_change_amount"))
def calculate_write_off_amount(self):
if flt(self.doc.change_amount) > 0:
self.doc.write_off_amount = flt(self.doc.grand_total - self.doc.paid_amount
+ self.doc.change_amount, self.doc.precision("write_off_amount"))
self.doc.base_write_off_amount = flt(self.doc.write_off_amount * self.doc.conversion_rate,
self.doc.precision("base_write_off_amount"))
def calculate_margin(self, item):
rate_with_margin = 0.0
base_rate_with_margin = 0.0
if item.price_list_rate:
if item.pricing_rules and not self.doc.ignore_pricing_rule:
for d in item.pricing_rules.split(','):
pricing_rule = frappe.get_cached_doc('Pricing Rule', d)
if (pricing_rule.margin_type == 'Amount' and pricing_rule.currency == self.doc.currency)\
or (pricing_rule.margin_type == 'Percentage'):
item.margin_type = pricing_rule.margin_type
item.margin_rate_or_amount = pricing_rule.margin_rate_or_amount
else:
item.margin_type = None
item.margin_rate_or_amount = 0.0
if item.margin_type and item.margin_rate_or_amount:
margin_value = item.margin_rate_or_amount if item.margin_type == 'Amount' else flt(item.price_list_rate) * flt(item.margin_rate_or_amount) / 100
rate_with_margin = flt(item.price_list_rate) + flt(margin_value)
base_rate_with_margin = flt(rate_with_margin) * flt(self.doc.conversion_rate)
return rate_with_margin, base_rate_with_margin
def set_item_wise_tax_breakup(self):
self.doc.other_charges_calculation = get_itemised_tax_breakup_html(self.doc)
def get_itemised_tax_breakup_html(doc):
if not doc.taxes:
return
frappe.flags.company = doc.company
# get headers
tax_accounts = []
for tax in doc.taxes:
if getattr(tax, "category", None) and tax.category=="Valuation":
continue
if tax.description not in tax_accounts:
tax_accounts.append(tax.description)
headers = get_itemised_tax_breakup_header(doc.doctype + " Item", tax_accounts)
# get tax breakup data
itemised_tax, itemised_taxable_amount = get_itemised_tax_breakup_data(doc)
get_rounded_tax_amount(itemised_tax, doc.precision("tax_amount", "taxes"))
update_itemised_tax_data(doc)
frappe.flags.company = None
return frappe.render_template(
"templates/includes/itemised_tax_breakup.html", dict(
headers=headers,
itemised_tax=itemised_tax,
itemised_taxable_amount=itemised_taxable_amount,
tax_accounts=tax_accounts,
conversion_rate=doc.conversion_rate,
currency=doc.currency
)
)
@erpnext.allow_regional
def update_itemised_tax_data(doc):
#Don't delete this method, used for localization
pass
@erpnext.allow_regional
def get_itemised_tax_breakup_header(item_doctype, tax_accounts):
return [_("Item"), _("Taxable Amount")] + tax_accounts
@erpnext.allow_regional
def get_itemised_tax_breakup_data(doc):
itemised_tax = get_itemised_tax(doc.taxes)
itemised_taxable_amount = get_itemised_taxable_amount(doc.items)
return itemised_tax, itemised_taxable_amount
def get_itemised_tax(taxes, with_tax_account=False):
itemised_tax = {}
for tax in taxes:
if getattr(tax, "category", None) and tax.category=="Valuation":
continue
item_tax_map = json.loads(tax.item_wise_tax_detail) if tax.item_wise_tax_detail else {}
if item_tax_map:
for item_code, tax_data in item_tax_map.items():
itemised_tax.setdefault(item_code, frappe._dict())
tax_rate = 0.0
tax_amount = 0.0
if isinstance(tax_data, list):
tax_rate = flt(tax_data[0])
tax_amount = flt(tax_data[1])
else:
tax_rate = flt(tax_data)
itemised_tax[item_code][tax.description] = frappe._dict(dict(
tax_rate = tax_rate,
tax_amount = tax_amount
))
if with_tax_account:
itemised_tax[item_code][tax.description].tax_account = tax.account_head
return itemised_tax
def get_itemised_taxable_amount(items):
itemised_taxable_amount = frappe._dict()
for item in items:
item_code = item.item_code or item.item_name
itemised_taxable_amount.setdefault(item_code, 0)
itemised_taxable_amount[item_code] += item.net_amount
return itemised_taxable_amount
def get_rounded_tax_amount(itemised_tax, precision):
# Rounding based on tax_amount precision
for taxes in itemised_tax.values():
for tax_account in taxes:
taxes[tax_account]["tax_amount"] = flt(taxes[tax_account]["tax_amount"], precision)
|
ebukoz/thrive
|
erpnext/controllers/taxes_and_totals.py
|
Python
|
gpl-3.0
| 28,062
|
import time
import random
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
PURPLE = (255, 0, 255)
BROWN = (160, 82, 45)
class Landplots():
def __init__(self, owner, umtype, locationregion, jurisdiction, umvalue, inventory):
self.owner = owner
self.umtype = umtype
self.locationregion = locationregion
self.jurisdiction = jurisdiction
self.umvalue = umvalue
self.inventory = inventory
'''
class Umbuildings(): blacksmithshop (anvil, fire, metal) safehouse, farms (hugelkultur, agria, forest, hort, mixed)
library (books )
def __init__(self, owner):
self.owner = owner '''
''' mines (gold, silver, copper, rareearths, titanium) smelter_furnace (ores to bars)
mints (coins) '''
'''Farm - umtype (1 depleted agra, 5 agra, 10 hort, 15 hort hugelkultur) locationregion (reserved), jurisdiction (1 heavyzoning & regulations,
5 minimum zoning but taxed, 10 no zoning regulations or taxes) umvalue (1 low to 100 highest value)
inventory (improvements, farmanimals, chicken coops, barns etc)
'''
class Farm(Landplots):
def __init__(self, owner, umtype, locationregion, jurisdiction, umvalue, inventory):
Landplots.__init__(self, owner, umtype, locationregion, jurisdiction, umvalue, inventory)
    def producefood(self):
        # simple placeholder yield: better land and a looser jurisdiction produce more
        return self.umvalue + self.jurisdiction
#orchard1 = Farm('Platinum Falcon', 5, 7, 42, 37, {})
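# a minimal usage sketch (owner, numbers and inventory below are illustrative):
# homestead = Farm('Jane Doe', 10, 2, 5, 40, {'barn': 1})
# print(homestead.producefood())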
|
MrZigler/UnderdogMilitia
|
landplots.py
|
Python
|
gpl-3.0
| 1,652
|
# -*-coding:utf-8-*-
# @auth ivan
# @time 2016-10-25 21:00:02
# @goal test for Observer Pattern
class Subject():
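    # holds a single integer state and pushes every change to the attached observers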
def __init__(self):
self.observers = []
self.state = 0
def getState(self):
return self.state
    def setState(self, state):
        self.state = state
        # notify observers whenever the state changes
        self.notifyAllObservers()
    def attach(self, observer):
        self.observers.append(observer)
def notifyAllObservers(self):
for observer in self.observers:
observer.update()
class Observer:
def update(self):
return
class BinaryObserver(Observer):
def __init__(self, subject):
self.subject = subject
self.subject.attach(self)
def update(self):
print("Binary String: " + bin(self.subject.getState()))
# BinaryObserver
class OctalObserver(Observer):
def __init__(self, subject):
self.subject = subject
self.subject.attach(self)
def update(self):
print("Octal String: " + oct(self.subject.getState()))
# OctalObserver
class HexaObserver(Observer):
def __init__(self, subject):
self.subject = subject
self.subject.attach(self)
def update(self):
print("Hex String: " + hex(self.subject.getState()))
# HexaObserver
class ObserverPatternDemo:
def run(self):
self.subject = Subject()
BinaryObserver(self.subject)
OctalObserver(self.subject)
HexaObserver(self.subject)
print("First state change: 15")
self.subject.setState(15)
print("Second state change: 10")
self.subject.setState(10)
O = ObserverPatternDemo()
O.run()
|
IvanaXu/Test_Class_GOF
|
tPatterns/Behavioral_Patterns/test_Observer_Pattern.py
|
Python
|
gpl-3.0
| 1,644
|
import numpy
from numpy.testing import assert_array_almost_equal
import pytest
from sklearn.pipeline import make_pipeline
from sksurv.datasets import load_breast_cancer
from sksurv.ensemble import ExtraSurvivalTrees, RandomSurvivalForest
from sksurv.preprocessing import OneHotEncoder
from sksurv.testing import assert_cindex_almost_equal
FORESTS = [
RandomSurvivalForest,
ExtraSurvivalTrees,
]
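# Both forest classes expose the same estimator API, so most tests below are
# simply parametrized over this list.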
@pytest.mark.parametrize(
'forest_cls, expected_c',
[(RandomSurvivalForest, (0.9026201280123488, 67831, 7318, 0, 14)),
(ExtraSurvivalTrees, (0.8389200122423452, 63044, 12105, 0, 14))]
)
def test_fit_predict(make_whas500, forest_cls, expected_c):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(random_state=2)
forest.fit(whas500.x, whas500.y)
assert len(forest.estimators_) == 100
pred = forest.predict(whas500.x)
assert numpy.isfinite(pred).all()
assert numpy.all(pred >= 0)
assert_cindex_almost_equal(
whas500.y["fstat"], whas500.y["lenfol"], pred, expected_c)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_int_time(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
y = whas500.y
y_int = numpy.empty(y.shape[0],
dtype=[(y.dtype.names[0], bool), (y.dtype.names[1], int)])
y_int[:] = y
forest_f = forest_cls(oob_score=True, random_state=2).fit(whas500.x[50:], y[50:])
forest_i = forest_cls(oob_score=True, random_state=2).fit(whas500.x[50:], y_int[50:])
assert len(forest_f.estimators_) == len(forest_i.estimators_)
assert forest_f.n_features_in_ == forest_i.n_features_in_
assert forest_f.oob_score_ == forest_i.oob_score_
assert_array_almost_equal(forest_f.event_times_, forest_i.event_times_)
pred_f = forest_f.predict(whas500.x[:50])
pred_i = forest_i.predict(whas500.x[:50])
assert_array_almost_equal(pred_f, pred_i)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_predict_chf(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(n_estimators=10, random_state=2)
forest.fit(whas500.x, whas500.y)
assert len(forest.estimators_) == 10
chf = forest.predict_cumulative_hazard_function(whas500.x, return_array=True)
assert chf.shape == (500, forest.event_times_.shape[0])
assert numpy.isfinite(chf).all()
assert numpy.all(chf >= 0.0)
vals, counts = numpy.unique(chf[:, 0], return_counts=True)
assert vals[0] == 0.0
assert numpy.max(counts) == counts[0]
d = numpy.apply_along_axis(numpy.diff, 1, chf)
assert (d >= 0).all()
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_predict_surv(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(n_estimators=10, random_state=2)
forest.fit(whas500.x, whas500.y)
assert len(forest.estimators_) == 10
surv = forest.predict_survival_function(whas500.x, return_array=True)
assert surv.shape == (500, forest.event_times_.shape[0])
assert numpy.isfinite(surv).all()
assert numpy.all(surv >= 0.0)
assert numpy.all(surv <= 1.0)
vals, counts = numpy.unique(surv[:, 0], return_counts=True)
assert vals[-1] == 1.0
assert numpy.max(counts) == counts[-1]
d = numpy.apply_along_axis(numpy.diff, 1, surv)
assert (d <= 0).all()
@pytest.mark.parametrize(
'forest_cls, expected_oob_score',
[(RandomSurvivalForest, 0.753010685),
(ExtraSurvivalTrees, 0.752092510)]
)
def test_oob_score(make_whas500, forest_cls, expected_oob_score):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(oob_score=True, bootstrap=False, random_state=2)
with pytest.raises(ValueError, match="Out of bag estimation only available "
"if bootstrap=True"):
forest.fit(whas500.x, whas500.y)
forest.set_params(bootstrap=True)
forest.fit(whas500.x, whas500.y)
assert forest.oob_prediction_.shape == (whas500.x.shape[0],)
assert round(abs(forest.oob_score_ - expected_oob_score), 6) == 0.0
@pytest.mark.parametrize('forest_cls', FORESTS)
@pytest.mark.parametrize("func", ["predict_survival_function", "predict_cumulative_hazard_function"])
def test_predict_step_function(make_whas500, forest_cls, func):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(n_estimators=10, random_state=2)
forest.fit(whas500.x[10:], whas500.y[10:])
pred_fn = getattr(forest, func)
ret_array = pred_fn(whas500.x[:10], return_array=True)
fn_array = pred_fn(whas500.x[:10], return_array=False)
assert ret_array.shape[0] == fn_array.shape[0]
for fn, arr in zip(fn_array, ret_array):
assert_array_almost_equal(fn.x, forest.event_times_)
assert_array_almost_equal(fn.y, arr)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_oob_too_little_estimators(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(n_estimators=3, oob_score=True, random_state=2)
with pytest.warns(UserWarning, match="Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates."):
forest.fit(whas500.x, whas500.y)
def test_fit_no_bootstrap(make_whas500):
whas500 = make_whas500(to_numeric=True)
forest = RandomSurvivalForest(n_estimators=10, bootstrap=False, random_state=2)
forest.fit(whas500.x, whas500.y)
pred = forest.predict(whas500.x)
expected_c = (0.931881994437717, 70030, 5119, 0, 14)
assert_cindex_almost_equal(
whas500.y["fstat"], whas500.y["lenfol"], pred, expected_c)
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_warm_start(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(n_estimators=11, max_depth=2, random_state=2)
forest.fit(whas500.x, whas500.y)
assert len(forest.estimators_) == 11
assert all((e.max_depth == 2 for e in forest.estimators_))
forest.set_params(warm_start=True)
with pytest.warns(UserWarning, match="Warm-start fitting without increasing "
"n_estimators does not fit new trees."):
forest.fit(whas500.x, whas500.y)
forest.set_params(n_estimators=3)
with pytest.raises(ValueError, match="n_estimators=3 must be larger or equal to "
r"len\(estimators_\)=11 when warm_start==True"):
forest.fit(whas500.x, whas500.y)
forest.set_params(n_estimators=23)
forest.fit(whas500.x, whas500.y)
assert len(forest.estimators_) == 23
assert all((e.max_depth == 2 for e in forest.estimators_))
@pytest.mark.parametrize('forest_cls', FORESTS)
def test_fit_with_small_max_samples(make_whas500, forest_cls):
whas500 = make_whas500(to_numeric=True)
# First fit with no restriction on max samples
est1 = forest_cls(n_estimators=1, random_state=1, max_samples=None)
# Second fit with max samples restricted to just 2
est2 = forest_cls(n_estimators=1, random_state=1, max_samples=2)
est1.fit(whas500.x, whas500.y)
est2.fit(whas500.x, whas500.y)
tree1 = est1.estimators_[0].tree_
tree2 = est2.estimators_[0].tree_
msg = "Tree without `max_samples` restriction should have more nodes"
assert tree1.node_count > tree2.node_count, msg
@pytest.mark.parametrize('forest_cls', FORESTS)
@pytest.mark.parametrize("func", ["predict_survival_function", "predict_cumulative_hazard_function"])
def test_pipeline_predict(breast_cancer, forest_cls, func):
X_str, _ = load_breast_cancer()
X_num, y = breast_cancer
est = forest_cls(n_estimators=10, random_state=1)
est.fit(X_num[10:], y[10:])
pipe = make_pipeline(OneHotEncoder(), forest_cls(n_estimators=10, random_state=1))
pipe.fit(X_str[10:], y[10:])
tree_pred = getattr(est, func)(X_num[:10], return_array=True)
pipe_pred = getattr(pipe, func)(X_str[:10], return_array=True)
assert_array_almost_equal(tree_pred, pipe_pred)
@pytest.mark.parametrize('forest_cls', FORESTS)
@pytest.mark.parametrize(
'max_samples, exc_type, exc_msg',
[(int(1e9), ValueError,
"`max_samples` must be in range 1 to 500 but got value 1000000000"),
(1.0 + 1e-7, ValueError,
r"`max_samples` must be in range \(0\.0, 1\.0] but got value 1.0"),
(2.0, ValueError,
r"`max_samples` must be in range \(0\.0, 1\.0] but got value 2.0"),
(0.0, ValueError,
r"`max_samples` must be in range \(0\.0, 1\.0] but got value 0.0"),
(numpy.nan, ValueError,
r"`max_samples` must be in range \(0\.0, 1\.0] but got value nan"),
(numpy.inf, ValueError,
r"`max_samples` must be in range \(0\.0, 1\.0] but got value inf"),
('str max_samples?!', TypeError,
r"`max_samples` should be int or float, but got "
r"type '\<class 'str'\>'"),
(numpy.ones(2), TypeError,
r"`max_samples` should be int or float, but got type "
r"'\<class 'numpy.ndarray'\>'")]
)
def test_fit_max_samples(make_whas500, forest_cls, max_samples, exc_type, exc_msg):
whas500 = make_whas500(to_numeric=True)
forest = forest_cls(max_samples=max_samples)
with pytest.raises(exc_type, match=exc_msg):
forest.fit(whas500.x, whas500.y)
|
sebp/scikit-survival
|
tests/test_forest.py
|
Python
|
gpl-3.0
| 9,388
|
import numpy as np
import matplotlib.pyplot as plt
import math
from pylab import figure
from my_plotter import *
import os
import sys
sys.path.append('./BCI_Framework')
import Main
import Single_Job_runner as SJR
import re
if __name__ == '__main__':
bciciv1 = Main.Main('BCI_Framework','BCICIV2a','RANDOM_FOREST', 'BP', 'ALL', -1, 'python')
res_path = bciciv1.config.configuration['results_opt_path_str']
classifiers_dict = {'Boosting':0, 'LogisticRegression_l1':1, 'LogisticRegression_l2':2, 'RANDOM_FOREST':3,'SVM_linear':4, 'SVM_rbf':5 }
features_dict = {'BP':0, 'logbp':1, 'wackerman':2,'BPCSP':3, 'logbpCSP':4, 'wackermanCSP':5}
results = np.zeros((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]))
discarded_periods = np.empty((len(classifiers_dict),len(features_dict), bciciv1.config.configuration["number_of_subjects"]), dtype='S10')
subjects_dict = {}
for ind, subj in enumerate(bciciv1.config.configuration["subject_names_str"]):
subjects_dict.update({subj:ind})
for dirname, dirnames, filenames in os.walk(res_path):
# for subdirname in dirnames:
# fold_name = os.path.join(dirname, subdirname)
# print fold_name
for filename in filenames:
# slash_indices = re.search('0', filename)
file_name = os.path.join(dirname, filename)
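            # assumed result-file layout: ...\<classifier>\<feature>\<discard-info>_<subject>
            # (the index arithmetic below relies on Windows-style '\\' separators)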
backslash_indices = [m.start() for m in re.finditer("\\\\", file_name)]
underline_indices = [m.start() for m in re.finditer("_", file_name)]
feature_ext_name = file_name[backslash_indices[-2]+1:backslash_indices[-1]]
classifier_name = file_name[backslash_indices[-3]+1:backslash_indices[-2]]
subj = file_name[underline_indices[-1]+1:]
# print feature_ext_name, classifier_name, subj
with open(file_name,'r') as my_file:
error = float(my_file.readline())
accuracy = 100 - error*100
results[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = accuracy
# print file_name[backslash_indices[-1]+1:underline_indices[1]]
discarded_periods[classifiers_dict[classifier_name], features_dict[feature_ext_name],subjects_dict[subj]] = file_name[backslash_indices[-1]+1:underline_indices[2]]
# print backslash_indices
for feature in features_dict.keys():
f_ind = features_dict[feature]
feature_ext_y = []
labels = []
for subject in subjects_dict.keys():
subj_ind = subjects_dict[subject]
feature_ext_y.append(tuple(results[:,f_ind,subj_ind]))
labels.append(feature + '_' + subject)
# plotter( feature_ext_y, math.floor(np.min(feature_ext_y) - 1), math.floor(np.max(feature_ext_y) + 1), feature, labels)
plotter( feature_ext_y, 15, 87, feature, labels)
for subject in subjects_dict.keys():
for feature in features_dict.keys():
print subject, feature, discarded_periods[:, features_dict[feature],subjects_dict[subject]]
# BP_y = [(72.96,78.62,78.62,76.11,79.25,79.88), (64.45,65.38,65.75,65.00,67.04,66.67), (69.45,71.86,74.26,72.04,69.75,72.6)]
# labels = ['BP_O3','BP_S4','BP_X11']
# plotter( BP_y, 64, 81, 'BP', labels)
# logBP_y = [(74.22,79.25,79.25,77.36,81.77,81.77), (62.23,66.49,66.30,65.38,66.86,66.86), (69.82,72.97,73.15,71.86,74.63,74.63)]
# labels = ['LOGBP_O3','LOGBP_S4','LOGBP_X11']
# plotter( logBP_y, 61, 84, 'logBP', labels)
# wackermann_y = [(56.61,57.24,58.24,54.72,54.72,59.75), (57.97,57.6,59.82,55.75,57.97,58.71), (60,50,57.24,61.49,60.56,62.23)]
# labels = ['wackerman_O3','wackerman_S4','wackerman_X11']
# plotter( wackermann_y, 49, 65, 'wackerman', labels)
# y_RF = [(77.98,76.72,76.72,79.87), (70.74,74.44,80.92,75.18),(75.92,73.51,77.03,78.33),(76.11,77.36,58.5, 54.72), (65,65.38,53.34,55.75), (72.04,71.86,60,61.49)]
# labels = ['BO_RF_O3','BO_RF_S4','BO_RF_X11','RF_grid_search_O3','RF_grid_search_S4','RF_grid_search_X11']
# BO_plotter( y_RF, 49, 83, 'BO_RF', labels)
plt.show()
|
lol/BCI-BO-old
|
plot_iv2a.py
|
Python
|
gpl-3.0
| 4,270
|
"""
A python class to encapsulate the ComicBookInfo data
"""
"""
Copyright 2012-2014 Anthony Beville
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from datetime import datetime
from calibre.utils.localization import calibre_langcode_to_name, canonicalize_lang, lang_as_iso639_1
from calibre_plugins.EmbedComicMetadata.genericmetadata import GenericMetadata
import sys
if sys.version_info[0] > 2:
unicode = str
class ComicBookInfo:
def metadataFromString(self, string):
cbi_container = json.loads(unicode(string, 'utf-8'))
metadata = GenericMetadata()
cbi = cbi_container['ComicBookInfo/1.0']
# helper func
# If item is not in CBI, return None
def xlate(cbi_entry):
if cbi_entry in cbi:
return cbi[cbi_entry]
else:
return None
metadata.series = xlate('series')
metadata.title = xlate('title')
metadata.issue = xlate('issue')
metadata.publisher = xlate('publisher')
metadata.month = xlate('publicationMonth')
metadata.year = xlate('publicationYear')
metadata.issueCount = xlate('numberOfIssues')
metadata.comments = xlate('comments')
metadata.credits = xlate('credits')
metadata.genre = xlate('genre')
metadata.volume = xlate('volume')
metadata.volumeCount = xlate('numberOfVolumes')
metadata.language = xlate('language')
metadata.country = xlate('country')
metadata.criticalRating = xlate('rating')
metadata.tags = xlate('tags')
# make sure credits and tags are at least empty lists and not None
if metadata.credits is None:
metadata.credits = []
if metadata.tags is None:
metadata.tags = []
# need to massage the language string to be ISO
# modified to use a calibre function
if metadata.language is not None:
metadata.language = lang_as_iso639_1(metadata.language)
metadata.isEmpty = False
return metadata
def stringFromMetadata(self, metadata):
cbi_container = self.createJSONDictionary(metadata)
return json.dumps(cbi_container)
# verify that the string actually contains CBI data in JSON format
def validateString(self, string):
try:
cbi_container = json.loads(string)
        except Exception:
return False
return ('ComicBookInfo/1.0' in cbi_container)
def createJSONDictionary(self, metadata):
# Create the dictionary that we will convert to JSON text
cbi = dict()
cbi_container = {'appID': 'ComicTagger/',
'lastModified': str(datetime.now()),
'ComicBookInfo/1.0': cbi}
# helper func
def assign(cbi_entry, md_entry):
if md_entry is not None:
cbi[cbi_entry] = md_entry
# helper func
def toInt(s):
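            # best-effort conversion: returns None when s is missing or not numeric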
i = None
if type(s) in [str, unicode, int]:
try:
i = int(s)
except ValueError:
pass
return i
assign('series', metadata.series)
assign('title', metadata.title)
assign('issue', metadata.issue)
assign('publisher', metadata.publisher)
assign('publicationMonth', toInt(metadata.month))
assign('publicationYear', toInt(metadata.year))
assign('numberOfIssues', toInt(metadata.issueCount))
assign('comments', metadata.comments)
assign('genre', metadata.genre)
assign('volume', toInt(metadata.volume))
assign('numberOfVolumes', toInt(metadata.volumeCount))
assign('language', calibre_langcode_to_name(canonicalize_lang(metadata.language)))
assign('country', metadata.country)
assign('rating', metadata.criticalRating)
assign('credits', metadata.credits)
assign('tags', metadata.tags)
return cbi_container
|
dickloraine/EmbedComicMetadata
|
comicbookinfo.py
|
Python
|
gpl-3.0
| 4,500
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <[email protected]>
# Maintainer: David Arroyo Menéndez <[email protected]>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
from bokeh.plotting import figure, show, output_file
from bokeh.tile_providers import CARTODBPOSITRON
output_file("tile.html")
# range bounds supplied in web mercator coordinates
p = figure(x_range=(-2000000, 6000000), y_range=(-1000000, 7000000),
x_axis_type="mercator", y_axis_type="mercator")
p.add_tile(CARTODBPOSITRON)
show(p)
|
davidam/python-examples
|
bokeh/openstreetmap.py
|
Python
|
gpl-3.0
| 1,245
|
# Copyright 2016-2021 Peppy Player [email protected]
#
# This file is part of Peppy Player.
#
# Peppy Player is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Peppy Player is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Peppy Player. If not, see <http://www.gnu.org/licenses/>.
from ui.container import Container
from ui.slider.slider import Slider
from ui.layout.borderlayout import BorderLayout
from util.config import USAGE, USE_WEB, COLORS, COLOR_BRIGHT
from ui.state import State
class EqualizerSlider(Container):
""" Time slider component """
def __init__(self, f, id, util, name, bgr, slider_color, img_knob, img_knob_on, key_incr, key_decr, key_knob, bb, listener, label):
""" Initializer
:param id: band ID
:param util: utility object
:param name: slider name
:param bgr: slider background color
:param slider_color: slider center line color
:param img_knob: knob image
:param img_knob_on: knob image in on state
:param key_incr: keyboard key associated with slider increment action
:param key_decr: keyboard key associated with slider decrement action
:param key_knob: keyboard key associated with single click on knob
:param bb: slider bounding box
"""
Container.__init__(self, util, background=bgr, bounding_box=bb, content=None)
self.util = util
self.config = util.config
self.bgr = bgr
self.VALUE_LAYER = 2
self.LABEL_LAYER = 1
layout = BorderLayout(bb)
layout.set_percent_constraints(10.0, 10.0, 0.0, 0.0)
self.id = id
self.value_name = name + ".value." + str(id)
self.label_name = name + ".label." + str(id)
self.value_layout = layout.TOP
self.label_layout = layout.BOTTOM
self.label_layout.y -= 1
self.label_layout.h += 2
self.slider = Slider(util, "slider." + str(id), bgr, slider_color, img_knob, img_knob_on, None, key_incr, key_decr, key_knob, layout.CENTER)
self.slider.add_slide_listener(listener)
self.add_component(self.slider)
height = 60
font_size = int((self.value_layout.h * height)/100.0)
c = self.config[COLORS][COLOR_BRIGHT]
self.top = f.create_output_text("top.label." + str(id), self.value_layout, bgr, c, font_size)
self.bottom = f.create_output_text("bottom.label." + str(id), self.label_layout, bgr, c, font_size)
self.bottom.set_text(label)
self.add_component(self.top)
self.add_component(self.bottom)
self.seek_listeners = []
self.update_seek_listeners = True
self.use_web = self.config[USAGE][USE_WEB]
def set_value(self, v):
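        """ Set the value label text and forward it to the web listener when the web UI is enabled

        :param v: value text
        """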
self.top.set_text(v)
if self.use_web and getattr(self, "web_seek_listener", None):
s = State()
s.event_origin = self
s.seek_time_label = v
self.web_seek_listener(s)
def set_selected(self, flag):
""" Select/unselect slider
:param flag: True - select, False - unselect
"""
self.slider.set_selected(flag)
|
project-owner/Peppy
|
ui/slider/equalizerslider.py
|
Python
|
gpl-3.0
| 3,755
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
from sqlalchemy import Table, Column, Integer, String, Boolean
from sqlalchemy.orm import mapper
from eos.db import saveddata_meta
from eos.types import User
users_table = Table("users", saveddata_meta,
Column("ID", Integer, primary_key=True),
Column("username", String, nullable=False, unique=True),
Column("password", String, nullable=False),
Column("admin", Boolean, nullable=False))
mapper(User, users_table)
|
Ebag333/Pyfa
|
eos/db/saveddata/user.py
|
Python
|
gpl-3.0
| 1,355
|
"""Procedures to initialize the full text search in PostgresQL"""
from django.db import connection
def setup_full_text_search(script_path):
"""using postgresql database connection,
installs the plsql language, if necessary
    and runs the script whose path is given as an argument
"""
    with open(script_path) as script_file:
        fts_init_query = script_file.read()
cursor = connection.cursor()
try:
#test if language exists
cursor.execute("SELECT * FROM pg_language WHERE lanname='plpgsql'")
lang_exists = cursor.fetchone()
if not lang_exists:
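            # note: CREATE LANGUAGE typically requires superuser privileges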
cursor.execute("CREATE LANGUAGE plpgsql")
#run the main query
cursor.execute(fts_init_query)
finally:
cursor.close()
|
tvenkat/askbot-devel
|
askbot/search/postgresql/__init__.py
|
Python
|
gpl-3.0
| 723
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('playbook', '0003_auto_20151028_1735'),
]
operations = [
migrations.AddField(
model_name='playbookrunhistory',
name='log_url',
field=models.CharField(default=b'', max_length=1024, blank=True),
),
]
|
cycloidio/cyclosible
|
cyclosible/playbook/migrations/0004_playbookrunhistory_log_url.py
|
Python
|
gpl-3.0
| 441
|
#!/usr/bin/python
# Provides all the API functionality callable through "/api"
from flask import request
from flaskext.auth import login_required
import json, os, sys
import security, charts, plugins, webconfig, domoticz
apiDict = {}
modules = {}
def init():
global modules
modules = plugins.loadPlugins()
return
def addToApi(custom, module, function):
apiDict[custom] = [module, function]
@login_required()
def gateway():
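    # dispatch order: built-in customs (charts, config, plugins, upgrade) first,
    # then plugin calls registered via addToApi(), otherwise proxy to Domoticz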
requestedUrl = request.url.split("/api")
    custom = request.args.get('custom', '')
    result = ""  # default so side-effect-only branches (e.g. modify_config) leave result bound
if custom == "bar_chart":
result = charts.barChart()
elif custom == "donut_chart":
result = charts.donutChart()
elif custom == "modify_config":
idx = request.args.get('idx', '')
page = request.args.get('page', '')
component = request.args.get('component', '')
description = request.args.get('description', '')
extra = request.args.get('extra', '')
webconfig.writeToConfig(idx, page, component, description, extra)
elif custom == 'indexPlugins':
result = json.dumps(plugins.indexPlugins(request.args))
elif custom == 'indexWebConfig':
result = json.dumps(webconfig.indexWebConfig(request.args))
elif custom == "performUpgrade":
result = json.dumps(webconfig.performUpgrade())
elif custom in apiDict:
module = apiDict.get(custom)[0]
function = apiDict.get(custom)[1]
call = getattr(modules[module], function)
result = call(request.args)
else:
result = domoticz.queryDomoticz(requestedUrl[1])
try:
if not isJson(result):
result = json.dumps(result)
return security.sanitizeJSON(json.loads(result))
    except Exception:
return "No results returned"
def setConfig(cfg, orig_cfg):
global config
global originalCfg
config = cfg
originalCfg = orig_cfg
def setModules(modulesList):
global modules
modules = modulesList
def getConfig():
return config
def getOriginalConfig():
return originalCfg
def isJson(myjson):
try:
json_object = json.loads(myjson)
    except ValueError:
return False
return True
|
wez3/domoboard
|
modules/api.py
|
Python
|
gpl-3.0
| 2,175
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 03 21:53:14 2016
@author: Antoine Riaud
"""
from distutils.core import setup
setup(name='Acoust_Tweezers_Anisotropic',
version='2.2',
description='Acoust_tweezers design package for anisotropic media',
author='Antoine Riaud, Jean-Louis Thomas, Michael Baudoin, Olivier Bou Matar',
author_email='[email protected]',
packages=['Tweezer_design'],
url = 'https://github.com/peterldowns/mypackage', # use the URL to the github repo
data_files=[('',['Tweezer_design/LiNbO3.mat','Tweezer_design/reticule.svg','Tweezer_design/mirror.svg'])],
)
|
AntoineRiaud/Tweezer_design
|
setup.py
|
Python
|
gpl-3.0
| 657
|
import itertools
def main():
#print(permutations(41063625))
#print([isCube(i) for i in [41063625, 56623104, 66430125]])
#print(hasCubicPermutations(41063625, 3))
i = 1
while not hasCubicPermutations(i ** 3, 5):
i += 1
print(i)
print(i, i ** 3)
def hasCubicPermutations(n, p):
cubesCount = 0
for i in set(permutations(n)):
if isCube(i):
cubesCount += 1
#print(i, isCube(i), int(round(i ** (1 / 3))))
if cubesCount == p:
return True
#print(cubesCount)
return False
def isCube(n):
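    # round the floating-point cube root, then verify exactly in integer arithmetic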
root = int(round(n ** (1 / 3)))
return root ** 3 == n
def permutations(n):
return [int("".join(i)) for i in itertools.permutations(str(n))]
if __name__ == "__main__":
main()
|
ZachOhara/Project-Euler
|
python/p061_p070/problem062.py
|
Python
|
gpl-3.0
| 688
|
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.4"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import urllib
import base64
import os
import copy
import calendar
import time
import random
import errno
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
# prior to Python 2.5, these were separate modules
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
from httplib2 import socks
except ImportError:
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
ssl_SSLError = ssl.SSLError
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if disable_validation:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
# We should be specifying SSL version 3 or TLS v1, but the ssl module
# doesn't expose the necessary knobs. So we need to go with the default
# of SSLv23.
return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
ssl_SSLError = None
def _ssl_wrap_socket(sock, key_file, cert_file,
disable_validation, ca_certs):
if not disable_validation:
raise CertificateValidationUnsupported(
"SSL certificate validation is not supported without "
"the ssl module installed. To avoid this error, install "
"the ssl module, or explicity disable validation.")
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class NotSupportedOnThisPlatform(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
def __init__(self, desc, host, cert):
HttpLib2Error.__init__(self, desc)
self.host = host
self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
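# e.g. parse_uri("http://example.com/a?b#c") -> ('http', 'example.com', '/a', 'b', 'c')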
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
scheme = scheme.lower()
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    # re.sub's signature is (replacement, string): collapse linear whitespace in each value
    return dict([ (key.lower(), NORMALIZE_SPACE.sub(' ', value).strip())  for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
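    # e.g. {'cache-control': 'max-age=3600, no-cache'} -> {'max-age': '3600', 'no-cache': 1}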
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on; useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale document as
    fresh, and thus do not implement 'max-stale'. This also lets us
    safely ignore 'must-revalidate'
since we operate as if every server has sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
    # randrange's upper bound is exclusive, so 10 lets the digit '9' appear
    dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 10)] for i in range(20)])).hexdigest()
    return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things, first
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
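        # counts the '/' separators in the part of request_uri below this auth's
        # root path; callers prefer the instance with the smallest depth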
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header. Over-rise this in sub-classes."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
Over-rise this in sub-classes if necessary.
Return TRUE is the request is to be retried, for
example Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
if self.challenge.get('opaque'):
headers['authorization'] += ', opaque="%s"' % self.challenge['opaque']
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
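# preference order when a server offers several challenges: schemes are tried first-to-last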
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class AllHosts(object):
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
bypass_hosts = ()
def __init__(self, proxy_type, proxy_host, proxy_port,
proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type = proxy_type
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_rdns = proxy_rdns
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port,
self.proxy_rdns, self.proxy_user, self.proxy_pass)
def isgood(self):
return (self.proxy_host != None) and (self.proxy_port != None)
@classmethod
def from_environment(cls, method='http'):
"""
Read proxy info from the environment variables.
"""
if method not in ['http', 'https']:
return
env_var = method + '_proxy'
url = os.environ.get(env_var, os.environ.get(env_var.upper()))
if not url:
return
pi = cls.from_url(url, method)
no_proxy = os.environ.get('no_proxy', os.environ.get('NO_PROXY', ''))
bypass_hosts = []
if no_proxy:
bypass_hosts = no_proxy.split(',')
# special case, no_proxy=* means all hosts bypassed
if no_proxy == '*':
bypass_hosts = AllHosts
pi.bypass_hosts = bypass_hosts
return pi
@classmethod
def from_url(cls, url, method='http'):
"""
Construct a ProxyInfo from a URL (such as http_proxy env var)
"""
url = urlparse.urlparse(url)
username = None
password = None
port = None
if '@' in url[1]:
ident, host_port = url[1].split('@', 1)
if ':' in ident:
username, password = ident.split(':', 1)
else:
password = ident
else:
host_port = url[1]
if ':' in host_port:
host, port = host_port.split(':', 1)
else:
host = host_port
if port:
port = int(port)
else:
port = dict(https=443, http=80)[method]
proxy_type = 3 # socks.PROXY_TYPE_HTTP
return cls(
proxy_type = proxy_type,
proxy_host = host,
proxy_port = port,
proxy_user = username or None,
proxy_pass = password or None,
)
def applies_to(self, hostname):
return not self.bypass_host(hostname)
def bypass_host(self, hostname):
"""Has this host been excluded from the proxy config"""
if self.bypass_hosts is AllHosts:
return True
bypass = False
for domain in self.bypass_hosts:
if hostname.endswith(domain):
bypass = True
return bypass
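# A minimal sketch of building a ProxyInfo from an http_proxy-style URL
# (host and credentials are hypothetical):
#   pi = ProxyInfo.from_url('http://user:[email protected]:3128')
#   pi.astuple()  # -> (3, 'proxy.example.com', 3128, None, 'user', 'pass')
#   pi.isgood()   # -> True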
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""
HTTPConnection subclass that supports timeouts
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
if self.proxy_info and socks is None:
raise ProxiesUnavailableError(
'Proxy support missing but proxy use was requested!')
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
self.sock.connect((self.host, self.port) + sa[2:])
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None,
ca_certs=None, disable_ssl_certificate_validation=False):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
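    # For example, a certificate host glob of '*.example.com' becomes the
    # pattern '^[^.]*\.example\.com$', so it matches 'foo.example.com' but
    # not 'a.b.example.com'.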
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
self.host, self.port, 0, socket.SOCK_STREAM):
try:
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
                self.sock = _ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
                    hostname = self.host.split(':')[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
SCHEME_TO_CONNECTION = {
'http': HTTPConnectionWithTimeout,
'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('urlfetch') is None:
raise ImportError # Bail out; we're not actually running on App Engine.
from google.appengine.api.urlfetch import fetch
from google.appengine.api.urlfetch import InvalidURLError
from google.appengine.api.urlfetch import DownloadError
from google.appengine.api.urlfetch import ResponseTooLargeError
from google.appengine.api.urlfetch import SSLCertificateError
class ResponseDict(dict):
"""Is a dictionary that also has a read() method, so
        that it can pass itself off as an httplib.HTTPResponse()."""
def read(self):
pass
class AppEngineHttpConnection(object):
"""Emulates an httplib.HTTPConnection object, but actually uses the Google
App Engine urlfetch library. This allows the timeout to be properly used on
Google App Engine, and avoids using httplib, which on Google App Engine is
just another wrapper around urlfetch.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
self.host = host
self.port = port
self.timeout = timeout
if key_file or cert_file or proxy_info or ca_certs:
raise NotSupportedOnThisPlatform()
self.response = None
self.scheme = 'http'
self.validate_certificate = not disable_ssl_certificate_validation
import logging
if self.validate_certificate:
logging.debug('val cert %s %s', self.validate_certificate, disable_ssl_certificate_validation)
raise httplib.HTTPException()
self.sock = True
def request(self, method, url, body, headers):
# Calculate the absolute URI, which fetch requires
netloc = self.host
if self.port:
netloc = '%s:%s' % (self.host, self.port)
absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
try:
response = fetch(absolute_uri, payload=body, method=method,
headers=headers, allow_truncated=False, follow_redirects=False,
deadline=self.timeout,
validate_certificate=self.validate_certificate)
self.response = ResponseDict(response.headers)
self.response['status'] = str(response.status_code)
self.response.status = response.status_code
setattr(self.response, 'read', lambda : response.content)
# Make sure the exceptions raised match the exceptions expected.
except InvalidURLError:
raise socket.gaierror('')
except (DownloadError, ResponseTooLargeError, SSLCertificateError):
raise httplib.HTTPException()
def getresponse(self):
if self.response:
return self.response
else:
raise httplib.HTTPException()
def set_debuglevel(self, level):
pass
def connect(self):
pass
def close(self):
pass
class AppEngineHttpsConnection(AppEngineHttpConnection):
"""Same as AppEngineHttpConnection, but for HTTPS URIs."""
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None, ca_certs=None,
disable_ssl_certificate_validation=False):
AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
strict, timeout, proxy_info, ca_certs, disable_ssl_certificate_validation)
self.scheme = 'https'
    # Update the connection classes to use the Google App Engine-specific ones.
SCHEME_TO_CONNECTION = {
'http': AppEngineHttpConnection,
'https': AppEngineHttpsConnection
}
except ImportError:
pass
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
    - compression
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None,
proxy_info=ProxyInfo.from_environment,
ca_certs=None, disable_ssl_certificate_validation=False):
"""
If 'cache' is a string then it is used as a directory name for
a disk cache. Otherwise it must be an object that supports the
same interface as FileCache.
All timeouts are in seconds. If None is passed for timeout
then Python's default timeout for sockets will be used. See
for example the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
`proxy_info` may be:
- a callable that takes the http scheme ('http' or 'https') and
returns a ProxyInfo instance per request. By default, uses
ProxyInfo.from_environment.
- a ProxyInfo instance (static proxy config).
- None (proxy disabled).
ca_certs is the path of a file containing root CA certificates for SSL
server certificate validation. By default, a CA cert file bundled with
httplib2 is used.
If disable_ssl_certificate_validation is true, SSL cert validation will
not be performed.
"""
self.proxy_info = proxy_info
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = \
disable_ssl_certificate_validation
import logging
logging.debug('http cert %s', self.disable_ssl_certificate_validation)
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, basestring):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT", "PATCH"]
# If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
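        # Attempt the request at most twice: if the connection was dropped or
        # the server closed it mid-exchange, reconnect once and retry before
        # propagating the error.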
for i in range(2):
try:
if conn.sock is None:
conn.connect()
conn.request(method, request_uri, body, headers)
except socket.timeout:
raise
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except ssl_SSLError:
conn.close()
raise
except socket.error, e:
err = 0
if hasattr(e, 'args'):
err = getattr(e, 'args')[0]
else:
err = e.errno
if err == errno.ECONNREFUSED: # Connection refused
raise
except httplib.HTTPException:
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
if conn.sock is None:
if i == 0:
conn.close()
conn.connect()
continue
else:
conn.close()
raise
if i == 0:
conn.close()
conn.connect()
continue
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
#print('request: %s, %s, %s, %s'%(request_uri, method, body, headers))
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = method
if response.status in [302, 303]:
redirect_method = "GET"
body = None
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
elif response.status in [200, 203] and method in ["GET", "HEAD"]:
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
proxy_info = self._get_proxy_info(scheme, authority)
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = SCHEME_TO_CONNECTION[scheme]
certs = list(self.certificates.iter(authority))
if issubclass(connection_type, HTTPSConnectionWithTimeout):
if certs:
conn = self.connections[conn_key] = connection_type(
authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
else:
conn = self.connections[conn_key] = connection_type(
authority, timeout=self.timeout,
proxy_info=proxy_info,
ca_certs=self.ca_certs,
disable_ssl_certificate_validation=
self.disable_ssl_certificate_validation)
conn.set_debuglevel(debuglevel)
if 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
except IndexError:
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, None) != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
if redirections <= 0:
raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
                    # There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
                # and overwrite their values in info,
                # unless they are hop-by-hop or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
def _get_proxy_info(self, scheme, authority):
"""Return a ProxyInfo instance (or None) based on the scheme
and authority.
"""
hostname, port = urllib.splitport(authority)
proxy_info = self.proxy_info
if callable(proxy_info):
proxy_info = proxy_info(scheme)
if (hasattr(proxy_info, 'applies_to')
and not proxy_info.applies_to(hostname)):
proxy_info = None
return proxy_info
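# A minimal end-to-end usage sketch of Http (the URL and cache directory are
# hypothetical):
#   h = Http('.cache')
#   resp, content = h.request('http://example.org/', 'GET')
#   resp.status      # e.g. 200
#   resp.fromcache   # True when the response was served from the local cache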
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name
|
jbu/personis
|
personis/examples/aelog/httplib2/__init__.py
|
Python
|
gpl-3.0
| 68,094
|
# Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this program, or any covered work, by linking or
# combining it with the OpenSSL project's OpenSSL library (or a
# modified version of that library), containing parts covered by the
# terms of the OpenSSL or SSLeay licenses, We grant you additional
# permission to convey the resulting work. Corresponding Source for a
# non-source form of such a combination shall include the source code
# for the parts of OpenSSL used as well as that of the covered work.
"""
Strings for Yubico Authenticator.
Note: String names must not start with underscore (_).
"""
organization = "Yubico"
domain = "yubico.com"
app_name = "Yubico Authenticator"
win_title_1 = "Yubico Authenticator (%s)"
about_1 = "About: %s"
copyright = "Copyright © Yubico"
version_1 = "Version: %s"
wait = "Please wait..."
error = "Error"
menu_file = "&File"
menu_help = "&Help"
action_about = "&About"
action_add = "&Add..."
action_import = "&Import..."
action_reset = "&Reset"
action_password = "Set/Change &password"
action_settings = "&Settings"
action_delete = "&Delete"
action_show = "&Show credentials"
action_close = "&Close Window"
action_quit = "&Quit"
password = "Password"
settings = "Settings"
advanced = "Advanced"
search = "Search"
pass_required = "Password required"
remember = "Remember password"
no_key = "Insert a YubiKey..."
key_busy = "YubiKey already in use!"
key_present = "YubiKey found. Reading..."
key_removed = "YubiKey removed"
key_removed_desc = "There was an error communicating with the device!"
n_digits = "Number of digits"
enable_systray = "Show in system tray"
kill_scdaemon = "Kill scdaemon on show"
reader_name = "Card reader name"
no_creds = "No credentials available"
add_cred = "New credential"
cred_name = "Credential name"
cred_key = "Secret key (base32)"
cred_type = "Credential type"
cred_totp = "Time based (TOTP)"
cred_hotp = "Counter based (HOTP)"
algorithm = "Algorithm"
invalid_name = "Invalid name"
invalid_name_desc = "Name must be at least 3 characters"
invalid_key = "Invalid key"
invalid_key_desc = "Key must be base32 encoded"
set_pass = "Set password"
new_pass = "New password (blank for none)"
ver_pass = "Verify new password"
pass_mismatch = "Passwords do not match"
pass_mismatch_desc = "Please enter the same password twice"
touch_title = "Touch required"
touch_desc = "Touch your YubiKey now"
reset_title = "Confirm reset"
reset_warning_desc = """<span>Are you sure you want to delete all OATH credentials on the device?</span>
<br><br>
<b>This action cannot be undone.</b>
<br><br>
"""
imported = "Import complete"
imported_desc = "Found %d tokens, successfully imported %d tokens.%s"
delete_title = "Confirm credential deletion"
delete_desc_1 = """<span>Are you sure you want to delete the credential?</span>
<br>
This action cannot be undone.
<br><br>
<b>Delete credential: %s</b>
"""
free = "free"
in_use = "in use"
require_touch = "Require touch"
require_manual_refresh = "Require manual refresh"
overwrite_entry = "Overwrite entry?"
overwrite_entry_desc = "An entry with this username already exists.\n\nDo " \
"you wish to overwrite it? This action cannot be undone."
qr_scan = "Scan a QR code"
qr_scanning = "Scanning for QR code..."
qr_not_found = "QR code not found"
qr_not_found_desc = "No usable QR code detected. Make sure the QR code is " \
"fully visible on your primary screen and try again."
qr_invalid_type = "Invalid OTP type"
qr_invalid_type_desc = "Only TOTP and HOTP types are supported."
qr_invalid_digits = "Invalid number of digits"
qr_invalid_digits_desc = "An OTP may only contain 6 or 8 digits."
qr_invalid_algo = "Unsupported algorithm"
qr_invalid_algo_desc = "HMAC algorithm '%s' is not supported."
qr_missing_key = "Invalid QR code"
qr_missing_key_desc = "The QR code found on screen is missing the '%s' attribute."
tt_num_digits = "The number of digits to show for the credential."
tt_systray = "When checked, display an icon in the systray, and leave the " \
"application running there when closed."
tt_kill_scdaemon = "Kills any running scdaemon process when the window is " \
"shown. This is useful when using this application together with GnuPG " \
"to avoid GnuPG locking the device."
tt_reader_name = "Changes the default smartcard reader name to look for. " \
"This can be used to target a specific YubiKey when multiple are used, " \
"or to target an NFC reader."
ccid_disabled = '<b>CCID (smart card capabilities) is disabled on the ' \
'inserted YubiKey.</b><br><br>Without CCID enabled, you will only be ' \
'able to store 2 credentials.<br><br>' \
'<a href="%s">Learn how to enable CCID</a><br>'
no_space = "No space available"
no_space_desc = "There is not enough space to add another " \
"credential on your device.\n\nTo create free space to add a " \
"new credential, delete those you no longer need."
oath_backend = "OATH Storage Backend"
oath_backend_ccid = "Smart Card"
oath_backend_sqlite = "SQLite"
|
tycho/yubioath-desktop
|
yubioath/gui/messages.py
|
Python
|
gpl-3.0
| 5,696
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import configparser
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, RegexHandler, ConversationHandler
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from copy import deepcopy
import logging
import logging.handlers
from decisionTreeSupport import init, convert, getClassName
import xml.etree.ElementTree as ET
tree = ET.parse('config.xml.localSafeCopy')
root = tree.getroot()
Telegram_BOTID = root.find('telegramBotID').text
AdminPassword = root.find('adminPassword').text
datasets = {}
for ds in root.findall('dataset'):
name = ds.get('name')
datasets[name] = {}
datasets[name]['dataset_name'] = ds.find('filename').text
datasets[name]['class_column'] = int(ds.find('classColumn').text)
datasets[name]['data_columns'] = [int(x) for x in ds.find('dataColumns').text.split(',')]
if ds.find('successorOf') is not None:
datasets[name]['successorOf'] = ds.find('successorOf').text
datasets[name]['previousExitClass'] = ds.find('previousExitClass').text
del tree, root
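# The parsing above assumes a config.xml shaped roughly like the sketch below
# (element names are taken from the code; the root tag and all values are
# placeholders):
# <config>
#     <telegramBotID>123456:TOKEN</telegramBotID>
#     <adminPassword>secret</adminPassword>
#     <dataset name="Animals">
#         <filename>animals.csv</filename>
#         <classColumn>1</classColumn>
#         <dataColumns>2,3,4</dataColumns>
#         <!-- optional: chain this tree after another one -->
#         <successorOf>Kingdom</successorOf>
#         <previousExitClass>animal</previousExitClass>
#     </dataset>
# </config>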
CHOOSINGTREE, INTERACT = range(2)
LOG_FILENAME = 'logs.log'
treeData = {}
availableClassifierName = []
logging.basicConfig(filename=LOG_FILENAME, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.addHandler(logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=20000000, backupCount=5))
from botFunctions import *
def start(bot, update):
user = update.message.from_user
logger.debug("User %s typed /start." % user.name)
message = "Ciao, e benvenuto!"
message += "\nSono ancora in sviluppo, ecco la lista dei comandi attualmente disponibili:" \
"\n/exploretree Inizia ad esplorare gli alberi" \
"\n/help mostra la lista dei comandi disponibili"
bot.send_message(chat_id=update.message.chat_id, text=message)
def startInteraction(bot, update, chat_data):
user = update.message.from_user
logger.debug("User %s is starting the interaction." % user.name)
chat_data = {}
reply_keyboard = []
for k in availableClassifierName:
if 'isSuccessors' not in treeData[k]:
reply_keyboard.append([k])
reply_keyboard.append(['/cancel'])
update.message.reply_text('Ciao, scegli cosa vuoi che indovini.\n\n /cancel se vuoi terminare! ',
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
return INTERACT
def interactionManager(bot, update, chat_data):
chose = update.message.text
if chose in treeData:
chat_data['chose'] = chose
return interact(bot, update, chat_data, chose)
elif 'chose' in chat_data:
return interact(bot, update, chat_data, chat_data['chose'])
else:
bot.send_message(chat_id=update.message.chat_id, text="Scusa, ma non credo di disporre di questo dato...")
return startInteraction(bot, update, chat_data)
def interact(bot, update, chat_data, chose):
# Retrieve the data dictionary for tree interactionManager
if chose in chat_data:
data = chat_data[chose]
else:
data = deepcopy(treeData[chose])
chat_data[chose] = data
chat_data['step'] = 1 # 1 = ask question, 0 = process answer
if 'conversationHistory' not in chat_data:
chat_data['conversationHistory'] = {}
dt = treeData['dt' + chose]
while not data['__stop']:
toAsk = data['toAsk']
if data['step'] == 1:
if 'isSuccessors' in data and toAsk['feature'] in chat_data['conversationHistory']:
chat_data['step'] = 0
update.message.text = str(chat_data['conversationHistory'][toAsk['feature']])
if 'valueRange' in toAsk:
# IF the feature has numeric value within an interval:
if chat_data['step']:
question = data['questions'][toAsk['feature']] + "Range: " + str(toAsk['valueRange'])
update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())
chat_data['step'] = 0
return INTERACT
else:
user_value_for_feature = convert(update.message.text.strip())
if toAsk['valueRange'][0] <= user_value_for_feature <= toAsk['valueRange'][1]:
chat_data['conversationHistory'][toAsk['feature']] = user_value_for_feature
data['step'] = 0
data['s'][toAsk['feature']] = user_value_for_feature
data = dt.classify_by_asking_questions(data['actualNode'], data)
chat_data['step'] = 1
else:
question = data['questions'][toAsk['feature']] + "Range: " + str(toAsk['valueRange'])
update.message.reply_text(question, reply_markup=ReplyKeyboardRemove())
return INTERACT
elif 'possibleAnswer' in toAsk:
# If the features has a symbolic value
if chat_data['step']:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
reply_keyboard = [[str(x) for x in data['featuresHumanization'][toAsk['feature']]]]
else:
reply_keyboard = [[str(x) for x in toAsk['possibleAnswer']]]
update.message.reply_text(data['questions'][toAsk['feature']],
reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
chat_data['step'] = 0
return INTERACT
else:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
user_value_for_feature = convert(
data['featuresHumanization'][toAsk['feature']][update.message.text.strip()])
else:
user_value_for_feature = convert(update.message.text.strip())
if user_value_for_feature in toAsk['possibleAnswer']:
chat_data['conversationHistory'][toAsk['feature']] = user_value_for_feature
data['step'] = 0
data['toAsk']['givenAnswer'] = user_value_for_feature
data = dt.classify_by_asking_questions(data['actualNode'], data)
chat_data['step'] = 1
else:
if 'featuresHumanization' in data and toAsk['feature'] in data['featuresHumanization']:
reply_keyboard = [[str(x) for x in data['featuresHumanization'][toAsk['feature']]]]
else:
reply_keyboard = [[str(x) for x in toAsk['possibleAnswer']]]
update.message.reply_text("Valore non valido!\n" + data['questions'][toAsk['feature']],
reply_markup=ReplyKeyboardMarkup(reply_keyboard,
one_time_keyboard=True))
return INTERACT
else:
logger.critical("Sono finito in uno stato morto...")
logger.critical("Albero: " + chat_data[chose])
logger.critical("Conversation Detal: \n" + str(chat_data['conversationHistory']))
del chat_data[chose], data, chat_data['chose'], chat_data['conversationHistory']
update.message.reply_text(
"Perdona, mi sono rotto un braccio! devo scappare in ospedale :("
"\nTi lascio con mio fratello, ma devi ricominciare.",
reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
# update.message.reply_text("Ottimo! Ho trovato qualcosa!\n")
message = ""
classification = data['a']
del classification['solution_path']
which_classes = list(classification.keys())
which_classes = sorted(which_classes, key=lambda x: classification[x], reverse=True)
if classification[which_classes[0]] < 1:
message += "\nEcco la probabilità delle risposte, io sceglierei la prima ;)\n"
message += "\n " + str.ljust("Classe", 30) + "Probabilità"
message += "\n ---------- -----------"
for which_class in which_classes:
            if which_class != 'solution_path' and classification[which_class] > 0:
message += "\n " + str.ljust(getClassName(which_class), 30) + str(
round(classification[which_class], 2))
else:
if 'singleAnswer' in data['interaction']:
message += data['interaction']['singleAnswer'] + '\n'
else:
message += "\n\nSai cosa?, sono quasi sicuro che la risposta corretta sia "
if str(which_classes[0][5:]) in data['classHumanization']:
message += getClassName(data['classHumanization'][str(which_classes[0][5:])])
else:
message += getClassName(str(which_classes[0]))
    # handling of connections among trees
if 'hasSuccessors' in data:
update.message.reply_text("Credo di essere sulla buona strada...\n")
chat_data['chose'] = data['successorsMap'][getClassName(which_classes[0])]
del data, chat_data[chose]
return interact(bot, update, chat_data, chat_data['chose'])
logger.debug("Conversation with :" + update.message.from_user.name)
logger.debug(str(chat_data['conversationHistory']))
message += "\nCosa vuoi fare?"
reply_keyboard = [['Ricomincia', 'Esci'], ] # ['Valuta la classificazione']]
update.message.reply_text(message, reply_markup=ReplyKeyboardMarkup(reply_keyboard, one_time_keyboard=True))
del chat_data[chose], data, chat_data['chose'], chat_data['conversationHistory']
return CHOOSINGTREE
def main():
for name, v in datasets.items():
logging.info("Start training tree " + name)
print("Start training tree " + name)
data = init(v['dataset_name'], v['class_column'], v['data_columns'])
treeData['dt' + name] = deepcopy(data['dt'])
del data['dt']
treeData[name] = deepcopy(data)
# data['actualNode'].display_decision_tree(" ")
del data
logging.info("End training tree " + name)
print("End training tree " + name)
    # computing connections among trees
for name, v in datasets.items():
if 'successorOf' in v:
treeData[name]['isSuccessors'] = True
treeData[v['successorOf']]['hasSuccessors'] = True
if 'successorsMap' in treeData[v['successorOf']]:
treeData[v['successorOf']]['successorsMap'][v['previousExitClass']] = name
else:
treeData[v['successorOf']]['successorsMap'] = {v['previousExitClass']: name}
for k in treeData.keys():
if not k.startswith('dt'):
availableClassifierName.append(k)
logging.info("Bot Starting...!")
updater = Updater(token=Telegram_BOTID)
dispatcher = updater.dispatcher
startHandler = CommandHandler(command='start', callback=start)
helpHandler = CommandHandler(command='help', callback=help)
settingsHandler = CommandHandler(command='settings', callback=settings)
adminIdentify = CommandHandler(command=AdminPassword, callback=imAdmin, pass_chat_data=True)
serverInfo = CommandHandler(command='getIP', callback=getServerInfo, pass_chat_data=True)
conv_handler = ConversationHandler(
entry_points=[CommandHandler('exploretree', startInteraction, pass_chat_data=True)],
states={
INTERACT: [ # RegexHandler('^(Animals)$', interactionManager, pass_chat_data=True),
MessageHandler(Filters.text, interactionManager, pass_chat_data=True)],
CHOOSINGTREE: [RegexHandler('^(Ricomincia)$', startInteraction, pass_chat_data=True),
RegexHandler('^(Esci)$', cancel, pass_chat_data=True),
RegexHandler('^(Valuta la classificazione)$', tbd, pass_chat_data=True)]
},
fallbacks=[CommandHandler('cancel', cancel, pass_chat_data=True),
MessageHandler(Filters.command, unknown)]
)
# echoHandler = MessageHandler(Filters.text, echo)
unknownCommandHandler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(adminIdentify)
dispatcher.add_handler(serverInfo)
dispatcher.add_handler(startHandler)
dispatcher.add_handler(helpHandler)
dispatcher.add_handler(settingsHandler)
dispatcher.add_handler(conv_handler)
# dispatcher.add_handler(echoHandler)
dispatcher.add_handler(unknownCommandHandler)
dispatcher.add_error_handler(error)
updater.start_polling()
logging.info("Bot Started!")
print("Bot Started correctly!")
updater.idle()
if __name__ == '__main__':
main()
|
giuva90/TreeBot
|
bot.py
|
Python
|
gpl-3.0
| 11,462
|
"""
Any SQLAlchemy models may be defined here
"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
|
Dejust/flask-base
|
app/models/common.py
|
Python
|
gpl-3.0
| 155
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 11:19:54 2018
@author: david
"""
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralwidget)
self.plainTextEdit.setGeometry(QtCore.QRect(250, 10, 511, 81))
self.plainTextEdit.setObjectName("plainTextEdit")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
DavidAlt/ITG
|
python/test-gui-template-as-text.py
|
Python
|
gpl-3.0
| 1,512
|
#!/usr/bin/env python
# encoding: utf-8
"""Module to store all constants. Any constant needed in more than one
component, or potentially more than one part, of polar2grid should be
defined here.
Rules/Preferences:
- All values lowercase
- strings
- user-legible (assume that they may be printed in log messages)
- use == for comparison (not 'is' or 'not' or other)
Possible confusions:
The VIIRS fog product created by polar2grid is a temperature difference of
2 I bands. It is classified as an I band for this reason, meaning the
band is "fog", not the usual number.
Exceptions:
- Return status constants are not strings so that they can be or'ed and
can be interpreted by a command line shell.
:author: David Hoese (davidh)
:contact: [email protected]
:organization: Space Science and Engineering Center (SSEC)
:copyright: Copyright (c) 2013 University of Wisconsin SSEC. All rights reserved.
:date: Jan 2013
:license: GNU GPLv3
Copyright (C) 2013 Space Science and Engineering Center (SSEC),
University of Wisconsin-Madison.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This file is part of the polar2grid software package. Polar2grid takes
satellite observation data, remaps it, and writes it to a file format for
input into another program.
Documentation: http://www.ssec.wisc.edu/software/polar2grid/
Written by David Hoese January 2013
University of Wisconsin-Madison
Space Science and Engineering Center
1225 West Dayton Street
Madison, WI 53706
[email protected]
"""
__docformat__ = "restructuredtext en"
NOT_APPLICABLE = None
# default fill value
DEFAULT_FILL_VALUE = -999.0
# Satellites
SAT_NPP = "npp"
SAT_TERRA = "terra"
SAT_AQUA = "aqua"
# Instruments
INST_VIIRS = "viirs"
INST_MODIS = "modis"
# Band Kinds
BKIND_I = "i"
BKIND_M = "m"
BKIND_I_ADAPTIVE = "iadapt"
BKIND_M_ADAPTIVE = "madapt"
BKIND_DNB = "dnb"
BKIND_VIS = "visible"
BKIND_IR = "infrared"
BKIND_IR_ADAPTIVE = "iradapt"
BKIND_CMASK = "cloud_mask"
BKIND_LSMSK = "land_sea_mask"
BKIND_SST = "sea_surface_temp"
BKIND_LST = "land_surface_temp"
BKIND_SLST = "summer_land_surface_temp"
BKIND_SZA = "solar_zenith_angle"
BKIND_CTT = "cloud_top_temperature"
BKIND_IST = "ice_surface_temperature"
BKIND_INV = "inversion_strength"
BKIND_IND = "inversion_depth"
BKIND_ICON = "ice_concentration"
BKIND_NDVI = "ndvi"
BKIND_TPW = "total_precipitable_water"
BKIND_CREFL = "crefl"
BKIND_TCOLOR_CREFL = "true_color_crefl"
# Band Identifier
BID_01 = "01"
BID_02 = "02"
BID_03 = "03"
BID_04 = "04"
BID_05 = "05"
BID_06 = "06"
BID_07 = "07"
BID_08 = "08"
BID_09 = "09"
BID_10 = "10"
BID_11 = "11"
BID_12 = "12"
BID_13 = "13"
BID_14 = "14"
BID_15 = "15"
BID_16 = "16"
BID_20 = "20"
BID_26 = "26"
BID_27 = "27"
BID_31 = "31"
BID_FOG = "fog"
BID_ADAPTIVE = "adaptive"
# Data kinds
DKIND_LATITUDE = "latitude"
DKIND_LONGITUDE = "longitude"
DKIND_RADIANCE = "radiance"
DKIND_REFLECTANCE = "reflectance"
DKIND_BTEMP = "btemp"
DKIND_IR_ADAPTIVE = "ir_adapt"
DKIND_FOG = "fog"
DKIND_CATEGORY = "category"
DKIND_ANGLE = "angle"
DKIND_DISTANCE = "distance" # this is meant to be a distance in the sense of mm, cm, meters, km, or miles
DKIND_PERCENT = "percent"
DKIND_C_INDEX = "contiguous_index" # this represents some abstract ranging index with meaningfully contiguous values (not discrete categories)
DKIND_CREFL = "corrected_reflectance"
DKIND_TCOLOR_CREFL = "true_color_crefl"
SET_DKINDS = set([
DKIND_RADIANCE,
DKIND_REFLECTANCE,
DKIND_BTEMP,
DKIND_FOG,
DKIND_CATEGORY,
DKIND_ANGLE,
DKIND_DISTANCE,
DKIND_PERCENT,
DKIND_C_INDEX,
DKIND_CREFL,
DKIND_TCOLOR_CREFL
])
# Data types (int,float,#bits,etc.)
DTYPE_UINT8 = "uint1"
DTYPE_UINT16 = "uint2"
DTYPE_UINT32 = "uint4"
DTYPE_UINT64 = "uint8"
DTYPE_INT8 = "int1"
DTYPE_INT16 = "int2"
DTYPE_INT32 = "int4"
DTYPE_INT64 = "int8"
DTYPE_FLOAT32 = "real4"
DTYPE_FLOAT64 = "real8"
# Nav Set UIDs
MBAND_NAV_UID = "m_nav" # the M band navigation group
IBAND_NAV_UID = "i_nav" # the I band navigation group
DNB_NAV_UID = "dnb_nav" # the Day/Night band navigation group
GEO_NAV_UID = "geo_1000m_nav"      # the 1000m geo navigation group
GEO_500M_NAV_UID = "geo_500m_nav"  # the 500m geo navigation group
GEO_250M_NAV_UID = "geo_250m_nav"  # the 250m geo navigation group
MOD06_NAV_UID = "mod06_nav" # the mod06 navigation group
MOD07_NAV_UID = "mod07_nav" # the mod07 navigation group
# Grid Constants
GRIDS_ANY = "any_grid"
GRIDS_ANY_GPD = "any_gpd_grid"
GRIDS_ANY_PROJ4 = "any_proj4_grid"
GRID_KIND_GPD = "gpd"
GRID_KIND_PROJ4 = "proj4"
### Return Status Values ###
STATUS_SUCCESS = 0
# the frontend failed
STATUS_FRONTEND_FAIL = 1
# the backend failed
STATUS_BACKEND_FAIL = 2
# either ll2cr or fornav failed (4 + 8)
STATUS_REMAP_FAIL = 12
# ll2cr failed
STATUS_LL2CR_FAIL = 4
# fornav failed
STATUS_FORNAV_FAIL = 8
# grid determination or grid jobs creation failed
STATUS_GDETER_FAIL = 16
# not sure why we failed, not an expected failure
STATUS_UNKNOWN_FAIL = -1
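# Return statuses combine with bitwise OR, e.g. a run in which both remap
# steps fail reports STATUS_LL2CR_FAIL | STATUS_FORNAV_FAIL, which equals
# STATUS_REMAP_FAIL (4 | 8 == 12).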
|
tommyjasmin/polar2grid
|
py/polar2grid_core/polar2grid/core/constants.py
|
Python
|
gpl-3.0
| 5,811
|
#This file is part of REXT
#updater.py - script that handles updating of REXT and its components
#Author: Ján Trenčanský
#License: GNU GPL v3
import subprocess
import time
import re
import os
import interface.utils
import core.globals
from interface.messages import print_blue
#Pull REXT from git repo
def update_rext():
subprocess.Popen("git pull", shell=True).wait()
time.sleep(4)
#Reset HEAD to discard local changes and pull
def update_rext_force():
subprocess.Popen("git reset --hard", shell=True).wait()
subprocess.Popen("git pull", shell=True).wait()
time.sleep(4)
#Download OUI file, and recreate DB
def update_oui():
if interface.utils.file_exists("./databases/oui.db") and core.globals.ouidb_conn is not None:
connection = core.globals.ouidb_conn
cursor = connection.cursor()
#Truncate database
print_blue("Truncating oui table")
cursor.execute("""DROP TABLE oui""")
cursor.execute("""CREATE TABLE oui (
id INTEGER PRIMARY KEY NOT NULL,
oui TEXT UNIQUE,
name TEXT)""")
print_blue("Downloading new OUI file")
interface.utils.wget("http://standards.ieee.org/regauth/oui/oui.txt", "./output/tmp_oui.txt")
file = open("./output/tmp_oui.txt", "r")
regex = re.compile(r"\(base 16\)")
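        # Lines of interest in oui.txt look roughly like the following
        # (format assumed from the IEEE registry file):
        #   "000000     (base 16)\t\tXEROX CORPORATION"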
for line in file:
if regex.search(line) is not None:
line = "".join(line.split("\t"))
line = line.split("(")
oui = line[0].replace(" ", "")
company = line[1].split(")")[1]
company = company.replace("\n", "")
if company == " ":
company = "Private"
try:
cursor.execute("INSERT INTO oui (oui, name) VALUES (?, ?)", [oui, company])
connection.commit()
except Exception as e:
#CONRAD CORP. and CERN + ROYAL MELBOURNE INST OF TECH share oui, this should be considered
#print(e)
#print(oui + " " + company)
#SELECT name FROM oui.oui WHERE oui = oui
#UPDATE oui.oui SET name = name+" OR "+company WHERE oui=oui
pass
#Add a few OUIs manually (from NMAP oui file)
cursor.execute("INSERT INTO oui (oui, name) VALUES ('525400', 'QEMU Virtual NIC')")
cursor.execute("INSERT INTO oui (oui, name) VALUES ('B0C420', 'Bochs Virtual NIC')")
cursor.execute("INSERT INTO oui (oui, name) VALUES ('DEADCA', 'PearPC Virtual NIC')")
cursor.execute("INSERT INTO oui (oui, name) VALUES ('00FFD1', 'Cooperative Linux virtual NIC')")
connection.commit()
try:
os.remove("./output/tmp_oui.txt")
except OSError:
pass
|
bmaia/rext
|
core/updater.py
|
Python
|
gpl-3.0
| 3,055
|
# Copyright (C) 2015 Kevin S. Graer
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV Live. If not, see <http://www.gnu.org/licenses/>.
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
import os, sys, time, fileinput, re
import urllib, urllib2
from resources.lib.Globals import *
from resources.lib.utils import *
def showText(heading, text):
log("showText")
id = 10147
xbmc.executebuiltin('ActivateWindow(%d)' % id)
xbmc.sleep(100)
win = xbmcgui.Window(id)
retry = 50
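    # The window's controls may not exist immediately after activation, so
    # poll for up to 50 attempts before silently giving up.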
while (retry > 0):
try:
xbmc.sleep(10)
retry -= 1
win.getControl(1).setLabel(heading)
win.getControl(5).setText(text)
return
except:
pass
def showChangelog(addonID=None):
log("showChangelog")
try:
if addonID:
ADDON = xbmcaddon.Addon(addonID)
else:
ADDON = xbmcaddon.Addon(ADDONID)
f = open(ADDON.getAddonInfo('changelog'))
text = f.read()
title = "Changelog - PseudoTV Live"
showText(title, text)
except:
pass
#DonorDownload
DonorURLPath = (PTVLURL + 'Donor.py')
LinkPath = (os.path.join(ADDON_PATH, 'resources', 'lib', 'links.py'))
DonorPath = (os.path.join(ADDON_PATH, 'resources', 'lib', 'Donor.pyo'))
DL_DonorPath = (os.path.join(ADDON_PATH, 'resources', 'lib', 'Donor.py'))
def DDautopatch():
log("DDautopatch")
REAL_SETTINGS.setSetting("AT_Donor", "false")
REAL_SETTINGS.setSetting("COM_Donor", "false")
REAL_SETTINGS.setSetting("TRL_Donor", "false")
REAL_SETTINGS.setSetting("CAT_Donor", "false")
try:
if xbmcvfs.exists(xbmc.translatePath(DL_DonorPath)):
xbmcvfs.delete(xbmc.translatePath(DL_DonorPath))
log('Removed DL_DonorPath')
if xbmcvfs.exists(xbmc.translatePath(DonorPath)):
xbmcvfs.delete(xbmc.translatePath(DonorPath))
log('Removed DonorPath')
except Exception:
pass
try:
urllib.urlretrieve(DonorURLPath, (xbmc.translatePath(DL_DonorPath)))
if xbmcvfs.exists(DL_DonorPath):
log('DL_DonorPath Downloaded')
REAL_SETTINGS.setSetting("AT_Donor", "true")
REAL_SETTINGS.setSetting("COM_Donor", "true")
REAL_SETTINGS.setSetting("TRL_Donor", "true")
REAL_SETTINGS.setSetting("CAT_Donor", "true")
xbmc.executebuiltin("Notification( %s, %s, %d, %s)" % ("PseudoTV Live", "Donor Autoupdate Complete", 4000, THUMB) )
except Exception:
pass
def DonorDownloader():
log('DonorDownloader')
REAL_SETTINGS.setSetting("AT_Donor", "false")
REAL_SETTINGS.setSetting("COM_Donor", "false")
REAL_SETTINGS.setSetting("TRL_Donor", "false")
REAL_SETTINGS.setSetting("CAT_Donor", "false")
Install = False
Verified = False
InstallStatusMSG = 'Activate'
if xbmcvfs.exists(DonorPath):
InstallStatusMSG = 'Update'
if dlg.yesno("PseudoTV Live", str(InstallStatusMSG) + " Donor Features?"):
try:
xbmcvfs.delete(xbmc.translatePath(DonorPath))
log('Removed DonorPath')
Install = True
except Exception:
pass
else:
Install = True
if Install == True:
try:
urllib.urlretrieve(DonorURLPath, (xbmc.translatePath(DL_DonorPath)))
if xbmcvfs.exists(DL_DonorPath):
log('DL_DonorPath Downloaded')
REAL_SETTINGS.setSetting("AT_Donor", "true")
REAL_SETTINGS.setSetting("COM_Donor", "true")
REAL_SETTINGS.setSetting("TRL_Donor", "true")
REAL_SETTINGS.setSetting("CAT_Donor", "true")
xbmc.executebuiltin("UpdateLocalAddons")
                if REAL_SETTINGS.getSetting('AT_Donor') == "true" and REAL_SETTINGS.getSetting('COM_Donor') == "true" and REAL_SETTINGS.getSetting('TRL_Donor') == "true" and REAL_SETTINGS.getSetting('CAT_Donor') == "true":
Verified = True
if Verified == True:
MSG = "Donor Features " + str(InstallStatusMSG) + "d"
else:
MSG = "Donor Features Not " + str(InstallStatusMSG) + "d"
xbmc.executebuiltin("Notification( %s, %s, %d, %s)" % ("PseudoTV Live", MSG, 1000, THUMB) )
REAL_SETTINGS.openSettings()
except Exception:
pass
def LogoDownloader():
log('LogoDownloader')
if dlg.yesno("PseudoTV Live", "Download Color Logos or No, Download Mono Logos"):
LogoDEST = os.path.join(LOCK_LOC,'PTVL_Color.zip')
URLPath = PTVLURL + 'PTVL_Color.zip'
else:
LogoDEST = os.path.join(LOCK_LOC,'PTVL_Mono.zip')
URLPath = PTVLURL + 'PTVL_Mono.zip'
if not xbmcvfs.exists(LOCK_LOC):
log('Creating LogoPath')
xbmcvfs.mkdir(LOCK_LOC)
try:
xbmcvfs.delete(xbmc.translatePath(LogoDEST))
log('Removed old LogoDEST')
except Exception:
pass
try:
download(URLPath, LogoDEST)
        all(LogoDEST, LOCK_LOC)  # 'all' here is presumably the archive-extraction helper from resources.lib.utils, not the builtin all()
REAL_SETTINGS.setSetting("ChannelLogoFolder", LOCK_LOC + 'logos')
try:
xbmcvfs.delete(LogoDEST)
log('Removed LogoDEST')
except Exception:
pass
except Exception:
pass
# Return to PTVL Settings
REAL_SETTINGS.openSettings()
if sys.argv[1] == '-DDautopatch':
DDautopatch()
elif sys.argv[1] == '-DonorDownloader':
if xbmcgui.Window(10000).getProperty("PseudoTVRunning") != "True":
DonorDownloader()
else:
xbmc.executebuiltin("Notification( %s, %s, %d, %s)" % ("PseudoTV Live", "Not available while running.", 1000, THUMB) )
elif sys.argv[1] == '-LogoDownloader':
LogoDownloader()
elif sys.argv[1] == '-SimpleDownloader':
xbmcaddon.Addon(id='script.module.simple.downloader').openSettings()
elif sys.argv[1] == '-showChangelog':
showChangelog(ADDON_ID)
|
7fever/script.pseudotv.live
|
utilities.py
|
Python
|
gpl-3.0
| 6,697
|
#!/usr/bin/env python
from setuptools import setup
#from path import path
#with (path(__file__).dirname() / 'pyqt_fit' / 'version.txt').open() as f:
#__version__ = f.read().strip()
import os.path
version_filename = os.path.join(os.path.dirname(__file__), 'pyqt_fit', 'version.txt')
with open(version_filename, "r") as f:
__version__ = f.read().strip()
extra = {}
setup(name='PyQt-Fit',
version=__version__,
description='Parametric and non-parametric regression, with plotting and testing methods.',
author='Pierre Barbier de Reuille',
author_email='[email protected]',
      url='https://code.google.com/p/pyqt-fit/',
packages=['pyqt_fit', 'pyqt_fit.functions', 'pyqt_fit.residuals', 'pyqt_fit.test'],
package_data={'pyqt_fit': ['qt_fit.ui',
'version.txt',
'cy_local_linear.pyx',
'_kernels.pyx',
'_kde.pyx',
'cy_binning.pyx',
'math.pxd'
]
},
scripts=['bin/pyqt_fit1d.py'],
install_requires=['distribute >=0.6',
'numpy >=1.5.0',
'scipy >=0.10.0',
'matplotlib',
'path.py >=2.4.1'
],
extras_require={'Cython': ["Cython >=0.17"]
},
license='LICENSE.txt',
long_description=open('README.txt').read(),
classifiers=['Development Status :: 4 - Beta',
'Environment :: X11 Applications :: Qt',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Visualization',
],
test_suite='nose.collector',
platforms=['Linux', 'Windows', 'MacOS'],
**extra
)
|
sergeyfarin/pyqt-fit
|
setup.py
|
Python
|
gpl-3.0
| 2,412
|
#!/usr/bin/env python3
"""
The goal of this example is to show you the syntax for IR seeking readings. When using
IR-SEEK with a remote control you get both heading and distance data. The code below
shows the syntax for beacon seeking. Additionally it's good to play with a demo so that
you can see how well or not well a sensor behaves.
To test this module, put the IR Remote into beacon mode by pressing the button at the top
of the remote and making sure the green LED is on. Use channel 1 for this module. Move
the beacon around and watch the values that are printed.
Authors: David Fisher and PUT_YOUR_NAME_HERE. February 2017.
"""
import ev3dev.ev3 as ev3
import time
def main():
print("--------------------------------------------")
print(" Printing beacon seeking data")
print(" Press the touch sensor to exit")
print("--------------------------------------------")
ev3.Sound.speak("Printing beacon seeking").wait()
touch_sensor = ev3.TouchSensor()
ir_sensor = ev3.InfraredSensor()
assert touch_sensor
assert ir_sensor
ir_sensor.mode = "IR-SEEK"
while not touch_sensor.is_pressed:
current_heading = ir_sensor.value(0)
current_distance = ir_sensor.value(1)
print("IR Heading = {} Distance = {}".format(current_heading, current_distance))
time.sleep(0.5)
ev3.Sound.speak("Goodbye")
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
Rosebotics/cwc-projects
|
lego-ev3/examples/analog_sensors/ir_sensor/print_beacon_seeking.py
|
Python
|
gpl-3.0
| 1,582
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-02-15 23:16
from __future__ import unicode_literals
from django.db import migrations
import image_cropping.fields
class Migration(migrations.Migration):
dependencies = [
('gardens', '0023_auto_20180215_2314'),
]
operations = [
migrations.RemoveField(
model_name='maintenancephoto',
name='main',
),
migrations.AddField(
model_name='maintenancephoto',
name='large',
field=image_cropping.fields.ImageRatioField('image', '600x400', adapt_rotation=False, allow_fullsize=False, free_crop=False, help_text=None, hide_image_field=False, size_warning=False, verbose_name='large'),
),
]
|
bengosney/rhgd3
|
gardens/migrations/0024_auto_20180215_2316.py
|
Python
|
gpl-3.0
| 759
|
"""Pathname and path-related operations for the Macintosh."""
import os
from stat import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","islink","exists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"realpath"]
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
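# A quick sanity check of join() (hypothetical volume name):
#   join('conky:', 'foo', 'bar')  ->  'conky:foo:bar'
#   join('foo', 'conky:bar')      ->  'conky:bar'   (the absolute part wins)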
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
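# For example (hypothetical paths):
#   split('conky:foo:bar')  ->  ('conky:foo', 'bar')
#   split('bar')            ->  ('', 'bar')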
def splitext(p):
"""Split a path into root and extension.
The extension is everything starting at the last dot in the last
pathname component; the root is everything before that.
It is always true that root + ext == p."""
root, ext = '', ''
for c in p:
if c == ':':
root, ext = root + ext + c, ''
elif c == '.':
if ext:
root, ext = root + ext, c
else:
ext = c
elif ext:
ext = ext + c
else:
root = root + c
return root, ext
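# For example: splitext('foo.bar:baz.txt') -> ('foo.bar:baz', '.txt') and
# splitext('archive') -> ('archive', '').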
def splitdrive(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files.
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_SIZE]
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_MTIME]
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
st = os.stat(filename)
return st[ST_ATIME]
def islink(s):
"""Return true if the pathname refers to a symbolic link.
    Always false on the Mac, until we understand Aliases."""
return 0
def isfile(s):
"""Return true if the pathname refers to an existing regular file."""
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
def exists(s):
"""Return true if the pathname refers to an existing file or directory."""
try:
st = os.stat(s)
except os.error:
return 0
return 1
# Return the longest prefix of all list elements.
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
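# For example: normpath('foo') -> ':foo' (made explicitly relative) and
# normpath('a:b::c') -> 'a:c' ('::' backs up one directory level).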
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
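# A minimal usage sketch of the callback-style walk() above (hypothetical
# volume name and callback):
#
#   def visit(arg, dirname, names):
#       print dirname, len(names)
#
#   walk('HD:Projects:', visit, None)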
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
path = join(os.getcwd(), path)
return normpath(path)
# realpath is a no-op on systems without islink support
realpath = abspath
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/macpath.py
|
Python
|
gpl-3.0
| 6,978
|
'''
This program illustrates the use of findContours and drawContours.
The image of the drawn contours is displayed; showing the original image is
left commented out below.
Usage:
    contours.py
A trackbar controlling the contour level from -3 to 3 is also sketched in the
commented-out code.
'''
import numpy as np
import cv2
name_file_tile = "0.45-nd.png"
file_tile = "../../wp-admin/img/" + name_file_tile
img = cv2.imread(file_tile)
h, w = img.shape[:2]
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # cv2.imread returns BGR, not RGB
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
_, contours0, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
#def update(levels):
vis = np.zeros((h, w, 3), np.uint8)
levels = 0
#cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255), 3, , hierarchy, abs(levels) )
#drawContours( vis, contours, -1, (255,255,255), 3)
cv2.drawContours(vis, contours, -1, (255, 255, 255), 1, 1, hierarchy)
cv2.imshow('contours', vis)
#update(3)
#cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
#cv2.imshow('image', img)
cv2.waitKey()
cv2.destroyAllWindows()
|
Null01/detect-polygons-from-image
|
src/generate-polygons/vectorize-img-03.py
|
Python
|
gpl-3.0
| 1,108
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import platform
import shutil
import sys
sys.path.append('bin')
from autojump_argparse import ArgumentParser
SUPPORTED_SHELLS = ('bash', 'zsh', 'fish')
def cp(src, dest, dryrun=False):
print("copying file: %s -> %s" % (src, dest))
if not dryrun:
shutil.copy(src, dest)
def get_shell():
return os.path.basename(os.getenv('SHELL', ''))
def mkdir(path, dryrun=False):
print("creating directory:", path)
if not dryrun and not os.path.exists(path):
os.makedirs(path)
def modify_autojump_sh(etc_dir, dryrun=False):
"""Append custom installation path to autojump.sh"""
custom_install = "\
\n# check custom install \
\nif [ -s %s/autojump.${shell} ]; then \
\n\tsource %s/autojump.${shell} \
\nfi\n" % (etc_dir, etc_dir)
with open(os.path.join(etc_dir, 'autojump.sh'), 'a') as f:
f.write(custom_install)
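# With etc_dir == "/home/user/.autojump/etc/profile.d" (hypothetical), the
# block appended to autojump.sh reads:
#
#   # check custom install
#   if [ -s /home/user/.autojump/etc/profile.d/autojump.${shell} ]; then
#       source /home/user/.autojump/etc/profile.d/autojump.${shell}
#   fi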
def parse_arguments():
default_user_destdir = os.path.join(os.path.expanduser("~"), '.autojump')
default_user_prefix = ''
default_user_zshshare = 'functions'
default_system_destdir = '/'
default_system_prefix = '/usr/local'
default_system_zshshare = '/usr/share/zsh/site-functions'
parser = ArgumentParser(
description='Installs autojump globally for root users, otherwise \
installs in current user\'s home directory.')
parser.add_argument(
'-n', '--dryrun', action="store_true", default=False,
help='simulate installation')
parser.add_argument(
'-f', '--force', action="store_true", default=False,
help='skip root user, shell type, Python version checks')
parser.add_argument(
'-d', '--destdir', metavar='DIR', default=default_user_destdir,
help='set destination to DIR')
parser.add_argument(
'-p', '--prefix', metavar='DIR', default=default_user_prefix,
help='set prefix to DIR')
parser.add_argument(
'-z', '--zshshare', metavar='DIR', default=default_user_zshshare,
help='set zsh share destination to DIR')
parser.add_argument(
'-s', '--system', action="store_true", default=False,
help='install system wide for all users')
args = parser.parse_args()
if not args.force:
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
print("Python v2.6+ or v3.0+ required.", file=sys.stderr)
sys.exit(1)
if get_shell() not in SUPPORTED_SHELLS:
print("Unsupported shell: %s" % os.getenv('SHELL'),
file=sys.stderr)
sys.exit(1)
if args.system and os.geteuid() != 0:
print("Please rerun as root for system-wide installation.",
file=sys.stderr)
sys.exit(1)
if args.destdir != default_user_destdir \
or args.prefix != default_user_prefix \
or args.zshshare != default_user_zshshare:
args.custom_install = True
else:
args.custom_install = False
if args.system:
if args.custom_install:
print("Custom paths incompatible with --system option.",
file=sys.stderr)
sys.exit(1)
args.destdir = default_system_destdir
args.prefix = default_system_prefix
args.zshshare = default_system_zshshare
return args
def print_post_installation_message(etc_dir):
if get_shell() == 'fish':
aj_shell = '%s/autojump.fish' % etc_dir
source_msg = "if test -f %s; . %s; end" % (aj_shell, aj_shell)
# TODO(ting|2013-12-31): check config.fish location on OSX
rcfile = '~/.config/fish/config.fish'
else:
aj_shell = '%s/autojump.sh' % etc_dir
source_msg = "[[ -s %s ]] && source %s" % (aj_shell, aj_shell)
if platform.system() == 'Darwin' and get_shell() == 'bash':
rcfile = '~/.profile'
else:
rcfile = '~/.%src' % get_shell()
print("\nPlease manually add the following line(s) to %s:" % rcfile)
print('\n\t' + source_msg)
if get_shell() == 'zsh':
print("\n\tautoload -U compinit && compinit -u")
print("\nPlease restart terminal(s) before running autojump.\n")
def main(args):
if args.dryrun:
print("Installing autojump to %s (DRYRUN)..." % args.destdir)
else:
print("Installing autojump to %s ..." % args.destdir)
bin_dir = os.path.join(args.destdir, args.prefix, 'bin')
etc_dir = os.path.join(args.destdir, 'etc/profile.d')
doc_dir = os.path.join(args.destdir, args.prefix, 'share/man/man1')
icon_dir = os.path.join(args.destdir, args.prefix, 'share/autojump')
zshshare_dir = os.path.join(args.destdir, args.zshshare)
mkdir(bin_dir, args.dryrun)
mkdir(etc_dir, args.dryrun)
mkdir(doc_dir, args.dryrun)
mkdir(icon_dir, args.dryrun)
mkdir(zshshare_dir, args.dryrun)
cp('./bin/autojump', bin_dir, args.dryrun)
cp('./bin/autojump_argparse.py', bin_dir, args.dryrun)
cp('./bin/autojump_data.py', bin_dir, args.dryrun)
cp('./bin/autojump_utils.py', bin_dir, args.dryrun)
cp('./bin/autojump.sh', etc_dir, args.dryrun)
cp('./bin/autojump.bash', etc_dir, args.dryrun)
cp('./bin/autojump.fish', etc_dir, args.dryrun)
cp('./bin/autojump.zsh', etc_dir, args.dryrun)
cp('./bin/_j', zshshare_dir, args.dryrun)
cp('./bin/icon.png', icon_dir, args.dryrun)
cp('./docs/autojump.1', doc_dir, args.dryrun)
if args.custom_install:
modify_autojump_sh(etc_dir, args.dryrun)
print_post_installation_message(etc_dir)
if __name__ == "__main__":
sys.exit(main(parse_arguments()))
|
mdlawson/autojump
|
install.py
|
Python
|
gpl-3.0
| 5,781
|
#-*- coding: ISO-8859-1 -*-
#
# DBSQL - A SQL database engine.
#
# Copyright (C) 2007-2008 The DBSQL Group, Inc. - All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# There are special exceptions to the terms and conditions of the GPL as it
# is applied to this software. View the full text of the exception in file
# LICENSE_EXCEPTIONS in the directory of this software distribution.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# http://creativecommons.org/licenses/GPL/2.0/
#
# Copyright (C) 2004-2005 Gerhard Häring <[email protected]>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
# The DB-API 2.0 interface
import datetime
import time
from pydbsql._dbsql import *
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime
def DateFromTicks(ticks):
return apply(Date, time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return apply(Time, time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return apply(Timestamp, time.localtime(ticks)[:6])
version_info = tuple([int(x) for x in version.split(".")])
dbsql_version_info = tuple([int(x) for x in dbsql_version.split(".")])
Binary = buffer
def register_adapters_and_converters():
def adapt_date(val):
return val.isoformat()
def adapt_datetime(val):
return val.isoformat(" ")
def convert_date(val):
return datetime.date(*map(int, val.split("-")))
def convert_timestamp(val):
datepart, timepart = val.split(" ")
year, month, day = map(int, datepart.split("-"))
timepart_full = timepart.split(".")
hours, minutes, seconds = map(int, timepart_full[0].split(":"))
if len(timepart_full) == 2:
microseconds = int(timepart_full[1])
else:
microseconds = 0
val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
return val
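    # For example: convert_timestamp("2007-04-05 12:30:45.000123") yields
    # datetime.datetime(2007, 4, 5, 12, 30, 45, 123); note that the fractional
    # part is read as an integer microsecond count, so it must be six digits.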
register_adapter(datetime.date, adapt_date)
register_adapter(datetime.datetime, adapt_datetime)
register_converter("date", convert_date)
register_converter("timestamp", convert_timestamp)
register_adapters_and_converters()
# Clean up namespace
del(register_adapters_and_converters)
|
gburd/dbsql
|
src/py/pydbsql/dbapi2.py
|
Python
|
gpl-3.0
| 3,459
|
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'FORD',
packages = ['ford'],
include_package_data = True,
version = '4.3.0',
description = 'FORD, standing for FORtran Documenter, is an automatic documentation generator for modern Fortran programs.',
long_description = long_description,
author = 'Chris MacMackin',
author_email = '[email protected]',
url = 'https://github.com/cmacmackin/ford/',
download_url = 'https://github.com/cmacmackin/ford/tarball/4.3.0',
keywords = ['Markdown', 'Fortran', 'documentation', 'comments'],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Documentation',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Documentation',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires = ['markdown','markdown-include >= 0.5.1','toposort',
'jinja2 >= 2.1','pygments','beautifulsoup4','graphviz'],
entry_points = {
'console_scripts': [
'ford=ford:run',
],
}
)
|
duyuan11/ford
|
setup.py
|
Python
|
gpl-3.0
| 2,137
|
#!/usr/bin/env python
"""
A Stepper Motor Driver class for Replicape.
Author: Elias Bakken
email: elias(dot)bakken(at)gmail(dot)com
Website: http://www.thing-printer.com
License: GNU GPL v3: http://www.gnu.org/copyleft/gpl.html
Redeem is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Redeem is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Redeem. If not, see <http://www.gnu.org/licenses/>.
"""
import Adafruit_BBIO.GPIO as GPIO
import logging
import time
from threading import Thread
from .Alarm import Alarm
from .DAC import DAC, PWM_DAC
from .Key_pin import Key_pin
from .Printer import Printer
from .ShiftRegister import ShiftRegister
class Stepper(object):
printer = None
def __init__(self, step_pin, dir_pin, fault_key, dac_channel, shiftreg_nr, name):
""" Init """
self.dac_channel = dac_channel # Which channel on the dac is connected to this stepper
self.step_pin = step_pin
self.dir_pin = dir_pin
self.fault_key = fault_key
self.name = name
self.enabled = False
self.in_use = False
self.steps_pr_mm = 1
self.microsteps = 1.0
self.microstepping = 0
self.direction = 1
self.current_disabled = False
# Set up the Shift register
ShiftRegister.make(8)
self.shift_reg = ShiftRegister.registers[shiftreg_nr]
# Set up the GPIO pins - we just have to initialize them so the PRU can flip them
# terrible hack to cover a bug in Adafruit
dir_name = "EHRPWM2A" if dir_pin == "GPIO0_22" else dir_pin
try:
GPIO.setup(dir_name, GPIO.OUT)
GPIO.setup(step_pin, GPIO.OUT)
except ValueError:
logging.warning("*** Stepper {} Pin {} initialization failure:".format(self.name, dir_name))
# Add a key code to the key listener
# Steppers have an nFAULT pin, so callback on falling
Key_pin(name, fault_key, Key_pin.FALLING, self.fault_callback)
def get_state(self):
""" Returns the current state """
return self.state & 0xFF # Return the state of the serial to parallel
def update(self):
""" Commits the changes """
ShiftRegister.commit() # Commit the serial to parallel
# Higher level commands
def set_steps_pr_mm(self, steps_pr_mm):
""" Set the number of steps pr mm. """
self.steps_pr_mm = steps_pr_mm
self.mmPrStep = 1.0 / (steps_pr_mm * self.microsteps)
def get_steps_pr_meter(self):
""" Get the number of steps pr meter """
return self.steps_pr_mm * self.microsteps * 1000.0
  def get_step_bank(self):
    """ GPIO bank of the step pin, e.g. 1 for GPIO1_31 """
    return int(self.step_pin[4:5])
  def get_step_pin(self):
    """ Pin number within the bank, e.g. 31 for GPIO1_31 """
    return int(self.step_pin[6:])
  def get_dir_bank(self):
    """ GPIO bank of the dir pin """
    return int(self.dir_pin[4:5])
  def get_dir_pin(self):
    """ Pin number within the bank for the dir pin """
    return int(self.dir_pin[6:])
def get_direction(self):
return self.direction
@staticmethod
def commit():
pass
def fault_callback(self, key, event):
Alarm(Alarm.STEPPER_FAULT,
"Stepper {}<br>Most likely the stepper is over heated.".format(self.name))
"""
The bits in the shift register are as follows (Rev B1):
Bit - name - init val
D0 = - = X (or servo enable)
D1 = CFG5 = 0 (Chopper blank time)
D2 = CFG4  = 0 (Chopper hysteresis)
D3 = CFG0 = 0 (Chopper off time)
D4 = CFG2 = 0 (microstepping)
D5 = CFG2-Z = 0 (microstepping)
D6 = CFG1 = 0 (microstepping)
D7 = CFG1-Z = 0 (microstepping)
"""
class Stepper_00B1(Stepper):
def __init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name):
Stepper.__init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name)
self.dac = PWM_DAC(dac_channel)
self.state = 0 # The initial state of shift register
def set_microstepping(self, value, force_update=False):
if not value in range(9):
logging.warning("Tried to set illegal microstepping value: {0} for stepper {1}".format(
value, self.name))
return
EN_CFG1 = (1 << 7)
DIS_CFG1 = (0 << 7)
EN_CFG2 = (1 << 5)
DIS_CFG2 = (0 << 5)
CFG2_H = (1 << 4)
CFG2_L = (0 << 4)
CFG1_H = (1 << 6)
CFG1_L = (0 << 6)
if value == 0: # GND, GND
state = EN_CFG2 | CFG2_L | EN_CFG1 | CFG1_L
self.microsteps = 1
elif value == 1: # GND, VCC
state = EN_CFG2 | CFG2_L | EN_CFG1 | CFG1_H
self.microsteps = 2
elif value == 2: # GND, open
state = EN_CFG2 | CFG2_L | DIS_CFG1 | CFG1_L
self.microsteps = 2
elif value == 3: # VCC, GND
state = EN_CFG2 | CFG2_H | EN_CFG1 | CFG1_L
self.microsteps = 4
elif value == 4: # VCC, VCC
state = EN_CFG2 | CFG2_H | EN_CFG1 | CFG1_H
self.microsteps = 16
elif value == 5: # VCC, open
state = EN_CFG2 | CFG2_H | DIS_CFG1 | CFG1_L
self.microsteps = 4
elif value == 6: # open, GND
state = DIS_CFG2 | CFG2_L | EN_CFG1 | CFG1_L
self.microsteps = 16
elif value == 7: # open, VCC
state = DIS_CFG2 | CFG2_L | EN_CFG1 | CFG1_H
self.microsteps = 4
elif value == 8: # open, open
state = DIS_CFG2 | CFG2_L | DIS_CFG1 | CFG1_L
self.microsteps = 16
self.shift_reg.set_state(state, 0xF0)
self.mmPrStep = 1.0 / (self.steps_pr_mm * self.microsteps)
logging.debug("Updated stepper " + self.name + " to microstepping " + str(value) + " = " +
str(self.microsteps))
self.microstepping = value
def set_current_value(self, i_rms):
""" Current chopping limit (This is the value you can change) """
self.current_value = i_rms
v_iref = 2.5 * (i_rms / 1.92)
if (v_iref > 2.5):
logging.warning("Current ref for stepper " + self.name +
" above limit (2.5 V). Setting to 2.5 V")
v_iref = 2.5
logging.debug("Setting votage to " + str(v_iref) + " for " + self.name)
self.dac.set_voltage(v_iref)
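  # Scaling sanity check: i_rms = 1.92 A maps to the full-scale 2.5 V
  # reference (v_iref = 2.5 * 1.92 / 1.92); larger values are clamped to
  # 2.5 V with a warning above.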
def set_disabled(self, force_update=False):
if hasattr(Stepper, "printer"):
Stepper.printer.enable.set_disabled()
def set_enabled(self, force_update=False):
if hasattr(Stepper, "printer"):
Stepper.printer.enable.set_enabled()
def set_decay(self, value):
EN_CFG0 = (1 << 3)
DIS_CFG0 = (0 << 3)
EN_CFG4 = (1 << 2)
DIS_CFG4 = (0 << 2)
EN_CFG5 = (1 << 1)
DIS_CFG5 = (0 << 1)
if value == 0: # GND, GND, GND
state = DIS_CFG0 | DIS_CFG4 | DIS_CFG5
elif value == 1: # GND, GND, VCC
state = DIS_CFG0 | DIS_CFG4 | EN_CFG5
elif value == 2: # GND, VCC, GND
state = DIS_CFG0 | EN_CFG4 | DIS_CFG5
elif value == 3: # GND, VCC, VCC
state = DIS_CFG0 | EN_CFG4 | EN_CFG5
elif value == 4: # VCC, GND, GND
state = EN_CFG0 | DIS_CFG4 | DIS_CFG5
elif value == 5: # VCC, GND, VCC
state = EN_CFG0 | DIS_CFG4 | EN_CFG5
elif value == 6: # VCC, VCC, GND
state = EN_CFG0 | EN_CFG4 | DIS_CFG5
elif value == 7: # VCC, VCC, VCC
state = EN_CFG0 | EN_CFG4 | EN_CFG5
else:
logging.warning("Tried to set illegal value for stepper decay: " + str(value))
return
self.shift_reg.set_state(state, 0x0E)
self.decay = value # For saving the setting with M500
def reset(self):
self.set_disabled()
self.set_enabled()
class Stepper_00B2(Stepper_00B1):
def __init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name):
Stepper_00B1.__init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name)
self.dac = PWM_DAC(dac_channel)
if name in ["X", "E", "H"]:
self.state = 0x1 # The initial state of shift register
else:
self.state = 0x0
self.shift_reg.set_state(self.state)
def set_disabled(self, force_update=False):
if not self.enabled:
return
logging.debug("Disabling stepper " + self.name)
# X, Y, Z steppers are on the first shift reg. Extruders have their own.
if self.name in ["X", "Y", "Z"]:
ShiftRegister.registers[0].add_state(0x1)
elif self.name == "E":
ShiftRegister.registers[3].add_state(0x1)
elif self.name == "H":
ShiftRegister.registers[4].add_state(0x1)
self.enabled = False
def set_enabled(self, force_update=False):
if self.enabled:
return
logging.debug("Enabling stepper " + self.name)
# X, Y, Z steppers are on the first shift reg. Extruders have their own.
if self.name in ["X", "Y", "Z"]:
ShiftRegister.registers[0].remove_state(0x1) # First bit low.
elif self.name == "E":
ShiftRegister.registers[3].remove_state(0x1)
elif self.name == "H":
ShiftRegister.registers[4].remove_state(0x1)
self.enabled = True
class Stepper_00B3(Stepper_00B2):
@classmethod
  def set_stepper_power_down(cls, pd):
    ''' Enables stepper low current mode on all steppers '''
    logging.debug("Setting powerdown to " + str(pd))
if pd:
ShiftRegister.registers[4].add_state(0x1)
else:
ShiftRegister.registers[4].remove_state(0x1)
def __init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name):
Stepper_00B1.__init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name)
self.dac = PWM_DAC(dac_channel)
if name in ["X", "E", "H"]:
self.state = 0x1 # The initial state of shift register
else:
self.state = 0x0
self.shift_reg.set_state(self.state)
self.current_enabled = True
def set_disabled(self, force_update=False):
if not self.enabled:
return
logging.debug("Disabling stepper " + self.name)
# X, Y, Z steppers are on the first shift reg. Extruders have their own.
if self.name in ["X", "Y", "Z"]:
ShiftRegister.registers[0].add_state(0x1)
elif self.name in ["E", "H"]:
ShiftRegister.registers[3].add_state(0x1)
self.enabled = False
def set_enabled(self, force_update=False):
if self.enabled:
return
logging.debug("Enabling stepper " + self.name)
# X, Y, Z steppers are on the first shift reg. Extruders have their own.
if self.name in ["X", "Y", "Z"]:
ShiftRegister.registers[0].remove_state(0x1) # First bit low.
elif self.name in ["E", "H"]:
ShiftRegister.registers[3].remove_state(0x1)
self.enabled = True
def set_current_disabled(self):
''' Set the stepper in lowest current mode '''
if not self.current_enabled:
return
self.current_enable_value = self.current_value
self.current_enabled = False
self.set_current_value(0)
def set_current_enabled(self):
if self.current_enabled:
return
self.set_current_value(self.current_enable_value)
self.current_enabled = True
"""
The bits in the shift register are as follows (Rev A4) :
Bit - name - init val
D0 = - = X
D1 = MODE2 = 0
D2 = MODE1 = 0
D3 = MODE0 = 0
D4 = nENABLE = 0 - Enabled
D5 = DECAY = 0 - Slow decay
D6 = nSLEEP = 1 - Not sleeping
D7 = nRESET = 1 - Not in reset mode
"""
class Stepper_00A4(Stepper):
revision = "A4"
SLEEP = 6
ENABLED = 4
RESET = 7
DECAY = 5
def __init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name):
Stepper.__init__(self, stepPin, dirPin, faultPin, dac_channel, shiftreg_nr, name)
self.dac = DAC(dac_channel)
self.dacvalue = 0x00 # The voltage value on the VREF
self.state = (1 << Stepper_00A4.SLEEP) | (1 << Stepper_00A4.RESET) | (
1 << Stepper_00A4.ENABLED) # The initial state of the inputs
self.current_enabled = True
self.update()
def set_enabled(self, force_update=False):
""" Sets the Stepper enabled """
if not self.enabled:
self.state &= ~(1 << Stepper_00A4.ENABLED)
self.enabled = True
self.update()
def set_disabled(self, force_update=False):
""" Sets the Stepper disabled """
if self.enabled:
self.state |= (1 << Stepper_00A4.ENABLED)
self.enabled = False
self.update()
def set_current_disabled(self):
''' Set the stepper in lowest current mode '''
if not self.current_enabled:
return
self.current_enable_value = self.current_value
self.current_enabled = False
self.set_current_value(0)
def set_current_enabled(self):
if self.current_enabled:
return
self.set_current_value(self.current_enable_value)
self.current_enabled = True
def enable_sleepmode(self, force_update=False):
"""Logic high to enable device, logic low to enter
low-power sleep mode. Internal pulldown."""
self.state &= ~(1 << Stepper_00A4.SLEEP)
self.update()
def disable_sleepmode(self, force_update=False):
""" Disables sleepmode (awake) """
self.state |= (1 << Stepper_00A4.SLEEP)
self.update()
def reset(self, force_update=False):
"""nReset - Active-low reset input initializes the indexer
logic and disables the H-bridge outputs.
Internal pulldown."""
self.state &= ~(1 << Stepper_00A4.RESET)
self.update()
time.sleep(0.001)
self.state |= (1 << Stepper_00A4.RESET)
self.update()
def set_microstepping(self, value, force_update=False):
""" Microstepping (default = 0) 0 to 5 """
if not value in [0, 1, 2, 3, 4, 5]: # Full, half, 1/4, 1/8, 1/16, 1/32.
logging.warning("Tried to set illegal microstepping value: {0} for stepper {1}".format(
value, self.name))
return
self.microstepping = value
self.microsteps = 2**value # 2^val
# Keep bit 0, 4, 5, 6 intact but replace bit 1, 2, 3
self.state = int(
"0b" + bin(self.state)[2:].rjust(8, '0')[:4] + bin(value)[2:].rjust(3, '0')[::-1] + "0", 2)
#self.state = int("0b"+bin(self.state)[2:].rjust(8, '0')[:4]+bin(value)[2:].rjust(3, '0')+bin(self.state)[-1:], 2)
self.mmPrStep = 1.0 / (self.steps_pr_mm * self.microsteps)
logging.debug("Updated stepper " + self.name + " to microstepping " + str(value) + " = " +
str(self.microsteps))
self.update()
def set_current_value(self, iChop):
""" Current chopping limit (This is the value you can change) """
self.current_value = iChop
    rSense = 0.1  # Sense resistor value in ohms
v_out = iChop * 5.0 * rSense # Calculated voltage out from the DAC
self.dac.set_voltage(v_out)
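  # Scaling sanity check: with rSense = 0.1 ohm, v_out = iChop * 0.5, so a
  # 1.0 A chopping limit corresponds to 0.5 V on the DAC.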
def set_decay(self, value, force_update=False):
""" Decay mode, look in the data sheet """
if value not in [0, 1]:
logging.warning("Invalid decay value. Use 0 or 1. Got: {}".format(value))
return
self.decay = value
self.state &= ~(1 << Stepper_00A4.DECAY) # bit 5
self.state |= (value << Stepper_00A4.DECAY)
self.update()
def update(self):
    # Push the current state out to the shift register
self.shift_reg.set_state(self.state)
#logging.debug("Updated stepper {} to enabled, state: {}".format(self.name, bin(self.state)))
class Stepper_reach_00A4(Stepper_00A4):
pass
"""
The bits in the shift register are as follows (Rev A3):
D0 = DECAY = X
D1 = MODE0 = X
D2 = MODE1 = X
D3 = MODE2 = X
D4 = nRESET = 1
D5 = nSLEEP = 1
D6 = nENABLE = 0
D7 = - = X
"""
class Stepper_00A3(Stepper_00A4):
  # Note: these assignments retag constants on the Stepper base class; the
  # methods above still read Stepper_00A4.* directly.
  Stepper.revision = "A3"
  Stepper.ENABLED = 6
  Stepper.SLEEP = 5
  Stepper.RESET = 4
  Stepper.DECAY = 0
# Simple test procedure for the steppers
if __name__ == '__main__':
s = Stepper("GPIO0_27", "GPIO1_29", "GPIO2_4", 0, 0, "X")
print(s.get_step_pin())
print(s.get_step_bank())
print(s.get_dir_pin())
print(s.get_dir_bank())
|
intelligent-agent/redeem
|
redeem/Stepper.py
|
Python
|
gpl-3.0
| 15,869
|
# -*- coding: utf-8 -*-
"""
DrQueue main module
Copyright (C) 2011-2013 Andreas Schroeder
This file is part of DrQueue.
Licensed under GNU General Public License version 3. See LICENSE for details.
"""
import platform
import os
import sys
import smtplib
import json
from email.mime.text import MIMEText
from .client import Client
from .job import Job
from .computer import Computer
supported_renderers = ['3delight', '3dsmax', 'aftereffects', 'aqsis', \
'blender', 'cinema4d', 'general', 'lightwave', 'luxrender', 'mantra', \
'maya', 'mentalray', 'nuke', 'shake', 'terragen', 'turtle', 'vray', 'xsi']
supported_os = ['Windows', 'Mac OSX', 'Linux', 'FreeBSD', 'NetBSD', 'OpenBSD', \
'AIX', 'Solaris']
def check_renderer_support(renderer):
    """Check if renderer is supported."""
    return renderer in supported_renderers
def get_rendertemplate(renderer):
    """Return template filename from renderer name"""
    templates = {
        '3delight': '3delight_sg.py',
        '3dsmax': '3dsmax_sg.py',
        'aftereffects': 'aftereffects_sg.py',
        'aqsis': 'aqsis_sg.py',
        'blender': 'blender_sg.py',
        'cinema4d': 'cinema4d_sg.py',
        'general': 'general_sg.py',
        'lightwave': 'lightwave_sg.py',
        'luxrender': 'luxrender_sg.py',
        'mantra': 'mantra_sg.py',
        'maya': 'maya_sg.py',
        'mentalray': 'mentalray_sg.py',
        'nuke': 'nuke_sg.py',
        'pixie': 'pixie_sg.py',
        'shake': 'shake_sg.py',
        'terragen': 'terragen_sg.py',
        'turtle': 'turtle_sg.py',
        'vray': 'vray_sg.py',
        'xsi': 'xsi_sg.py',
    }
    return templates.get(renderer, "")
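# For example: get_rendertemplate('blender') -> 'blender_sg.py'; unknown
# renderer names yield an empty string.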
def get_osname():
"""Return operating system name"""
osname = platform.system()
if osname == 'Darwin':
osname = 'Mac OSX'
return osname
def run_script_with_env(render_script, env_dict):
"""Run template script on IPython engine"""
import platform, os, sys
# set some variables on target machine
env_dict['DRQUEUE_OS'] = platform.system()
env_dict['DRQUEUE_ETC'] = os.path.join(os.getenv('DRQUEUE_ROOT'), "etc")
env_dict['DRQUEUE_LOGFILE'] = os.path.join(os.getenv('DRQUEUE_ROOT'),
"logs", env_dict['DRQUEUE_LOGFILE'])
# import specific render template
sys.path.append(env_dict['DRQUEUE_ETC'])
impmod = render_script.replace('.py', '')
__import__(impmod)
template = sys.modules[impmod]
# run template with env_dict
status = template.run_renderer(env_dict)
return status
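# A hypothetical env_dict as consumed above; DRQUEUE_LOGFILE is the only key
# the caller must provide, the rest are filled in on the engine:
#   env = {"DRQUEUE_LOGFILE": "job_0001.log"}
#   run_script_with_env("blender_sg.py", env)  # needs DRQUEUE_ROOT in the environment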
def check_deps(dep_dict):
"""Run all dependency checking functions. This method runs directly on the engine."""
if ('os_name' in dep_dict) and (engine_has_os(dep_dict['os_name']) == False):
return False
elif ('minram' in dep_dict) and (engine_has_minram(dep_dict['minram']) == False):
return False
elif ('mincores' in dep_dict) and (engine_has_mincores(dep_dict['mincores']) == False):
return False
elif ('pool_name' in dep_dict) and (engine_is_in_pool(dep_dict['pool_name']) == False):
return False
elif ('job_id' in dep_dict) and (job_is_enabled(dep_dict['job_id']) == False):
return False
else:
return True
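# A minimal dependency dict as consumed by check_deps() (hypothetical values):
#   deps = {"os_name": "Linux", "minram": 4, "mincores": 2, "pool_name": "render"}
#   check_deps(deps)  # True only when every listed requirement is met on this engine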
def engine_is_in_pool(pool_name):
"""Check if engine belongs to certain pool. This method runs directly on the engine."""
# check os.environ["DRQUEUE_POOL"]
if ("DRQUEUE_POOL" in os.environ) and (pool_name in os.environ["DRQUEUE_POOL"]):
return True
else:
return False
def engine_has_os(os_name):
"""Check if engine is running on certain OS. This method runs directly on the engine."""
running_os = get_osname()
if os_name == running_os:
return True
else:
return False
def engine_has_minram(minram):
"""Check if engine has at least minram GB RAM. This method runs directly on the engine."""
mem = Computer.get_memory()
if mem >= minram:
return True
else:
return False
def engine_has_mincores(mincores):
"""Check if engine has at least mincores CPU cores. This method runs directly on the engine."""
ncpus = Computer.get_ncpus()
ncorescpu = Computer.get_ncorescpu()
cores = ncpus * ncorescpu
if cores >= mincores:
return True
else:
return False
def job_is_enabled(job_id):
"""Check if job is enabled. This method runs directly on the engine."""
job = Job.query_db(job_id)
if (job != None) and (job["enabled"] == True):
return True
else:
return False
def send_email(job_name, recipients):
"""Notify recipients about finish of job."""
# load email configuration
user_dir = os.path.expanduser("~")
config_file = os.path.join(user_dir, ".drqueue", "email_config.json")
    try:
        fp = open(config_file, "rb")
    except IOError:
        print("Email configuration could not be loaded.")
        return
    try:
        config = json.load(fp)
    except ValueError:
        print("Email configuration could not be parsed.")
        return
mail_from = config['from']
body_text = "Your render job \"%s\" is finished." % job_name
# Create a text/plain message
msg = MIMEText(body_text)
# subject, sender and recipients
msg['Subject'] = "Job \"%s\" is finished" % job_name
msg['From'] = mail_from
msg['To'] = recipients
if config['smtp_ssl'] == "1":
# connect via SSL
smtp = smtplib.SMTP_SSL(config['smtp_server'], int(config['smtp_port']))
else:
# connect without SSL
smtp = smtplib.SMTP(config['smtp_server'], int(config['smtp_port']))
# start TLS encryption
if config['smtp_tls'] == "1":
smtp.starttls()
if config['smtp_auth'] == "1":
# authenticate if required
smtp.login(config['smtp_user'], config['smtp_passwd'])
try:
smtp.sendmail(msg['From'], msg['To'], msg.as_string())
except:
print("Email could not be sent.")
smtp.quit()
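# A hypothetical ~/.drqueue/email_config.json matching the keys read above:
# {"from": "render@example.com", "smtp_server": "smtp.example.com",
#  "smtp_port": "587", "smtp_ssl": "0", "smtp_tls": "1",
#  "smtp_auth": "1", "smtp_user": "render", "smtp_passwd": "secret"}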
|
jedie/DrQueueIPython
|
DrQueue/__init__.py
|
Python
|
gpl-3.0
| 6,522
|