Dataset schema (string and list columns report minimum and maximum length; ⌀ marks columns that may be null):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 6 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 368k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.53 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 6 to 1.04M |
| filtered:remove_non_ascii | int64 | 0 to 538k |
| filtered:remove_decorators | int64 | 0 to 917k |
| filtered:remove_async | int64 | 0 to 722k |
| filtered:remove_classes | int64 | -45 to 1M |
| filtered:remove_generators | int64 | 0 to 814k |
| filtered:remove_function_no_docstring | int64 | -102 to 850k |
| filtered:remove_class_no_docstring | int64 | -3 to 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 to 52.4k |
| filtered:remove_delete_markers | int64 | 0 to 59.6k |
bc8ce23b2de1c59918fb8dc6dfc87ea85a63c990
| 2,733
|
py
|
Python
|
atc/LINE-master/train.py
|
anaeliaovalle/atc-mt-dti
|
755bd175e852ef2a6792be7244b006ebed252d8d
|
[
"MIT"
] | null | null | null |
atc/LINE-master/train.py
|
anaeliaovalle/atc-mt-dti
|
755bd175e852ef2a6792be7244b006ebed252d8d
|
[
"MIT"
] | null | null | null |
atc/LINE-master/train.py
|
anaeliaovalle/atc-mt-dti
|
755bd175e852ef2a6792be7244b006ebed252d8d
|
[
"MIT"
] | null | null | null |
import argparse
from utils.line import Line
from tqdm import trange
import torch
import torch.optim as optim
import sys
import pickle
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--graph_path", type=str)
parser.add_argument("-save", "--save_path", type=str)
parser.add_argument("-lossdata", "--lossdata_path", type=str)
# Hyperparams.
parser.add_argument("-order", "--order", type=int, default=2)
parser.add_argument("-neg", "--negsamplesize", type=int, default=5)
parser.add_argument("-dim", "--dimension", type=int, default=128)
parser.add_argument("-batchsize", "--batchsize", type=int, default=5)
parser.add_argument("-epochs", "--epochs", type=int, default=1)
parser.add_argument("-lr", "--learning_rate", type=float,
default=0.025) # As starting value in paper
parser.add_argument("-negpow", "--negativepower", type=float, default=0.75)
args = parser.parse_args()
# Create dict of distribution when opening file
edgedistdict, nodedistdict, weights, nodedegrees, maxindex = makeDist(
args.graph_path, args.negativepower)
edgesaliassampler = VoseAlias(edgedistdict)
nodesaliassampler = VoseAlias(nodedistdict)
batchrange = int(len(edgedistdict) / args.batchsize)
print(maxindex)
line = Line(maxindex + 1, embed_dim=args.dimension, order=args.order)
opt = optim.SGD(line.parameters(), lr=args.learning_rate,
momentum=0.9, nesterov=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
lossdata = {"it": [], "loss": []}
it = 0
print("\nTraining on {}...\n".format(device))
for epoch in range(args.epochs):
print("Epoch {}".format(epoch))
for b in trange(batchrange):
samplededges = edgesaliassampler.sample_n(args.batchsize)
batch = list(makeData(samplededges, args.negsamplesize, weights, nodedegrees,
nodesaliassampler))
batch = torch.LongTensor(batch)
v_i = batch[:, 0]
v_j = batch[:, 1]
negsamples = batch[:, 2:]
line.zero_grad()
loss = line(v_i, v_j, negsamples, device)
loss.backward()
opt.step()
lossdata["loss"].append(loss.item())
lossdata["it"].append(it)
it += 1
print("\nDone training, saving model to {}".format(args.save_path))
torch.save(line, "{}".format(args.save_path))
print("Saving loss data at {}".format(args.lossdata_path))
with open(args.lossdata_path, "wb") as ldata:
pickle.dump(lossdata, ldata)
sys.exit()
| 36.932432
| 89
| 0.631906
|
import argparse
from utils.utils import *
from utils.line import Line
from tqdm import trange
import torch
import torch.optim as optim
import sys
import pickle
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--graph_path", type=str)
parser.add_argument("-save", "--save_path", type=str)
parser.add_argument("-lossdata", "--lossdata_path", type=str)
# Hyperparams.
parser.add_argument("-order", "--order", type=int, default=2)
parser.add_argument("-neg", "--negsamplesize", type=int, default=5)
parser.add_argument("-dim", "--dimension", type=int, default=128)
parser.add_argument("-batchsize", "--batchsize", type=int, default=5)
parser.add_argument("-epochs", "--epochs", type=int, default=1)
parser.add_argument("-lr", "--learning_rate", type=float,
default=0.025) # As starting value in paper
parser.add_argument("-negpow", "--negativepower", type=float, default=0.75)
args = parser.parse_args()
# Create dict of distribution when opening file
edgedistdict, nodedistdict, weights, nodedegrees, maxindex = makeDist(
args.graph_path, args.negativepower)
edgesaliassampler = VoseAlias(edgedistdict)
nodesaliassampler = VoseAlias(nodedistdict)
batchrange = int(len(edgedistdict) / args.batchsize)
print(maxindex)
line = Line(maxindex + 1, embed_dim=args.dimension, order=args.order)
opt = optim.SGD(line.parameters(), lr=args.learning_rate,
momentum=0.9, nesterov=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
lossdata = {"it": [], "loss": []}
it = 0
print("\nTraining on {}...\n".format(device))
for epoch in range(args.epochs):
print("Epoch {}".format(epoch))
for b in trange(batchrange):
samplededges = edgesaliassampler.sample_n(args.batchsize)
batch = list(makeData(samplededges, args.negsamplesize, weights, nodedegrees,
nodesaliassampler))
batch = torch.LongTensor(batch)
v_i = batch[:, 0]
v_j = batch[:, 1]
negsamples = batch[:, 2:]
line.zero_grad()
loss = line(v_i, v_j, negsamples, device)
loss.backward()
opt.step()
lossdata["loss"].append(loss.item())
lossdata["it"].append(it)
it += 1
print("\nDone training, saving model to {}".format(args.save_path))
torch.save(line, "{}".format(args.save_path))
print("Saving loss data at {}".format(args.lossdata_path))
with open(args.lossdata_path, "wb") as ldata:
pickle.dump(lossdata, ldata)
sys.exit()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 22
|
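The training loop in this row samples edges and negative nodes through `VoseAlias` objects built by `makeDist`, both from `utils.utils`, which is not included in this row (the filtered `content` column even drops the import). As a rough illustration of how such an alias sampler is typically implemented (Vose's alias method; a hypothetical stand-in, not the repository's actual code):

```python
import random

class VoseAlias:
    """Alias-method sampler over a dict of {outcome: probability}.

    Hypothetical stand-in for utils.utils.VoseAlias: O(n) setup, O(1) per draw.
    """
    def __init__(self, dist):
        self.outcomes = list(dist.keys())
        n = len(self.outcomes)
        scaled = [dist[o] * n for o in self.outcomes]           # probabilities scaled by n
        self.prob, self.alias = [0.0] * n, [0] * n
        small = [i for i, p in enumerate(scaled) if p < 1.0]
        large = [i for i, p in enumerate(scaled) if p >= 1.0]
        while small and large:
            s, l = small.pop(), large.pop()
            self.prob[s], self.alias[s] = scaled[s], l          # bucket s is topped up by outcome l
            scaled[l] += scaled[s] - 1.0                        # remove the mass donated to bucket s
            (small if scaled[l] < 1.0 else large).append(l)
        for i in small + large:                                 # numerical leftovers become full buckets
            self.prob[i] = 1.0

    def sample_n(self, size):
        draws = []
        for _ in range(size):
            i = random.randrange(len(self.outcomes))
            keep = random.random() < self.prob[i]
            draws.append(self.outcomes[i] if keep else self.outcomes[self.alias[i]])
        return draws
```

Under that reading, `edgesaliassampler.sample_n(args.batchsize)` above returns a weight-proportional batch of edges, and `nodesaliassampler` presumably draws negative nodes with probability proportional to degree raised to `args.negativepower` (0.75 by default, as in the LINE paper).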
e350a391ac04e2b8526a0c505584efa8bd49131b
| 13,994
|
py
|
Python
|
sdtv4/SDT4Parser.py
|
Homegateway/SDTTool
|
97e698ce3078595a6755ec0b599838dc903eaa3d
|
[
"Apache-2.0"
] | 2
|
2018-05-14T16:00:23.000Z
|
2018-12-26T14:02:51.000Z
|
sdtv4/SDT4Parser.py
|
Homegateway/SDTTool
|
97e698ce3078595a6755ec0b599838dc903eaa3d
|
[
"Apache-2.0"
] | null | null | null |
sdtv4/SDT4Parser.py
|
Homegateway/SDTTool
|
97e698ce3078595a6755ec0b599838dc903eaa3d
|
[
"Apache-2.0"
] | 2
|
2016-09-05T09:24:41.000Z
|
2020-06-23T14:05:45.000Z
|
# SDT4Parser.py
#
# Callback target class for the ElementTree parser to parse an SDT4
#
# Handler for each of the element types
#
#
# Assignment of element types and (handlerFunction, (tuple of allowed parents))
#
handlers = {
SDT4Parser.actionTag : (handleAction, (SDT4ModuleClass,)),
SDT4Parser.argTag : (handleArg, (SDT4Action,)),
SDT4Parser.arrayTypeTag : (handleArrayType, (SDT4DataType,)),
SDT4Parser.bTag : (handleB, (SDT4Doc, SDT4DocP)),
SDT4Parser.constraintTag : (handleConstraint, (SDT4DataType,)),
SDT4Parser.dataPointTag : (handleDataPoint, (SDT4Event, SDT4ModuleClass)),
SDT4Parser.dataTypeTag : (handleDataType, (SDT4Action, SDT4DataPoint, SDT4Event, SDT4Arg, SDT4StructType, SDT4ArrayType, SDT4Domain)),
SDT4Parser.deviceClassTag : (handleDeviceClass, (SDT4Domain,)),
SDT4Parser.docTag : (handleDoc, (SDT4Domain, SDT4ProductClass, SDT4DeviceClass, SDT4SubDevice, SDT4DataType, SDT4ModuleClass, SDT4Action, SDT4DataPoint, SDT4Event, SDT4EnumValue, SDT4Arg, SDT4Constraint, SDT4Property)),
SDT4Parser.domainTag : (handleDomain, None),
SDT4Parser.emTag : (handleEM, (SDT4Doc, SDT4DocP)),
SDT4Parser.enumTypeTag : (handleEnumType, (SDT4DataType,)),
SDT4Parser.enumValueTag : (handleEnumValue, (SDT4EnumType,)),
SDT4Parser.eventTag : (handleEvent, (SDT4ModuleClass,)),
SDT4Parser.excludeTag : (handleExtendExclude, (SDT4Extend,)),
SDT4Parser.extendTag : (handleExtend, (SDT4ModuleClass, SDT4DataType, SDT4ProductClass, SDT4SubDevice)),
SDT4Parser.imgTag : (handleImg, (SDT4Doc, SDT4DocP)),
SDT4Parser.imgCaptionTag : (handleImgCaption, (SDT4DocIMG,)),
SDT4Parser.includeTag : (handleInclude, (SDT4Domain, SDT4Extend)),
SDT4Parser.moduleClassTag : (handleModuleClass, (SDT4Domain, SDT4ProductClass, SDT4DeviceClass, SDT4SubDevice, SDT4ProductClass)),
SDT4Parser.pTag : (handleP, (SDT4Doc, SDT4DocP)),
SDT4Parser.productClassTag : (handleProductClass, (SDT4Domain,)),
SDT4Parser.propertyTag : (handleProperty, (SDT4ProductClass, SDT4DeviceClass, SDT4SubDevice, SDT4ModuleClass)),
SDT4Parser.simpleTypeTag : (handleSimpleType, (SDT4DataType, SDT4Property)),
SDT4Parser.structTypeTag : (handleStructType, (SDT4DataType,)),
SDT4Parser.subDeviceTag : (handleSubDevice, (SDT4DeviceClass, SDT4ProductClass, SDT4Domain)),
SDT4Parser.ttTag : (handleTT, (SDT4Doc, SDT4DocP))
}
| 31.376682
| 223
| 0.736101
|
# SDT4Parser.py
#
# Callback target class for the ElementTree parser to parse an SDT4
from .SDT4Classes import *
class SDT4Parser:
# Define the element tags of the SDT4
actionTag = 'action'
actionsTag = 'actions'
argTag = 'arg'
argsTag = 'args'
arrayTypeTag = 'array'
constraintTag = 'constraint'
constraintsTag = 'constraints'
dataPointTag = 'datapoint'
dataTag = 'data'
dataTypeTag = 'datatype'
dataTypesTag = 'datatypes'
deviceClassTag = 'deviceclass'
deviceClassesTag = 'deviceclasses'
domainTag = 'domain'
enumTypeTag = 'enum'
enumValueTag = 'enumvalue'
eventTag = 'event'
eventsTag = 'events'
excludeTag = 'exclude'
extendDeviceTag = 'extenddevice'
extendTag = 'extend'
importsTag = 'imports'
includeTag = 'include'
moduleClassTag = 'moduleclass'
moduleClassesTag = 'moduleclasses'
productClassTag = 'productclass'
productClassesTag = 'productclasses'
propertiesTag = 'properties'
propertyTag = 'property'
simpleTypeTag = 'simple'
structTypeTag = 'struct'
subDeviceTag = 'subdevice'
subDevicesTag = 'subdevices'
# Document tags
docTag = 'doc'
ttTag = 'tt'
emTag = 'em'
bTag = 'b'
pTag = 'p'
imgTag = 'img'
imgCaptionTag = 'caption'
def __init__(self):
self.elementStack = []
self.nameSpaces = []
self.domain = None
def start(self, tag, attrib):
# First add the name space to the list of used name spaces
uri, ignore, otag = tag[1:].partition("}")
if uri not in self.nameSpaces:
self.nameSpaces.append(uri)
ntag = otag.lower()
# Check non-emptiness of attributes
for at in attrib:
if len(attrib[at].strip()) == 0:
raise SyntaxError('empty attribute: ' + at + ' for element ' + tag)
# Handle all elements
# The lastElem always contains the last element on the stack and is
# used transparently in the code below.
lastElem = self.elementStack[-1] if len(self.elementStack) > 0 else None
# Call the handler function for that element tag.
# First, check whether this is allowed for the current parent, or raise an exception
if ntag in handlers:
(func, instances) = handlers[ntag]
if instances is None or isinstance(lastElem, instances):
func(attrib, lastElem, self.elementStack)
else:
raise SyntaxError('%s definition is only allowed in %s elements' % (otag, [v._name for v in instances]))
# Other tags to ignore / just containers
elif ntag in (SDT4Parser.actionsTag,
SDT4Parser.argsTag,
SDT4Parser.constraintsTag,
SDT4Parser.dataTag,
SDT4Parser.dataTypesTag,
SDT4Parser.deviceClassesTag,
SDT4Parser.eventsTag,
SDT4Parser.extendDeviceTag,
SDT4Parser.importsTag,
SDT4Parser.moduleClassesTag,
SDT4Parser.productClassesTag,
SDT4Parser.propertiesTag,
SDT4Parser.subDevicesTag):
pass
# Encountered an unknown element
else:
raise SyntaxError('Unknown Element: %s %s' % (tag, attrib))
def end(self, tag):
uri, ignore, ntag = tag[1:].partition("}")
ntag = ntag.lower()
if ntag == SDT4Parser.domainTag:
self.domain = self.elementStack.pop() # Assign the domain to the parser as result
elif ntag in (SDT4Parser.actionTag,
SDT4Parser.argTag,
SDT4Parser.arrayTypeTag,
SDT4Parser.bTag,
SDT4Parser.constraintTag,
SDT4Parser.eventTag,
SDT4Parser.deviceClassTag,
SDT4Parser.dataPointTag,
SDT4Parser.dataTypeTag,
SDT4Parser.docTag,
SDT4Parser.emTag,
SDT4Parser.enumTypeTag,
SDT4Parser.enumValueTag,
SDT4Parser.extendTag,
SDT4Parser.imgTag,
SDT4Parser.imgCaptionTag,
SDT4Parser.moduleClassTag,
SDT4Parser.pTag,
SDT4Parser.productClassTag,
SDT4Parser.propertyTag,
SDT4Parser.simpleTypeTag,
SDT4Parser.structTypeTag,
SDT4Parser.subDeviceTag,
SDT4Parser.ttTag):
obj = self.elementStack.pop()
obj.endElement()
else:
# ignore others
pass
def data(self, data):
if len(self.elementStack) < 1:
return
if isinstance(self.elementStack[-1], SDT4Doc):
obj = self.elementStack[-1]
obj.addContent(' ' + ' '.join(data.split()))
elif isinstance(self.elementStack[-1], (SDT4DocTT, SDT4DocEM, SDT4DocB, SDT4DocP, SDT4DocIMG, SDT4DocCaption)):
obj = self.elementStack[-1]
obj.addContent(' '.join(data.split()))
def close(self): # ignore end of file
pass
def comment(self, data): # ignore comments
pass
def getAttribute(attrib, attribName):
return attrib[attribName].strip() if attribName in attrib else None
#
# Handler for each of the element types
#
def handleAction(attrib, lastElem, elementStack):
action = SDT4Action()
action.name = getAttribute(attrib, 'name')
action.optional = getAttribute(attrib, 'optional')
action.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.actions.append(action)
elementStack.append(action)
def handleArg(attrib, lastElem, elementStack):
arg = SDT4Arg()
arg.name = getAttribute(attrib, 'name')
arg.optional = getAttribute(attrib, 'optional')
arg.default = getAttribute(attrib, 'default')
arg.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.args.append(arg)
elementStack.append(arg)
def handleArrayType(attrib, lastElem, elementStack):
arrayType = SDT4ArrayType()
lastElem.type = arrayType
elementStack.append(arrayType)
def handleB(attrib, lastElem, elementStack):
b = SDT4DocB()
b.doc = lastElem.doc
elementStack.append(b)
def handleConstraint(attrib, lastElem, elementStack):
constraint = SDT4Constraint()
constraint.name = getAttribute(attrib, 'name')
constraint.type = getAttribute(attrib, 'type')
constraint.value = getAttribute(attrib, 'value')
constraint.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.constraints.append(constraint)
elementStack.append(constraint)
def handleDataPoint(attrib, lastElem, elementStack):
dataPoint = SDT4DataPoint()
dataPoint.name = getAttribute(attrib, 'name')
dataPoint.optional = getAttribute(attrib, 'optional')
dataPoint.writable = getAttribute(attrib, 'writable')
dataPoint.readable = getAttribute(attrib, 'readable')
dataPoint.eventable = getAttribute(attrib, 'eventable')
dataPoint.default = getAttribute(attrib, 'default')
dataPoint.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.data.append(dataPoint)
elementStack.append(dataPoint)
def handleDataType(attrib, lastElem, elementStack):
dataType = SDT4DataType()
dataType.name = getAttribute(attrib, 'name')
dataType.unitOfMeasure = getAttribute(attrib, 'unitOfMeasure')
dataType.semanticURI = getAttribute(attrib, 'semanticURI')
if isinstance(lastElem, SDT4ArrayType):
lastElem.arrayType = dataType
elif isinstance(lastElem, SDT4StructType):
lastElem.structElements.append(dataType)
elif isinstance(lastElem, SDT4Domain): # DataTypes in Domain
lastElem.dataTypes.append(dataType)
else:
lastElem.type = dataType
elementStack.append(dataType)
def handleDeviceClass(attrib, lastElem, elementStack):
device = SDT4DeviceClass()
device.id = getAttribute(attrib, 'id')
device.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.deviceClasses.append(device)
elementStack.append(device)
def handleDoc(attrib, lastElem, elementStack):
doc = SDT4Doc()
lastElem.doc = doc
elementStack.append(doc)
def handleDomain(attrib, lastElem, elementStack):
domain = SDT4Domain()
domain.id = getAttribute(attrib, 'id')
domain.semanticURI = getAttribute(attrib, 'semanticURI')
elementStack.append(domain)
def handleEM(attrib, lastElem, elementStack):
em = SDT4DocEM()
em.doc = lastElem.doc
elementStack.append(em)
def handleEnumType(attrib, lastElem, elementStack):
enumType = SDT4EnumType()
lastElem.type = enumType
elementStack.append(enumType)
def handleEnumValue(attrib, lastElem, elementStack):
value = SDT4EnumValue()
value.name = getAttribute(attrib, 'name')
value.value = getAttribute(attrib, 'value')
value.type = getAttribute(attrib, 'type')
value.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.enumValues.append(value)
elementStack.append(value)
def handleEvent(attrib, lastElem, elementStack):
event = SDT4Event()
event.name = getAttribute(attrib, 'name')
event.optional = getAttribute(attrib, 'optional')
event.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.events.append(event)
elementStack.append(event)
def handleExtendExclude(attrib, lastElem, elementStack):
exclude = SDT4ExtendExclude()
exclude.name = getAttribute(attrib, 'name')
exclude.type = getAttribute(attrib, 'type')
lastElem.excludes.append(exclude)
def handleExtend(attrib, lastElem, elementStack):
extend = SDT4Extend()
extend.domain = getAttribute(attrib, 'domain')
extend.entity = getAttribute(attrib, 'entity')
if isinstance(lastElem, SDT4ProductClass): # for ProductClass
lastElem.extendDevice = extend
else: # normal extend
lastElem.extend = extend
elementStack.append(extend)
def handleImg(attrib, lastElem, elementStack):
img = SDT4DocIMG()
img.doc = lastElem.doc
img.startImage(getAttribute(attrib, 'src'))
elementStack.append(img)
def handleImgCaption(attrib, lastElem, elementStack):
caption = SDT4DocCaption()
caption.doc = lastElem.doc
elementStack.append(caption)
def handleInclude(attrib, lastElem, elementStack):
# Unfortunately, there are two "include" element types to handle
if isinstance(lastElem, SDT4Extend):
include = SDT4ExtendInclude()
include.name = getAttribute(attrib, 'name')
include.type = getAttribute(attrib, 'type')
lastElem.excludes.append(include)
else:
include = SDT4Include()
include.parse = getAttribute(attrib, 'parse')
include.href = getAttribute(attrib, 'href')
lastElem.includes.append(include)
def handleModuleClass(attrib, lastElem, elementStack):
mc = SDT4ModuleClass()
mc.name = getAttribute(attrib, 'name')
mc.semanticURI = getAttribute(attrib, 'semanticURI')
mc.minOccurs = getAttribute(attrib, 'minOccurs')
mc.maxOccurs = getAttribute(attrib, 'maxOccurs')
lastElem.moduleClasses.append(mc)
elementStack.append(mc)
def handleP(attrib, lastElem, elementStack):
p = SDT4DocP()
p.doc = lastElem.doc
p.startParagraph()
elementStack.append(p)
def handleProductClass(attrib, lastElem, elementStack):
product = SDT4ProductClass()
product.id = getAttribute(attrib, 'name')
product.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.productClasses.append(product)
elementStack.append(product)
def handleProperty(attrib, lastElem, elementStack):
prop = SDT4Property()
prop.name = getAttribute(attrib, 'name')
prop.optional = getAttribute(attrib, 'optional')
prop.value = getAttribute(attrib, 'value')
prop.semanticURI = getAttribute(attrib, 'semanticURI')
lastElem.properties.append(prop)
elementStack.append(prop)
def handleSimpleType(attrib, lastElem, elementStack):
simpleType = SDT4SimpleType()
simpleType.type = getAttribute(attrib, 'type')
lastElem.type = simpleType
elementStack.append(simpleType)
def handleStructType(attrib, lastElem, elementStack):
structType = SDT4StructType()
lastElem.type = structType
elementStack.append(structType)
def handleSubDevice(attrib, lastElem, elementStack):
subDevice = SDT4SubDevice()
subDevice.id = getAttribute(attrib, 'id')
subDevice.semanticURI = getAttribute(attrib, 'semanticURI')
subDevice.minOccurs = getAttribute(attrib, 'minOccurs')
subDevice.maxOccurs = getAttribute(attrib, 'maxOccurs')
lastElem.subDevices.append(subDevice)
elementStack.append(subDevice)
def handleTT(attrib, lastElem, elementStack):
tt = SDT4DocTT()
tt.doc = lastElem.doc
elementStack.append(tt)
#
# Assignment of element types and (handlerFunction, (tuple of allowed parents))
#
handlers = {
SDT4Parser.actionTag : (handleAction, (SDT4ModuleClass,)),
SDT4Parser.argTag : (handleArg, (SDT4Action,)),
SDT4Parser.arrayTypeTag : (handleArrayType, (SDT4DataType,)),
SDT4Parser.bTag : (handleB, (SDT4Doc, SDT4DocP)),
SDT4Parser.constraintTag : (handleConstraint, (SDT4DataType,)),
SDT4Parser.dataPointTag : (handleDataPoint, (SDT4Event, SDT4ModuleClass)),
SDT4Parser.dataTypeTag : (handleDataType, (SDT4Action, SDT4DataPoint, SDT4Event, SDT4Arg, SDT4StructType, SDT4ArrayType, SDT4Domain)),
SDT4Parser.deviceClassTag : (handleDeviceClass, (SDT4Domain,)),
SDT4Parser.docTag : (handleDoc, (SDT4Domain, SDT4ProductClass, SDT4DeviceClass, SDT4SubDevice, SDT4DataType, SDT4ModuleClass, SDT4Action, SDT4DataPoint, SDT4Event, SDT4EnumValue, SDT4Arg, SDT4Constraint, SDT4Property)),
SDT4Parser.domainTag : (handleDomain, None),
SDT4Parser.emTag : (handleEM, (SDT4Doc, SDT4DocP)),
SDT4Parser.enumTypeTag : (handleEnumType, (SDT4DataType,)),
SDT4Parser.enumValueTag : (handleEnumValue, (SDT4EnumType,)),
SDT4Parser.eventTag : (handleEvent, (SDT4ModuleClass,)),
SDT4Parser.excludeTag : (handleExtendExclude, (SDT4Extend,)),
SDT4Parser.extendTag : (handleExtend, (SDT4ModuleClass, SDT4DataType, SDT4ProductClass, SDT4SubDevice)),
SDT4Parser.imgTag : (handleImg, (SDT4Doc, SDT4DocP)),
SDT4Parser.imgCaptionTag : (handleImgCaption, (SDT4DocIMG,)),
SDT4Parser.includeTag : (handleInclude, (SDT4Domain, SDT4Extend)),
SDT4Parser.moduleClassTag : (handleModuleClass, (SDT4Domain, SDT4ProductClass, SDT4DeviceClass, SDT4SubDevice, SDT4ProductClass)),
SDT4Parser.pTag : (handleP, (SDT4Doc, SDT4DocP)),
SDT4Parser.productClassTag : (handleProductClass, (SDT4Domain,)),
SDT4Parser.propertyTag : (handleProperty, (SDT4ProductClass, SDT4DeviceClass, SDT4SubDevice, SDT4ModuleClass)),
SDT4Parser.simpleTypeTag : (handleSimpleType, (SDT4DataType, SDT4Property)),
SDT4Parser.structTypeTag : (handleStructType, (SDT4DataType,)),
SDT4Parser.subDeviceTag : (handleSubDevice, (SDT4DeviceClass, SDT4ProductClass, SDT4Domain)),
SDT4Parser.ttTag : (handleTT, (SDT4Doc, SDT4DocP))
}
| 0
| 0
| 0
| 4,476
| 0
| 6,438
| 0
| 5
| 690
|
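`SDT4Parser` above implements the `start`/`end`/`data`/`close` target interface of `xml.etree.ElementTree`, and the `handlers` table both dispatches each element and enforces which parents it may appear under. A minimal driver sketch, assuming an SDT4 XML file named `sdt4.xml` (the SDTTool code that normally wires this up is not part of this row):

```python
import xml.etree.ElementTree as ET
from sdtv4.SDT4Parser import SDT4Parser

target = SDT4Parser()
parser = ET.XMLParser(target=target)     # SAX-style events are routed to start()/end()/data()
with open('sdt4.xml', 'rb') as f:
    parser.feed(f.read())
parser.close()                           # flushes the parser; SDT4Parser.close() ignores end of file
domain = target.domain                   # set in end() when the closing Domain tag is seen
print(domain, target.nameSpaces)
```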
de130291f9918c171aaa53459b784a3e93f4b849
| 1,698
|
py
|
Python
|
tools/validator.py
|
adacker10/showdown
|
8ceb1ff46d5c33ec3055928d6ad293224446f63c
|
[
"MIT"
] | 8
|
2019-02-02T01:15:57.000Z
|
2021-12-23T04:43:46.000Z
|
tools/validator.py
|
adacker10/showdown
|
8ceb1ff46d5c33ec3055928d6ad293224446f63c
|
[
"MIT"
] | null | null | null |
tools/validator.py
|
adacker10/showdown
|
8ceb1ff46d5c33ec3055928d6ad293224446f63c
|
[
"MIT"
] | 6
|
2020-09-11T13:15:05.000Z
|
2022-03-18T15:46:35.000Z
|
from data import dex
import re
def validate_team(team):
'''
team is an array of six pokemon sets
'''
if len(team) > 6:
raise InValidSetError("more than 6 pokemon")
pokemon_names = set()
for pokemon in team:
# check if the pokemon is an actual pokemon
species = re.sub(r'\W+', '', pokemon['species'].lower())
pokemon_names.add(species)
if species not in dex.pokedex:
raise InValidSetError(species + " is not a real pokemon species")
if len(pokemon['moves']) > 4:
raise InValidSetError("more than 4 moves")
for move in pokemon['moves']:
if move not in dex.simple_learnsets[species]:
raise InValidSetError(species + " can't learn the move " + move)
if pokemon['ability'] not in [re.sub(r'\W+', '', ability.lower()) for ability in list(filter(None.__ne__, list(dex.pokedex[species].abilities)))]:
raise InValidSetError(species + " cant have the ability, " + pokemon['ability'])
for i in range(6):
if pokemon['evs'][i] > 255 or pokemon['evs'][i] < 0:
raise InVaidSetError("ev value is out of range: " + str(pokemon['evs'][i]))
if pokemon['ivs'][i] > 31 or pokemon['ivs'][i] < 0:
raise InVaidSetError("iv value is out of range: " + str(pokemon['ivs'][i]))
if sum(pokemon['evs']) > 510:
raise InValidSetError("sum of evs is over 510")
if len(team) != len(pokemon_names):
raise InValidSetError("cannot have multiple of the same pokemon")
return True
| 40.428571
| 154
| 0.602473
|
from data import dex
import re
class InValidSetError(Exception):
def __init__(self, message):
self.message = message
def validate_team(team):
'''
team is an array of six pokemon sets
'''
if len(team) > 6:
raise InValidSetError("more than 6 pokemon")
pokemon_names = set()
for pokemon in team:
# check if the pokemon is an actual pokemon
species = re.sub(r'\W+', '', pokemon['species'].lower())
pokemon_names.add(species)
if species not in dex.pokedex:
raise InValidSetError(species + " is not a real pokemon species")
if len(pokemon['moves']) > 4:
raise InValidSetError("more than 4 moves")
for move in pokemon['moves']:
if move not in dex.simple_learnsets[species]:
raise InValidSetError(species + " can't learn the move " + move)
if pokemon['ability'] not in [re.sub(r'\W+', '', ability.lower()) for ability in list(filter(None.__ne__, list(dex.pokedex[species].abilities)))]:
raise InValidSetError(species + " cant have the ability, " + pokemon['ability'])
for i in range(6):
if pokemon['evs'][i] > 255 or pokemon['evs'][i] < 0:
raise InVaidSetError("ev value is out of range: " + str(pokemon['evs'][i]))
if pokemon['ivs'][i] > 31 or pokemon['ivs'][i] < 0:
raise InVaidSetError("iv value is out of range: " + str(pokemon['ivs'][i]))
if sum(pokemon['evs']) > 510:
raise InValidSetError("sum of evs is over 510")
if len(team) != len(pokemon_names):
raise InValidSetError("cannot have multiple of the same pokemon")
return True
| 0
| 0
| 0
| 76
| 0
| 0
| 0
| 0
| 23
|
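A usage sketch for `validate_team`, assuming the set layout the code reads (keys `species`, `moves`, `ability`, `evs`, `ivs`); the species, move, and ability names below are only illustrative and would have to exist in `data.dex` for the call to pass:

```python
from tools.validator import validate_team, InValidSetError   # import path assumed from the file layout

team = [{
    'species': 'Pikachu',
    'moves': ['thunderbolt', 'quickattack'],       # at most 4, each must be in dex.simple_learnsets
    'ability': 'static',                           # must match one of the species' abilities
    'evs': [0, 0, 0, 252, 4, 252],                 # six values, each 0-255, total <= 510
    'ivs': [31, 31, 31, 31, 31, 31],               # six values, each 0-31
}]

try:
    validate_team(team)
    print('team is legal')
except InValidSetError as err:
    print('rejected:', err.message)
```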
bceb90c866742318115d3897625ab3cd17dad9ae
| 1,782
|
py
|
Python
|
abfs/group_data_split.py
|
rcdilorenzo/abfs
|
a897d00a4589a9412a9b9e737f8db91df008fc26
|
[
"MIT"
] | 7
|
2019-03-13T17:22:50.000Z
|
2022-01-09T09:03:16.000Z
|
abfs/group_data_split.py
|
rcdilorenzo/abfs
|
a897d00a4589a9412a9b9e737f8db91df008fc26
|
[
"MIT"
] | 1
|
2019-08-01T23:42:09.000Z
|
2019-08-02T16:14:31.000Z
|
abfs/group_data_split.py
|
rcdilorenzo/abfs
|
a897d00a4589a9412a9b9e737f8db91df008fc26
|
[
"MIT"
] | 2
|
2020-09-12T06:33:16.000Z
|
2021-01-01T01:05:48.000Z
|
from collections import namedtuple as Struct
DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])
DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)
| 30.724138
| 92
| 0.640292
|
from collections import namedtuple as Struct
from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit
DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])
DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)
class GroupDataSplit():
def __init__(self, df, key, config=DEFAULT_SPLIT_CONFIG):
self.config = config
self.key = key
self._df = df
self._split_data()
@property
def total(self):
"""Total records in the data frame"""
return len(self._df)
def train_df(self):
"""Randomized train data frame"""
return self._train_df.sample(frac=1).reset_index(drop=True)
@property
def val_df(self):
"""Validation data frame"""
return self._val_df
@property
def test_df(self):
"""Test data frame"""
return self._test_df
@property
def test_split(self):
return GroupShuffleSplit(test_size=self.config.test_size,
random_state=self.config.random_seed).split
@property
def val_split(self):
val_size = self.config.validation_size / (1 - self.config.test_size)
return GroupShuffleSplit(test_size=val_size,
random_state=self.config.random_seed).split
def _split_data(self):
rem_indices, test_indices = next(
self.test_split(self._df, groups=self._df[self.key])
)
rem_df = self._df.iloc[rem_indices]
train_indices, val_indices = next(
self.val_split(rem_df, groups=rem_df[self.key])
)
self._test_df = self._df.iloc[test_indices]
self._val_df = rem_df.iloc[val_indices]
self._train_df = rem_df.iloc[train_indices]
| 0
| 605
| 0
| 891
| 0
| 0
| 0
| 46
| 45
|
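A usage sketch for `GroupDataSplit`, assuming a pandas DataFrame with a grouping column (here a made-up `image_id` key). Because both splits use `GroupShuffleSplit`, rows that share a key value always land in the same partition:

```python
import pandas as pd
from abfs.group_data_split import GroupDataSplit, DataSplitConfig

df = pd.DataFrame({
    'image_id': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5],    # five groups of two rows each
    'value': range(10),
})

split = GroupDataSplit(df, key='image_id', config=DataSplitConfig(0.2, 0.2, 42))
train = split.train_df()                 # method: reshuffled on every call
val, test = split.val_df, split.test_df  # properties: fixed once split
assert len(train) + len(val) + len(test) == split.total
```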
b1412972007124b927dd10c01b84ccee4179e203
| 656
|
py
|
Python
|
data_extract_code/sex-ratio.py
|
jaya-shankar/Human-Development-Prediction
|
cdc7f2186c49db3506267573b05da6ba03cd5bfd
|
[
"Unlicense"
] | null | null | null |
data_extract_code/sex-ratio.py
|
jaya-shankar/Human-Development-Prediction
|
cdc7f2186c49db3506267573b05da6ba03cd5bfd
|
[
"Unlicense"
] | null | null | null |
data_extract_code/sex-ratio.py
|
jaya-shankar/Human-Development-Prediction
|
cdc7f2186c49db3506267573b05da6ba03cd5bfd
|
[
"Unlicense"
] | 2
|
2021-11-01T15:48:16.000Z
|
2021-12-28T07:48:35.000Z
|
import csv
file = open("sex-ratio.csv")
csvreader = csv.reader(file)
header = next(csvreader)
mapped = {}
for row in csvreader:
if(row[0] not in mapped):
mapped[row[0]]={}
mapped[row[0]][row[2]] = row[3]
# f = open("converted.csv",'w')
rows=[]
for c in mapped:
row = [c]
for y in mapped[c]:
row.append(mapped[c][y])
rows.append(row)
header =['country']
for i in range(1950,2018):
header.append(str(i))
with open('converted.csv', 'w', encoding='UTF8') as f:
writer = csv.writer(f)
# write the header
writer.writerow(header)
# write the data
for row in rows:
writer.writerow(row)
| 18.222222
| 54
| 0.599085
|
import csv
file = open("sex-ratio.csv")
csvreader = csv.reader(file)
header = next(csvreader)
mapped = {}
for row in csvreader:
if(row[0] not in mapped):
mapped[row[0]]={}
mapped[row[0]][row[2]] = row[3]
# f = open("converted.csv",'w')
rows=[]
for c in mapped:
row = [c]
for y in mapped[c]:
row.append(mapped[c][y])
rows.append(row)
header =['country']
for i in range(1950,2018):
header.append(str(i))
with open('converted.csv', 'w', encoding='UTF8') as f:
writer = csv.writer(f)
# write the header
writer.writerow(header)
# write the data
for row in rows:
writer.writerow(row)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
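The script above reshapes a long-format CSV (country in column 0, year in column 2, value in column 3) into one row per country with a column per year. The same reshape as a pandas sketch, assuming one value per country/year pair (not part of the original repository):

```python
import pandas as pd

df = pd.read_csv('sex-ratio.csv')
cols = list(df.columns)
wide = df.pivot(index=cols[0], columns=cols[2], values=cols[3])   # country rows, year columns
wide.to_csv('converted.csv')
```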
31f4a614a97ec199afd65d00866fe66229803029
| 525
|
py
|
Python
|
lib/flask_api/compat.py
|
imtiaz-emu/gcp-flask-test
|
096f466242aa14941712ab8ea06ac4fb4eaeb993
|
[
"Apache-2.0"
] | null | null | null |
lib/flask_api/compat.py
|
imtiaz-emu/gcp-flask-test
|
096f466242aa14941712ab8ea06ac4fb4eaeb993
|
[
"Apache-2.0"
] | null | null | null |
lib/flask_api/compat.py
|
imtiaz-emu/gcp-flask-test
|
096f466242aa14941712ab8ea06ac4fb4eaeb993
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ['headerid(level=2)']
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
| 25
| 77
| 0.645714
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ['headerid(level=2)']
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
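`apply_markdown` above is only defined when the optional `markdown` package imports cleanly, so callers are expected to test it for `None` first. A brief usage sketch, assuming the package is importable as `flask_api`:

```python
from flask_api.compat import apply_markdown

description = "# Endpoint\nReturns a greeting."
if apply_markdown is not None:           # markdown is an optional dependency
    html = apply_markdown(description)   # '#' headers render as <h2> because of headerid(level=2)
else:
    html = description                   # fall back to the raw text
```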
6bff53885f7d7817bb5989d427b6919245c38e01
| 4,969
|
py
|
Python
|
src/cfdpy/derivative/finiteDifferenceMethods.py
|
mkihara/cfdpy
|
53945ddd87f810e65d4fffe3b68f6bf8c06098c2
|
[
"MIT"
] | 1
|
2022-03-28T03:07:26.000Z
|
2022-03-28T03:07:26.000Z
|
src/cfdpy/derivative/finiteDifferenceMethods.py
|
mkihara/cfdpy
|
53945ddd87f810e65d4fffe3b68f6bf8c06098c2
|
[
"MIT"
] | null | null | null |
src/cfdpy/derivative/finiteDifferenceMethods.py
|
mkihara/cfdpy
|
53945ddd87f810e65d4fffe3b68f6bf8c06098c2
|
[
"MIT"
] | null | null | null |
"""Finite Difference Methods
"""
import numpy as np
def FDMWeights(M, x0, alpha):
"""Calculate the weights in finite difference formulas
for any order of derivative and to any order of accuracy
on one-dimensional grids with arbitrary spacing.
Args:
M (int): Order of derivative
x0 (float): Approximations at this point
alpha (np.array): x-coordinates. Length must be N+1.
Attributes:
N (int): Order of accuracy, which is equivalent to len(alpha)-1.
Returns:
np.array: Weights
References:
Bengt Fornberg, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", 1988.
"""
N = len(alpha) - 1
delta = np.zeros([M+1,N+1,N+1])
delta[0,0,0] = 1.
c1 = 1.
for n in range(1, N+1):
c2 = 1.
for nu in range(n):
c3 = alpha[n] - alpha[nu]
c2 *= c3
for m in range(min(n, M)+1):
delta[m,n,nu] = ((alpha[n]-x0)*delta[m,n-1,nu] - m*delta[m-1,n-1,nu]) / c3
for m in range(min(n, M)+1):
delta[m,n,n] = c1/c2 * (m*delta[m-1,n-1,n-1] - (alpha[n-1]-x0)*delta[m,n-1,n-1])
c1 = c2
return delta
| 38.51938
| 105
| 0.603944
|
"""Finite Difference Methods
"""
import numpy as np
def FDMWeights(M, x0, alpha):
"""Calculate the weights in finite difference formulas
for any order of derivative and to any order of accuracy
on one-dimensional grids with arbitrary spacing.
Args:
M (int): Order of derivative
x0 (float): Approximations at this point
alpha (np.array): x-coordinates. Length must be N+1.
Attributes:
N (int): Order of accuracy, which is equivalent to len(alpha)-1.
Returns:
np.array: Weights
References:
Bengt Fornberg, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", 1988.
"""
N = len(alpha) - 1
delta = np.zeros([M+1,N+1,N+1])
delta[0,0,0] = 1.
c1 = 1.
for n in range(1, N+1):
c2 = 1.
for nu in range(n):
c3 = alpha[n] - alpha[nu]
c2 *= c3
for m in range(min(n, M)+1):
delta[m,n,nu] = ((alpha[n]-x0)*delta[m,n-1,nu] - m*delta[m-1,n-1,nu]) / c3
for m in range(min(n, M)+1):
delta[m,n,n] = c1/c2 * (m*delta[m-1,n-1,n-1] - (alpha[n-1]-x0)*delta[m,n-1,n-1])
c1 = c2
return delta
class centralFDM(object):
"""Central Finite Difference Method
Args:
order (int, optional): The order of the accuracy. Defaults to 2.
highestDerivative (int, optional): The order of the highest derivative. Defaults to 1.
"""
def __init__(self, order:int=2, highestDerivative=1):
assert (order % 2) == 0, "order must be even number."
assert order > 0, "order must be greater than 0."
assert highestDerivative > 0, "highestDerivative must be greater than 0."
self.order = order
self.highestDerivative = highestDerivative
self.nGridPoints = ((self.highestDerivative + 1) // 2) * 2 - 1 + self.order
self.set_alpha()
self.weight = FDMWeights(M=self.highestDerivative, x0=0, alpha=self.alpha)[:,self.order]
def __call__(self, f, axis=-1, derivative=1, h=1.):
"""Calculate the derivative.
Args:
f (np.array): An array containing samples.
axis (int, optional): The derivative is calculated only along the given axis. Defaults to -1.
derivative (int, optional): The order of the derivative. Defaults to 1.
h (float, optional): The space of the uniform grid. Defaults to 1..
Returns:
np.array: The derivative.
"""
df = np.zeros_like(f)
weight_ = self.weight[derivative]
alpha_ = self.alpha[weight_!=0]
weight_ = weight_[weight_!=0]
for i, alpha_i in enumerate(alpha_):
df += np.roll(f, shift=-int(alpha_i), axis=axis) * weight_[i]
return df / h**derivative
def set_alpha(self):
alpha_ = np.arange(self.nGridPoints, dtype=float)
alpha_ = self.__infiniteSeries(alpha_)
self.alpha = np.cumsum(alpha_)
def __infiniteSeries(self, n):
return n * (-1)**(n-1)
class upwindFDM(object):
"""Upwind Finite Difference Method
Args:
order (int, optional): The order of the accuracy. Defaults to 1.
highestDerivative (int, optional): The order of the highest derivative. Defaults to 1.
"""
def __init__(self, order:int=1, highestDerivative:int=1):
assert order > 0, "order must be greater than 0."
assert highestDerivative > 0, "highestDerivative must be greater than 0."
self.order = order
self.highestDerivative = highestDerivative
self.nGridPoints = self.order+self.highestDerivative
self.start = - (self.nGridPoints) // 2
self.alpha = np.arange(start=self.start, stop=self.start+self.nGridPoints)
self.weight = FDMWeights(M=self.highestDerivative, x0=0., alpha=self.alpha)[:,self.order]
self.weight2 = FDMWeights(M=self.highestDerivative, x0=0., alpha=-self.alpha)[:,self.order]
def __call__(self, f, axis=-1, derivative=1, h=1., c=None):
"""Calculate the derivative.
Args:
f (np.array): An array containing samples.
axis (int, optional): The derivative is calculated only along the given axis. Defaults to -1.
derivative (int, optional): The order of the derivative. Defaults to 1.
h (float, optional): The space of the uniform grid. Defaults to 1..
c (float or np.array, optional): The advection speed. Defaults to None.
Returns:
np.array: The derivative.
"""
df = np.zeros_like(f)
df2 = np.zeros_like(f)
for i, alpha_i in enumerate(self.alpha):
df += np.roll(f, shift=-int(alpha_i), axis=axis) * self.weight[derivative,i]
df2 += np.roll(f, shift=int(alpha_i), axis=axis) * self.weight2[derivative,i]
if c is None:
c = f
df = np.where(c>=0, df, df2)
return df / h**derivative
| 0
| 0
| 0
| 3,725
| 0
| 0
| 0
| 0
| 46
|
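A quick check of `FDMWeights` against the textbook three-point central stencil. Reading off the recursion above, `delta[m, N]` (the last plane along the second axis) holds the weights for the m-th derivative using all N+1 points; this indexing is inferred from the algorithm, not documented in the repository, and the import path assumes the package's `src/` layout:

```python
import numpy as np
from cfdpy.derivative.finiteDifferenceMethods import FDMWeights

alpha = np.array([-1.0, 0.0, 1.0])           # three uniformly spaced points around x0 = 0
delta = FDMWeights(M=2, x0=0.0, alpha=alpha)

print(delta[1, -1])   # 1st-derivative weights: [-0.5  0.   0.5]
print(delta[2, -1])   # 2nd-derivative weights: [ 1.  -2.   1. ]
```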
ff72c44c7b3d8e713ec5a01de73d9db8358095b9
| 7,381
|
py
|
Python
|
revcar-cli.py
|
madkaye/revcar-cli
|
c49a0ae47b1545c81d53d4fb5ddccbc73203caa2
|
[
"MIT"
] | null | null | null |
revcar-cli.py
|
madkaye/revcar-cli
|
c49a0ae47b1545c81d53d4fb5ddccbc73203caa2
|
[
"MIT"
] | null | null | null |
revcar-cli.py
|
madkaye/revcar-cli
|
c49a0ae47b1545c81d53d4fb5ddccbc73203caa2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import curses
# Scan timeout in seconds
SCAN_TIMEOUT = 10
## screen parts
LINE_HEADING = 0
LINE_OPTIONS = 1
LINE_STATUS = 5
LINE_ERROR = 6
COL_START = 0
HEIGHT_TOP = 8
HEIGHT_BOT = 3
LOOP_DURATION = 0.05
DISPLAY_COUNT = 100
LINE_RECT = 30
RECT_HEIGHT = 12
RECT_WIDTH = 40
MSG_WELCOME = "Welcome to Carmageddon - in real life!\n"
MSG_OPTIONS = " [S] - start scanning...\t\t\t\t[Q] - Exit\n"
MSG_OPTIONS = MSG_OPTIONS + " [1...9] - Direct connect to device by number\t\t[D] - Disconnect \n"
MSG_DRIVE_HELP = "Use [Arrows] to drive, [SPACE] to Fire"
if __name__ == '__main__':
try:
screen = MainScreen()
except KeyboardInterrupt:
os.sys.exit(0)
# finally:
| 34.013825
| 122
| 0.562796
|
#!/usr/bin/env python3
import os
import time
import datetime
from bluepy import btle
from bluepy.btle import Scanner, DefaultDelegate, Peripheral, Characteristic, ScanEntry, Service, UUID
import curses
import curses.textpad
from carcontrol import CarControl
# Scan timeout in seconds
SCAN_TIMEOUT = 10
## screen parts
LINE_HEADING = 0
LINE_OPTIONS = 1
LINE_STATUS = 5
LINE_ERROR = 6
COL_START = 0
HEIGHT_TOP = 8
HEIGHT_BOT = 3
LOOP_DURATION = 0.05
DISPLAY_COUNT = 100
LINE_RECT = 30
RECT_HEIGHT = 12
RECT_WIDTH = 40
MSG_WELCOME = "Welcome to Carmageddon - in real life!\n"
MSG_OPTIONS = " [S] - start scanning...\t\t\t\t[Q] - Exit\n"
MSG_OPTIONS = MSG_OPTIONS + " [1...9] - Direct connect to device by number\t\t[D] - Disconnect \n"
MSG_DRIVE_HELP = "Use [Arrows] to drive, [SPACE] to Fire"
class MainScreen:
status = 0
lastmsg = None
lasterror = None
displaycounter = 0
car = CarControl()
def __init__(self):
curses.wrapper(self.mainloop)
def createmidwin(self):
win = curses.newwin(curses.LINES - (HEIGHT_TOP + HEIGHT_BOT), curses.COLS, HEIGHT_TOP, COL_START)
win.scrollok(True)
win.idlok(True)
win.addstr(LINE_HEADING, COL_START, "Information:", curses.A_BOLD)
win.move(0, 0)
win.refresh()
return win
def createbotwin(self):
win = curses.newwin(HEIGHT_BOT, curses.COLS, curses.LINES - HEIGHT_BOT, COL_START)
win.addstr(LINE_HEADING, COL_START, "Bot window", curses.A_BOLD)
win.move(0, 0)
win.refresh()
return win
def drawheadings(self, window):
window.addstr(LINE_HEADING, COL_START, MSG_WELCOME, curses.A_BOLD)
window.addstr(LINE_OPTIONS, COL_START, MSG_OPTIONS)
window.hline('_', curses.COLS)
window.refresh()
def resizescreen(self, midwin, botwin):
midwin.resize(curses.LINES - (HEIGHT_TOP + HEIGHT_BOT), curses.COLS)
botwin.mvwin(curses.LINES - HEIGHT_TOP - 1, COL_START)
def updatestatus(self, window, status=0, msg="", error=""):
self.status = status
self.lastmsg = msg
self.lasterror = error
if window is None:
return
statusmsg = "Status: {} - {}".format(self.status, self.lastmsg)
errmsg = "Error: {}".format(self.lasterror) if len(self.lasterror) > 0 else ""
window.move(LINE_STATUS, COL_START)
window.addstr(LINE_STATUS, COL_START, statusmsg)
window.clrtoeol()
window.move(LINE_ERROR, COL_START)
window.addstr(LINE_ERROR, COL_START, errmsg)
window.clrtoeol()
window.refresh()
def countdownstatus(self):
self.displaycounter = DISPLAY_COUNT
def checkstatus(self):
if self.displaycounter > 1 and self.status > 0:
self.displaycounter = self.displaycounter - 1
return False
elif self.displaycounter == 1 and self.status > 0:
self.status = 0
self.displaycounter = 0
return True
else:
return False
def detailline(self, window, msg=""):
window.clear()
window.move(0, COL_START)
window.addstr("{}".format(msg))
window.refresh()
window.move(0, COL_START)
def debugline(self, window, msg=""):
window.move(0, COL_START)
window.addstr("dbg: {}".format(msg))
window.clrtoeol()
window.refresh()
def mainloop(self, stdscr):
self.drawheadings(stdscr)
self.updatestatus(stdscr)
midwin = self.createmidwin()
botwin = self.createbotwin()
self.debugline(botwin)
stdscr.nodelay(True)
while True:
time.sleep(LOOP_DURATION)
if self.checkstatus():
self.updatestatus(stdscr)
inchar = stdscr.getch()
curses.flushinp()
# SCAN
if inchar == ord('s') or inchar == ord('S'):
self.updatestatus(stdscr, 1, "Scanning...")
self.detailline(midwin)
if self.car.scan(SCAN_TIMEOUT):
self.updatestatus(stdscr, 1, "Scan - Done, found {} devices".format(len(self.car.devices)))
else:
#self.updatestatus(stdscr, 1, "Scan - Error", "Could not initiate scanning")
self.updatestatus(stdscr, 1, "Scan - Error with scan, found {} devices".format(len(self.car.devices)))
#self.countdownstatus()
self.detailline(midwin, self.car.devicetext)
self.debugline(botwin, "{}".format(self.car))
# Connect
elif inchar >= ord('1') and inchar <= ord('9'):
devnum = inchar - ord('1') + 1
self.debugline(botwin, "Device #{}".format(devnum))
self.updatestatus(stdscr, 2, "Connecting to car #{}...".format(devnum))
if self.car.connect((devnum-1)):
self.updatestatus(stdscr, 2, "Connected to car #{} [{}]...".format(devnum, self.car.carName))
self.debugline(botwin, "Sending handshake...")
self.car.sendhandshake()
self.debugline(botwin, "Sending handshake, Done")
self.detailline(midwin, MSG_DRIVE_HELP)
else:
self.updatestatus(stdscr, 2, "No connection to car #{}...".format(devnum))
self.debugline(botwin, "{}".format(self.car))
# Disconnect
elif inchar == ord('d') or inchar == ord('D'):
self.updatestatus(stdscr, 3, "Disconnecting...")
if self.car.disconnectcar():
self.updatestatus(stdscr, 2, "Disconnect, Done")
else:
self.updatestatus(stdscr, 2, "Unable to disconnect car")
self.detailline(midwin)
self.debugline(botwin, "{}".format(self.car))
# Quit
elif inchar == ord('q') or inchar == ord('Q'):
if self.car.isConnected: self.car.disconnectcar();
break
# Movement Actions
elif inchar == ord(' '):
if self.car.isConnected: self.car.carfiregun()
elif inchar == curses.KEY_UP:
if self.car.isConnected: self.car.carforward()
elif inchar == curses.KEY_DOWN:
if self.car.isConnected: self.car.carreverse()
elif inchar == curses.KEY_LEFT:
if self.car.isConnected: self.car.carleft()
elif inchar == curses.KEY_RIGHT:
if self.car.isConnected: self.car.carright()
elif inchar == curses.KEY_RESIZE:
curses.update_lines_cols()
self.resizescreen(midwin, botwin)
self.debugline(botwin, "resizing")
self.drawheadings(stdscr)
self.updatestatus(stdscr)
elif inchar == curses.ERR or inchar == -1:
continue
else:
continue
if __name__ == '__main__':
try:
screen = MainScreen()
except KeyboardInterrupt:
os.sys.exit(0)
# finally:
| 0
| 0
| 0
| 6,385
| 0
| 0
| 0
| 79
| 156
|
cbfa2caf1265110b8014de4c1cbc3f72c30c2833
| 2,736
|
py
|
Python
|
landwatch/model/unet.py
|
Lleyton-Ariton/landwatch
|
21e86e899d33d0ee349cf9bf87c6c13ebdab82fa
|
[
"MIT"
] | 1
|
2021-06-07T06:04:49.000Z
|
2021-06-07T06:04:49.000Z
|
landwatch/model/unet.py
|
Lleyton-Ariton/landwatch
|
21e86e899d33d0ee349cf9bf87c6c13ebdab82fa
|
[
"MIT"
] | null | null | null |
landwatch/model/unet.py
|
Lleyton-Ariton/landwatch
|
21e86e899d33d0ee349cf9bf87c6c13ebdab82fa
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
| 26.057143
| 88
| 0.574196
|
import math
import torch
import torch.nn as nn
class ConvBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_block = nn.Sequential(
nn.Conv2d(self.in_channels, self.out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(self.out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(self.out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.conv_block(x)
class DownScalingBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.downscaling_block = nn.Sequential(
nn.MaxPool2d(2),
ConvBlock(in_channels=in_channels,
out_channels=out_channels)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.downscaling_block(x)
class UNet(nn.Module):
def __init__(self, in_channels: int, out_channels: int, bilinear: bool=True):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.bilinear = bilinear
left_layers = [pow(2, i) for i in range(6, 11)]
self.left = nn.ModuleList([DownScalingBlock(self.in_channels, 64)])
self.right = nn.ModuleList([])
self.left.extend([
*[DownScalingBlock(left_layers[i],
left_layers[i + 1]) for i in range(len(left_layers) - 1)]
])
self.right.extend([
ConvBlock(512 + 256, 256),
ConvBlock(256 + 128, 128),
ConvBlock(128 + 64, 64)
])
self.maxpool = nn.MaxPool2d(2)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.output = nn.Sequential(
nn.Conv2d(64, self.out_channels, kernel_size=1),
nn.Upsample(scale_factor=2),
nn.Sigmoid()
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
conv1 = self.left[0](x)
conv2 = self.left[1](conv1)
conv3 = self.left[2](conv2)
x = self.left[3](conv3)
x = self.upsample(x)
x = torch.cat([x, conv3], dim=1)
x = self.right[0](x)
x = self.upsample(x)
x = torch.cat([x, conv2], dim=1)
x = self.right[1](x)
x = self.upsample(x)
x = torch.cat([x, conv1], dim=1)
x = self.right[2](x)
x = self.output(x)
return x
| 0
| 0
| 0
| 2,616
| 0
| 0
| 0
| -19
| 114
|
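A shape sanity check for the `UNet` above: the encoder pools four times in total, so the input side length must be divisible by 16, and the final `Conv2d`/`Upsample`/`Sigmoid` head returns a per-pixel mask at the original resolution. The channel counts come from the code; the 256x256 input is just an example:

```python
import torch
from landwatch.model.unet import UNet   # import path assumed from the file layout

model = UNet(in_channels=3, out_channels=1)
x = torch.randn(1, 3, 256, 256)          # one RGB tile, side divisible by 16
with torch.no_grad():
    mask = model(x)
print(mask.shape)                        # torch.Size([1, 1, 256, 256]), values in (0, 1)
```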
0e2664f2020e42cdf1e34c5553fdb17e94b6240a
| 5,580
|
py
|
Python
|
spycy/operator.py
|
kanales/spycy
|
f8702bcbfed8eb60bc6e3fca76cb30d781d69cb0
|
[
"MIT"
] | null | null | null |
spycy/operator.py
|
kanales/spycy
|
f8702bcbfed8eb60bc6e3fca76cb30d781d69cb0
|
[
"MIT"
] | null | null | null |
spycy/operator.py
|
kanales/spycy
|
f8702bcbfed8eb60bc6e3fca76cb30d781d69cb0
|
[
"MIT"
] | null | null | null |
"""
This module contains all the functions from the ``operator`` module (bar some
functions that didn't feel like they belonged here) transformed into a
spice so it can be used more comfortably.
:Example:
Consider adding ``2`` to a list of numbers::
map(add(2), [1,2,3,4,5])
"""
import operator
from spycy import spice
__all__ = [ 'add', 'and_', 'contains', 'concat', 'countOf', 'eq', 'floordiv'
, 'ge', 'getitem', 'gt', 'indexOf', 'is_', 'is_not', 'le', 'lshift'
, 'lt', 'matmul', 'mod', 'mul', 'ne', 'or_', 'pos', 'pow', 'rshift'
, 'sub', 'truediv', 'xor', 'neg', 'not_', 'index', 'itemgetter'
, 'methodcaller', 'attrgetter', 'truth']
add = spice(lambda x,y: operator.__add__(x,y), name='add', doc=operator.add.__doc__)
__add__ = spice(lambda x,y: operator.__add__(x,y), name='__add__', doc=operator.add.__doc__)
and_ = spice(lambda x,y: operator.and_(x,y), name='and_', doc=operator.and_.__doc__)
__and__ = spice(lambda x,y: operator.__and__(x,y), name='__and__', doc=operator.and_.__doc__)
__contains__ = spice(lambda x,y: operator.__contains__(x,y), name='__contains__', doc=operator.contains.__doc__)
contains = spice(lambda x,y: operator.contains(x,y), name='contains', doc=operator.contains.__doc__)
concat = spice(lambda x,y: operator.concat(x,y), name='concat', doc=operator.concat.__doc__)
countOf = spice(lambda x,y: operator.countOf(x,y), name='countOf', doc=operator.countOf.__doc__)
eq = spice(lambda x,y: operator.eq(x,y), name='eq', doc=operator.eq.__doc__)
__eq__ = spice(lambda x,y: operator.__eq__(x,y), name='__eq__', doc=operator.eq.__doc__)
floordiv = spice(lambda x,y: operator.floordiv(x,y), name='floordiv', doc=operator.floordiv.__doc__)
__floordiv__ = spice(lambda x,y: operator.__floordiv__(x,y), name='__floordiv__', doc=operator.floordiv.__doc__)
# reversed
ge = spice(lambda x,y: operator.ge(y,x), name='ge')
__ge__ = spice(lambda x,y: operator.__ge__(y,x), name='__ge__')
getitem = spice(lambda x,y: operator.getitem(x,y), name='getitem', doc=operator.getitem.__doc__)
__getitem__ = spice(lambda x,y: operator.__getitem__(x,y), name='__getitem__', doc=operator.getitem.__doc__)
# reversed
gt = spice(lambda x,y: operator.gt(y,x), name='gt')
__gt__ = spice(lambda x,y: operator.__gt__(y,x))
indexOf = spice(lambda x,y: operator.indexOf(x,y), name='indexOf', doc=operator.indexOf.__doc__)
is_ = spice(lambda x,y: operator.is_(x,y), name='is_', doc=operator.is_.__doc__)
is_not = spice(lambda x,y: operator.is_not(x,y), name='is_not', doc=operator.is_not.__doc__)
# reversed
le = spice(lambda x,y: operator.le(y,x), name='le')
__le__ = spice(lambda x,y: operator.__le__(y,x), name='__le__')
# reversed
lshift = spice(lambda x,y: operator.lshift(y,x), name='lshift')
__lshift__ = spice(lambda x,y: operator.__lshift__(y,x), name='__lshift__')
# reversed
lt = spice(lambda x,y: operator.lt(y,x), name='lt')
__lt__ = spice(lambda x,y: operator.__lt__(y,x), name='__lt__')
# reversed
matmul = spice(lambda x,y: operator.matmul(y,x), name='matmul')
__matmul__ = spice(lambda x,y: operator.__matmul__(y,x), name='__matmul__')
# reversed
mod = spice(lambda x,y: operator.mod(y,x), name='mod')
__mod__ = spice(lambda x,y: operator.__mod__(y,x), name='__mod__')
mul = spice(lambda x,y: operator.mul(x,y), name='mul', doc=operator.mul.__doc__)
__mul__ = spice(lambda x,y: operator.__mul__(x,y), name='__mul__', doc=operator.mul.__doc__)
ne = spice(lambda x,y: operator.ne(x,y), name='ne', doc=operator.ne.__doc__)
__ne__ = spice(lambda x,y: operator.__ne__(x,y), name='__ne__', doc=operator.ne.__doc__)
or_ = spice(lambda x,y: operator.or_(x,y), name='or_', doc=operator.or_.__doc__)
__or__ = spice(lambda x,y: operator.__or__(x,y), name='__or__', doc=operator.or_.__doc__)
pos = spice(lambda x,y: operator.pos(x,y), name='pos', doc=operator.pos.__doc__)
#reversed
pow = spice(lambda x,y: operator.pow(y,x), name='pow')
__pow__ = spice(lambda x,y: operator.__pow__(y,x), name='__pow__')
# reversed
rshift = spice(lambda x,y: operator.rshift(y,x), name='rshift')
__rshift__ = spice(lambda x,y: operator.__rshift__(y,x), name='__rshift__')
# reversed
sub = spice(lambda x,y: operator.sub(y,x), name='sub')
__sub__ = spice(lambda x,y: operator.__sub__(y,x), name='__sub__')
# reversed
truediv = spice(lambda x,y: operator.truediv(y,x), name='truediv')
__truediv__ = spice(lambda x,y: operator.__truediv__(y,x), name='__truediv__')
xor = spice(lambda x,y: operator.xor(x,y), name='xor', doc=operator.xor.__doc__)
__xor__ = spice(lambda x,y: operator.__xor__(x,y), name='__xor__', doc=operator.xor.__doc__)
#################################################
neg = spice(lambda x: operator.neg(x), name='neg', doc=operator.neg.__doc__)
__neg__ = spice(lambda x: operator.__neg__(x), name='__neg__', doc=operator.neg.__doc__)
not_ = spice(lambda x: operator.not_(x), name='not_', doc=operator.not_.__doc__)
__not__ = spice(lambda x: operator.__not__(x), name='__not__', doc=operator.not_.__doc__)
index = spice(lambda x: operator.index(x), name='index', doc=operator.index.__doc__)
__index__ = spice(lambda x: operator.__index__(x), name='__index__', doc=operator.index.__doc__)
itemgetter = spice(lambda x: operator.itemgetter(x), name='itemgetter', doc=operator.itemgetter.__doc__)
methodcaller = spice(lambda x: operator.methodcaller(x), name='methodcaller', doc=operator.methodcaller.__doc__)
attrgetter = spice(lambda x: operator.attrgetter(x), name='attrgetter', doc=operator.attrgetter.__doc__)
truth = spice(lambda x: operator.truth(x), name='truth', doc=operator.truth.__doc__)
| 45.365854
| 112
| 0.704659
|
"""
This module contains all the functions from the ``operator`` module (bar some
functions that didn't feel like they belonged here) transformed into a
spice so it can be used more comfortably.
:Example:
Consider adding ``2`` to a list of numbers::
map(add(2), [1,2,3,4,5])
"""
import operator
from spycy import spice
__all__ = [ 'add', 'and_', 'contains', 'concat', 'countOf', 'eq', 'floordiv'
, 'ge', 'getitem', 'gt', 'indexOf', 'is_', 'is_not', 'le', 'lshift'
, 'lt', 'matmul', 'mod', 'mul', 'ne', 'or_', 'pos', 'pow', 'rshift'
, 'sub', 'truediv', 'xor', 'neg', 'not_', 'index', 'itemgetter'
, 'methodcaller', 'attrgetter', 'truth']
add = spice(lambda x,y: operator.__add__(x,y), name='add', doc=operator.add.__doc__)
__add__ = spice(lambda x,y: operator.__add__(x,y), name='__add__', doc=operator.add.__doc__)
and_ = spice(lambda x,y: operator.and_(x,y), name='and_', doc=operator.and_.__doc__)
__and__ = spice(lambda x,y: operator.__and__(x,y), name='__and__', doc=operator.and_.__doc__)
__contains__ = spice(lambda x,y: operator.__contains__(x,y), name='__contains__', doc=operator.contains.__doc__)
contains = spice(lambda x,y: operator.contains(x,y), name='contains', doc=operator.contains.__doc__)
concat = spice(lambda x,y: operator.concat(x,y), name='concat', doc=operator.concat.__doc__)
countOf = spice(lambda x,y: operator.countOf(x,y), name='countOf', doc=operator.countOf.__doc__)
eq = spice(lambda x,y: operator.eq(x,y), name='eq', doc=operator.eq.__doc__)
__eq__ = spice(lambda x,y: operator.__eq__(x,y), name='__eq__', doc=operator.eq.__doc__)
floordiv = spice(lambda x,y: operator.floordiv(x,y), name='floordiv', doc=operator.floordiv.__doc__)
__floordiv__ = spice(lambda x,y: operator.__floordiv__(x,y), name='__floordiv__', doc=operator.floordiv.__doc__)
# reversed
ge = spice(lambda x,y: operator.ge(y,x), name='ge')
__ge__ = spice(lambda x,y: operator.__ge__(y,x), name='__ge__')
getitem = spice(lambda x,y: operator.getitem(x,y), name='getitem', doc=operator.getitem.__doc__)
__getitem__ = spice(lambda x,y: operator.__getitem__(x,y), name='__getitem__', doc=operator.getitem.__doc__)
# reversed
gt = spice(lambda x,y: operator.gt(y,x), name='gt')
__gt__ = spice(lambda x,y: operator.__gt__(y,x))
indexOf = spice(lambda x,y: operator.indexOf(x,y), name='indexOf', doc=operator.indexOf.__doc__)
is_ = spice(lambda x,y: operator.is_(x,y), name='is_', doc=operator.is_.__doc__)
is_not = spice(lambda x,y: operator.is_not(x,y), name='is_not', doc=operator.is_not.__doc__)
# reversed
le = spice(lambda x,y: operator.le(y,x), name='le')
__le__ = spice(lambda x,y: operator.__le__(y,x), name='__le__')
# reversed
lshift = spice(lambda x,y: operator.lshift(y,x), name='lshift')
__lshift__ = spice(lambda x,y: operator.__lshift__(y,x), name='__lshift__')
# reversed
lt = spice(lambda x,y: operator.lt(y,x), name='lt')
__lt__ = spice(lambda x,y: operator.__lt__(y,x), name='__lt__')
# reversed
matmul = spice(lambda x,y: operator.matmul(y,x), name='matmul')
__matmul__ = spice(lambda x,y: operator.__matmul__(y,x), name='__matmul__')
# reversed
mod = spice(lambda x,y: operator.mod(y,x), name='mod')
__mod__ = spice(lambda x,y: operator.__mod__(y,x), name='__mod__')
mul = spice(lambda x,y: operator.mul(x,y), name='mul', doc=operator.mul.__doc__)
__mul__ = spice(lambda x,y: operator.__mul__(x,y), name='__mul__', doc=operator.mul.__doc__)
ne = spice(lambda x,y: operator.ne(x,y), name='ne', doc=operator.ne.__doc__)
__ne__ = spice(lambda x,y: operator.__ne__(x,y), name='__ne__', doc=operator.ne.__doc__)
or_ = spice(lambda x,y: operator.or_(x,y), name='or_', doc=operator.or_.__doc__)
__or__ = spice(lambda x,y: operator.__or__(x,y), name='__or__', doc=operator.or_.__doc__)
pos = spice(lambda x,y: operator.pos(x,y), name='pos', doc=operator.pos.__doc__)
#reversed
pow = spice(lambda x,y: operator.pow(y,x), name='pow')
__pow__ = spice(lambda x,y: operator.__pow__(y,x), name='__pow__')
# reversed
rshift = spice(lambda x,y: operator.rshift(y,x), name='rshift')
__rshift__ = spice(lambda x,y: operator.__rshift__(y,x), name='__rshift__')
# reversed
sub = spice(lambda x,y: operator.sub(y,x), name='sub')
__sub__ = spice(lambda x,y: operator.__sub__(y,x), name='__sub__')
# reversed
truediv = spice(lambda x,y: operator.truediv(y,x), name='truediv')
__truediv__ = spice(lambda x,y: operator.__truediv__(y,x), name='__truediv__')
xor = spice(lambda x,y: operator.xor(x,y), name='xor', doc=operator.xor.__doc__)
__xor__ = spice(lambda x,y: operator.__xor__(x,y), name='__xor__', doc=operator.xor.__doc__)
#################################################
neg = spice(lambda x: operator.neg(x), name='neg', doc=operator.neg.__doc__)
__neg__ = spice(lambda x: operator.__neg__(x), name='__neg__', doc=operator.neg.__doc__)
not_ = spice(lambda x: operator.not_(x), name='not_', doc=operator.not_.__doc__)
__not__ = spice(lambda x: operator.__not__(x), name='__not__', doc=operator.not_.__doc__)
index = spice(lambda x: operator.index(x), name='index', doc=operator.index.__doc__)
__index__ = spice(lambda x: operator.__index__(x), name='__index__', doc=operator.index.__doc__)
itemgetter = spice(lambda x: operator.itemgetter(x), name='itemgetter', doc=operator.itemgetter.__doc__)
methodcaller = spice(lambda x: operator.methodcaller(x), name='methodcaller', doc=operator.methodcaller.__doc__)
attrgetter = spice(lambda x: operator.attrgetter(x), name='attrgetter', doc=operator.attrgetter.__doc__)
truth = spice(lambda x: operator.truth(x), name='truth', doc=operator.truth.__doc__)
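# A minimal usage sketch (an assumption -- `spice` is defined elsewhere and is presumed here
# to produce partially applicable callables; the exact calling convention may differ).
# The `# reversed` wrappers above swap the operand order so that a partially applied
# operator reads naturally, e.g.:
#   sub(3)(10)  ->  operator.sub(10, 3)  ==  7    # "subtract 3 from 10"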
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8f22602f2a4aaf4a77b2fbdbfd6ec7f59e6787bf
| 2,577
|
bzl
|
Python
|
recipes/brotli/config.bzl
|
curoky/rules_cc
|
943408c05e2204e1e603b70db05037217a53868d
|
[
"Apache-2.0"
] | 3
|
2022-02-06T10:10:44.000Z
|
2022-02-07T11:53:25.000Z
|
recipes/brotli/config.bzl
|
curoky/rules_cc
|
943408c05e2204e1e603b70db05037217a53868d
|
[
"Apache-2.0"
] | null | null | null |
recipes/brotli/config.bzl
|
curoky/rules_cc
|
943408c05e2204e1e603b70db05037217a53868d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 curoky([email protected]).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config = {
"name": "com_github_google_brotli",
"type": "git_repository",
# "remote": "https://github.com/google/brotli",
"remote": "https://github.com/pefoley2/brotli",
"used_version": "heads/master",
"versions": {
"heads/master": {},
"tags/v1.0.9": {},
},
}
# Note:
# 1: after v1.0.9, brotli use vla-parameter, which gcc-11 throw error by default
# fix pr: https://github.com/google/brotli/pull/904
# external/com_github_google_brotli/c/dec/decode.c:2036:41: error: argument 2 of type 'const uint8_t *' {aka 'const unsigned char *'} declared as a pointer [-Werror=vla-parameter]
# 2036 | size_t encoded_size, const uint8_t* encoded_buffer, size_t* decoded_size,
# | ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~
# In file included from external/com_github_google_brotli/c/dec/decode.c:7:
# bazel-out/k8-dbg/bin/external/com_github_google_brotli/_virtual_includes/brotli_inc/brotli/decode.h:204:19: note: previously declared as a variable length array 'const uint8_t[*decoded_size]' {aka 'const unsigned char[*decoded_size]'}
# 204 | const uint8_t encoded_buffer[BROTLI_ARRAY_PARAM(encoded_size)],
# | ~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# external/com_github_google_brotli/c/dec/decode.c:2037:14: error: argument 4 of type 'uint8_t *' {aka 'unsigned char *'} declared as a pointer [-Werror=vla-parameter]
# 2037 | uint8_t* decoded_buffer) {
# | ~~~~~~~~~^~~~~~~~~~~~~~
# In file included from external/com_github_google_brotli/c/dec/decode.c:7:
# bazel-out/k8-dbg/bin/external/com_github_google_brotli/_virtual_includes/brotli_inc/brotli/decode.h:206:13: note: previously declared as a variable length array 'uint8_t[encoded_size]' {aka 'unsigned char[encoded_size]'}
# 206 | uint8_t decoded_buffer[BROTLI_ARRAY_PARAM(*decoded_size)]);
# | ~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# cc1: all warnings being treated as errors
| 57.266667
| 236
| 0.668607
|
# Copyright 2021 curoky([email protected]).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config = {
"name": "com_github_google_brotli",
"type": "git_repository",
# "remote": "https://github.com/google/brotli",
"remote": "https://github.com/pefoley2/brotli",
"used_version": "heads/master",
"versions": {
"heads/master": {},
"tags/v1.0.9": {},
},
}
# Note:
# 1: after v1.0.9, brotli use vla-parameter, which gcc-11 throw error by default
# fix pr: https://github.com/google/brotli/pull/904
# external/com_github_google_brotli/c/dec/decode.c:2036:41: error: argument 2 of type 'const uint8_t *' {aka 'const unsigned char *'} declared as a pointer [-Werror=vla-parameter]
# 2036 | size_t encoded_size, const uint8_t* encoded_buffer, size_t* decoded_size,
# | ~~~~~~~~~~~~~~~^~~~~~~~~~~~~~
# In file included from external/com_github_google_brotli/c/dec/decode.c:7:
# bazel-out/k8-dbg/bin/external/com_github_google_brotli/_virtual_includes/brotli_inc/brotli/decode.h:204:19: note: previously declared as a variable length array 'const uint8_t[*decoded_size]' {aka 'const unsigned char[*decoded_size]'}
# 204 | const uint8_t encoded_buffer[BROTLI_ARRAY_PARAM(encoded_size)],
# | ~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# external/com_github_google_brotli/c/dec/decode.c:2037:14: error: argument 4 of type 'uint8_t *' {aka 'unsigned char *'} declared as a pointer [-Werror=vla-parameter]
# 2037 | uint8_t* decoded_buffer) {
# | ~~~~~~~~~^~~~~~~~~~~~~~
# In file included from external/com_github_google_brotli/c/dec/decode.c:7:
# bazel-out/k8-dbg/bin/external/com_github_google_brotli/_virtual_includes/brotli_inc/brotli/decode.h:206:13: note: previously declared as a variable length array 'uint8_t[encoded_size]' {aka 'unsigned char[encoded_size]'}
# 206 | uint8_t decoded_buffer[BROTLI_ARRAY_PARAM(*decoded_size)]);
# | ~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# cc1: all warnings being treated as errors
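# Hedged sketch (an assumption, not part of the upstream note): until the fix from PR 904
# reaches a release, one local workaround is to pin the last released tag instead of master:
#     "used_version": "tags/v1.0.9",
# or to build with a gcc 11+ flag that downgrades the diagnostic, such as -Wno-vla-parameter.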
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1e9f1106e32966139e9187c455ded6eb0bcb3ee5
| 565
|
py
|
Python
|
vvphotos/serializers.py
|
synw/django-vvphotos
|
3dd93fbe8a29d8db6fe440a40ee700d229da537b
|
[
"MIT"
] | 1
|
2017-04-05T04:09:00.000Z
|
2017-04-05T04:09:00.000Z
|
vvphotos/serializers.py
|
synw/django-vvphotos
|
3dd93fbe8a29d8db6fe440a40ee700d229da537b
|
[
"MIT"
] | null | null | null |
vvphotos/serializers.py
|
synw/django-vvphotos
|
3dd93fbe8a29d8db6fe440a40ee700d229da537b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 25.681818
| 106
| 0.677876
|
# -*- coding: utf-8 -*-
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from vvphotos.models import Album
class AlbumSerializer(serializers.ModelSerializer):
class Meta:
model = Album
fields = read_only_fields = ["slug", "title", "image", "description", "url", "photos", 'children']
depth = 1
class AlbumsSerializer(serializers.ModelSerializer):
class Meta:
model = Album
fields = read_only_fields = ["slug", "title", "parent", "image", "description", "url"]
| 0
| 0
| 0
| 359
| 0
| 0
| 0
| 66
| 113
|
2e44b8dfcbd49f05cef5d226ed68147f4d615605
| 6,650
|
py
|
Python
|
docusign_esign/models/currency_feature_set_price.py
|
hunk/docusign-python-client
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
[
"MIT"
] | null | null | null |
docusign_esign/models/currency_feature_set_price.py
|
hunk/docusign-python-client
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
[
"MIT"
] | null | null | null |
docusign_esign/models/currency_feature_set_price.py
|
hunk/docusign-python-client
|
a643c42c1236715e74eef6fc279a1b29da1b5455
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
| 30.365297
| 126
| 0.603308
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CurrencyFeatureSetPrice(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, currency_code=None, currency_symbol=None, envelope_fee=None, fixed_fee=None, seat_fee=None):
"""
CurrencyFeatureSetPrice - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'currency_code': 'str',
'currency_symbol': 'str',
'envelope_fee': 'str',
'fixed_fee': 'str',
'seat_fee': 'str'
}
self.attribute_map = {
'currency_code': 'currencyCode',
'currency_symbol': 'currencySymbol',
'envelope_fee': 'envelopeFee',
'fixed_fee': 'fixedFee',
'seat_fee': 'seatFee'
}
self._currency_code = currency_code
self._currency_symbol = currency_symbol
self._envelope_fee = envelope_fee
self._fixed_fee = fixed_fee
self._seat_fee = seat_fee
@property
def currency_code(self):
"""
Gets the currency_code of this CurrencyFeatureSetPrice.
Specifies the alternate ISO currency code for the account.
:return: The currency_code of this CurrencyFeatureSetPrice.
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""
Sets the currency_code of this CurrencyFeatureSetPrice.
Specifies the alternate ISO currency code for the account.
:param currency_code: The currency_code of this CurrencyFeatureSetPrice.
:type: str
"""
self._currency_code = currency_code
@property
def currency_symbol(self):
"""
Gets the currency_symbol of this CurrencyFeatureSetPrice.
Specifies the alternate currency symbol for the account.
:return: The currency_symbol of this CurrencyFeatureSetPrice.
:rtype: str
"""
return self._currency_symbol
@currency_symbol.setter
def currency_symbol(self, currency_symbol):
"""
Sets the currency_symbol of this CurrencyFeatureSetPrice.
Specifies the alternate currency symbol for the account.
:param currency_symbol: The currency_symbol of this CurrencyFeatureSetPrice.
:type: str
"""
self._currency_symbol = currency_symbol
@property
def envelope_fee(self):
"""
Gets the envelope_fee of this CurrencyFeatureSetPrice.
An incremental envelope cost for plans with envelope overages (when `isEnabled` is set to **true**.)
:return: The envelope_fee of this CurrencyFeatureSetPrice.
:rtype: str
"""
return self._envelope_fee
@envelope_fee.setter
def envelope_fee(self, envelope_fee):
"""
Sets the envelope_fee of this CurrencyFeatureSetPrice.
An incremental envelope cost for plans with envelope overages (when `isEnabled` is set to **true**.)
:param envelope_fee: The envelope_fee of this CurrencyFeatureSetPrice.
:type: str
"""
self._envelope_fee = envelope_fee
@property
def fixed_fee(self):
"""
Gets the fixed_fee of this CurrencyFeatureSetPrice.
Specifies a one-time fee associated with the plan (when `isEnabled` is set to **true**.)
:return: The fixed_fee of this CurrencyFeatureSetPrice.
:rtype: str
"""
return self._fixed_fee
@fixed_fee.setter
def fixed_fee(self, fixed_fee):
"""
Sets the fixed_fee of this CurrencyFeatureSetPrice.
Specifies a one-time fee associated with the plan (when `isEnabled` is set to **true**.)
:param fixed_fee: The fixed_fee of this CurrencyFeatureSetPrice.
:type: str
"""
self._fixed_fee = fixed_fee
@property
def seat_fee(self):
"""
Gets the seat_fee of this CurrencyFeatureSetPrice.
Specifies an incremental seat cost for seat-based plans (when `isEnabled` is set to **true**.)
:return: The seat_fee of this CurrencyFeatureSetPrice.
:rtype: str
"""
return self._seat_fee
@seat_fee.setter
def seat_fee(self, seat_fee):
"""
Sets the seat_fee of this CurrencyFeatureSetPrice.
Specifies an incremental seat cost for seat-based plans (when `isEnabled` is set to **true**.)
:param seat_fee: The seat_fee of this CurrencyFeatureSetPrice.
:type: str
"""
self._seat_fee = seat_fee
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
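# A minimal usage sketch (illustrative only; the values are made up):
#   price = CurrencyFeatureSetPrice(currency_code='USD', currency_symbol='$',
#                                   envelope_fee='1.00', fixed_fee='0.00', seat_fee='5.00')
#   price.to_dict()  # -> {'currency_code': 'USD', 'currency_symbol': '$', ...}
#   price.to_str()   # pretty-printed form of the same dict, via pformat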
| 0
| 3,323
| 0
| 2,926
| 0
| 0
| 0
| -3
| 90
|
374c484b3ff01260b21cf925102e1875ef9938a6
| 477
|
py
|
Python
|
WarelogPyApi/app.py
|
propil5/WarelogManager
|
6baf338855175259877257352f9986a02ffd3e2e
|
[
"MIT"
] | null | null | null |
WarelogPyApi/app.py
|
propil5/WarelogManager
|
6baf338855175259877257352f9986a02ffd3e2e
|
[
"MIT"
] | null | null | null |
WarelogPyApi/app.py
|
propil5/WarelogManager
|
6baf338855175259877257352f9986a02ffd3e2e
|
[
"MIT"
] | null | null | null |
from flask import Flask
# from flask_restful import Api, Resource, reqparse, abort, fields, marshal_with
# from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# api = Api(app)
| 29.8125
| 80
| 0.769392
|
from flask.helpers import url_for
from pyTrendsExtensions import GetTrendingOverTime
from flask import Flask, redirect
# from flask_restful import Api, Resource, reqparse, abort, fields, marshal_with
# from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# api = Api(app)
@app.route("/")
def hello():
return redirect("http://127.0.0.1:5000/test", code=302)
@app.route("/<keyword>")
def GetTrandingDataForKeyword(keyword):
return GetTrendingOverTime(keyword)
| 0
| 150
| 0
| 0
| 0
| 0
| 0
| 51
| 90
|
06d5b67bfa6a6b800139d199e86030897e4fbec4
| 1,190
|
py
|
Python
|
TCPServer/server.py
|
listenzcc/BCIMiddleware
|
80f74731b4df7f6da84c5df0c67e0ca4e6af7102
|
[
"MIT"
] | null | null | null |
TCPServer/server.py
|
listenzcc/BCIMiddleware
|
80f74731b4df7f6da84c5df0c67e0ca4e6af7102
|
[
"MIT"
] | null | null | null |
TCPServer/server.py
|
listenzcc/BCIMiddleware
|
80f74731b4df7f6da84c5df0c67e0ca4e6af7102
|
[
"MIT"
] | null | null | null |
'''
TCP server interface for console.
The TCP server will be automatically built.
- @interface: The user-interface function; it keeps the server running.
'''
from .defines import TCPServer
server = TCPServer()
server.start()
| 23.8
| 76
| 0.532773
|
'''
TCP server interface for console.
The TCP server will be automatically built.
- @interface: The user-interface function; it keeps the server running.
'''
from . import logger
from .defines import TCPServer
server = TCPServer()
server.start()
def interface():
logger.info(f'Interface starts')
help_msg = dict(
h="Show help message",
q="Quit",
list="List the alive sessions",
send="Send message through all alive sessions, send [message]"
)
while True:
inp = input('>> ')
if inp == 'q':
break
if inp == 'h':
for key, value in help_msg.items():
print(f'{key}: {value}')
continue
if inp == 'list':
for i, session in enumerate(server.alive_sessions()):
print(f'[{i}]', session.address)
continue
if inp.startswith('send '):
message = inp.split(' ', 1)[1]
for session in server.alive_sessions():
session.send(message)
continue
print('ByeBye')
logger.info(f'Interface stops')
return 0
| 0
| 0
| 0
| 0
| 0
| 898
| 0
| -1
| 50
|
8212a3f51dd0ff690f96c791a8239dc4e79430a6
| 2,225
|
py
|
Python
|
whale/src/choiceHeuristic.py
|
margaal/whale
|
00f0743a49e383319cec2d38883697774956ffc5
|
[
"MIT",
"Unlicense"
] | null | null | null |
whale/src/choiceHeuristic.py
|
margaal/whale
|
00f0743a49e383319cec2d38883697774956ffc5
|
[
"MIT",
"Unlicense"
] | null | null | null |
whale/src/choiceHeuristic.py
|
margaal/whale
|
00f0743a49e383319cec2d38883697774956ffc5
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
""" This module contains all class to manage variable choice Heuristic """
| 33.712121
| 127
| 0.595056
|
#!/usr/bin/env python3
""" This module contains all class to manage variable choice Heuristic """
class VariableChoiceHeuristic:
""" Super class to handle variable choice heuristic """
def __init__(self, vars):
"""
Args:
vars (set): variables used in all clauses.
"""
#: set: All variables of a set of clauses program must be analyzed
self.vars = vars
    def getVariableTriplet(self, S):
"""Method to get variable
Args:
S: assignment set
Returns:
a triplet (X, v, v') such as X is variable, v is value of X and v' is alternative value of X
"""
if len(S) == 0:
return (min(self.vars), 1, -1)
s = set(list(zip(*S))[0])
return (min(self.vars-s), 1, -1)
class SimpleVariableChoiceHeuristic(VariableChoiceHeuristic):
""" First approach to choose variable, it is simple. we choose the first variable wich is not yet in assignment set (S) """
def __init__(self, vars):
super().__init__(vars)
def getVariableTriplet(self, S):
"""Method to get variable
Args:
S: assignment set
Returns:
a triplet (X, v, v') such as X is variable, v is value of X and v' is alternative value of X
"""
        return super().getVariableTriplet(S)
class LevelTwoVariableChoiceHeuristic(VariableChoiceHeuristic):
""" This approach to choose variable is better than SimpleVariableChoiceHeuristic because it considers unitary clause"""
def __init__(self, vars):
super().__init__(vars)
#: set: All unitary clauses detected in the previous analysis of system of clauses
self.unitClauseLitteral:set = set()
def getVariableTriplet(self, S):
"""Method to get variable
Args:
S(list): assignment set
Returns:
a set of tuple, i.e a triplet (X, v, v') such as X is variable, v is value of X and v' is alternative value of X
"""
if len(self.unitClauseLitteral)!=0:
return self.unitClauseLitteral
        return super().getVariableTriplet(S)
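# A minimal usage sketch (illustrative only, not part of the original module):
#   heuristic = SimpleVariableChoiceHeuristic({1, 2, 3})
#   heuristic.getVariableTriplet([])        # -> (1, 1, -1): smallest unassigned variable
#   heuristic.getVariableTriplet([(1, 1)])  # -> (2, 1, -1): 1 is assigned, so 2 is chosen next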
| 0
| 0
| 0
| 2,059
| 0
| 0
| 0
| 0
| 69
|
87e02e8e46132c97db5e7e96372e3d6b7ffa4c3f
| 21,615
|
py
|
Python
|
References/Original Docs/TrussCalc_v0.py
|
lorcan2440/SimpleTrussCalculator
|
3c063fbc5f1987e9a700e312b763b36d4ec22495
|
[
"MIT"
] | 1
|
2021-07-29T14:34:08.000Z
|
2021-07-29T14:34:08.000Z
|
References/Original Docs/TrussCalc_v0.py
|
lorcan2440/SimpleTrussCalculator
|
3c063fbc5f1987e9a700e312b763b36d4ec22495
|
[
"MIT"
] | 1
|
2021-07-30T17:34:42.000Z
|
2021-07-30T17:34:42.000Z
|
References/Original Docs/TrussCalc_v0.py
|
lorcan2440/SimpleTrussCalculator
|
3c063fbc5f1987e9a700e312b763b36d4ec22495
|
[
"MIT"
] | 1
|
2022-03-13T11:01:34.000Z
|
2022-03-13T11:01:34.000Z
|
# TRUSS INNER CLASSES END HERE
# MAIN FUNCTIONS END HERE
build_truss(815, True)
| 46.684665
| 152
| 0.556697
|
from matplotlib import pyplot as plt
import math, sigfig, warnings # module "sigfig" requires "pip install sigfig" at command line
import numpy as np
def get_all_trusses():
return [truss for truss in Truss]
class IterTruss(type):
def __iter__(cls):
return iter(cls._allTrusses)
class Truss(metaclass = IterTruss):
_allTrusses = []
# TRUSS METACLASS INITIATORS
class IterJoint(type):
def __iter__(cls):
return iter(cls._allJoints)
class IterBar(type):
def __iter__(cls):
return iter(cls._allBars)
class IterLoad(type):
def __iter__(cls):
return iter(cls._allLoads)
class IterSupport(type):
def __iter__(cls):
return iter(cls._allSupports)
def __init__(self, bar_params: dict = None, units = 'kN, mm'):
self._allTrusses.append(self)
if bar_params == None:
if units == 'N, m':
self.default_params = {"b" : 0.016, "t" : 0.004, "D" : 0.020, "E" : 2.1e11}
elif units == 'kN, mm':
self.default_params = {"b" : 1.6, "t" : 4, "D" : 20, "E" : 210}
else:
raise ValueError('Units must be either "N, m" or "kN, mm".')
else:
self.default_params = bar_params
self.units = units
# PARTS OF THE TRUSS (INNER CLASSES)
class Joint(metaclass = IterJoint):
_allJoints = []
def __init__(self, truss: object, name: str, x: float, y: float):
self._allJoints.append(self)
self.name = name
self.truss = truss
self.x = x
self.y = y
self.loads = {}
def form_equation(self):
self.truss.get_all_bars_connected_to_joint(self)
class Bar(metaclass = IterBar):
_allBars = []
def __init__(self, truss: object, name: str, first_joint: object, second_joint: object,
my_params: dict = None):
self._allBars.append(self)
self.name = name
self.first_joint, self.first_joint_name = first_joint, first_joint.name
self.second_joint, self.second_joint_name = second_joint, second_joint.name
if my_params == None:
self.params = truss.default_params
else:
self.params = my_params
self.b, self.t, self.D, self.E, self.σ_max = self.params["b"], self.params["t"], self.params["D"], self.params["E"], self.params["σ_max"]
def length(self):
self.L = math.sqrt((self.first_joint.x - self.second_joint.x)**2 + (self.first_joint.y - self.second_joint.y)**2)
return self.L
def area(self):
self.A = (self.b ** 2 - (self.b - self.t) ** 2) * 1.03
def effective_area(self):
self.A_eff = (1.5 * self.b - self.D) * 0.9 * self.t
return self.A_eff
def buckling_ratio(self):
self.buckling_ratio = self.length() / self.b
return self.buckling_ratio
class Load(metaclass = IterLoad):
_allLoads = []
def __init__(self, name: str, joint: object, x_comp: float = 0.0, y_comp: float = 0.0):
self._allLoads.append(self)
self.name = name
self.joint = joint
self.x, self.y = x_comp, y_comp
self.magnitude = math.sqrt(self.x ** 2 + self.y ** 2)
self.direction = math.atan2(self.y, self.x)
joint.loads[self.name] = (self.x, self.y)
class Support(metaclass = IterSupport):
_allSupports = []
def __init__(self, truss: object, name: str, joint: object, support_type: str = 'encastre',
roller_normal_vector: tuple = (1, 0)):
self._allSupports.append(self)
self.name = name
self.joint = joint
self.type = support_type
self.dir = roller_normal_vector
if self.type in ('encastre', 'pin'):
joint.loads['Reaction @ {}'.format(self.name)] = (None, None)
# Independent unknowns: fill in later
elif self.type == 'roller':
joint.loads['Reaction @ {}'.format(self.name)] = (None * self.dir[0], None * self.dir[1])
# Dependent unknowns: fill in later
else:
raise ValueError('Support type must be "encastre", "roller" or " pin".')
# TRUSS METACLASS METHODS
def get_all_bars(self, str_names_only: bool = False):
if not str_names_only:
return [bar for bar in Truss.Bar]
else:
return [bar.name for bar in Truss.Bar]
def get_all_joints(self, str_names_only: bool = False):
if not str_names_only:
return [joint for joint in Truss.Joint]
else:
return [joint.name for joint in Truss.Joint]
def get_all_bars_connected_to_joint(self, joint: object, str_names_only: bool = False):
if not str_names_only:
return [bar for bar in Truss.Bar if joint.name in (bar.first_joint.name, bar.second_joint.name)]
else:
return [bar.name for bar in Truss.Bar if joint.name in (bar.first_joint.name, bar.second_joint.name)]
def get_all_joints_connected_to_bar(self, bar: object, str_names_only: bool = False):
if not str_names_only:
return [bar.first_joint, bar.second_joint]
else:
return [bar.first_joint.name, bar.second_joint.name]
def get_all_loads(self):
return [load for load in Truss.Load]
def get_all_loads_at_joint(self, joint: object):
return [load for load in Truss.Load if load.joint == joint]
def get_all_loads_at_joint_by_name(self, joint_name: str):
return [load for load in Truss.Load if load.joint.name == joint_name]
def get_all_supports(self):
return [support for support in Truss.Support]
def get_bar_by_name(self, bar_name: str):
for bar in Truss.Bar:
if bar.name == bar_name:
return bar
def is_statically_determinate(self):
b = len(self.get_all_bars(str_names_only = True))
F = sum([2 if support.type in ('encastre', 'pin') else 1 for support in Truss.Support])
j = len(self.get_all_joints(str_names_only = True))
return b + F == 2 * j
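    # Worked check of the b + F == 2j condition, using the example truss assembled in
    # build_truss() below (added for illustration, not part of the original file):
    # 8 bars (b = 8) plus two encastre supports (F = 2 + 2 = 4) give b + F = 12,
    # which equals 2 * 6 joints, so that truss is statically determinate.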
def calculate(self):
# Get a list of the distinct joint names, number of equations to form = 2 * number of joints
joint_names = self.get_all_joints(str_names_only = True)
number_of_unknowns = 2 * len(joint_names)
# List of dictionaries for unknowns, given default zero values
unknowns = {}
wanted_vars = []
for bar in self.get_all_bars():
unknowns['Tension in ' + bar.name] = 0
wanted_vars.append('Tension in ' + bar.name)
for support in self.get_all_supports():
unknowns['Horizontal reaction at ' + support.name] = 0
wanted_vars.append('Horizontal reaction at ' + support.joint.name)
unknowns['Vertical reaction at ' + support.name] = 0
wanted_vars.append('Vertical reaction at ' + support.joint.name)
unknowns = [unknowns for x in range(number_of_unknowns)]
# Create a list of joint names, with each entry included twice and then flatten the list
joint_enum = [[joint_names[i], joint_names[i]] for i in range(len(joint_names))]
joint_enum = [item for sublist in joint_enum for item in sublist]
# Create empty dictionary of all equations in all unknowns
unknowns = {"Equation {}, resolve {} at {}".format(
x + 1, 'horizontally' if (x + 1) % 2 == 1 else 'vertically',
joint_enum[x]) : unknowns[x] for x in range(number_of_unknowns)}
all_directions = {}
for joint in self.get_all_joints():
# Reset the directions dictionary for this joint
directions = {}
connected_bars = self.get_all_bars_connected_to_joint(joint)
# Get the anticlockwise (polar) angle of each connected joint relative to this joint which have bars
for bar in connected_bars:
connected_joints = self.get_all_joints_connected_to_bar(bar)
if joint == connected_joints[0]:
angle = math.atan2(connected_joints[1].y - joint.y, connected_joints[1].x - joint.x)
elif joint == connected_joints[1]:
angle = math.atan2(connected_joints[0].y - joint.y, connected_joints[0].x - joint.x)
directions['Tension in ' + bar.name] = angle
# If there are reactions at this joint, store their directions too
if any([bool(s.joint.name == joint.name) for s in self.get_all_supports()]):
directions['Horizontal reaction at ' + joint.name] = 0
directions['Vertical reaction at ' + joint.name] = math.pi/2
# If there are external loads at this joint, store their directions too
for l in self.get_all_loads_at_joint(joint):
directions['Horizontal component of {} at {}'.format(l.name , joint.name)] = 0
directions['Vertical component of {} at {}'.format(l.name , joint.name)] = math.pi/2
all_directions[joint.name] = directions
# Store the coefficients of the unknowns in each equation
coefficients = []
for joint_name in joint_names:
current_line = []
for var in wanted_vars:
try:
current_line.append(round(math.cos(all_directions[joint_name][var]), 10))
except KeyError:
current_line.append(0)
coefficients.append(current_line)
current_line = []
for var in wanted_vars:
try:
current_line.append(round(math.sin(all_directions[joint_name][var]), 10))
except KeyError:
current_line.append(0)
coefficients.append(current_line)
# Store the constants of each equation
constants = []
for joint_name in joint_names:
try:
constants.append([-1 * sum(L.x) for L in self.get_all_loads_at_joint_by_name(joint_name)])
constants.append([-1 * sum(L.y) for L in self.get_all_loads_at_joint_by_name(joint_name)])
except TypeError:
constants.append([-1 * L.x for L in self.get_all_loads_at_joint_by_name(joint_name)])
constants.append([-1 * L.y for L in self.get_all_loads_at_joint_by_name(joint_name)])
# Sanitise load data
for i in range(len(constants)):
if constants[i] == [] or constants[i] == [None]:
constants[i] = [0]
# Solve the system
M, B = np.matrix(np.array(coefficients)), np.matrix(constants)
X = np.linalg.inv(M) * B
# Match values back to variable names and return
output_dict = {}
i = 0
for bar in self.get_all_bars():
output_dict[bar.name] = float(X[i])
i += 1
for support in self.get_all_supports():
output_dict[support.name] = (float(X[i]), float(X[i+1]))
i += 2
return output_dict
# TRUSS RESULTS CLASS
class Result:
def __init__(self, truss, sig_figs = None):
self.results = truss.calculate()
self.tensions, self.reactions, self.stresses, self.strains, self.buckling_ratios = {}, {}, {}, {}, {}
self.sig_figs = sig_figs
warnings.filterwarnings('ignore')
self.get_tensions(truss)
self.get_reactions(truss)
self.get_stresses(truss)
self.get_buckling_ratios(truss)
self.get_strains(truss)
self.round_data()
def round_data(self):
for item in list(self.tensions.keys()):
try:
self.tensions[item] = sigfig.round(self.tensions[item], self.sig_figs)
self.stresses[item] = sigfig.round(self.stresses[item], self.sig_figs)
self.strains[item] = sigfig.round(self.strains[item], self.sig_figs)
self.buckling_ratios[item] = sigfig.round(self.buckling_ratios[item], self.sig_figs)
except KeyError:
continue
for item in list(self.reactions.keys()):
try:
self.reactions[item] = (sigfig.round(self.reactions[item][0], self.sig_figs),
sigfig.round(self.reactions[item][1], self.sig_figs))
except KeyError:
continue
def get_tensions(self, truss):
for item in self.results:
if type(self.results[item]) == float:
if abs(self.results[item]) < 1e-10:
self.tensions.update({item : 0})
else:
self.tensions.update({item : self.results[item]})
def get_reactions(self, truss):
for item in self.results:
if type(self.results[item]) == tuple:
self.reactions.update({item : self.results[item]})
def get_stresses(self, truss):
for item in self.results:
if type(self.results[item]) == float:
self.stresses.update({item : self.tensions[item] / truss.get_bar_by_name(item).effective_area()})
def get_strains(self, truss):
for item in self.results:
if type(self.results[item]) == float:
self.strains.update({item : self.stresses[item] / truss.get_bar_by_name(item).E})
def get_buckling_ratios(self, truss):
for item in self.results:
if type(self.results[item]) == float and self.results[item] < 0:
self.buckling_ratios.update({item : truss.get_bar_by_name(item).buckling_ratio()})
# TRUSS INNER CLASSES END HERE
def print_results(results: object, truss: object, as_str: bool = True):
if as_str:
print('Axial forces are: (positive = tension; negative = compression) \n' + str(results.tensions))
print('\nAxial stresses are: \n' + str(results.stresses))
'''
print('\nReaction forces are (horizontal, vertical) components (signs consistent with coordinate system): \n'
+ str(results.reactions))
'''
print('Buckling ratios are: \n' + str(results.buckling_ratios))
print('Strains are: \n' + str(results.strains))
if results.sig_figs == None:
print('\nUnits are {}, {}'.format(truss.units.split(',')[0], 'values not rounded'))
else:
print('\nUnits are {}, {}'.format(truss.units.split(',')[0],
'values rounded to {} sig figs'.format(results.sig_figs)))
def plot_diagram(truss: object, results: object, show_reactions = False):
# Find a suitable length-scale to make the annotations look nicer
arrow_sizes = [x.length() for x in truss.get_all_bars()]
arrow_sizes = sum(arrow_sizes)/len(arrow_sizes) * 0.1
# Plot all joints
plt.plot([joint.x for joint in truss.get_all_joints()], [joint.y for joint in truss.get_all_joints()], 'o')
# Plot all bars and label their axial forces in the legend
for bar in truss.get_all_bars():
plt.plot([bar.first_joint.x, bar.second_joint.x], [bar.first_joint.y, bar.second_joint.y],
label = '{}'.format(bar.name + ': ' + str(results.tensions[bar.name]) + ' ' + truss.units.split(',')[0]),
zorder = 0)
# If the bar is nearly vertical, label its name to its right, otherwise label it above
if 80 * (math.pi / 180) <= abs(math.atan2(bar.second_joint.y - bar.first_joint.y,
bar.second_joint.x - bar.first_joint.x)) <= 100 * (math.pi / 180):
plt.text(sum([bar.first_joint.x, bar.second_joint.x])/2 + arrow_sizes / 3,
sum([bar.first_joint.y, bar.second_joint.y])/2, bar.name)
else:
plt.text(sum([bar.first_joint.x, bar.second_joint.x])/2,
sum([bar.first_joint.y, bar.second_joint.y])/2 + arrow_sizes / 3, bar.name)
# Plot all support points with their reactions as arrows
for support in truss.get_all_supports():
plt.plot(support.joint.x, support.joint.y, '*', color = 'red',
label = support.name + ': ' + str(results.reactions[support.name]) + ' ' + truss.units.split(',')[0])
for support in truss.get_all_supports():
if show_reactions == True:
direction_of_reaction = math.atan2(results.reactions[support.name][1], results.reactions[support.name][0])
plt.arrow(support.joint.x, support.joint.y, arrow_sizes, 0, head_width = arrow_sizes / 5, head_length = arrow_sizes / 4)
plt.arrow(support.joint.x, support.joint.y, 0, arrow_sizes, head_width = arrow_sizes / 5, head_length = arrow_sizes / 4)
plt.text(support.joint.x + arrow_sizes / 4, support.joint.y + arrow_sizes / 4, support.name,
label = support.name + ': ' + str(results.reactions[support.name]) + ' ' + truss.units.split(',')[0])
# Plot all loads
for load in truss.get_all_loads():
direction_of_load = math.atan2(load.y, load.x)
plt.arrow(load.joint.x, load.joint.y, arrow_sizes * math.cos(direction_of_load),
arrow_sizes * math.sin(direction_of_load),
head_width = arrow_sizes / 5, head_length = arrow_sizes / 4)
plt.text(sum([load.joint.x, load.joint.x + arrow_sizes * math.cos(direction_of_load)])/2 + arrow_sizes / 3,
sum([load.joint.y + load.joint.y, arrow_sizes * math.sin(direction_of_load)])/2,
load.name + ': (' + str(load.x) + ', ' + str(load.y) + ') ' + truss.units.split(',')[0])
# Graphical improvements
plt.legend(loc = 'upper right')
plt.autoscale(enable = True, axis = 'both')
plt.axis('equal')
plt.show()
# MAIN FUNCTIONS END HERE
def build_truss(x, print_res = True):
# Step 0: set the physical properties and name the truss
custom_params = {"b" : 12.5, "t" : 0.7, "D" : 5, "E" : 210, "σ_max": 0.216}
myTruss = Truss(custom_params, 'kN, mm')
# Step 1: Define the joints (nodes)
joint_A = myTruss.Joint(myTruss, "Joint A", 0, 0)
joint_B = myTruss.Joint(myTruss, "Joint B", 290, -90)
joint_C = myTruss.Joint(myTruss, "Joint C", 815, 127.5)
joint_D = myTruss.Joint(myTruss, "Joint D", 290, 345)
joint_E = myTruss.Joint(myTruss, "Joint E", 0, 255)
joint_F = myTruss.Joint(myTruss, "Joint F", 220.836, 127.5)
weak = {"b" : 12.5, "t" : 0.7, "D" : 5, "E" : 210, "σ_max": 0.216}
medium_1 = {"b" : 16, "t" : 0.9, "D" : 5, "E" : 210, "σ_max": 0.216}
medium_2 = {"b" : 16, "t" : 1.1, "D" : 5, "E" : 210, "σ_max": 0.216}
strong = {"b" : 19, "t" : 1.1, "D" : 5, "E" : 210, "σ_max": 0.216}
# Step 2: Define the bars going between any pair of joints
bar_1 = myTruss.Bar(myTruss, "Bar 1", joint_A, joint_B, medium_2)
bar_2 = myTruss.Bar(myTruss, "Bar 2", joint_B, joint_C, strong)
bar_3 = myTruss.Bar(myTruss, "Bar 3", joint_C, joint_D, medium_1)
bar_4 = myTruss.Bar(myTruss, "Bar 4", joint_D, joint_E, medium_1)
bar_5 = myTruss.Bar(myTruss, "Bar 5", joint_E, joint_F, medium_1)
bar_6 = myTruss.Bar(myTruss, "Bar 6", joint_F, joint_A, medium_2)
bar_7 = myTruss.Bar(myTruss, "Bar 7", joint_F, joint_D, medium_1)
bar_8 = myTruss.Bar(myTruss, "Bar 8", joint_F, joint_B, weak)
# Step 3: Define the loads acting on any joint
load_1 = myTruss.Load("W", joint_C, 0, -0.675 * 2)
# Step 4: Define the supports acting at any joint
support_1 = myTruss.Support(myTruss, "Support 1", joint_A, 'encastre')
support_2 = myTruss.Support(myTruss, "Support 2", joint_E, 'encastre')
# Step 5: Calculate the truss and print the results
my_results = myTruss.Result(myTruss, sig_figs = 3)
if print_res == True:
print_results(my_results, myTruss, as_str = True)
if True:
plot_diagram(myTruss, my_results)
else:
return my_results
build_truss(815, True)
| 14
| 0
| 0
| 14,868
| 0
| 6,352
| 0
| 20
| 278
|
0e2883fc0a40eaacd3d346a142bc2ccbdbc6a50c
| 4,477
|
py
|
Python
|
src/dual_gazebo/src/control_key_drive.py
|
diddytpq/Dual-Motion-robot-gazebo
|
19d8098f9931ee7ded91f8242efdc176c418db8c
|
[
"MIT"
] | null | null | null |
src/dual_gazebo/src/control_key_drive.py
|
diddytpq/Dual-Motion-robot-gazebo
|
19d8098f9931ee7ded91f8242efdc176c418db8c
|
[
"MIT"
] | null | null | null |
src/dual_gazebo/src/control_key_drive.py
|
diddytpq/Dual-Motion-robot-gazebo
|
19d8098f9931ee7ded91f8242efdc176c418db8c
|
[
"MIT"
] | null | null | null |
import rospy
import sys, os
import roslib
if os.name == 'nt':
    import msvcrt
else:
    import tty, termios
roslib.load_manifest('dual_gazebo')
if __name__ == '__main__':
try:
rospy.init_node('mecanum_key')
if os.name != 'nt':
settings = termios.tcgetattr(sys.stdin)
linear = [0, 0, 0]
angular = [0, 0, 0]
plant_x = 0
while(1):
key = getKey()
if key == 'w' :
linear[0] += 1
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'x' :
linear[0] -= 1
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'a' :
angular[2] += 0.5
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'd' :
angular[2] -= 0.5
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'q' :
plant_x += 0.01
move_chassis(plant_x)
elif key == 'e' :
plant_x -= 0.01
move_chassis(plant_x)
elif key == 's' :
linear = [0, 0, 0]
angular = [0, 0, 0]
linear, angular[2] = move_mecanum([linear,angular])
if (key == '\x03'):
linear = [0, 0, 0]
angular = [0, 0, 0]
linear, angular[2] = move_mecanum([linear,angular])
break
    except rospy.ROSInterruptException:
pass
| 22.725888
| 110
| 0.542104
|
import rospy
import numpy as np
from std_msgs.msg import Float64
from gazebo_msgs.srv import *
from geometry_msgs.msg import *
import sys, select, os
import roslib
if os.name == 'nt':
import msvcrt
else:
import tty, termios
roslib.load_manifest('dual_gazebo')
def qua2eular(x,y,z,w):
q_x = x
q_y = y
q_z = z
q_w = w
t0 = +2.0 * (q_w * q_x + q_y * q_z)
t1 = +1.0 - 2.0 * (q_x * q_x + q_y * q_y)
roll_x = np.arctan2(t0, t1)
t2 = +2.0 * (q_w * q_y - q_z * q_x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = np.arcsin(t2)
t3 = +2.0 * (q_w * q_z + q_x * q_y)
t4 = +1.0 - 2.0 * (q_y * q_y + q_z * q_z)
yaw_z = np.arctan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians
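# Quick sanity check (illustrative only): the identity quaternion maps to zero Euler angles,
#   qua2eular(0, 0, 0, 1)  ->  (0.0, 0.0, 0.0)   # roll, pitch, yaw in radians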
def getKey():
if os.name == 'nt':
if sys.version_info[0] >= 3:
return msvcrt.getch().decode()
else:
return msvcrt.getch()
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.1)
if rlist:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def check_velocity(cur_vel):
max_x = 5.5 #km/h
max_y = 3.3 #km/h
max_wz = 3.5 #deg/sec
x_vel, y_vel, z_vel, z_angle = cur_vel
if max_x < abs(x_vel):
if x_vel > 0: x_vel = max_x
else: x_vel = -max_x
if max_y < abs(y_vel):
if y_vel > 0: y_vel = max_y
else: y_vel = -max_y
if max_wz < abs(z_angle):
if z_angle > 0: z_angle = max_wz
else: z_angle = -max_wz
return [x_vel, y_vel, z_vel], z_angle
def mecanum_wheel_velocity(vx, vy, wz):
r = 0.0762 # radius of wheel
l = 0.23 #length between {b} and wheel
w = 0.25225 #depth between {b} abd wheel
alpha = l + w
q_dot = np.array([wz, vx, vy])
J_pseudo = np.array([[-alpha, 1, -1],[alpha, 1, 1],[alpha, 1, -1],[alpha, 1,1]])
u = 1/r * J_pseudo @ np.reshape(q_dot,(3,1))#q_dot.T
return u
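# Worked example (illustrative only) of the inverse kinematics u = (1/r) * J_pseudo @ [wz, vx, vy]:
# for pure forward motion (vx=1, vy=0, wz=0) every row of J_pseudo contributes its middle entry (1),
# so all four wheel speeds are 1/r, roughly 13.1 rad/s with r = 0.0762 m.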
def move_mecanum(data):
# start publisher of cmd_vel to control mecanum
linear, angular = data
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
twist = Twist()
twist.linear.x = linear[0]
twist.linear.y = linear[1]
twist.linear.z = linear[2]
twist.angular.x = angular[0]
twist.angular.y = angular[1]
twist.angular.z = angular[2]
pub.publish(twist)
print(twist)
return [linear[0],linear[1],linear[2]], angular[2]
def move_chassis(data):
#pub_1 = rospy.Publisher('/link_chassis_vel', Twist,queue_size=10)
pub_1 = rospy.Publisher('/dual_motion_robot/chassis_pos_joint_controller/command', Float64, queue_size=10)
#pub_WL = rospy.Publisher('/kitech_robot/mp_left_wheel_joint_controller/command', Float64, queue_size=10)
#pub_WR = rospy.Publisher('/kitech_robot/mp_right_wheel_joint_controller/command', Float64, queue_size=10)
#pub_WL.publish(data)
#pub_WR.publish(data)
pub_1.publish(data)
print(data)
if __name__ == '__main__':
try:
rospy.init_node('mecanum_key')
if os.name != 'nt':
settings = termios.tcgetattr(sys.stdin)
linear = [0, 0, 0]
angular = [0, 0, 0]
plant_x = 0
while(1):
key = getKey()
if key == 'w' :
linear[0] += 1
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'x' :
linear[0] -= 1
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'a' :
angular[2] += 0.5
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'd' :
angular[2] -= 0.5
linear, angular[2] = move_mecanum([linear,angular])
elif key == 'q' :
plant_x += 0.01
move_chassis(plant_x)
elif key == 'e' :
plant_x -= 0.01
move_chassis(plant_x)
elif key == 's' :
linear = [0, 0, 0]
angular = [0, 0, 0]
linear, angular[2] = move_mecanum([linear,angular])
if (key == '\x03'):
linear = [0, 0, 0]
angular = [0, 0, 0]
linear, angular[2] = move_mecanum([linear,angular])
break
    except rospy.ROSInterruptException:
pass
| 0
| 0
| 0
| 0
| 0
| 2,630
| 0
| 31
| 250
|
316b321b9f7d046b9a1c73717ede0ee14f70b07e
| 3,151
|
py
|
Python
|
datalad/crawler/pipelines/tests/test_fcptable.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | null | null | null |
datalad/crawler/pipelines/tests/test_fcptable.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | 6
|
2015-11-20T21:41:13.000Z
|
2018-06-12T14:27:32.000Z
|
datalad/crawler/pipelines/tests/test_fcptable.py
|
yarikoptic/datalad
|
c0cd538de2ed9a30c0f58256c7afa6e18d325505
|
[
"MIT"
] | 1
|
2017-03-28T14:44:16.000Z
|
2017-03-28T14:44:16.000Z
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from logging import getLogger
lgr = getLogger('datalad.crawl.tests')
TOPURL = "http://fcon_1000.projects.nitrc.org/fcpClassic/FcpTable.html"
| 34.25
| 109
| 0.628689
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
from os.path import exists
from requests.exceptions import InvalidURL
from ....utils import chpwd
from ....dochelpers import exc_str
from ....tests.utils import assert_true, assert_raises, assert_false
from ....tests.utils import SkipTest
from ....tests.utils import with_tempfile, skip_if_no_network, use_cassette
from ....tests.utils import skip_if_url_is_not_available
from datalad.crawler.pipelines.tests.utils import _test_smoke_pipelines
from datalad.crawler.pipelines.fcptable import *
from datalad.crawler.pipeline import run_pipeline
import logging
from logging import getLogger
lgr = getLogger('datalad.crawl.tests')
from ..fcptable import pipeline, superdataset_pipeline
TOPURL = "http://fcon_1000.projects.nitrc.org/fcpClassic/FcpTable.html"
def test_smoke_pipelines():
yield _test_smoke_pipelines, pipeline, ['bogus']
yield _test_smoke_pipelines, superdataset_pipeline, []
@use_cassette('test_fcptable_dataset')
@skip_if_no_network
@with_tempfile(mkdir=True)
def _test_dataset(dataset, error, create, skip, tmpdir):
with chpwd(tmpdir):
if create:
with open("README.txt", 'w') as f:
f.write(" ")
pipe = [
crawl_url(TOPURL),
[
assign({'dataset': dataset}),
skip_if({'dataset': 'Cleveland CCF|Durham_Madden|NewYork_Test-Retest_Reliability'}, re=True),
sub({'response': {'<div class="tableParam">([^<]*)</div>': r'\1'}}),
find_dataset(dataset),
extract_readme,
]
]
if error:
assert_raises((InvalidURL, RuntimeError), run_pipeline, pipe)
return
try:
run_pipeline(pipe)
except InvalidURL as exc:
raise SkipTest(
"This version of requests considers %s to be invalid. "
"See https://github.com/kennethreitz/requests/issues/3683#issuecomment-261947670 : %s"
% (TOPURL, exc_str(exc)))
if skip:
assert_false(exists("README.txt"))
return
assert_true(exists("README.txt"))
f = open("README.txt", 'r')
contents = f.read()
assert_true("Author(s)" and "Details" in contents)
def test_dataset():
raise SkipTest('Bring back when NITRC is back (gh-1472)')
skip_if_url_is_not_available(TOPURL, regex='service provider outage')
yield _test_dataset, 'Baltimore', None, False, False
yield _test_dataset, 'AnnArbor_b', None, False, False
yield _test_dataset, 'Ontario', None, False, False
yield _test_dataset, 'Boston', RuntimeError, False, False
yield _test_dataset, "AnnArbor_b", None, True, False
yield _test_dataset, "Cleveland CCF", None, False, True
| 0
| 1,328
| 0
| 0
| 602
| 0
| 0
| 327
| 359
|
2adfa5603510b5c0ce9f049510e9539047f88898
| 1,683
|
py
|
Python
|
KuldeepDwivedi_A2305218477/BFS/Water_jug.py
|
suraj0803/AI-LAB-WORK
|
c09776c104529678215d4f51a756ea0039a89df4
|
[
"Apache-2.0"
] | null | null | null |
KuldeepDwivedi_A2305218477/BFS/Water_jug.py
|
suraj0803/AI-LAB-WORK
|
c09776c104529678215d4f51a756ea0039a89df4
|
[
"Apache-2.0"
] | null | null | null |
KuldeepDwivedi_A2305218477/BFS/Water_jug.py
|
suraj0803/AI-LAB-WORK
|
c09776c104529678215d4f51a756ea0039a89df4
|
[
"Apache-2.0"
] | null | null | null |
# Water Jug problem
print("Solution for Water Jug problem!")
x = int(input("Enter the capacity of jug1 : "))
y = int(input("Entert the capacity of jug2 : "))
target = int(input("Enter the target volume : "))
start = [0, 0]
if target % gcd(x,y) == 0:
print(bfs(start, target, x, y))
else:
print("No solution")
| 25.892308
| 84
| 0.512181
|
# Water Jug problem
print("Solution for Water Jug problem!")
x = int(input("Enter the capacity of jug1 : "))
y = int(input("Entert the capacity of jug2 : "))
target = int(input("Enter the target volume : "))
def bfs(start, target, x, y):
    # x and y are the jug capacities; each state is [amount in jug1, amount in jug2]
    path = []
    front = []
    front.append(start)
    visited = []
    while front:
        current = front.pop(0)  # pop from the head of the queue for breadth-first order
        a = current[0]
        b = current[1]
        path.append(current)
        if(a == target or b == target):
            print("Found!")
            return path
        # fill jug1
        if(a < x and ([x, b] not in visited)):
            front.append([x, b])
            visited.append([x, b])
        # fill jug2
        if(b < y and ([a, y] not in visited)):
            front.append([a, y])
            visited.append([a, y])
        # empty jug1
        if(a > 0 and ([0, b] not in visited)):
            front.append([0, b])
            visited.append([0, b])
        # empty jug2
        if(b > 0 and ([a, 0] not in visited)):
            front.append([a, 0])
            visited.append([a, 0])
        # pour jug2 into jug1
        if(b > 0 and ([min(a + b, x), max(0, a + b - x)] not in visited)):
            front.append([min(a + b, x), max(0, a + b - x)])
            visited.append([min(a + b, x), max(0, a + b - x)])
        # pour jug1 into jug2
        if(a > 0 and ([max(0, a + b - y), min(a + b, y)] not in visited)):
            front.append([max(0, a + b - y), min(a + b, y)])
            visited.append([max(0, a + b - y), min(a + b, y)])
    return -1
def gcd(a, b):
if a == 0:
return b
return gcd(b%a, a)
start = [0, 0]
if target % gcd(x,y) == 0:
print(bfs(start, target, x, y))
else:
print("No solution")
| 0
| 0
| 0
| 0
| 0
| 1,281
| 0
| 0
| 50
|
63865fa932487f27230a6778c7a192a4701cb3dc
| 6,190
|
py
|
Python
|
ReadStereoCalibration.py
|
cxn304/Strain-gauges-recognition
|
1f9f64f8a0fa01892509835694ff88bc47736a7b
|
[
"Apache-2.0"
] | null | null | null |
ReadStereoCalibration.py
|
cxn304/Strain-gauges-recognition
|
1f9f64f8a0fa01892509835694ff88bc47736a7b
|
[
"Apache-2.0"
] | null | null | null |
ReadStereoCalibration.py
|
cxn304/Strain-gauges-recognition
|
1f9f64f8a0fa01892509835694ff88bc47736a7b
|
[
"Apache-2.0"
] | null | null | null |
'''
author: cxn
version: 0.1.0
read camera calibration from mat
'''
import cv2
import matplotlib.pyplot as plt
# distortion correction and stereo rectification
def rectifyImage(image1, image2, map1x, map1y, map2x, map2y):
    """
    cv2.remap re-maps pixels: it takes the pixel at one position in an image and
    places it at a specified position in another image.
    """
    rectifyed_img1 = cv2.remap(image1, map1x, map1y, cv2.INTER_AREA)
    rectifyed_img2 = cv2.remap(image2, map2x, map2y, cv2.INTER_AREA)
    return rectifyed_img1, rectifyed_img2
imgL = cv2.imread("D:/cxn_project/Strain-gauges-recognition/cali_img/left/l6.bmp")
imgR = cv2.imread("D:/cxn_project/Strain-gauges-recognition/cali_img/right/r6.bmp")
height, width = imgL.shape[0:2]
# read camera intrinsic and extrinsic parameters
config = stereoCameral()
map1x, map1y, map2x, map2y, Q = getRectifyTransform(height, width, config)
iml_rectified, imr_rectified = rectifyImage(imgL, imgR, map1x,
map1y, map2x, map2y)
disp = sgbm(iml_rectified, imr_rectified)
plt.imshow(disp)
target_point = threeD(disp, Q)  # 3D coordinates of the target point (in the left camera frame)
print(target_point)
| 32.578947
| 105
| 0.641034
|
'''
author: cxn
version: 0.1.0
read camera calibration from mat
'''
import numpy as np
import cv2
from scipy.io import loadmat
import matplotlib.pyplot as plt
# stereo camera parameters
class stereoCameral(object):
def __init__(self):
stereoParameters = loadmat("./internal_reference/stereoParameters.mat")
self.cam_matrix_left = stereoParameters["stereoParameters"]["K1"][0][0] # IntrinsicMatrix
self.distortion_l = stereoParameters["stereoParameters"]["D1"][0][0] # distortion
self.cam_matrix_right = stereoParameters["stereoParameters"]["K2"][0][0]
self.distortion_r = stereoParameters["stereoParameters"]["D2"][0][0]
self.size = stereoParameters["stereoParameters"]["size"][0][0] # image size
self.R = stereoParameters["stereoParameters"]["rot"][0][0].T
self.T = stereoParameters["stereoParameters"]["trans"][0][0]
def getRectifyTransform(height, width, config):
    # read the matrix parameters
left_K = config.cam_matrix_left
right_K = config.cam_matrix_right
left_distortion = config.distortion_l
right_distortion = config.distortion_r
R = config.R
T = config.T
    # compute the rectification transform, cv2.stereoRectify
    """
    stereoRectify() computes, for each camera, the mapping matrices needed for stereo
    rectification. It does not rectify the images itself; it returns the matrices
    required to do so.
    cameraMatrix1 - camera matrix of the first camera
    distCoeffs1 - distortion vector of the first camera
    cameraMatrix2 - camera matrix of the second camera
    distCoeffs2 - distortion vector of the second camera
    imageSize - image size
    R - the R matrix obtained from stereoCalibrate()
    T - the T matrix obtained from stereoCalibrate()
    R1 - output, rectification transform (rotation) of the first camera
    R2 - output, rectification transform (rotation) of the second camera
    P1 - output, projection matrix of the first camera in the new coordinate system
    P2 - output, projection matrix of the second camera in the new coordinate system
    Q - 4x4 disparity-to-depth mapping matrix
    flags - optional flag, either 0 or CV_CALIB_ZERO_DISPARITY; with
        CV_CALIB_ZERO_DISPARITY the principal points of the two rectified images get
        the same pixel coordinates, otherwise the images are shifted horizontally or
        vertically so that the useful image area is maximised
    alpha - scaling parameter. If negative or omitted, no scaling is applied. With 0
        only the valid part of the rectified image is shown (no black border); with 1
        the whole image is kept. Values between 0 and 1 give intermediate results.
    newImageSize - resolution of the rectified images, defaults to the original size.
    validPixROI1 - optional output, a Rect inside which all pixels are valid
    validPixROI2 - optional output, a Rect inside which all pixels are valid
    """
    if not isinstance(height, int) or not isinstance(width, int):
        height = int(height)
        width = int(width)
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
left_K, left_distortion, right_K, right_distortion,
(width, height), R, T.T, alpha=0.5)
"""
initUndistortRectifyMap
cameraMatrix-摄像机参数矩阵
distCoeffs-畸变参数矩阵
R- stereoCalibrate() 求得的R矩阵
newCameraMatrix-矫正后的摄像机矩阵(可省略)
Size-没有矫正图像的分辨率
m1type-第一个输出映射的数据类型,可以为 CV_32FC1或CV_16SC2
map1-输出的第一个映射变换
map2-输出的第二个映射变换
"""
map1x, map1y = cv2.initUndistortRectifyMap(
left_K, left_distortion, R1, P1, (width, height), cv2.CV_32FC1)
map2x, map2y = cv2.initUndistortRectifyMap(
right_K, right_distortion, R2, P2, (width, height), cv2.CV_32FC1)
return map1x, map1y, map2x, map2y, Q
# distortion correction and stereo rectification
def rectifyImage(image1, image2, map1x, map1y, map2x, map2y):
    """
    cv2.remap re-maps pixels: it takes the pixel at one position in an image and
    places it at a specified position in another image.
    """
rectifyed_img1 = cv2.remap(image1, map1x, map1y, cv2.INTER_AREA)
rectifyed_img2 = cv2.remap(image2, map2x, map2y, cv2.INTER_AREA)
return rectifyed_img1, rectifyed_img2
# disparity computation
def sgbm(imgL, imgR):
    # SGBM parameter setup
blockSize = 8
img_channels = 3
stereo = cv2.StereoSGBM_create(minDisparity = 1,
numDisparities = 64,
blockSize = blockSize,
P1 = 8 * img_channels * blockSize * blockSize,
P2 = 32 * img_channels * blockSize * blockSize,
disp12MaxDiff = -1,
preFilterCap = 1,
uniquenessRatio = 10,
speckleWindowSize = 100,
speckleRange = 100,
mode = cv2.STEREO_SGBM_MODE_HH)
    # compute the disparity map
    disp = stereo.compute(imgL, imgR)
    disp = np.divide(disp.astype(np.float32), 16.)  # divide by 16 to get the true disparity map
return disp
# compute 3D coordinates and drop invalid points
def threeD(disp, Q):
    # compute the 3D coordinates of each pixel (in the left camera coordinate frame)
points_3d = cv2.reprojectImageTo3D(disp, Q)
points_3d = points_3d.reshape(points_3d.shape[0] * points_3d.shape[1], 3)
X = points_3d[:, 0]
Y = points_3d[:, 1]
Z = points_3d[:, 2]
    # select and remove the invalid points
remove_idx1 = np.where(Z <= 0)
remove_idx2 = np.where(Z > 15000)
remove_idx3 = np.where(X > 10000)
remove_idx4 = np.where(X < -10000)
remove_idx5 = np.where(Y > 10000)
remove_idx6 = np.where(Y < -10000)
remove_idx = np.hstack(
(remove_idx1[0], remove_idx2[0], remove_idx3[0], remove_idx4[0], remove_idx5[0], remove_idx6[0]))
points_3d = np.delete(points_3d, remove_idx, 0)
    # compute the target point (the median of the target region is used here; adjust as needed)
if points_3d.any():
x = np.median(points_3d[:, 0])
y = np.median(points_3d[:, 1])
z = np.median(points_3d[:, 2])
targetPoint = [x, y, z]
else:
        targetPoint = [0, 0, -1]  # the target region could not be identified
return targetPoint
# rectification check ---- draw lines
def draw_line(image1, image2):
    # build the output image
height = max(image1.shape[0], image2.shape[0])
width = image1.shape[1] + image2.shape[1]
output = np.zeros((height, width, 3), dtype=np.uint8)
output[0:image1.shape[0], 0:image1.shape[1]] = image1
output[0:image2.shape[0], image1.shape[1]:] = image2
    # draw evenly spaced parallel lines
    line_interval = 50  # line spacing: 50
for k in range(height // line_interval):
cv2.line(output, (0, line_interval * (k + 1)), (
2 * width, line_interval * (k + 1)), (0, 255, 0),
thickness=2, lineType=cv2.LINE_AA)
imgL = cv2.imread("D:/cxn_project/Strain-gauges-recognition/cali_img/left/l6.bmp")
imgR = cv2.imread("D:/cxn_project/Strain-gauges-recognition/cali_img/right/r6.bmp")
height, width = imgL.shape[0:2]
# read camera intrinsic and extrinsic parameters
config = stereoCameral()
map1x, map1y, map2x, map2y, Q = getRectifyTransform(height, width, config)
iml_rectified, imr_rectified = rectifyImage(imgL, imgR, map1x,
map1y, map2x, map2y)
disp = sgbm(iml_rectified, imr_rectified)
plt.imshow(disp)
target_point = threeD(disp, Q)  # 3D coordinates of the target point (in the left camera coordinate frame)
print(target_point)
| 2,151
| 0
| 0
| 682
| 0
| 3,660
| 0
| 4
| 156
|
9789fa0d9128f43bf8dfdc8bf19f5e86479b11c4
| 2,110
|
py
|
Python
|
lib/model/model.py
|
smallstrong0/easy_python
|
c6794bf290731beb9b3cab94f815880befb37d9b
|
[
"MIT"
] | 2
|
2020-09-16T09:32:09.000Z
|
2021-02-10T12:09:40.000Z
|
lib/model/model.py
|
smallstrong0/easy_python
|
c6794bf290731beb9b3cab94f815880befb37d9b
|
[
"MIT"
] | null | null | null |
lib/model/model.py
|
smallstrong0/easy_python
|
c6794bf290731beb9b3cab94f815880befb37d9b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Put your models here
from lib.model.base import Base
"""
1. BaseModel
2. id
3.comment
4.
5.
"""
if __name__ == '__main__':
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine
from setting import MYSQL
engine = create_engine(MYSQL)
DBSession = scoped_session(sessionmaker(bind=engine))
Base.metadata.create_all(engine)
| 37.678571
| 122
| 0.735071
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Put your models here
from sqlalchemy import Column, BigInteger, Integer, String, SmallInteger, Float, Boolean, DECIMAL, Text, DateTime, Date, \
Index, UniqueConstraint
from sqlalchemy.dialects.mysql import MEDIUMTEXT, LONGTEXT, BIGINT, INTEGER, SMALLINT, TINYINT, TIMESTAMP
from sqlalchemy.ext.declarative import declarative_base
from decimal import Decimal
from sqlalchemy.schema import Sequence
from lib.model.base import Base, BaseModel
"""
建表规范
1.之后建表 请继承BaseModel
2.表字段主键自增强制取名 不允许是id
3.comment备注强制每个字段都要
4.建表之后如果如果关联其他表字段时候 名字别乱取 要统一
5.字段取名 出现下划线警示时候请自行注意单词拼写
"""
class ApiLog(BaseModel):
__tablename__ = "api_log"
    __doc__ = 'API request log'
    log_id = Column(BigInteger, primary_key=True, autoincrement=True, comment='log primary key')
    time_consuming = Column(Integer, nullable=False, default=0, comment='API latency in milliseconds')
    params = Column(String(1024), nullable=False, default='{}', comment='URL parameters')
    body = Column(Text, nullable=False, default='{}', comment='body parameters')
    response = Column(Text, nullable=False, default='{}', comment='response payload')
    date_time_in = Column(String(30), nullable=False, default='', comment='call time')
    date_time_out = Column(String(30), nullable=False, default='', comment='return time')
    method = Column(String(10), nullable=False, default='', comment='http method')
    url = Column(String(1024), nullable=False, default='', comment='http path url')
    user_id = Column(BigInteger, nullable=False, comment='user_id of the logged-in user')
    result = Column(String(10), nullable=False, default='SUCCESS', comment='result')
class Test(BaseModel):
__tablename__ = 'test'
test_id = Column(INTEGER(11), primary_key=True, autoincrement=True)
test_name = Column(String(128), nullable=False, default="")
if __name__ == '__main__':
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy import create_engine
from setting import MYSQL
engine = create_engine(MYSQL)
DBSession = scoped_session(sessionmaker(bind=engine))
Base.metadata.create_all(engine)
| 360
| 0
| 0
| 1,066
| 0
| 0
| 0
| 281
| 157
|
7f00b4d7b1812863814e19a971adecf24942fc84
| 3,949
|
py
|
Python
|
algorithms/Search.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
algorithms/Search.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
algorithms/Search.py
|
zhaoxinlu/leetcode-algorithms
|
f5e1c94c99628e7fb04ba158f686a55a8093e933
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author: Zhao Xinlu
School: BUPT
Date: 2018-01-15
Function: Some different searching algorithms and its performance
"""
def Simple_search(lists, key):
'''
    Simple_search: linear scan over unsorted data, checking every element;
    Time complexity: O(n)
:param lists: search list
:param key: the value of key
:return: the key's location in the list
'''
length = len(lists)
for i in range(0, length):
if lists[i] == key:
return i
return False
def Binary_search(lists, key):
'''
    Binary search: repeatedly compare the middle element with the key, halving the search range each time.
    Time complexity: O(log n)
:param lists: search list
:param key: the value of key
:return: the key's location in the list
'''
length = len(lists)
low = 0
high = length - 1
while low < high:
mid = int((low + high) / 2)
# mid = low + 1/2 * (high - low)
if lists[mid] > key:
high = mid - 1
elif lists[mid] < key:
low = mid + 1
else:
return mid
return False
def Binary_search2(lists, key, low, high):
'''
    Binary search 2 (recursive implementation of binary search)
:param lists: search list
:param key: the value of key
:param low:
:param high:
:return: the key's location in the list
'''
mid = int((low + high) / 2)
if lists[mid] == key:
return mid
elif lists[mid] < key:
return Binary_search2(lists, key, mid+1, high)
else:
return Binary_search2(lists, key, low, mid-1)
def Binary_search_plus(lists, key):
'''
    Binary search plus (interpolation search): an optimization of binary search
:param lists: search list
:param key: the value of key
:return: the key's location in the list
'''
length = len(lists)
low = 0
high = length - 1
while low < high:
mid = low + int((high - low) * (key - lists[low]) / (lists[high] - lists[low]))
# value = (key - list[low])/(list[high] - list[low])
if lists[mid] > key:
high = mid - 1
elif lists[mid] < key:
low = mid + 1
else:
return mid
return False
def Fibonacci_search(lists, key):
'''
    Fibonacci search: uses the Fibonacci sequence (a golden-ratio split) to choose the mid position.
    Time complexity: O(log n)
:param lists: search list
:param key: the value of search key
:return: the key's location in the list
'''
    # Requires a precomputed Fibonacci list whose largest value exceeds the number of elements in the search list.
FibonacciList = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144,
233, 377, 610, 987, 1597, 2584, 4181, 6765,
10946, 17711, 28657, 46368]
length = len(lists)
low = 0
high = length - 1
    # To make the list length match a Fibonacci number, pad the end of the list
    # with copies of its last element;
    # the number of padded entries is determined by F[k]-1-high
k = 0
while high > FibonacciList[k] - 1:
k += 1
print k
i = high
while FibonacciList[k] - 1 > i:
lists.append(lists[high])
i += 1
print lists
    # Main search loop
while low <= high:
if k < 2:
mid = low
else:
mid = low + FibonacciList[k] - 1
        # Use the Fibonacci split to pick the position of the next key comparison
if key < lists[mid]:
high = mid - 1
k -= 1
elif key > lists[mid]:
low = mid + 1
k -= 2
else:
if mid <= high:
return mid
else:
return high
return False
if __name__ == '__main__':
key = 7
TestList1 = [3, 6, 5, 9, 7, 1, 8, 2, 4]
TestList2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
TestList3 = [1, 5, 7, 8, 22, 54, 99, 123, 200, 222, 444]
# result = Simple_search(TestList1, key)
# result = Binary_search(TestList2, key)
# result = Binary_search2(TestList2, key, 0, len(TestList2))
# result = Binary_search_plus(TestList2, key)
result = Fibonacci_search(TestList3, key=444)
print "Key's location of the list is : lists[", result, "]"
| 26.326667
| 87
| 0.545455
|
# -*- coding: utf-8 -*-
"""
Author: Zhao Xinlu
School: BUPT
Date: 2018-01-15
Function: Some different searching algorithms and its performance
"""
def Simple_search(lists, key):
'''
    Simple_search: linear scan over unsorted data, checking every element;
    Performance:
    Time complexity: O(n)
:param lists: search list
:param key: the value of key
:return: the key's location in the list
'''
length = len(lists)
for i in range(0, length):
if lists[i] == key:
return i
return False
def Binary_search(lists, key):
'''
    Binary search: repeatedly compare the middle element of the table with the key, halving the search range each time.
    Performance:
    Time complexity: O(log n)
:param lists: search list
:param key: the value of key
:return: the key's location in the list
'''
length = len(lists)
low = 0
high = length - 1
while low < high:
mid = int((low + high) / 2)
# mid = low + 1/2 * (high - low)
if lists[mid] > key:
high = mid - 1
elif lists[mid] < key:
low = mid + 1
else:
return mid
return False
def Binary_search2(lists, key, low, high):
'''
    Binary search 2 (recursive implementation of binary search)
:param lists: search list
:param key: the value of key
:param low:
:param high:
:return: the key's location in the list
'''
mid = int((low + high) / 2)
if lists[mid] == key:
return mid
elif lists[mid] < key:
return Binary_search2(lists, key, mid+1, high)
else:
return Binary_search2(lists, key, low, mid-1)
def Binary_search_plus(lists, key):
'''
    Binary search plus (interpolation search): an optimization of binary search.
    Halving is not aggressive enough - wouldn't discarding nine tenths of the data each step be better? Choosing that probe position is the key problem.
:param lists: search list
:param key: the value of key
:return: the key's location in the list
'''
length = len(lists)
low = 0
high = length - 1
while low < high:
mid = low + int((high - low) * (key - lists[low]) / (lists[high] - lists[low]))
        # Core interpolation formula: value = (key - list[low])/(list[high] - list[low])
if lists[mid] > key:
high = mid - 1
elif lists[mid] < key:
low = mid + 1
else:
return mid
return False
def Fibonacci_search(lists, key):
'''
    Fibonacci search: uses the Fibonacci sequence (a golden-ratio split) to choose the mid position.
    Performance:
    Time complexity: O(log n)
:param lists: search list
:param key: the value of search key
:return: the key's location in the list
'''
    # Requires a precomputed Fibonacci list whose largest value exceeds the number of elements in the search list.
FibonacciList = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144,
233, 377, 610, 987, 1597, 2584, 4181, 6765,
10946, 17711, 28657, 46368]
length = len(lists)
low = 0
high = length - 1
    # To make the list length match a Fibonacci number, pad the end of the list
    # with copies of its last element;
    # the number of padded entries is determined by F[k]-1-high
k = 0
while high > FibonacciList[k] - 1:
k += 1
print k
i = high
while FibonacciList[k] - 1 > i:
lists.append(lists[high])
i += 1
print lists
    # Main search loop
while low <= high:
if k < 2:
mid = low
else:
mid = low + FibonacciList[k] - 1
        # Use the Fibonacci split to pick the position of the next key comparison
if key < lists[mid]:
high = mid - 1
k -= 1
elif key > lists[mid]:
low = mid + 1
k -= 2
else:
if mid <= high:
return mid
else:
return high
return False
if __name__ == '__main__':
key = 7
TestList1 = [3, 6, 5, 9, 7, 1, 8, 2, 4]
TestList2 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
TestList3 = [1, 5, 7, 8, 22, 54, 99, 123, 200, 222, 444]
# result = Simple_search(TestList1, key)
# result = Binary_search(TestList2, key)
# result = Binary_search2(TestList2, key, 0, len(TestList2))
# result = Binary_search_plus(TestList2, key)
result = Fibonacci_search(TestList3, key=444)
print "Key's location of the list is : lists[", result, "]"
| 912
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d04e9753e38d729231b2e66b82bf461bc97df527
| 3,220
|
py
|
Python
|
goldman/resources/oauth_ropc.py
|
sassoo/goldman
|
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
[
"MIT"
] | 2
|
2016-07-26T13:47:51.000Z
|
2017-02-13T12:08:38.000Z
|
goldman/resources/oauth_ropc.py
|
sassoo/goldman
|
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
[
"MIT"
] | null | null | null |
goldman/resources/oauth_ropc.py
|
sassoo/goldman
|
b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2
|
[
"MIT"
] | null | null | null |
"""
resources.oauth_ropc
~~~~~~~~~~~~~~~~~~~~
OAuth2 Resource Owner Password Credentials Grant resource
object with responders.
This resource should be used to accept access_token requests
according to RFC 6749 section 4.3:
tools.ietf.org/html/rfc6749#section-4.3
The resource requires a callable to be passed in as the
auth_creds property which will be given a username &
password. The callable should return a token.
Returning a string will be interpreted as an error &
a RFC 6749 compliant error response will be sent with
the error message as the error_description field in
the response.
"""
| 32.857143
| 76
| 0.587888
|
"""
resources.oauth_ropc
~~~~~~~~~~~~~~~~~~~~
OAuth2 Resource Owner Password Credentials Grant resource
object with responders.
This resource should be used to accept access_token requests
according to RFC 6749 section 4.3:
tools.ietf.org/html/rfc6749#section-4.3
The resource requires a callable to be passed in as the
auth_creds property which will be given a username &
password. The callable should return a token.
Returning a string will be interpreted as an error &
a RFC 6749 compliant error response will be sent with
the error message as the error_description field in
the response.
"""
import falcon
import goldman
from goldman.exceptions import AuthRejected
from ..resources.base import Resource as BaseResource
class Resource(BaseResource):
""" OAuth2 Resource Owner Password Credentials Grant resource """
DESERIALIZERS = [
goldman.FormUrlEncodedDeserializer,
]
SERIALIZERS = [
goldman.JsonSerializer,
]
def __init__(self, auth_creds):
self.auth_creds = auth_creds
super(Resource, self).__init__()
@property
def _realm(self):
""" Return a string representation of the authentication realm """
return 'Bearer realm="%s"' % goldman.config.AUTH_REALM
def on_post(self, req, resp):
""" Validate the access token request for spec compliance
The spec also dictates the JSON based error response
on failure & is handled in this responder.
"""
grant_type = req.get_param('grant_type')
password = req.get_param('password')
username = req.get_param('username')
# errors or not, disable client caching along the way
# per the spec
resp.disable_caching()
if not grant_type or not password or not username:
resp.status = falcon.HTTP_400
resp.serialize({
'error': 'invalid_request',
'error_description': 'A grant_type, username, & password '
'parameters are all required when '
'requesting an OAuth access_token',
'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2',
})
elif grant_type != 'password':
resp.status = falcon.HTTP_400
resp.serialize({
'error': 'unsupported_grant_type',
'error_description': 'The grant_type parameter MUST be set '
'to "password" not "%s"' % grant_type,
'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2',
})
else:
try:
token = self.auth_creds(username, password)
resp.serialize({
'access_token': token,
'token_type': 'Bearer',
})
except AuthRejected as exc:
resp.status = falcon.HTTP_401
resp.set_header('WWW-Authenticate', self._realm)
resp.serialize({
'error': 'invalid_client',
'error_description': exc.detail,
})
| 0
| 149
| 0
| 2,257
| 0
| 0
| 0
| 39
| 113
|
6e976b5b835b38818090ff9810971809f857fe0e
| 7,534
|
py
|
Python
|
newapp.py
|
Andriusjok/VUSAMIFChatBot
|
9444bcf4f0f2137757925f8e07b8cc3ba442a162
|
[
"BSD-2-Clause"
] | null | null | null |
newapp.py
|
Andriusjok/VUSAMIFChatBot
|
9444bcf4f0f2137757925f8e07b8cc3ba442a162
|
[
"BSD-2-Clause"
] | null | null | null |
newapp.py
|
Andriusjok/VUSAMIFChatBot
|
9444bcf4f0f2137757925f8e07b8cc3ba442a162
|
[
"BSD-2-Clause"
] | null | null | null |
from flask import Flask
ACCESS_TOKEN = "Baisiai slaptas"
VERIFY_TOKEN = "Dar slaptesnis"
app = Flask(__name__)
app.debug = True
messenger = Messenger(ACCESS_TOKEN)
if __name__ == "__main__":
app.run(host="0.0.0.0")
| 41.395604
| 168
| 0.591585
|
import os
from flask import Flask, request
from fbmessenger import BaseMessenger
from fbmessenger import quick_replies
from fbmessenger.elements import Text
from fbmessenger.thread_settings import GreetingText, GetStartedButton, MessengerProfile
from fbmessenger import elements
from fbmessenger import templates
ACCESS_TOKEN = "Baisiai slaptas"
VERIFY_TOKEN = "Dar slaptesnis"
class Messenger(BaseMessenger):
def __init__(self, page_access_token):
self.page_access_token = page_access_token
super(Messenger, self).__init__(self.page_access_token)
def message(self, message):
response = Text(text= str(message["message"]["text"]))
action = response.to_dict()
res = self.send(action)
app.logger.debug("Response: {}".format(res))
def delivery(self, message):
pass
def read(self, message):
pass
def account_linking(self, message):
pass
def postback(self, message):
payload = message["postback"]["payload"]
print(message["postback"]["payload"])
if "start" in payload:
elem = elements.Text("Sveiki, norėdami pasinaudoti VU SA MIF DUK skiltimi, pasirinkite vieną iš žemiau pateiktų temų, kitu atveju užduokite savo klausimą.")
self.send(elem.to_dict(),"RESPONSE")
btn1 = elements.Button(button_type = "postback", title="VU SA+LSP+Apeliacijos", payload="VU SA+LSP+Apeliacijos")
btn2 = elements.Button(button_type = "postback", title="BUS+PD", payload="BUS+PD")
btn3 = elements.Button(button_type = "postback", title="Studijos+Finansai", payload="Studijos+Finansai")
btns = templates.ButtonTemplate(
text = "DUK temos",
buttons = [btn1, btn2, btn3]
)
self.send(btns.to_dict(),"RESPONSE")
if "VU SA+LSP+Apeliacijos" == payload:
btn1 = elements.Button(button_type = "postback", title="VU SA", payload="VU SA")
btn2 = elements.Button(button_type = "postback", title="LSP", payload="LSP")
btn3 = elements.Button(button_type = "postback", title="Apeliacijos", payload="Apeliacijos")
btns = templates.ButtonTemplate(
text = "Potemės",
buttons = [btn1, btn2, btn3]
)
self.send(btns.to_dict(),"RESPONSE")
if "BUS+PD" == payload:
btn1 = elements.Button(button_type = "postback", title="BUS", payload="BUS")
btn2 = elements.Button(button_type = "postback", title="PD", payload="PD")
btns = templates.ButtonTemplate(
text = "Potemės",
buttons = [btn1, btn2]
)
self.send(btns.to_dict(),"RESPONSE")
if "Studijos+Finansai" == payload:
btn1 = elements.Button(button_type = "postback", title="Studijos", payload="Studijos")
btn2 = elements.Button(button_type = "postback", title="Finansai", payload="Finansai")
btns = templates.ButtonTemplate(
text = "Potemės",
buttons = [btn1, btn2]
)
self.send(btns.to_dict(),"RESPONSE")
if "Studijos" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie Studijų programų / dalykų keitimą bei gretutines studijas / individualų studijų planą",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
if "Finansai" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie mokesčius už mokslą bei stipendijas",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
if "BUS" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie Bendrasias universitetines studijas",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
if "PD" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie Pasirenkamuosius dalykus",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
if "VU SA" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie VU SA bei VU SA MIF",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
if "LSP" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie LSP",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
if "Apeliacijos" == payload:
btn1 = elements.Button(
button_type = "web_url",
title="Atsakymai",
url="https://docs.google.com/document/d/1e_1jSsdjlfoIYJrIuCZELJX0nv4F5IIp2ar-CMUmn98/edit"
)
btns = templates.ButtonTemplate(
text = "Nuoroda į DUK apie Apeliacijas bei skundus",
buttons = [btn1]
)
print(self.send(btns.to_dict(),"RESPONSE"))
def optin(self, message):
pass
def init_bot(self):
greeting_text = GreetingText("VU SA MIF konsultavimas")
messenger_profile = MessengerProfile(greetings=[greeting_text])
messenger.set_messenger_profile(messenger_profile.to_dict())
get_started = GetStartedButton(payload="start")
messenger_profile = MessengerProfile(get_started=get_started)
messenger.set_messenger_profile(messenger_profile.to_dict())
app = Flask(__name__)
app.debug = True
messenger = Messenger(ACCESS_TOKEN)
@app.route("/", methods=["GET", "POST"])
def webhook():
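    # GET  : Facebook webhook verification handshake - echo back hub.challenge
    #        once hub.verify_token matches our VERIFY_TOKEN.
    # POST : incoming Messenger events, handed off to the Messenger instance.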
if request.method == "GET":
if request.args.get("hub.verify_token") == VERIFY_TOKEN:
messenger.init_bot()
return request.args.get("hub.challenge")
raise ValueError("FB_VERIFY_TOKEN does not match.")
elif request.method == "POST":
messenger.handle(request.get_json(force=True))
return ""
if __name__ == "__main__":
app.run(host="0.0.0.0")
| 56
| 381
| 0
| 6,566
| 0
| 0
| 0
| 135
| 200
|
788f66b9fb4748228011a407e6e029dba64a944b
| 14,173
|
py
|
Python
|
fixture/contact.py
|
Droriel/python_training
|
e0fbbf3df4289e5af606d9c752e99cab82c653a6
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
Droriel/python_training
|
e0fbbf3df4289e5af606d9c752e99cab82c653a6
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
Droriel/python_training
|
e0fbbf3df4289e5af606d9c752e99cab82c653a6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
| 46.621711
| 146
| 0.627672
|
# -*- coding: utf-8 -*-
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from model.contact import ContactBaseData
import re
class ContactHelper:
def __init__(self, app):
self.app = app
# additional methods -adding contact
def open_main_page(self):
wd = self.app.wd
if not(wd.current_url.endswith('/addressbook/') and len(wd.find_elements_by_xpath("//strong[contains(.,'Liczba trafień:')]")) > 0):
wd.find_element_by_xpath("//a[contains(.,'strona główna')]").click()
def submit_contact(self):
wd = self.app.wd
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
def fill_notes(self, notes):
wd = self.app.wd
self.app.change_field_value("notes", notes.notes)
def fill_additional_data(self, additionalData):
wd = self.app.wd
# Fill second address and phone
# Fill in second address
self.app.change_field_value("address2", additionalData.address)
# Fill in "Prywatny" phone
self.app.change_field_value("phone2", additionalData.phone)
def fill_anniversary_date(self, anniversaryDate):
wd = self.app.wd
# Choose in day
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[3]//option[%s]" % str(anniversaryDate.day + 2)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % str(anniversaryDate.day + 2)).click()
# Choose in month
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[4]//option[%s]" % str(anniversaryDate.month + 1)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[%s]" % str(anniversaryDate.month + 1)).click()
# Fill in year
self.app.change_field_value("ayear", anniversaryDate.year)
def update_anniversary_date(self, anniversaryDate):
wd = self.app.wd
# Choose in day
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[3]//option[%s]" % str(anniversaryDate.day + 2)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[%s]" % str(anniversaryDate.day + 2)).click()
# Choose in month
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[4]//option[%s]" % str(anniversaryDate.month + 2)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[%s]" % str(anniversaryDate.month + 2)).click()
# Fill in year
self.app.change_field_value("ayear", anniversaryDate.year)
def fill_birth_date(self, birthDate):
wd = self.app.wd
# Choose in day
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[1]//option[%s]" % str(birthDate.day + 2)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % str(birthDate.day + 2)).click()
# Choose in month
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[2]//option[%s]" % str(birthDate.month + 1)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[%s]" % str(birthDate.month + 1)).click()
# Fill in year
self.app.change_field_value("byear", birthDate.year)
def update_birth_date(self, birthDate):
wd = self.app.wd
# Choose in day
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[1]//option[%s]" % str(birthDate.day + 2)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[%s]" % str(birthDate.day + 2)).click()
# Choose in month
if not wd.find_element_by_xpath(
"//div[@id='content']/form/select[2]//option[%s]" % str(birthDate.month + 2)).is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[%s]" % str(birthDate.month + 2)).click()
# Fill in year
self.app.change_field_value("byear", birthDate.year)
def fill_www_address(self, www):
wd = self.app.wd
self.app.change_field_value("homepage", www.www)
def fill_emails(self, emails):
wd = self.app.wd
self.app.change_field_value("email", emails.email1)
self.app.change_field_value("email2", emails.email2)
self.app.change_field_value("email3", emails.email3)
def fill_phone_number(self, phoneNumbers):
wd = self.app.wd
# Fill in home number
self.app.change_field_value("home", phoneNumbers.home)
self.app.change_field_value("mobile", phoneNumbers.mobile)
self.app.change_field_value("work", phoneNumbers.work)
self.app.change_field_value("fax", phoneNumbers.fax)
def fill_contact_base_data(self,baseData):
wd = self.app.wd
self.app.change_field_value("firstname", baseData.firstname)
self.app.change_field_value("lastname", baseData.lastname)
# self.app.change_field_value("home", phoneNumbers.home)
# self.app.change_field_value("mobile", phoneNumbers.mobile)
# self.app.change_field_value("work", phoneNumbers.work)
# self.app.change_field_value("phone2", additionalData.phone)
# self.app.change_field_value("email", emails.email1)
# self.app.change_field_value("email2", emails.email2)
# self.app.change_field_value("email3", emails.email3)
def fill_personal_data(self, personalData):
wd = self.app.wd
self.app.change_field_value("middlename", personalData.middlename)
self.app.change_field_value("nickname", personalData.nickname)
# Add photo
# wd.find_element_by_name("photo").click()
self.app.change_field_value("title", personalData.title)
self.app.change_field_value("company", personalData.company)
self.app.change_field_value("address", personalData.address)
def init_new_contact(self):
wd = self.app.wd
wd.find_element_by_link_text("nowy wpis").click()
def choose_by_id_contact(self, contact_id):
wd = self.app.wd
wd.find_element_by_xpath("//input[@id='%s']" % contact_id).click()
def delete_first_contact(self):
wd = self.app.wd
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.open_main_page()
# Choose first contact
wd.find_elements_by_name("selected[]")[index].click()
        # Submit contact deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# closing alert window
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_contact_by_id(self, contact_id):
wd = self.app.wd
self.open_main_page()
self.choose_by_id_contact(contact_id)
# Submit contact deletation
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# closing alert window
wd.switch_to_alert().accept()
self.contact_cache = None
def delete_all_contacts(self):
wd = self.app.wd
self.open_main_page()
# Choose all contacts
# //form[@name='MainForm']/input[2]
wd.find_element_by_xpath("//input[@id='MassCB']").click()
        # Submit contact deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# closing alert window
wd.switch_to_alert().accept()
self.contact_cache = None
def init_first_contact_edition(self):
wd = self.app.wd
self.init_by_index_contact_edition(0)
def init_by_index_contact_edition(self,index):
wd = self.app.wd
self.open_main_page()
wd.find_elements_by_xpath("//img[@title='Edytuj']")[index].click()
def init_by_id_contact_edition(self, contact_id):
wd = self.app.wd
self.open_main_page()
wd.find_element_by_xpath("//a[contains(@href,'edit.php?id=%s')]/img" % contact_id).click()
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.open_main_page()
wd.find_elements_by_xpath("//img[@alt='Szczegóły']")[index].click()
def update_contact_top(self):
wd = self.app.wd
wd.find_element_by_xpath("//input[@value='Aktualizuj'][1]").click()
self.contact_cache = None
def update_contact_bottom(self):
wd = self.app.wd
wd.find_element_by_xpath("//input[@value='Aktualizuj'][2]").click()
self.contact_cache = None
def delete_edited_contact(self):
wd = self.app.wd
wd.find_element_by_xpath("//input[@value='Usuń']").click()
self.contact_cache = None
def add_contact_to_group(self, contact_id, group_id):
wd = self.app.wd
self.open_main_page()
# choosing contact
self.choose_by_id_contact(contact_id)
# choosing group from dropdown for adding
# wd.find_element_by_xpath("//select[@name='to_group']").click()
wd.find_element_by_xpath("//select[@name='to_group']/option[@value='%s']" % group_id).click()
# Submit
wd.find_element_by_xpath("//input[@name='add']").click()
def delete_contact_from_group(self, contact_id, group_id):
wd = self.app.wd
self.open_main_page()
# group choosing from dropdown for viewing contacts in group
wd.find_element_by_xpath("//select[@name='group']/option[@value='%s']" % group_id).click()
# waiting for the refresh of content
wait = WebDriverWait(wd, 10)
wait.until(lambda d: d.find_element_by_xpath("//input[@name='remove']"))
# choosing contact
self.choose_by_id_contact(contact_id)
# Submit
wd.find_element_by_xpath("//input[@name='remove']").click()
# counting elements on the list
def count(self):
wd = self.app.wd
self.open_main_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
wd = self.app.wd
self.open_main_page()
self.contact_cache = []
for row in wd.find_elements_by_name('entry'):
cells = row.find_elements_by_tag_name('td')
id = cells[0].find_element_by_tag_name('input').get_attribute('value')
            # the leading "." before "//" makes the XPath relative - it scopes the search to this row's subtree
# text1 = element.find_element_by_xpath(".//td[2]").text
# text2 = element.find_element_by_xpath(".//td[3]").text
# lastName = row.find_element_by_css_selector('*>td:nth-of-type(2)').text
# firstName = row.find_element_by_css_selector('*>td:nth-of-type(3)').text
firstName = cells[2].text
lastName = cells[1].text
allPhones = cells[5].text
allEmails = cells[4].text
address = cells[3].text
self.contact_cache.append(ContactBaseData(firstname=firstName, lastname=lastName, id=id, address=address,
allPhonesFromHomePage=allPhones, allEmailsFromHomePage=allEmails))
return list(self.contact_cache)
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.init_by_index_contact_edition(index)
id = wd.find_element_by_name('id').get_attribute('value')
firstname = wd.find_element_by_name('firstname').get_attribute('value')
lastname = wd.find_element_by_name('lastname').get_attribute('value')
address = wd.find_element_by_name('address').get_attribute('value')
homephone = wd.find_element_by_name('home').get_attribute('value')
workphone = wd.find_element_by_name('work').get_attribute('value')
mobilephone = wd.find_element_by_name('mobile').get_attribute('value')
additionalphone = wd.find_element_by_name('phone2').get_attribute('value')
email1 = wd.find_element_by_name('email').get_attribute('value')
email2 = wd.find_element_by_name('email2').get_attribute('value')
email3 = wd.find_element_by_name('email3').get_attribute('value')
return ContactBaseData(firstname=firstname, lastname=lastname, id=id,
homephone=homephone, workphone=workphone, mobilephone=mobilephone,
additionalphone=additionalphone, email1=email1, email2=email2, email3=email3, address=address)
def get_contact_info_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id('content').text
if re.search('H:\s(.*)', text) is not None:
homephone = re.search('H:\s(.*)', text).group(1)
else:
homephone = None
if re.search('W:\s(.*)', text) is not None:
workphone = re.search('W:\s(.*)', text).group(1)
else:
workphone = None
if re.search('M:\s(.*)', text) is not None:
mobilephone = re.search('M:\s(.*)', text).group(1)
else:
mobilephone = None
if re.search('P:\s(.*)', text) is not None:
additionalphone = re.search('P:\s(.*)', text).group(1)
else:
additionalphone = None
# allEmails = wd.find_elements_by_xpath("//a[starts-with(@href, 'mailto:')]")
allEmails = []
for i in range(0, len(wd.find_elements_by_xpath("//a[starts-with(@href, 'mailto:')]"))):
allEmails.append(wd.find_elements_by_xpath("//a[starts-with(@href, 'mailto:')]")[i].text)
return ContactBaseData(homephone=homephone, workphone=workphone, mobilephone=mobilephone,
additionalphone=additionalphone, allEmailsFromHomePage=allEmails)
| 16
| 0
| 0
| 13,941
| 0
| 0
| 0
| 87
| 112
|
092f1761576ffa817c9655201b6f11db45a1a582
| 2,041
|
py
|
Python
|
vision/library/tools/engines/text_recognition_tesseract.py
|
lcmonteiro/space-vision-py
|
38022c99218de0e1e93ec0bae8d143fa0c787f1d
|
[
"MIT"
] | 1
|
2019-12-14T20:00:17.000Z
|
2019-12-14T20:00:17.000Z
|
vision/library/tools/engines/text_recognition_tesseract.py
|
lcmonteiro/space-vision-py
|
38022c99218de0e1e93ec0bae8d143fa0c787f1d
|
[
"MIT"
] | null | null | null |
vision/library/tools/engines/text_recognition_tesseract.py
|
lcmonteiro/space-vision-py
|
38022c99218de0e1e93ec0bae8d143fa0c787f1d
|
[
"MIT"
] | null | null | null |
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# File: text_recognition_tesseract_engine.py
# Author: Luis Monteiro
#
# Created on nov 17, 2019, 22:00 PM
# ------------------------------------------------------------------------------------------------
# ################################################################################################
# external
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# TextRecognitionTesseract
# ------------------------------------------------------------------------------------------------
# ################################################################################################
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------------------------
# ################################################################################################
| 48.595238
| 98
| 0.168055
|
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# File: text_recognition_tesseract_engine.py
# Author: Luis Monteiro
#
# Created on nov 17, 2019, 22:00 PM
# ------------------------------------------------------------------------------------------------
# ################################################################################################
# external
from pytesseract import image_to_string
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# TextRecognitionTesseract
# ------------------------------------------------------------------------------------------------
# ################################################################################################
class TextRecognitionTesseract:
#
# -------------------------------------------------------------------------
# initialization
# -------------------------------------------------------------------------
#
def __init__(self):
super().__init__()
# configuration
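        # -l eng  : use the English language data
        # --oem 1 : use the LSTM-based OCR engine
        # --psm 7 : page segmentation mode 7 - treat the image as a single text line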
self.__config = ("-l eng --oem 1 --psm 7")
#
# -------------------------------------------------------------------------
# process
# -------------------------------------------------------------------------
#
def process(self, frame):
# format results
return image_to_string(frame, config=self.__config)
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------------------------
# ################################################################################################
| 0
| 0
| 0
| 630
| 0
| 0
| 0
| 18
| 44
|
5111b400d490cda967a1cc070c3b3f72a6cd1341
| 685
|
py
|
Python
|
music_manuel/osc_server.py
|
HelsinkiGroup5/Hackathon
|
eb1c7c5f142fc3dbe83a41a558a1ab8071341d06
|
[
"MIT"
] | null | null | null |
music_manuel/osc_server.py
|
HelsinkiGroup5/Hackathon
|
eb1c7c5f142fc3dbe83a41a558a1ab8071341d06
|
[
"MIT"
] | null | null | null |
music_manuel/osc_server.py
|
HelsinkiGroup5/Hackathon
|
eb1c7c5f142fc3dbe83a41a558a1ab8071341d06
|
[
"MIT"
] | null | null | null |
import OSC
#import rtmidi_python as rtmidi
#midi_out = rtmidi.MidiOut()
#midi_out.open_port(0)
if __name__ == "__main__":
s = OSC.OSCServer(('10.100.7.151', 57120)) # listen on localhost, port 57120
s.addMsgHandler('/startup', handler) # call handler() for OSC messages received with the /startup address
s.serve_forever()
| 29.782609
| 113
| 0.655474
|
import OSC, time
#import rtmidi_python as rtmidi
#midi_out = rtmidi.MidiOut()
#midi_out.open_port(0)
def handler(addr, tags, data, client_address):
txt = "OSCMessage '%s' from %s: " % (addr, client_address)
txt += str(data)
print(txt)
#num = data[0]
#print num
#midi_out.send_message([0x90, 192, num]) # Note on
#time.sleep(0.5)
#midi_out.send_message([0x80, 192, num]) # Note on
#print("midi sent")
if __name__ == "__main__":
s = OSC.OSCServer(('10.100.7.151', 57120)) # listen on localhost, port 57120
s.addMsgHandler('/startup', handler) # call handler() for OSC messages received with the /startup address
s.serve_forever()
| 0
| 0
| 0
| 0
| 0
| 313
| 0
| 6
| 23
|
f1a11d584fb476bc84cfd89ab66c348ee5ba13cc
| 24,531
|
py
|
Python
|
store/adminshop/views/compras.py
|
vallemrv/my_store_test
|
2da624fd02c5f1784464f15b751b488f3dd2bae6
|
[
"Apache-2.0"
] | null | null | null |
store/adminshop/views/compras.py
|
vallemrv/my_store_test
|
2da624fd02c5f1784464f15b751b488f3dd2bae6
|
[
"Apache-2.0"
] | null | null | null |
store/adminshop/views/compras.py
|
vallemrv/my_store_test
|
2da624fd02c5f1784464f15b751b488f3dd2bae6
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 28-Aug-2017
# @Email: [email protected]
# @Filename: views.py
# @Last modified by: valle
# @Last modified time: 02-Mar-2018
# @License: Apache license version 2.0
try:
from django.core.urlresolvers import reverse
except ImportError:
#from django.template import Context
| 40.148936
| 107
| 0.61832
|
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 28-Aug-2017
# @Email: [email protected]
# @Filename: views.py
# @Last modified by: valle
# @Last modified time: 02-Mar-2018
# @License: Apache license version 2.0
from django.forms.models import model_to_dict
from django.db.models import Q
from django.conf import settings
from django.shortcuts import render, redirect
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.contrib.auth.decorators import login_required, permission_required
from django.template.loader import render_to_string
from django.http import HttpResponse
#from django.template import Context
from django.template.loader import get_template
from adminshop.utility import get_documento_compra, get_documento_testeo
from adminshop.forms import (CPClientesForm, CPProductosForm, ProductosForm, MODProductosForm,
FinTratoForm, ValidarCompra, VistaValidarForm, ModelosForm)
from adminshop.models import (Modelos, Clientes, Testeo, ConfigSite, Historial, Firmas,
Productos, Compras, Tipos, Direcciones, DocumentoTesteo, ListaTesteo)
from adminshop.utility import save_historial, save_doc_firmas, save_doc_testeo
from . import (validoDNI, get_first_direccion, set_first_direccion)
from tokenapi.http import JsonResponse
import threading
import base64
import json
import trml2pdf
import os
@login_required(login_url='login_tk')
def get_modificar_compra(request, id_compra):
pres = Compras.objects.filter(pk=id_compra)
if len(pres) > 0:
pres = pres[0]
vendedor = pres.get_vendedor()
producto = pres.producto
producto_dict = model_to_dict(producto)
producto_dict["cliente"] = vendedor['id']
f_compra = MODProductosForm(producto_dict)
modelo = producto.modelo
return render (request, "tienda/compras/modificar.html",
{"c": vendedor,
"form": f_compra,
"m": modelo,
"f": pres})
return redirect("tienda")
@login_required(login_url='login_tk')
def modificar_compra(request, id_compra):
if request.method == "POST":
pres = Compras.objects.filter(pk=id_compra)
if len(pres) > 0:
pres = pres[0]
producto = pres.producto
producto.tipo_id = request.POST["tipo"]
producto.color = request.POST["color"]
producto.modelo_id = request.POST["modelo"]
producto.ns_imei = request.POST["ns_imei"]
producto.precio_compra = request.POST["precio_compra"]
producto.save()
pres.vendedor_id = request.POST["cliente"]
pres.save()
return HttpResponse(reverse("listado_compras"))
@login_required(login_url='login_tk')
def ch_find_modelo(request):
if request.method == "POST":
filter = request.POST["filter"]
filter_query = Modelos.objects.filter(Q(nombre__contains=filter) |
Q(marca__nombre__contains=filter))
return render(request, "tienda/compras/lista_modelos.html",
{'query': filter_query,
'change': True })
@login_required(login_url='login_tk')
def cancelar_trato(request, id_producto):
if request.method == "POST":
producto = Productos.objects.get(pk=id_producto)
f_p = FinTratoForm(request.POST, instance=producto)
if f_p.is_valid():
p = f_p.save()
p.estado = "CT"
p.save()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
            # save a history entry for the action performed
save_historial(request.user.id, p.id, cliente_id,
"Rechazada la compra del producto..")
vaciar_sesison_compra(request)
return HttpResponse(reverse("tienda"))
else:
p = Productos.objects.get(pk=id_producto)
p.estado = "CT"
p.save()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
        # save a history entry for the action performed
save_historial(request.user.id, p.id, cliente_id,
"Rechazada la compra del producto..")
vaciar_sesison_compra(request)
return redirect("lista_productos", estado="TD")
@login_required(login_url='login_tk')
def validar_compra(request, id_compra):
if request.method == "POST":
compra = Compras.objects.get(pk=id_compra)
f = ValidarCompra(request.POST, instance=compra)
if f.is_valid():
compra = f.save()
vaciar_sesison_compra(request)
            # queue the document so it can be signed
save_doc_firmas(request.user.pk, compra.pk, "CP")
return redirect("tienda")
else:
f = VistaValidarForm(instance=compra)
return render(request, "tienda/compras/validar_compra.html",
{"form": f, "form_error": f.errors })
else:
compra = Compras.objects.get(pk=id_compra)
f = VistaValidarForm(instance=compra)
return render(request, "tienda/compras/validar_compra.html",
{"form": f, })
@login_required(login_url='login_tk')
def send_sign(request, id_producto):
producto = Productos.objects.get(pk=id_producto)
compras = Compras.objects.filter(producto__id=id_producto)
if len(compras) > 0:
compra = compras[0]
Firmas.objects.filter(Q(documento_id=compra.pk) &
Q(tipo_documento="CP")).delete()
threading.Thread(target=send_men_sing, args=(compra,)).start()
return render(request, "tienda/compras/sender_sign.html")
@login_required(login_url='login_tk')
def get_document_by_id(request, id_producto):
producto = Productos.objects.get(pk=id_producto)
compras = Compras.objects.filter(producto__id=id_producto)
compra = Compras()
if len(compras) > 0:
compra = compras[0]
return get_document(producto, compra)
@login_required(login_url='login_tk')
def find_cliente(request):
vaciar_sesison_compra(request)
if request.method == "POST" and "DNI" in request.POST:
if validoDNI(request.POST["DNI"]):
return cp_clientes(request)
else:
return render(request, 'tienda/compras/find_cliente.html',{
"mensaje": "DNI no valido",
"url_tipo": reverse("find_cliente")
})
return render(request, 'tienda/compras/find_cliente.html',{
"url_tipo": reverse("find_cliente")
})
@login_required(login_url='login_tk')
def listado_doc_testeos(request):
testeos = DocumentoTesteo.objects.all()
return render(request, 'tienda/testeo/listado.html',{
"compras": testeos
})
@login_required(login_url='login_tk')
def find_doc_testeos(request):
filter = request.POST["filter"]
if len(filter) > 0 and filter[0].upper() == "T":
filter = filter.replace("T", "")
filter = filter.replace("t", "")
compras = DocumentoTesteo.objects.filter(Q(pk=filter))
else:
compras = DocumentoTesteo.objects.filter(Q(cliente__DNI__contains=filter)|
Q(cliente__nombre_completo__contains=filter))
return render(request, 'tienda/testeo/listado_ajax.html',{
"compras": compras
})
@login_required(login_url='login_tk')
def get_doc_testeo_by_id(request, id_doc):
doc = DocumentoTesteo.objects.get(pk=id_doc)
return doc_testeo(doc)
@login_required(login_url='login_tk')
def cp_clientes(request):
if request.method == 'POST':
if "filter" in request.POST:
clientes = Clientes.objects.filter(DNI__icontains=request.POST.get('DNI'))
if len(clientes) > 0:
direccion = get_first_direccion(clientes[0].id)
full_data = dict(model_to_dict(direccion).items() + model_to_dict(clientes[0]).items())
form = CPClientesForm (full_data, instance=clientes[0])
titulo = 'Cliente existente'
tipo = "comprar"
request.session["accion_comprar_dni"] = request.POST.get('DNI')
request.session["accion_comprar_pk_cliente"] = clientes[0].pk
else:
form = CPClientesForm(request.POST)
titulo = 'Cliente no existe'
tipo = "no_existe"
return render(request, 'tienda/compras/clientes_ajax.html',
{'form':form, 'titulo': titulo,
'tipo': tipo})
elif len(request.POST) == 2 and "DNI" in request.POST:
clientes = Clientes.objects.filter(DNI__icontains=request.POST.get('DNI'))
if len(clientes) > 0:
direccion = get_first_direccion(clientes[0].id)
full_data = dict(model_to_dict(direccion).items() + model_to_dict(clientes[0]).items())
form = CPClientesForm (full_data, instance=clientes[0])
titulo = 'Cliente existente'
tipo = "comprar"
request.session["accion_comprar_dni"] = request.POST.get('DNI')
request.session["accion_comprar_pk_cliente"] = clientes[0].pk
else:
form = CPClientesForm(request.POST)
titulo = 'Cliente no existe'
tipo = "no_existe"
return render(request, 'tienda/compras/clientes.html',
{'form':form, 'titulo': titulo,
'tipo': tipo})
elif len(request.POST) > 2:
tipo = "comprar"
clientes = Clientes.objects.filter(DNI__icontains=request.POST.get('DNI'))
request.session["accion_comprar_dni"] = request.POST.get('DNI')
if len(clientes) > 0:
form = CPClientesForm(request.POST, instance=clientes[0])
else:
form = CPClientesForm(request.POST)
if form.is_valid():
cliente = form.save()
direccion = set_first_direccion(request.POST, cliente.pk)
if type(direccion) == Direcciones:
direccion.cliente_id = cliente.pk
direccion.save()
else:
return render(request, 'tienda/compras/clientes.html',
{'form':form, 'titulo': "Error al guardar el cliente",
'tipo': tipo, "form_error": form.errors})
request.session["accion_comprar_pk_cliente"] = cliente.pk
return render(request, 'tienda/compras/clientes.html',
{'form':form, 'titulo': "Cliente guardado o modificado",
'tipo': tipo})
return redirect("find_cliente")
@login_required(login_url='login_tk')
def listado_compras(request):
compras = Compras.objects.all().exclude(tipo_vendedor="NO")
return render(request, 'tienda/compras/listado.html',{
"compras": compras
})
@login_required(login_url='login_tk')
def find_compra(request):
filter = request.POST["filter"]
if len(filter) > 0 and "c" == filter[0].lower():
filter = filter.replace("C", "")
filter = filter.replace("c", "")
compras = Compras.objects.filter(Q(codigo_compra__icontains=filter)).exclude(vendedor_id=None)
else:
compras = Compras.objects.filter(Q(codigo_compra__icontains=filter)|
Q(producto__ns_imei__icontains=filter)).exclude(vendedor_id=None)
return render(request, 'tienda/compras/listado_ajax.html',{
"compras": compras
})
@login_required(login_url='login_tk')
def cp_lista_modelos(request):
if request.method == "POST":
filter = request.POST["filter"]
filter_query = Modelos.objects.filter(Q(nombre__icontains=filter))
return render(request, "tienda/compras/lista_modelos.html", {'query': filter_query})
@login_required(login_url='login_tk')
def send_para_tester(request, id_modelo):
if "accion_comprar_dni" in request.session:
try:
producto = Productos.objects.get(ns_imei=request.POST.get("ns_imei"))
form = CPProductosForm(request.POST, instance=producto)
except Exception as p:
form = CPProductosForm(request.POST)
if form.is_valid():
producto = form.save(commit=False)
producto.modelo_id = request.session["accion_comprar_pk_modelo"]
producto.estado = "OS"
producto.tipo_id = 1
producto.precio_compra = producto.modelo.precio_usado
producto.save()
request.session["accion_comprar_pk_producto"] = producto.pk
            # save a history entry for the action performed
save_historial(request.user.pk, request.session["accion_comprar_pk_cliente"],
producto.pk, "Entrada para testeo posible compra")
            # create the device-reception (testing intake) document
doc = save_doc_testeo(request.user.pk, request.session["accion_comprar_pk_cliente"],
producto.pk)
            # queue the document to be signed
save_doc_firmas(request.user.pk, doc.id, "OS")
vaciar_sesison_compra(request)
return JsonResponse({"result": True})
else:
return redirect("tienda")
@login_required(login_url='login_tk')
def cp_productos(request, id_modelo=-1):
if "accion_comprar_dni" in request.session:
if request.method != "POST" and id_modelo < 0:
f_modelo = ModelosForm()
return render(request, 'tienda/compras/find_modelos.html',
{"form": f_modelo})
elif request.method != "POST" and id_modelo > 0:
request.session["accion_comprar_pk_modelo"] = id_modelo
try:
modelo = Modelos.objects.get(pk=id_modelo)
except:
modelo = Modelos()
tipo = "no_existe"
form = CPProductosForm()
return render(request, 'tienda/compras/productos.html',
{'form':form, 'titulo': "Datos del producto",
'modelo': modelo,
'tipo': tipo})
else:
try:
producto = Productos.objects.get(ns_imei=request.POST.get("ns_imei"))
form = CPProductosForm(request.POST, instance=producto)
except Exception as p:
form = CPProductosForm(request.POST)
if form.is_valid():
producto = form.save(commit=False)
if "accion_comprar_pk_modelo" not in request.session:
vaciar_sesison_compra(request)
return redirect("tienda")
producto.modelo_id = request.session["accion_comprar_pk_modelo"]
producto.estado = "TD"
#tipos = Tipos.objects.all()
#if len(tipos) > 0:
# tipo = tipos[0].pk
#else:
# tipo = -1
#producto.tipo_id = tipo
producto.precio_compra = producto.modelo.precio_usado
producto.save()
request.session["accion_comprar_pk_producto"] = producto.pk
save_historial(request.user.pk, request.session["accion_comprar_pk_cliente"],
request.session["accion_comprar_pk_producto"],
"Producto comprado sin testear")
form = ProductosForm(instance=producto)
return render(request, 'tienda/compras/compras.html',
{'form':form, 'titulo': "Datos del producto",
"form_error": form.errors,
"id_modelo": request.session["accion_comprar_pk_modelo"]})
else:
return redirect("tienda")
@login_required(login_url='login_tk')
def calcular_precio_usado(request, id_modelo):
if request.method == "POST":
tipo = Tipos.objects.get(pk=request.POST["tipo"])
modelo = Modelos.objects.get(pk=id_modelo)
return HttpResponse("{0:.2f}".format(float(tipo.incremento)*float(modelo.precio_usado)))
else:
return redirect("tienda")
@login_required(login_url='login_tk')
def hacer_compra(request):
if request.method == "POST":
try:
producto = Productos.objects.get(pk=request.session["accion_comprar_pk_producto"])
producto.tipo_id = request.POST["tipo"]
producto.precio_compra = request.POST["precio_compra"]
producto.estado = "ST"
producto.save()
except Exception as error:
return HttpResponse(reverse("en_construccion"))
estan_todos = True
estan_todos = estan_todos and "accion_comprar_pk_cliente" in request.session
estan_todos = estan_todos and "accion_comprar_pk_producto" in request.session
estan_todos = estan_todos and "accion_comprar_pk_modelo" in request.session
if estan_todos:
compra = guardar_compra(request.session["accion_comprar_pk_cliente"],
request.session["accion_comprar_pk_producto"],
request.user.id,
"Realizada la compra del producto")
return HttpResponse(reverse("validar_compra", args=[str(compra.id)]))
else:
return HttpResponse(reverse("tienda"))
@login_required(login_url='login_tk')
def trato_compra(request, id_producto):
if request.method == "POST":
producto = Productos.objects.get(pk=id_producto)
f_p = FinTratoForm(request.POST, instance=producto)
if f_p.is_valid():
p = f_p.save()
p.estado = "ST"
p.save()
clientes = Historial.objects.filter(producto_id=p.pk)
cliente_id = 1
if len(clientes) > 0:
cliente_id = clientes[0].cliente_id
compra = guardar_compra(cliente_id, p.id, request.user.id,
"Realizada la compra del producto. Despues de testear")
return HttpResponse(reverse("validar_compra", args=[compra.id]))
else:
producto = Productos.objects.get(pk=id_producto)
if producto.tipo == None:
producto.tipo = Tipos.objects.all()[0]
producto.precio_compra = "{0:.2f}".format(producto.modelo.precio_usado *
producto.tipo.incremento)
producto.save()
filter_query = Testeo.objects.filter(producto_id=id_producto)
lista_ids = filter_query.values_list("descripcion_id", flat=True)
no_realizaos = ListaTesteo.objects.filter(categoria=producto.modelo.categoria)
return render(request, "tienda/compras/trato_compra.html",
{'query': filter_query.exclude(estado="OK"), "p": producto,
"no_realizados": no_realizaos.exclude(pk__in=lista_ids),
"form": FinTratoForm(instance=producto)})
@login_required(login_url='login_tk')
def cancelar_compra(request):
if request.method == "POST":
try:
producto = Productos.objects.get(pk=request.session["accion_comprar_pk_producto"])
producto.tipo_id = request.POST["tipo"]
producto.precio_compra = request.POST["precio_compra"]
producto.estado = "CT"
producto.save()
except:
return HttpResponse(reverse("tienda"))
estan_todos = True
estan_todos = estan_todos and "accion_comprar_pk_cliente" in request.session
estan_todos = estan_todos and "accion_comprar_pk_producto" in request.session
estan_todos = estan_todos and "accion_comprar_pk_modelo" in request.session
if estan_todos:
            # save a history entry for the cancelled purchase
save_historial(request.user.id, request.session["accion_comprar_pk_cliente"],
request.session["accion_comprar_pk_producto"],
"Compra cancelada, producto en posesion del cliente")
vaciar_sesison_compra(request)
return HttpResponse(reverse("tienda"))
else:
return HttpResponse(reverse("en_construccion"))
@login_required(login_url='login_tk')
def salir_compra(request):
try:
producto = Productos.objects.get(pk=request.session["accion_comprar_pk_producto"])
producto.estado = "CT"
producto.save()
except:
pass
vaciar_sesison_compra(request)
return redirect("tienda")
def guardar_compra(cliente_id, producto_id, user_id, detalle):
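    # Create a purchase (Compras) record linking the selling client, the product
    # and the acting user, then log the action in the history table.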
compra = Compras()
compra.vendedor_id = cliente_id
compra.tipo_vendedor = 'CL'
compra.producto_id = producto_id
compra.usuario_id = user_id
compra.save()
    # save the history entry
save_historial(user_id, cliente_id, user_id, detalle)
return compra
def vaciar_sesison_compra(request):
if "accion_comprar_pk_cliente" in request.session:
del request.session["accion_comprar_pk_cliente"]
if "accion_comprar_pk_producto" in request.session:
del request.session["accion_comprar_pk_producto"]
if "accion_comprar_pk_modelo" in request.session:
del request.session["accion_comprar_pk_modelo"]
if "accion_comprar_dni" in request.session:
del request.session["accion_comprar_dni"]
def get_document_by_code(request, code):
datos = json.loads(base64.b64decode(code))
compras = Compras.objects.filter(pk=datos["id_compra"])
compra = Compras()
if len(compras) > 0:
compra = compras[0]
producto = Productos.objects.get(pk=compra.producto.pk)
return get_document(producto, compra)
return redirect('https://google.es')
def get_document(producto, compra):
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename="%s.pdf"' % producto.modelo
doc_compra = get_documento_compra(producto, compra)
response.write(doc_compra.getvalue())
return response
def send_men_sing(compra):
vendedor = compra.get_vendedor()
datos = {
"id_compra": compra.id,
"codigo_compra": str(compra.codigo_compra),
"email": vendedor['email'],
}
send_data = base64.b64encode(json.dumps(datos))
url = settings.BASE_URL + reverse("sign_compra", args=[send_data])
from django.core.mail import send_mail
from django.template.loader import render_to_string
msg_plain = render_to_string(settings.BASE_DIR+'/templates/email/url_sign.html',
{'nombre': vendedor['nombre'],
"url": url})
send_mail(
'Firmar y aceptar condiciones',
msg_plain,
"[email protected]",
[datos['email']],
)
def sign_compra(request, code):
datos = json.loads(base64.b64decode(code))
compras = Compras.objects.filter(pk=datos["id_compra"])
datos_send = None
if len(compras) > 0:
compra = compras[0]
if compra.firma == '':
vendedor = compra.get_vendedor()
datos_send= {
"pk": datos["id_compra"],
"id_producto": compra.producto.pk,
"nombre": vendedor["nombre"],
"telefono": vendedor['telefono'],
"DNI": vendedor["DNI"].upper(),
"domicilio": vendedor['direccion'],
"ns_imei": compra.producto.ns_imei,
"precio_compra": str(compra.producto.precio_compra),
"code": code
}
return render(request, "tienda/compras/sign.html", {"datos":datos_send})
else:
return redirect("get_document_by_code", code=code )
return redirect('tienda')
def doc_testeo(doc):
tmpl_path = settings.DOCUMENT_TMPL
response = HttpResponse(content_type='application/pdf')
response['Content-Disposition'] = 'inline; filename="testeo_%s.pdf"' % doc.producto
pdfstr = get_documento_testeo(doc)
response.write(pdfstr.getvalue())
return response
| 2
| 19,015
| 0
| 0
| 0
| 3,367
| 0
| 664
| 1,114
|
2a17da60768a072018629688624a0b3345cf9f9d
| 16,285
|
py
|
Python
|
tests/unit/test/plan/grammar/test_assertions.py
|
arareko/pysoa
|
a90e428558500cf692f7f6e33fd358dd2779c328
|
[
"Apache-2.0"
] | 91
|
2017-05-08T22:41:33.000Z
|
2022-02-09T11:37:07.000Z
|
tests/unit/test/plan/grammar/test_assertions.py
|
arareko/pysoa
|
a90e428558500cf692f7f6e33fd358dd2779c328
|
[
"Apache-2.0"
] | 63
|
2017-06-14T20:08:49.000Z
|
2021-06-16T23:08:25.000Z
|
tests/unit/test/plan/grammar/test_assertions.py
|
arareko/pysoa
|
a90e428558500cf692f7f6e33fd358dd2779c328
|
[
"Apache-2.0"
] | 26
|
2017-10-13T23:23:13.000Z
|
2022-01-11T16:58:17.000Z
|
from __future__ import (
absolute_import,
unicode_literals,
)
# noinspection PyTypeChecker
| 41.649616
| 120
| 0.55413
|
from __future__ import (
absolute_import,
unicode_literals,
)
import unittest
from pysoa.common.errors import Error
from pysoa.test.plan.grammar import assertions
from pysoa.test.plan.grammar.data_types import AnyValue
# noinspection PyTypeChecker
class TestCustomAssertions(unittest.TestCase):
def test_assert_not_wanted_full_match(self):
with self.assertRaises(AssertionError):
assertions.assert_not_expected(
{
'foo': 'bar',
'blah': ['aa', 'bb'],
},
{
'foo': 'bar',
'blah': ['aa', 'bb'],
},
)
def test_assert_not_wanted_complete_mismatch(self):
assertions.assert_not_expected(
{
'foo': 'bar',
'blah': ['aa', 'bb'],
},
{
'zoom': 'bar',
},
)
def test_assert_not_wanted_partial_match(self):
with self.assertRaises(AssertionError):
assertions.assert_not_expected(
{
'foo': 'bar',
'blah': ['aa', 'bb'],
},
{
'blah': ['bb']
},
)
def test_assert_not_wanted_errors_array_empty(self):
assertions.assert_actual_list_not_subset(
[Error(code='INVALID', message=AnyValue('str'), field=AnyValue('str', permit_none=True))], # type: ignore
[],
)
def test_assert_not_wanted_errors_mismatch_list(self):
assertions.assert_actual_list_not_subset(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='BAZ', message='Baz message', field=None),
],
)
def test_assert_not_wanted_errors_match_list_no_field(self):
with self.assertRaises(AssertionError):
assertions.assert_actual_list_not_subset(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='BAR', message='Bar message', field=None),
],
)
def test_assert_not_wanted_errors_match_list_with_field(self):
with self.assertRaises(AssertionError):
assertions.assert_actual_list_not_subset(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
],
)
def test_assert_not_wanted_errors_match_list_with_field_and_extras(self):
with self.assertRaises(AssertionError):
assertions.assert_actual_list_not_subset(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
Error(code='BAZ', message='Baz message', field=None),
],
)
def test_assert_not_wanted_errors_mismatch_message(self):
assertions.assert_actual_list_not_subset(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message='Bar message', field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='BAR', message='Qux message', field=None),
],
)
def test_assert_not_wanted_errors_mismatch_field(self):
assertions.assert_actual_list_not_subset(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field='bar_field'), # type: ignore
],
[
Error(code='BAR', message='Bar message', field=None),
],
)
def test_assert_all_wanted_errors_mismatch_empty_list(self):
with self.assertRaises(AssertionError):
assertions.assert_lists_match_any_order(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[],
)
def test_assert_all_wanted_errors_mismatch_empty_list_other_way(self):
with self.assertRaises(AssertionError):
assertions.assert_lists_match_any_order(
[],
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
)
def test_assert_all_wanted_errors_mismatch_missing_error(self):
with self.assertRaises(AssertionError):
assertions.assert_lists_match_any_order(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='BAR', message='Bar message', field=None),
],
)
def test_assert_all_wanted_errors_match_same_order(self):
assertions.assert_lists_match_any_order(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
Error(code='BAR', message='Bar message', field=None),
],
)
def test_assert_all_wanted_errors_match_different_order(self):
assertions.assert_lists_match_any_order(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='BAR', message='Bar message', field=None),
Error(code='FOO', message='Foo message', field='foo_field'),
],
)
def test_assert_any_wanted_error_mismatch_empty_actual_list(self):
with self.assertRaises(AssertionError):
assertions.assert_expected_list_subset_of_actual(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[],
)
def test_assert_any_wanted_error_mismatch_code(self):
with self.assertRaises(AssertionError):
assertions.assert_expected_list_subset_of_actual(
[
Error(code='BAZ', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
Error(code='BAR', message='Bar Message', field=None),
],
)
def test_assert_any_wanted_error_match(self):
assertions.assert_expected_list_subset_of_actual(
[
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
Error(code='BAR', message='Bar message', field=None),
],
)
def test_assert_any_wanted_error_match_with_field(self):
assertions.assert_expected_list_subset_of_actual(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
Error(code='BAR', message='Bar message', field=None),
],
)
def test_assert_any_wanted_error_match_with_field_multiples(self):
assertions.assert_expected_list_subset_of_actual(
[
Error(code='FOO', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
Error(code='BAR', message=AnyValue('str'), field=AnyValue('str', permit_none=True)), # type: ignore
],
[
Error(code='FOO', message='Foo message', field='foo_field'),
Error(code='BAR', message='Bar message', field=None),
Error(code='BAZ', message='Baz message', field=None),
],
)
def test_assert_subset_structure_none(self):
assertions.assert_subset_structure(
{'foo': None},
{'foo': None},
subset_lists=True,
)
def test_assert_subset_structure_extras(self):
assertions.assert_subset_structure(
{'foo': 'bar'},
{'foo': 'bar', 'baz': 'qux'},
subset_lists=True,
)
def test_assert_subset_structure_mismatch(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_subset_structure(
{'foo': None},
{'foo': 'bar'},
subset_lists=True,
msg='Include this in the message',
)
self.assertTrue(error_info.exception.args[0].startswith('Include this in the message'))
self.assertIn('DATA ERROR', error_info.exception.args[0])
self.assertIn('Mismatch values', error_info.exception.args[0])
def test_assert_subset_structure_missing(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_subset_structure(
{'foo': None},
{'baz': 'qux'},
subset_lists=True,
)
self.assertNotIn('DATA ERROR', error_info.exception.args[0])
self.assertIn('Missing values', error_info.exception.args[0])
def test_assert_subset_structure_empty_list_not_empty(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_subset_structure(
{'foo': {'bar': []}},
{'foo': {'bar': ['baz', 'qux']}},
subset_lists=True,
)
self.assertNotIn('DATA ERROR', error_info.exception.args[0])
self.assertIn('Mismatch values', error_info.exception.args[0])
def test_assert_subset_structure_list_not_exact(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_subset_structure(
{'foo': {'bar': ['baz', 'qux', 'flem']}},
{'foo': {'bar': ['baz', 'qux']}},
)
self.assertNotIn('DATA ERROR', error_info.exception.args[0])
self.assertIn('Missing values', error_info.exception.args[0])
def test_assert_subset_structure_one_item_not_subset_of_actual_list(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_subset_structure(
{'foo': {'bar': 'flem'}},
{'foo': {'bar': ['baz', 'qux']}},
subset_lists=True,
)
self.assertNotIn('DATA ERROR', error_info.exception.args[0])
self.assertIn('Missing values', error_info.exception.args[0])
def test_assert_subset_structure_one_item_subset_of_actual_list(self):
assertions.assert_subset_structure(
{'foo': {'bar': 'baz'}},
{'foo': {'bar': ['baz', 'qux']}},
subset_lists=True,
)
def test_assert_not_present_but_present(self):
with self.assertRaises(AssertionError):
assertions.assert_not_present(
{'foo': AnyValue('str')},
{'foo': 'Hello', 'bar': 42},
)
def test_assert_not_present_but_present_sub_structure(self):
with self.assertRaises(AssertionError):
assertions.assert_not_present(
{'user': {'foo': AnyValue('str')}},
{'user': {'foo': 'Hello', 'bar': 42}},
)
def test_assert_not_present_not_present(self):
assertions.assert_not_present(
{'foo': AnyValue('str')},
{'bar': 42},
)
def test_assert_not_present_not_present_sub_structure(self):
assertions.assert_not_present(
{'user': {'foo': AnyValue('str')}},
{'user': {'bar': 42}},
)
def test_assert_exact_structure_mismatch(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_exact_structure(
{'user': {'id': 12, 'name': AnyValue('str')}, 'parent': {'id': AnyValue('int'), 'name': 'Roger'}},
{'user': {'id': 12, 'name': 'Seth'}, 'parent': {'id': 79, 'name': 'Betty'}},
)
self.assertIn('Mismatch values', error_info.exception.args[0])
def test_assert_exact_structure_missing(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_exact_structure(
{'user': {'id': 12, 'name': AnyValue('str')}, 'parent': {'id': AnyValue('int'), 'name': 'Roger'}},
{'user': {'id': 12, 'name': 'Seth'}, 'parent': {'name': 'Roger'}},
)
self.assertIn('Missing values', error_info.exception.args[0])
def test_assert_exact_structure_extra(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_exact_structure(
{'user': {'id': 12, 'name': AnyValue('str')}, 'parent': {'id': AnyValue('int'), 'name': 'Roger'}},
{'user': {'id': 12, 'name': 'Seth'}, 'parent': {'id': 79, 'name': 'Roger', 'age': 65}},
)
self.assertIn('Extra values', error_info.exception.args[0])
def test_assert_exact_structure_non_empty(self):
with self.assertRaises(AssertionError) as error_info:
assertions.assert_exact_structure(
{'user': {'id': 12, 'name': AnyValue('str')}, 'parent': {}},
{'user': {'id': 12, 'name': 'Seth'}, 'parent': {'id': 79}},
)
self.assertIn('Extra values', error_info.exception.args[0])
def test_assert_exact_structure_match(self):
assertions.assert_exact_structure(
{'user': {'id': 12, 'name': AnyValue('str')}, 'parent': {'id': AnyValue('int'), 'name': 'Roger'}},
{'user': {'id': 12, 'name': 'Seth'}, 'parent': {'id': 79, 'name': 'Roger'}},
)
def test_assert_exact_structure_list_mismatch(self):
with self.assertRaises(AssertionError):
assertions.assert_exact_structure(
{'user': {'id': 12, 'name': AnyValue('str')}, 'parents': [79, 86]},
{'user': {'id': 12, 'name': 'Seth'}, 'parents': [79, 86, 51]},
)
| 0
| 0
| 0
| 16,003
| 0
| 0
| 0
| 69
| 112
|
0ba1914cfcee88af2ca20a9d79b917b79305d0c5
| 5,248
|
py
|
Python
|
lbry/tests/integration/test_wallet_server_sessions.py
|
Nykseli/lbry-sdk
|
07afc0aa0a1e6c0ef6aa284fb47513af940440c1
|
[
"MIT"
] | null | null | null |
lbry/tests/integration/test_wallet_server_sessions.py
|
Nykseli/lbry-sdk
|
07afc0aa0a1e6c0ef6aa284fb47513af940440c1
|
[
"MIT"
] | 4
|
2020-10-27T21:53:05.000Z
|
2022-02-11T03:10:54.000Z
|
lbry/tests/integration/test_wallet_server_sessions.py
|
braveheart12/lbry-sdk
|
dc709b468f9dce60d206161785def5c7ace2b763
|
[
"MIT"
] | null | null | null |
import logging
log = logging.getLogger(__name__)
| 39.458647
| 116
| 0.695122
|
import asyncio
import socket
import time
import logging
from unittest.mock import Mock
from torba.testcase import IntegrationTestCase, Conductor
import lbry.wallet
from lbry.schema.claim import Claim
from lbry.wallet.transaction import Transaction, Output
from lbry.wallet.dewies import dewies_to_lbc as d2l, lbc_to_dewies as l2d
log = logging.getLogger(__name__)
def wrap_callback_event(fn, callback):
def inner(*a, **kw):
callback()
return fn(*a, **kw)
return inner
class TestSessionBloat(IntegrationTestCase):
"""
ERROR:asyncio:Fatal read error on socket transport
protocol: <lbrynet.wallet.server.session.LBRYElectrumX object at 0x7f7e3bfcaf60>
transport: <_SelectorSocketTransport fd=3236 read=polling write=<idle, bufsize=0>>
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/selector_events.py", line 801, in _read_ready__data_received
data = self._sock.recv(self.max_size)
TimeoutError: [Errno 110] Connection timed out
"""
LEDGER = lbry.wallet
async def asyncSetUp(self):
self.conductor = Conductor(
ledger_module=self.LEDGER, manager_module=self.MANAGER, verbosity=self.VERBOSITY
)
await self.conductor.start_blockchain()
self.addCleanup(self.conductor.stop_blockchain)
await self.conductor.start_spv()
self.session_manager = self.conductor.spv_node.server.session_mgr
self.session_manager.servers['TCP'].sockets[0].setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 64)
self.session_manager.servers['TCP'].sockets[0].setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 64)
self.addCleanup(self.conductor.stop_spv)
await self.conductor.start_wallet()
self.addCleanup(self.conductor.stop_wallet)
self.client_session = list(self.session_manager.sessions)[0]
self.client_session.transport.set_write_buffer_limits(0, 0)
self.paused_session = asyncio.Event(loop=self.loop)
self.resumed_session = asyncio.Event(loop=self.loop)
def paused():
self.resumed_session.clear()
self.paused_session.set()
def delayed_resume():
self.paused_session.clear()
time.sleep(1)
self.resumed_session.set()
self.client_session.pause_writing = wrap_callback_event(self.client_session.pause_writing, paused)
self.client_session.resume_writing = wrap_callback_event(self.client_session.resume_writing, delayed_resume)
self.blockchain = self.conductor.blockchain_node
self.wallet_node = self.conductor.wallet_node
self.manager = self.wallet_node.manager
self.ledger = self.wallet_node.ledger
self.wallet = self.wallet_node.wallet
self.account = self.wallet_node.wallet.default_account
async def test_session_bloat_from_socket_timeout(self):
await self.account.ensure_address_gap()
address1, address2 = await self.account.receiving.get_addresses(limit=2, only_usable=True)
sendtxid1 = await self.blockchain.send_to_address(address1, 5)
sendtxid2 = await self.blockchain.send_to_address(address2, 5)
await self.blockchain.generate(1)
await asyncio.wait([
self.on_transaction_id(sendtxid1),
self.on_transaction_id(sendtxid2)
])
self.assertEqual(d2l(await self.account.get_balance()), '10.0')
channel = Claim()
channel_txo = Output.pay_claim_name_pubkey_hash(
l2d('1.0'), '@bar', channel, self.account.ledger.address_to_hash160(address1)
)
channel_txo.generate_channel_private_key()
channel_txo.script.generate()
channel_tx = await Transaction.create([], [channel_txo], [self.account], self.account)
stream = Claim()
stream.stream.description = "0" * 8000
stream_txo = Output.pay_claim_name_pubkey_hash(
l2d('1.0'), 'foo', stream, self.account.ledger.address_to_hash160(address1)
)
stream_tx = await Transaction.create([], [stream_txo], [self.account], self.account)
stream_txo.sign(channel_txo)
await stream_tx.sign([self.account])
self.paused_session.clear()
self.resumed_session.clear()
await self.broadcast(channel_tx)
await self.broadcast(stream_tx)
await asyncio.wait_for(self.paused_session.wait(), 2)
self.assertEqual(1, len(self.session_manager.sessions))
real_sock = self.client_session.transport._extra.pop('socket')
mock_sock = Mock(spec=socket.socket)
for attr in dir(real_sock):
if not attr.startswith('__'):
setattr(mock_sock, attr, getattr(real_sock, attr))
def recv(*a, **kw):
raise TimeoutError("[Errno 110] Connection timed out")
mock_sock.recv = recv
self.client_session.transport._sock = mock_sock
self.client_session.transport._extra['socket'] = mock_sock
self.assertFalse(self.resumed_session.is_set())
self.assertFalse(self.session_manager.session_event.is_set())
await self.session_manager.session_event.wait()
self.assertEqual(0, len(self.session_manager.sessions))
| 0
| 0
| 4,148
| 582
| 0
| 106
| 0
| 117
| 243
|
e76f76edb55be5c2cbb5a4d75f8d67fbe7d90f8d
| 1,924
|
py
|
Python
|
northwind.py
|
valogonor/DS-Unit-3-Sprint-2-SQL-and-Databases
|
07c83195c4933d0ce02f431692fe970ef154cacf
|
[
"MIT"
] | null | null | null |
northwind.py
|
valogonor/DS-Unit-3-Sprint-2-SQL-and-Databases
|
07c83195c4933d0ce02f431692fe970ef154cacf
|
[
"MIT"
] | null | null | null |
northwind.py
|
valogonor/DS-Unit-3-Sprint-2-SQL-and-Databases
|
07c83195c4933d0ce02f431692fe970ef154cacf
|
[
"MIT"
] | null | null | null |
import sqlite3
conn = sqlite3.connect('northwind_small.sqlite3')
curs = conn.cursor()
query = '''SELECT ProductName FROM Product
ORDER BY UnitPrice DESC
LIMIT 10'''
curs.execute(query)
results = curs.fetchall()
print('Ten most expensive items (per unit price):')
for result in results:
print(result[0])
query = '''SELECT avg(HireDate - BirthDate)
FROM Employee'''
curs.execute(query)
print('Average age of an employee at the time of their hiring:', curs.fetchall()[0][0])
query = '''SELECT City, avg(HireDate - BirthDate) as Age
FROM Employee
GROUP BY City'''
curs.execute(query)
print('Average age of an employee at the time of their hiring by city:')
results = curs.fetchall()
for result in results:
print(result[0], result[1])
query = '''SELECT ProductName, CompanyName FROM Product
INNER JOIN Supplier
ON Product.SupplierId = Supplier.Id
ORDER BY UnitPrice DESC
LIMIT 10'''
curs.execute(query)
results = curs.fetchall()
print('Ten most expensive items (per unit price) and their suppliers:')
print('Product', 'Supplier', sep='\t\t\t')
for result in results:
if len(result[0]) > 15:
sep = '\t'
else:
sep = '\t\t'
print(result[0], result[1], sep=sep)
query = '''SELECT CategoryName, count(Product.Id) as ProductCount FROM Category
INNER JOIN Product
ON Category.Id = Product.CategoryId
GROUP BY CategoryId
ORDER BY ProductCount DESC
LIMIT 1'''
curs.execute(query)
print('Largest category (by number of products in it):', curs.fetchall()[0][0])
query = '''SELECT LastName, FirstName, count(Territory.TerritoryDescription) as TerritoryCount
FROM Employee, Territory
JOIN EmployeeTerritory
ON Employee.Id = EmployeeTerritory.EmployeeId
GROUP BY Employee.Id
ORDER BY TerritoryCount DESC
LIMIT 1'''
curs.execute(query)
results = curs.fetchall()
print('Employee with the most territories, and number of territories they have:',
results[0][1], results[0][0] + ';', results[0][2])
| 30.539683
| 94
| 0.730249
|
import sqlite3
conn = sqlite3.connect('northwind_small.sqlite3')
curs = conn.cursor()
query = '''SELECT ProductName FROM Product
ORDER BY UnitPrice DESC
LIMIT 10'''
curs.execute(query)
results = curs.fetchall()
print('Ten most expensive items (per unit price):')
for result in results:
print(result[0])
query = '''SELECT avg(HireDate - BirthDate)
FROM Employee'''
curs.execute(query)
print('Average age of an employee at the time of their hiring:', curs.fetchall()[0][0])
query = '''SELECT City, avg(HireDate - BirthDate) as Age
FROM Employee
GROUP BY City'''
curs.execute(query)
print('Average age of an employee at the time of their hiring by city:')
results = curs.fetchall()
for result in results:
print(result[0], result[1])
query = '''SELECT ProductName, CompanyName FROM Product
INNER JOIN Supplier
ON Product.SupplierId = Supplier.Id
ORDER BY UnitPrice DESC
LIMIT 10'''
curs.execute(query)
results = curs.fetchall()
print('Ten most expensive items (per unit price) and their suppliers:')
print('Product', 'Supplier', sep='\t\t\t')
for result in results:
if len(result[0]) > 15:
sep = '\t'
else:
sep = '\t\t'
print(result[0], result[1], sep=sep)
query = '''SELECT CategoryName, count(Product.Id) as ProductCount FROM Category
INNER JOIN Product
ON Category.Id = Product.CategoryId
GROUP BY CategoryId
ORDER BY ProductCount DESC
LIMIT 1'''
curs.execute(query)
print('Largest category (by number of products in it):', curs.fetchall()[0][0])
query = '''SELECT LastName, FirstName, count(Territory.TerritoryDescription) as TerritoryCount
FROM Employee, Territory
JOIN EmployeeTerritory
ON Employee.Id = EmployeeTerritory.EmployeeId
GROUP BY Employee.Id
ORDER BY TerritoryCount DESC
LIMIT 1'''
curs.execute(query)
results = curs.fetchall()
print('Employee with the most territories, and number of territories they have:',
results[0][1], results[0][0] + ';', results[0][2])
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e6e666e5f27d346d3402248f6886267f86c56baa
| 10,930
|
py
|
Python
|
skfda/representation/basis/_basis.py
|
alejandro-ariza/scikit-fda
|
a3626eeaac81aac14660233ff7554ae9a1550434
|
[
"BSD-3-Clause"
] | null | null | null |
skfda/representation/basis/_basis.py
|
alejandro-ariza/scikit-fda
|
a3626eeaac81aac14660233ff7554ae9a1550434
|
[
"BSD-3-Clause"
] | null | null | null |
skfda/representation/basis/_basis.py
|
alejandro-ariza/scikit-fda
|
a3626eeaac81aac14660233ff7554ae9a1550434
|
[
"BSD-3-Clause"
] | null | null | null |
"""Module for functional data manipulation in a basis system.
Defines functional data object in a basis function system representation and
the corresponding basis classes.
"""
| 30.788732
| 79
| 0.602928
|
"""Module for functional data manipulation in a basis system.
Defines functional data object in a basis function system representation and
the corresponding basis classes.
"""
import copy
import warnings
from abc import ABC, abstractmethod
from typing import Tuple
import numpy as np
from ..._utils import _domain_range, _reshape_eval_points, _same_domain
from . import _fdatabasis
def _check_domain(domain_range):
for domain in domain_range:
if len(domain) != 2 or domain[0] >= domain[1]:
raise ValueError(f"The interval {domain} is not well-defined.")
class Basis(ABC):
"""Defines the structure of a basis function system.
Attributes:
domain_range (tuple): a tuple of length 2 containing the initial and
end values of the interval over which the basis can be evaluated.
n_basis (int): number of functions in the basis.
"""
def __init__(self, *, domain_range=None, n_basis: int = 1):
"""Basis constructor.
Args:
domain_range (tuple or list of tuples, optional): Definition of the
interval where the basis defines a space. Defaults to (0,1).
n_basis: Number of functions that form the basis. Defaults to 1.
"""
if domain_range is not None:
domain_range = _domain_range(domain_range)
# Some checks
_check_domain(domain_range)
if n_basis < 1:
raise ValueError(
"The number of basis has to be strictly positive.",
)
self._domain_range = domain_range
self._n_basis = n_basis
super().__init__()
def __call__(self, *args, **kwargs) -> np.ndarray:
"""Evaluate the basis using :meth:`evaluate`."""
return self.evaluate(*args, **kwargs)
@property
def dim_domain(self) -> int:
return 1
@property
def dim_codomain(self) -> int:
return 1
@property
def domain_range(self) -> Tuple[Tuple[float, float], ...]:
if self._domain_range is None:
return ((0, 1),) * self.dim_domain
else:
return self._domain_range
@property
def n_basis(self) -> int:
return self._n_basis
@abstractmethod
def _evaluate(self, eval_points) -> np.ndarray:
"""Subclasses must override this to provide basis evaluation."""
pass
def evaluate(self, eval_points, *, derivative: int = 0) -> np.ndarray:
"""Evaluate Basis objects and its derivatives.
Evaluates the basis function system or its derivatives at a list of
given values.
Args:
eval_points (array_like): List of points where the basis is
evaluated.
Returns:
Matrix whose rows are the values of the each
basis function or its derivatives at the values specified in
eval_points.
"""
if derivative < 0:
raise ValueError("derivative only takes non-negative values.")
elif derivative != 0:
warnings.warn("Parameter derivative is deprecated. Use the "
"derivative function instead.", DeprecationWarning)
return self.derivative(order=derivative)(eval_points)
eval_points = _reshape_eval_points(eval_points,
aligned=True,
n_samples=self.n_basis,
dim_domain=self.dim_domain)
return self._evaluate(eval_points).reshape(
(self.n_basis, len(eval_points), self.dim_codomain))
def __len__(self) -> int:
return self.n_basis
def derivative(self, *, order: int = 1) -> '_fdatabasis.FDataBasis':
"""Construct a FDataBasis object containing the derivative.
Args:
order: Order of the derivative. Defaults to 1.
Returns:
Derivative object.
"""
return self.to_basis().derivative(order=order)
def _derivative_basis_and_coefs(self, coefs: np.ndarray, order: int = 1):
"""
Subclasses can override this to provide derivative construction.
A basis can provide derivative evaluation at given points
without providing a basis representation for its derivatives,
        although it is recommended to provide both if possible.
"""
raise NotImplementedError(f"{type(self)} basis does not support "
"the construction of a basis of the "
"derivatives.")
def plot(self, chart=None, **kwargs):
"""Plot the basis object or its derivatives.
Args:
chart (figure object, axe or list of axes, optional): figure over
with the graphs are plotted or axis over where the graphs are
plotted.
**kwargs: keyword arguments to be passed to the
fdata.plot function.
Returns:
fig (figure): figure object in which the graphs are plotted.
"""
self.to_basis().plot(chart=chart, **kwargs)
def _coordinate_nonfull(self, fdatabasis, key):
"""
Returns a fdatagrid for the coordinate functions indexed by key.
Subclasses can override this to provide coordinate indexing.
        The key parameter has already been validated and is an integer or
        slice in the range [0, self.dim_codomain).
"""
raise NotImplementedError("Coordinate indexing not implemented")
def _coordinate(self, fdatabasis, key):
"""Returns a fdatagrid for the coordinate functions indexed by key."""
# Raises error if not in range and normalize key
r_key = range(self.dim_codomain)[key]
if isinstance(r_key, range) and len(r_key) == 0:
raise IndexError("Empty number of coordinates selected")
# Full fdatabasis case
if (self.dim_codomain == 1 and r_key == 0) or (
isinstance(r_key, range) and len(r_key) == self.dim_codomain):
return fdatabasis.copy()
else:
return self._coordinate_nonfull(fdatabasis=fdatabasis, key=r_key)
def rescale(self, domain_range=None):
r"""Return a copy of the basis with a new :term:`domain` range, with
the corresponding values rescaled to the new bounds.
Args:
domain_range (tuple, optional): Definition of the interval
where the basis defines a space. Defaults uses the same as
the original basis.
"""
return self.copy(domain_range=domain_range)
def copy(self, domain_range=None):
"""Basis copy"""
new_copy = copy.deepcopy(self)
if domain_range is not None:
domain_range = _domain_range(domain_range)
# Some checks
_check_domain(domain_range)
new_copy._domain_range = domain_range
return new_copy
def to_basis(self) -> '_fdatabasis.FDataBasis':
"""Convert the Basis to FDatabasis.
Returns:
FDataBasis with this basis as its basis, and all basis functions
as observations.
"""
from . import FDataBasis
return FDataBasis(self.copy(), np.identity(self.n_basis))
def _list_to_R(self, knots):
retstring = "c("
for i in range(0, len(knots)):
retstring = retstring + str(knots[i]) + ", "
return retstring[0:len(retstring) - 2] + ")"
def _to_R(self):
raise NotImplementedError
def inner_product_matrix(self, other: 'Basis' = None) -> np.array:
r"""Return the Inner Product Matrix of a pair of basis.
The Inner Product Matrix is defined as
.. math::
IP_{ij} = \langle\phi_i, \theta_j\rangle
        where :math:`\phi_i` is the ith element of the first basis and
        :math:`\theta_j` is the jth element of the second basis.
This matrix helps on the calculation of the inner product
between objects on two basis and for the change of basis.
Args:
other: Basis to compute the inner product
                matrix. If no basis is given, it computes the matrix with
                itself, returning the Gram Matrix.
Returns:
            Inner Product Matrix of the two bases.
"""
from ...misc import inner_product_matrix
if other is None or self == other:
return self.gram_matrix()
return inner_product_matrix(self, other)
def _gram_matrix_numerical(self) -> np.array:
"""
Compute the Gram matrix numerically.
"""
from ...misc import inner_product_matrix
return inner_product_matrix(self, force_numerical=True)
def _gram_matrix(self) -> np.array:
"""
Compute the Gram matrix.
Subclasses may override this method for improving computation
of the Gram matrix.
"""
return self._gram_matrix_numerical()
def gram_matrix(self) -> np.array:
r"""Return the Gram Matrix of a basis
The Gram Matrix is defined as
.. math::
G_{ij} = \langle\phi_i, \phi_j\rangle
where :math:`\phi_i` is the ith element of the basis. This is a
symmetric matrix and positive-semidefinite.
Returns:
Gram Matrix of the basis.
"""
gram = getattr(self, "_gram_matrix_cached", None)
if gram is None:
gram = self._gram_matrix()
self._gram_matrix_cached = gram
return gram
def _add_same_basis(self, coefs1, coefs2):
return self.copy(), coefs1 + coefs2
def _add_constant(self, coefs, constant):
coefs = coefs.copy()
constant = np.array(constant)
coefs[:, 0] = coefs[:, 0] + constant
return self.copy(), coefs
def _sub_same_basis(self, coefs1, coefs2):
return self.copy(), coefs1 - coefs2
def _sub_constant(self, coefs, other):
coefs = coefs.copy()
other = np.array(other)
coefs[:, 0] = coefs[:, 0] - other
return self.copy(), coefs
def _mul_constant(self, coefs, other):
coefs = coefs.copy()
other = np.atleast_2d(other).reshape(-1, 1)
coefs = coefs * other
return self.copy(), coefs
def __repr__(self) -> str:
"""Representation of a Basis object."""
return (f"{self.__class__.__name__}(domain_range={self.domain_range}, "
f"n_basis={self.n_basis})")
def __eq__(self, other) -> bool:
"""Equality of Basis"""
return (type(self) == type(other)
and _same_domain(self, other)
and self.n_basis == other.n_basis)
def __hash__(self) -> int:
"""Hash of Basis"""
return hash((self.domain_range, self.n_basis))
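# --- Hedged illustration (not part of the original module) ---
# Minimal sketch of a concrete Basis subclass, written only from the abstract
# API above: _evaluate must return an array that reshapes to
# (n_basis, n_points, dim_codomain). The name MonomialSketch is hypothetical
# and is not a scikit-fda class.
class MonomialSketch(Basis):
    """Basis of monomials 1, t, t**2, ... over the domain range."""
    def _evaluate(self, eval_points):
        # Flatten to the single domain dimension and stack one row of values
        # per basis function; Basis.evaluate reshapes the result afterwards.
        t = np.asarray(eval_points).reshape(-1)
        return np.vstack([t ** k for k in range(self.n_basis)])
# Example (not executed at import time):
#   basis = MonomialSketch(domain_range=(0, 1), n_basis=3)
#   basis([0.0, 0.5]).shape  # -> (3, 2, 1)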
| 0
| 446
| 0
| 9,876
| 0
| 174
| 0
| 52
| 202
|
833626e74d4e5013fbedd077febd8ce8f93d00fe
| 2,613
|
py
|
Python
|
02-lm-tensorflow/loglin-lm.py
|
tinySean/nn4nlp-tensorflow
|
17d64427ad3cf276f2d43eac706d14a6145cc3e6
|
[
"Apache-2.0"
] | 2
|
2019-03-04T10:53:23.000Z
|
2020-09-25T02:31:44.000Z
|
02-lm-tensorflow/loglin-lm.py
|
tinySean/nn4nlp-tensorflow
|
17d64427ad3cf276f2d43eac706d14a6145cc3e6
|
[
"Apache-2.0"
] | null | null | null |
02-lm-tensorflow/loglin-lm.py
|
tinySean/nn4nlp-tensorflow
|
17d64427ad3cf276f2d43eac706d14a6145cc3e6
|
[
"Apache-2.0"
] | 1
|
2020-09-22T10:33:02.000Z
|
2020-09-22T10:33:02.000Z
|
from collections import defaultdict
import math
import random
import tensorflow as tf
import numpy as np
# The length of the n-gram
N = 2
# Functions to read in the corpus
# NOTE: We are using data from the Penn Treebank, which is already converted
# into an easy-to-use format with "<unk>" symbols. If we were using other
# data we would have to do pre-processing and consider how to choose
# unknown words, etc.
w2i = defaultdict(lambda: len(w2i))
S = w2i["<s>"]
UNK = w2i["<unk>"]
# Read in the data
train = list(read_dataset("../data/ptb/train.txt"))
w2i = defaultdict(lambda: UNK, w2i)
dev = list(read_dataset("../data/ptb/valid.txt"))
i2w = {v: k for k, v in w2i.items()}
nwords = len(w2i)
x1 = tf.placeholder(shape=(1,), dtype=tf.int32)
x2 = tf.placeholder(shape=(1,), dtype=tf.int32)
y = tf.placeholder(shape=(1,None), dtype=tf.int32)
embedding1 = tf.get_variable(name="embedding1", shape=(nwords, nwords), initializer=tf.glorot_normal_initializer())
embedding2 = tf.get_variable(name="embedding2",shape=(nwords, nwords), initializer=tf.glorot_normal_initializer())
bias = tf.get_variable(name="bias", shape=(nwords), initializer=tf.glorot_normal_initializer())
embed1 = tf.nn.embedding_lookup(embedding1, x1)
embed2 = tf.nn.embedding_lookup(embedding2, x2)
score = embed1 + embed2 + bias
loss = tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y)
optimizer = tf.train.AdamOptimizer().minimize(loss)
session = tf.Session()
session.run(tf.global_variables_initializer())
for i in range(10):
random.shuffle(train)
total_loss = 0
train_words = 0
for id, sentence in enumerate(train):
history = [S] * N
sentence_loss = 0
for i in sentence + [S]:
y_one_hot = np.zeros(shape=(1, nwords))
y_one_hot[0][i] = 1
input1, input2 = history
            history = history[1:] + [i]  # slide the n-gram context to include the current word
feed_train = {x1: [input1],
x2: [input2],
y: y_one_hot}
char_loss, _ = session.run(fetches=[loss, optimizer], feed_dict=feed_train)
sentence_loss += char_loss
total_loss += sentence_loss
train_words += len(sentence)
if (id + 1) % 5000 == 0:
print("--finished %r sentences, %.4f" % (id + 1, (total_loss / train_words)))
print("iter %r: train loss/word=%.4f, ppl=%.4f" % (
i, total_loss / train_words, math.exp(total_loss / train_words)))
| 35.310811
| 115
| 0.654038
|
from collections import defaultdict
import math
import time
import random
import tensorflow as tf
import numpy as np
# The length of the n-gram
N = 2
# Functions to read in the corpus
# NOTE: We are using data from the Penn Treebank, which is already converted
# into an easy-to-use format with "<unk>" symbols. If we were using other
# data we would have to do pre-processing and consider how to choose
# unknown words, etc.
w2i = defaultdict(lambda: len(w2i))
S = w2i["<s>"]
UNK = w2i["<unk>"]
def read_dataset(filename):
with open(filename, "r") as f:
for line in f:
yield [w2i[x] for x in line.strip().split(" ")]
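# --- Hedged illustration (not part of the original script) ---
# How the vocabulary mapping above behaves, with illustrative ids only (real
# ids depend on insertion order while reading the training data):
#   w2i["<s>"] == 0 and w2i["<unk>"] == 1 were registered first; any unseen
#   word then gets the next free id, so "the cat sat" might map to [2, 3, 4].
#   After w2i is re-bound below with a default of UNK, unseen dev-set words
#   all map to the "<unk>" id instead of growing the vocabulary.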
# Read in the data
train = list(read_dataset("../data/ptb/train.txt"))
w2i = defaultdict(lambda: UNK, w2i)
dev = list(read_dataset("../data/ptb/valid.txt"))
i2w = {v: k for k, v in w2i.items()}
nwords = len(w2i)
x1 = tf.placeholder(shape=(1,), dtype=tf.int32)
x2 = tf.placeholder(shape=(1,), dtype=tf.int32)
y = tf.placeholder(shape=(1,None), dtype=tf.int32)
embedding1 = tf.get_variable(name="embedding1", shape=(nwords, nwords), initializer=tf.glorot_normal_initializer())
embedding2 = tf.get_variable(name="embedding2",shape=(nwords, nwords), initializer=tf.glorot_normal_initializer())
bias = tf.get_variable(name="bias", shape=(nwords), initializer=tf.glorot_normal_initializer())
embed1 = tf.nn.embedding_lookup(embedding1, x1)
embed2 = tf.nn.embedding_lookup(embedding2, x2)
score = embed1 + embed2 + bias
loss = tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y)
optimizer = tf.train.AdamOptimizer().minimize(loss)
session = tf.Session()
session.run(tf.global_variables_initializer())
for i in range(10):
random.shuffle(train)
total_loss = 0
train_words = 0
for id, sentence in enumerate(train):
history = [S] * N
sentence_loss = 0
for i in sentence + [S]:
y_one_hot = np.zeros(shape=(1, nwords))
y_one_hot[0][i] = 1
input1, input2 = history
            history = history[1:] + [i]  # slide the n-gram context to include the current word
feed_train = {x1: [input1],
x2: [input2],
y: y_one_hot}
char_loss, _ = session.run(fetches=[loss, optimizer], feed_dict=feed_train)
sentence_loss += char_loss
total_loss += sentence_loss
train_words += len(sentence)
if (id + 1) % 5000 == 0:
print("--finished %r sentences, %.4f" % (id + 1, (total_loss / train_words)))
print("iter %r: train loss/word=%.4f, ppl=%.4f" % (
i, total_loss / train_words, math.exp(total_loss / train_words)))
| 0
| 0
| 0
| 0
| 112
| 0
| 0
| -10
| 44
|
d92e03bffe94661a767cf2e5a8765b439f90506e
| 340
|
py
|
Python
|
hgapp/powers/migrations/0015_remove_base_power_example_powers.py
|
shadytradesman/The-Contract-Website
|
d8b353064f91c53ebab951dec784a0a36caba260
|
[
"Apache-2.0"
] | 6
|
2020-10-03T12:15:05.000Z
|
2021-10-15T04:43:36.000Z
|
hgapp/powers/migrations/0015_remove_base_power_example_powers.py
|
shadytradesman/The-Contract-Website
|
d8b353064f91c53ebab951dec784a0a36caba260
|
[
"Apache-2.0"
] | 99
|
2020-06-04T17:43:56.000Z
|
2022-03-12T01:07:20.000Z
|
hgapp/powers/migrations/0015_remove_base_power_example_powers.py
|
shadytradesman/The-Contract-Website
|
d8b353064f91c53ebab951dec784a0a36caba260
|
[
"Apache-2.0"
] | 9
|
2020-06-06T16:39:09.000Z
|
2020-10-02T16:24:17.000Z
|
# Generated by Django 2.2.12 on 2020-08-02 14:03
| 18.888889
| 48
| 0.605882
|
# Generated by Django 2.2.12 on 2020-08-02 14:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('powers', '0014_auto_20200731_1402'),
]
operations = [
migrations.RemoveField(
model_name='base_power',
name='example_powers',
),
]
| 0
| 0
| 0
| 233
| 0
| 0
| 0
| 11
| 46
|
7465a346a19ecfbc1286a25f61bbc8e6e0865c9f
| 201
|
py
|
Python
|
tree.py
|
juhyun0/python_turtle2
|
59943c03a07a71aa33ab7124bca56f6b880b6883
|
[
"Unlicense"
] | null | null | null |
tree.py
|
juhyun0/python_turtle2
|
59943c03a07a71aa33ab7124bca56f6b880b6883
|
[
"Unlicense"
] | null | null | null |
tree.py
|
juhyun0/python_turtle2
|
59943c03a07a71aa33ab7124bca56f6b880b6883
|
[
"Unlicense"
] | null | null | null |
t.left(90)
t.color("green")
t.speed(1)
tree(90)
| 11.823529
| 20
| 0.641791
|
import turtle
# The original snippet drives a turtle named `t` that it never creates; the
# two lines below are the minimal setup assumed so the script can run.
t = turtle.Turtle()
def tree(length):
if length>5:
t.forward(length)
t.right(20)
tree(length-15)
t.left(40)
tree(length-15)
t.right(20)
t.backward(length)
t.left(90)
t.color("green")
t.speed(1)
tree(90)
| 0
| 0
| 0
| 0
| 0
| 128
| 0
| 0
| 22
|
5f574b8c03d61700a6552dfc9ad0569e5c66bcbe
| 3,648
|
py
|
Python
|
model/genetic_model.py
|
abduskhazi/PL-Binding-Affinity-Prediction-using-ML
|
fe7172570fa378480455b4dcd214d0b0c4e94ff0
|
[
"MIT"
] | 1
|
2021-12-07T09:00:01.000Z
|
2021-12-07T09:00:01.000Z
|
model/genetic_model.py
|
abduskhazi/PL-Binding-Affinity-Prediction-using-ML
|
fe7172570fa378480455b4dcd214d0b0c4e94ff0
|
[
"MIT"
] | null | null | null |
model/genetic_model.py
|
abduskhazi/PL-Binding-Affinity-Prediction-using-ML
|
fe7172570fa378480455b4dcd214d0b0c4e94ff0
|
[
"MIT"
] | null | null | null |
# genetic algorithm search of the one max optimization problem
# objective function
# tournament selection
# crossover two parents to create two children
# mutation operator
# genetic algorithm
if False:
# define the total iterations
n_iter = 100
# bits
n_bits = 500 #20
# define the population size
n_pop = n_bits * 5 #100
# crossover rate
r_cross = 0.9
# mutation rate
r_mut = 1.0 / float(n_bits)
# perform the genetic algorithm search
best, score = genetic_algorithm(onemax, n_bits, n_iter, n_pop, r_cross, r_mut)
print('Done!')
print('f(%s) = %f' % (best, score))
| 34.415094
| 96
| 0.590186
|
# genetic algorithm search of the one max optimization problem
from numpy.random import randint
from numpy.random import rand
import numpy as np
import json
# objective function
def onemax(x):
return -sum(x)
def sorted_population(pop, scores):
indices = scores.argsort()
sorted_pop = []
for i in indices:
sorted_pop += [pop[i]]
return sorted_pop
# tournament selection
def selection(pop, scores, k=10):
# first random selection
selection_ix = randint(len(pop))
for ix in randint(0, len(pop), k-1):
# check if better (e.g. perform a tournament)
if scores[ix] < scores[selection_ix]:
selection_ix = ix
return pop[selection_ix]
# crossover two parents to create two children
def crossover(p1, p2, r_cross):
# children are copies of parents by default
c1, c2 = p1.copy(), p2.copy()
# check for recombination
if rand() < r_cross:
# select crossover point that is not on the end of the string
pt = randint(1, len(p1)-2)
# perform crossover
c1 = p1[:pt] + p2[pt:]
c2 = p2[:pt] + p1[pt:]
return [c1, c2]
# mutation operator
def mutation(bitstring, r_mut):
for i in range(len(bitstring)):
# check for a mutation
if rand() < r_mut:
# flip the bit
bitstring[i] = 1 - bitstring[i]
# genetic algorithm
def genetic_algorithm(objective, X, y, n_bits, n_iter, n_pop, r_cross, r_mut, name = "genetic"):
# initial population of random bitstring
pop = [randint(0, 2, n_bits).tolist() for _ in range(n_pop)]
# keep track of best solution
best, best_eval = pop[0], objective([pop[0]], X, y)[0]
with open(name + "_feature_selection.json", 'w') as f:
json.dump((best_eval, best), f)
# enumerate generations
for gen in range(n_iter):
print("Generation - ", gen)
# evaluate all candidates in the population
scores = objective(pop, X, y)
# check for new best solution
for i in range(n_pop):
if scores[i] < best_eval:
best, best_eval = pop[i], scores[i]
#print(">%d, new best f(%s) = %.3f" % (gen, pop[i], scores[i]))
print(">%d, new best = %.3f." % (gen, scores[i]))
with open(name + "_feature_selection.json", 'w') as f:
json.dump((scores[i], pop[i]), f)
# select parents
selected = [selection(pop, scores) for _ in range(n_pop - 50)]
#Select the elite among the population
selected += sorted_population(pop, np.array(scores))[:50]
# create the next generation
children = list()
for i in range(0, n_pop, 2):
# get selected parents in pairs
p1, p2 = selected[i], selected[i+1]
# crossover and mutation
for c in crossover(p1, p2, r_cross):
# mutation
mutation(c, r_mut)
# store for next generation
children.append(c)
# replace population
pop = children
with open(name + "_generation_info.data", "w") as f:
json.dump(["Generation = " + str(gen), pop], f)
return [best, best_eval]
if False:
# define the total iterations
n_iter = 100
# bits
n_bits = 500 #20
# define the population size
n_pop = n_bits * 5 #100
# crossover rate
r_cross = 0.9
# mutation rate
r_mut = 1.0 / float(n_bits)
# perform the genetic algorithm search
best, score = genetic_algorithm(onemax, n_bits, n_iter, n_pop, r_cross, r_mut)
print('Done!')
print('f(%s) = %f' % (best, score))
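# --- Hedged usage sketch (not part of the original file) ---
# The guarded demo above does not match the (objective, X, y, ...) signature
# defined in this module, so this sketch shows a call that does; the toy
# objective and dummy X/y are assumptions for illustration only.
#   def neg_bitcount(pop, X, y):
#       # one plain-float score per candidate; lower is better for this search
#       return [-float(sum(bits)) for bits in pop]
#   X_dummy, y_dummy = None, None  # unused by the toy objective
#   best, best_eval = genetic_algorithm(
#       neg_bitcount, X_dummy, y_dummy, n_bits=20, n_iter=5,
#       n_pop=100, r_cross=0.9, r_mut=1.0 / 20)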
| 0
| 0
| 0
| 0
| 0
| 2,789
| 0
| 6
| 221
|
96164478cbee8505379a42f6487489b2e0b29439
| 7,588
|
py
|
Python
|
dockerizing-django/web/joblistings/forms.py
|
MattYu/ConcordiaAce
|
35eff7614652eb548e532dcf00e3a7296855285c
|
[
"MIT"
] | 1
|
2021-06-14T06:54:16.000Z
|
2021-06-14T06:54:16.000Z
|
joblistings/forms.py
|
MattYu/ConcordiaAce
|
35eff7614652eb548e532dcf00e3a7296855285c
|
[
"MIT"
] | 34
|
2020-04-05T01:14:31.000Z
|
2022-03-12T00:23:02.000Z
|
joblistings/forms.py
|
MattYu/ConcordiaAce
|
35eff7614652eb548e532dcf00e3a7296855285c
|
[
"MIT"
] | null | null | null |
from joblistings.models import Job
from accounts.models import Employer
| 38.714286
| 183
| 0.649974
|
from django import forms
from joblistings.models import Job
from accounts.models import Employer
from ace.constants import CATEGORY_CHOICES, MAX_LENGTH_TITLE, MAX_LENGTH_DESCRIPTION, MAX_LENGTH_RESPONSABILITIES, MAX_LENGTH_REQUIREMENTS, MAX_LENGTH_STANDARDFIELDS, LOCATION_CHOICES
from tinymce.widgets import TinyMCE
from companies.models import Company
from joblistings.models import Job, JobPDFDescription
from django.shortcuts import get_object_or_404
from accounts.models import Employer
class JobForm(forms.Form):
title = forms.CharField(max_length=MAX_LENGTH_TITLE,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Your job title here'})
)
category = forms.ChoiceField(
choices = CATEGORY_CHOICES,
widget=forms.Select(attrs={'class': 'form-control', 'placeholder': 'Select Category'})
)
salaryRange = forms.CharField(
required=False,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Salary range'})
)
vacancy = forms.IntegerField(
required=False,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Vacancy'})
)
expirationDate = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'type': 'date'})
)
startDate = forms.CharField(
widget=forms.TextInput(attrs={'class': 'form-control', 'type': 'date'})
)
duration = forms.CharField(max_length=20,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Total duration in months'})
)
description = forms.CharField(
max_length=MAX_LENGTH_DESCRIPTION,
widget=TinyMCE(attrs={'class': 'tinymce-editor tinymce-editor-1'})
)
responsabilities = forms.CharField(
max_length=MAX_LENGTH_RESPONSABILITIES,
widget=TinyMCE(attrs={'class': 'tinymce-editor tinymce-editor-2'})
)
requirements = forms.CharField(
max_length=MAX_LENGTH_REQUIREMENTS,
widget=TinyMCE(attrs={'class': 'tinymce-editor tinymce-editor-2'})
)
country = forms.ChoiceField(
choices = LOCATION_CHOICES,
widget=forms.Select(attrs={'class': 'form-control', 'placeholder': 'Select Country'})
)
location = forms.CharField(max_length=20,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'City'})
)
postcode = forms.CharField(max_length=20,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Postal Code'})
)
yourLocation = forms.CharField(max_length=20,
widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Your location'})
)
company = forms.ChoiceField(
widget=forms.Select(attrs={'class': 'form-control', 'placeholder': 'Select Category'})
)
descriptionFile = forms.FileField(required=False)
class Meta:
model = Job
exclude = ('company',)
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
super().__init__(*args, **kwargs)
if user.user_type == 4:
company = Company.objects.all()
else:
company = [Employer.objects.get(user=user).company]
company_choices = []
for obj in company:
company_choices.append((obj.pk, obj))
self.fields['company'].choices = company_choices
def clean(self):
cleaned_data = super().clean()
title = cleaned_data.get('title')
category = cleaned_data.get('category')
salaryRange = cleaned_data.get('salaryRange')
vacancy = cleaned_data.get('vacancy')
expirationDate = cleaned_data.get('expirationDate')
startDate = cleaned_data.get('startDate')
duration = cleaned_data.get('duration')
description = cleaned_data.get('description')
responsabilities = cleaned_data.get('responsabilities')
requirements = cleaned_data.get('requirements')
country = cleaned_data.get('country')
location = cleaned_data.get('location')
postcode = cleaned_data.get('postcode')
yourLocation = cleaned_data.get('yourLocation')
company = cleaned_data.get('company')
self.cleaned_data = cleaned_data
if not title and not location and not salaryRange and not description and not location and not postcode:
raise forms.ValidationError('You have to write something')
'''
name = cleaned_data.get('name')
email = cleaned_data.get('email')
message = cleaned_data.get('message')
if not name and not email and not message:
raise forms.ValidationError('You have to write something!')
'''
def save(self):
job = Job()
cleaned_data = self.cleaned_data
job.title = cleaned_data.get('title')
job.category = cleaned_data.get('category')
job.salaryRange = cleaned_data.get('salaryRange')
job.vacancy = cleaned_data.get('vacancy')
job.expirationDate = cleaned_data.get('expirationDate')
job.startDate = cleaned_data.get('startDate')
job.duration = cleaned_data.get('duration')
job.description = cleaned_data.get('description')
job.responsabilities = cleaned_data.get('responsabilities')
job.requirements = cleaned_data.get('requirements')
job.country = cleaned_data.get('country')
job.location = cleaned_data.get('location')
job.postcode = cleaned_data.get('postcode')
job.yourLocation = cleaned_data.get('yourLocation')
job.company = get_object_or_404(Company, pk=cleaned_data.get('company'))
job.save()
if cleaned_data.get('descriptionFile'):
jobPDFDescription = JobPDFDescription()
jobPDFDescription.job = job
jobPDFDescription.descriptionFile = cleaned_data.get('descriptionFile')
jobPDFDescription.save()
return job
class AdminAddRemoveJobPermission(forms.Form):
addEmployer = forms.ChoiceField(
required = False,
widget=forms.Select(attrs={'class': 'form-control', 'placeholder': 'Select Category'})
)
removeEmployer = forms.ChoiceField(
required = False,
widget=forms.Select(attrs={'class': 'form-control', 'placeholder': 'Select Category'})
)
def __init__(self, *args, **kwargs):
jobId = kwargs.pop('jobId', None)
super().__init__(*args, **kwargs)
if jobId:
currentPermission = []
job = Job.objects.filter(pk= jobId).all()[0]
employerSet = set()
for employer in job.jobAccessPermission.all():
currentPermission.append((employer.pk, employer.user.email))
employerSet.add(employer)
employerOfSameCompanyWithoutPermission = Employer.objects.filter(company = job.company).all()
sameCompany = []
for employer in employerOfSameCompanyWithoutPermission.all():
if employer not in employerSet:
sameCompany.append((employer.pk, employer.user.email))
            # sort in place; a bare sorted() call would discard its result
            currentPermission.sort(key=lambda x: x[1])
            sameCompany.sort(key=lambda x: x[1])
currentPermission.insert(0, ("Remove Permission", "Revoke Permission"))
sameCompany.insert(0, ("Add Permission", "Add Permission from " + job.company.name))
self.fields['addEmployer'].choices = sameCompany
self.fields['removeEmployer'].choices = currentPermission
| 0
| 0
| 0
| 7,046
| 0
| 0
| 0
| 266
| 204
|
dc39a84fe404c1eef75b6fc371c87b856fc55a84
| 500
|
py
|
Python
|
run.py
|
nefeli/trafficgen
|
81b6cb01d8e9d0abfcd83df641210035e265f13f
|
[
"BSD-3-Clause"
] | 10
|
2017-04-26T07:01:48.000Z
|
2020-07-25T00:29:45.000Z
|
run.py
|
nefeli/trafficgen
|
81b6cb01d8e9d0abfcd83df641210035e265f13f
|
[
"BSD-3-Clause"
] | 12
|
2017-03-21T17:58:16.000Z
|
2017-10-16T18:01:37.000Z
|
run.py
|
nefeli/trafficgen
|
81b6cb01d8e9d0abfcd83df641210035e265f13f
|
[
"BSD-3-Clause"
] | 5
|
2017-03-09T19:59:26.000Z
|
2018-04-02T19:49:57.000Z
|
#!/usr/bin/env python3
import io
import sys
if __name__ == '__main__':
if len(sys.argv) == 1:
run_cli()
else:
cmds = []
line_buf = []
for arg in sys.argv[1:]:
if arg == '--':
cmds.append(' '.join(line_buf))
line_buf = []
else:
line_buf.append(arg)
cmds.append(' '.join(line_buf))
run_cmds(io.StringIO('\n'.join(cmds)))
| 20.833333
| 47
| 0.498
|
#!/usr/bin/env python3
import io
import sys
import generator
from generator.cmdline import *
if __name__ == '__main__':
if len(sys.argv) == 1:
run_cli()
else:
cmds = []
line_buf = []
for arg in sys.argv[1:]:
if arg == '--':
cmds.append(' '.join(line_buf))
line_buf = []
else:
line_buf.append(arg)
cmds.append(' '.join(line_buf))
run_cmds(io.StringIO('\n'.join(cmds)))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 45
|
2d0afb7f18f7dfcc8cf1e3ca1087c009e3e728f5
| 974
|
py
|
Python
|
scripts/genotype_from_fpaths.py
|
JIC-Image-Analysis/fishtools
|
9d7cfa695711ec4b40986be65e11eea7ad1b0b5d
|
[
"MIT"
] | null | null | null |
scripts/genotype_from_fpaths.py
|
JIC-Image-Analysis/fishtools
|
9d7cfa695711ec4b40986be65e11eea7ad1b0b5d
|
[
"MIT"
] | null | null | null |
scripts/genotype_from_fpaths.py
|
JIC-Image-Analysis/fishtools
|
9d7cfa695711ec4b40986be65e11eea7ad1b0b5d
|
[
"MIT"
] | 1
|
2022-03-10T13:08:21.000Z
|
2022-03-10T13:08:21.000Z
|
if __name__ == "__main__":
main()
| 20.723404
| 60
| 0.666324
|
import os
import pathlib
import click
import parse
from fishtools.config import Config
def is_image(filename, image_exts=['.czi']):
_, ext = os.path.splitext(filename)
return ext in image_exts
@click.command()
@click.argument('config_fpath')
def main(config_fpath):
config = Config(config_fpath)
dirpath = pathlib.Path(config.images_root_dirpath)
dirpaths_fns = []
for dirpath, dirnames, filenames in os.walk(dirpath):
for fn in filenames:
if is_image(fn):
dirpaths_fns.append((dirpath, fn))
expid_to_genotype = {}
image_name_template = "Experiment-{expid:d}.czi"
for dirpath, fn in dirpaths_fns:
result = parse.parse(image_name_template, fn)
expid = result.named['expid']
expid_to_genotype[expid] = os.path.basename(dirpath)
for expid, genotype in expid_to_genotype.items():
print(f"{expid}\t{genotype}")
if __name__ == "__main__":
main()
| 0
| 701
| 0
| 0
| 0
| 94
| 0
| -23
| 158
|
052f73c51d8e906ef7280490bdcc2bd79bb64740
| 4,643
|
py
|
Python
|
list_id_bimap.py
|
martincochran/score-minion
|
58197798a0a3a4fbcd54ffa0a2fab2e865985bfd
|
[
"Apache-2.0"
] | null | null | null |
list_id_bimap.py
|
martincochran/score-minion
|
58197798a0a3a4fbcd54ffa0a2fab2e865985bfd
|
[
"Apache-2.0"
] | 3
|
2015-02-15T18:31:10.000Z
|
2015-02-22T19:56:05.000Z
|
list_id_bimap.py
|
martincochran/score-minion
|
58197798a0a3a4fbcd54ffa0a2fab2e865985bfd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2015 Martin Cochran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 31.80137
| 92
| 0.713117
|
#!/usr/bin/env python
#
# Copyright 2015 Martin Cochran
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from game_model import Game
from scores_messages import AgeBracket
from scores_messages import Division
from scores_messages import League
class ListIdBiMap:
"""Encapsulates mappings to and from list id and structured properties."""
# List ID definitions corresponding to lists defined in the twitter account of
# @martin_cochran.
USAU_COLLEGE_OPEN_LIST_ID = '186814318'
USAU_COLLEGE_WOMENS_LIST_ID = '186814882'
USAU_CLUB_OPEN_LIST_ID = '186732484'
USAU_CLUB_WOMENS_LIST_ID = '186732631'
USAU_CLUB_MIXED_LIST_ID = '186815046'
AUDL_LIST_ID = '186926608'
MLU_LIST_ID = '186926651'
ALL_LISTS = [
USAU_COLLEGE_OPEN_LIST_ID,
USAU_COLLEGE_WOMENS_LIST_ID,
USAU_CLUB_OPEN_LIST_ID,
USAU_CLUB_WOMENS_LIST_ID,
USAU_CLUB_MIXED_LIST_ID,
AUDL_LIST_ID,
MLU_LIST_ID
]
# Simple data structure to lookup lists if the league, division, and age
# bracket were specified in the request.
LIST_ID_MAP = {
League.USAU: {
Division.OPEN: {
AgeBracket.COLLEGE: USAU_COLLEGE_OPEN_LIST_ID,
AgeBracket.NO_RESTRICTION: USAU_CLUB_OPEN_LIST_ID,
},
Division.WOMENS: {
AgeBracket.COLLEGE: USAU_COLLEGE_WOMENS_LIST_ID,
AgeBracket.NO_RESTRICTION: USAU_CLUB_WOMENS_LIST_ID,
},
Division.MIXED: {
AgeBracket.NO_RESTRICTION: USAU_CLUB_MIXED_LIST_ID,
},
},
League.AUDL: {
Division.OPEN: {
AgeBracket.NO_RESTRICTION: AUDL_LIST_ID,
},
},
League.MLU: {
Division.OPEN: {
AgeBracket.NO_RESTRICTION: MLU_LIST_ID,
},
},
}
LIST_ID_TO_DIVISION = {
USAU_COLLEGE_OPEN_LIST_ID: Division.OPEN,
USAU_COLLEGE_WOMENS_LIST_ID: Division.WOMENS,
USAU_CLUB_OPEN_LIST_ID: Division.OPEN,
USAU_CLUB_WOMENS_LIST_ID: Division.WOMENS,
USAU_CLUB_MIXED_LIST_ID: Division.MIXED,
AUDL_LIST_ID: Division.OPEN,
MLU_LIST_ID: Division.OPEN,
}
LIST_ID_TO_AGE_BRACKET = {
USAU_COLLEGE_OPEN_LIST_ID: AgeBracket.COLLEGE,
USAU_COLLEGE_WOMENS_LIST_ID: AgeBracket.COLLEGE,
USAU_CLUB_OPEN_LIST_ID: AgeBracket.NO_RESTRICTION,
USAU_CLUB_WOMENS_LIST_ID: AgeBracket.NO_RESTRICTION,
USAU_CLUB_MIXED_LIST_ID: AgeBracket.NO_RESTRICTION,
AUDL_LIST_ID: AgeBracket.NO_RESTRICTION,
MLU_LIST_ID: AgeBracket.NO_RESTRICTION,
}
LIST_ID_TO_LEAGUE = {
USAU_COLLEGE_OPEN_LIST_ID: League.USAU,
USAU_COLLEGE_WOMENS_LIST_ID: League.USAU,
USAU_CLUB_OPEN_LIST_ID: League.USAU,
USAU_CLUB_WOMENS_LIST_ID: League.USAU,
USAU_CLUB_MIXED_LIST_ID: League.USAU,
AUDL_LIST_ID: League.AUDL,
MLU_LIST_ID: League.MLU,
}
@staticmethod
def GetListId(division, age_bracket, league):
"""Looks up the list_id which corresponds to the given division and league.
Args:
division: Division of interest
age_bracket: AgeBracket of interest
league: League of interest
Returns:
The list id corresponding to that league and division, or '' if no such
list exists.
"""
d = ListIdBiMap.LIST_ID_MAP.get(league, {})
if not d:
return ''
d = d.get(division, {})
if not d:
return ''
return d.get(age_bracket, '')
@staticmethod
def GetStructuredPropertiesForList(list_id):
"""Returns the division, age_bracket, and league for the given list id.
Defaults to Division.OPEN, AgeBracket.NO_RESTRICTION, and League.USAU,
    if the division, age_bracket, or league, respectively, does not exist in
the map for the given list_id.
Args:
list_id: ID of list for which to retrieve properties.
Returns:
(division, age_bracket, league) tuple for the given list ID.
"""
division = ListIdBiMap.LIST_ID_TO_DIVISION.get(list_id, Division.OPEN)
age_bracket = ListIdBiMap.LIST_ID_TO_AGE_BRACKET.get(list_id, AgeBracket.NO_RESTRICTION)
league = ListIdBiMap.LIST_ID_TO_LEAGUE.get(list_id, League.USAU)
return (division, age_bracket, league)
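# --- Hedged usage sketch (not part of the original module) ---
# Round-trips a division/age-bracket/league triple through the bimap above;
# the chosen values are only an example.
if __name__ == '__main__':
  example_list_id = ListIdBiMap.GetListId(
      Division.WOMENS, AgeBracket.COLLEGE, League.USAU)
  assert example_list_id == ListIdBiMap.USAU_COLLEGE_WOMENS_LIST_ID
  print(ListIdBiMap.GetStructuredPropertiesForList(example_list_id))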
| 0
| 1,281
| 0
| 2,595
| 0
| 0
| 0
| 51
| 112
|
468ad4ffeae4e5171b0014bec49676ed9cc8da05
| 2,988
|
py
|
Python
|
loralos/wms_image.py
|
SimonLarsen/loralos
|
198a6b94a984f12e7a069826e3f977db9de34d00
|
[
"MIT"
] | null | null | null |
loralos/wms_image.py
|
SimonLarsen/loralos
|
198a6b94a984f12e7a069826e3f977db9de34d00
|
[
"MIT"
] | null | null | null |
loralos/wms_image.py
|
SimonLarsen/loralos
|
198a6b94a984f12e7a069826e3f977db9de34d00
|
[
"MIT"
] | null | null | null |
FORMAT_ENDINGS = {"image/jpeg": "jpg"}
| 30.489796
| 78
| 0.522758
|
from owslib.wms import WebMapService
import pyproj
from PIL import Image
from typing import Tuple, List, Dict, Any
import os.path
from pathlib import Path
FORMAT_ENDINGS = {"image/jpeg": "jpg"}
class WMSImage:
def __init__(
self,
url: str,
layer: str,
cache_dir: str,
style: str = "default",
tile_size: int = 1000,
resolution: int = 500,
format: str = "image/jpeg",
crs: str = "epsg:25832",
headers: Dict[str, Any] = None,
) -> None:
self.url = url
self.layer = layer
self.cache_dir = cache_dir
self.style = style
self.tile_size = tile_size
self.resolution = resolution
self.format = format
self.crs = crs
self.headers = headers
self.wms = WebMapService(self.url, headers=self.headers)
self.trans = pyproj.Transformer.from_crs(
"wgs84", self.crs, always_xy=True
)
self.cached_image = None
def load_tile(self, x: float, y: float) -> None:
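        # Fetch the WMS tile containing (x, y) unless it is already cached on
        # disk, then keep the decoded pixel-access object for later lookups.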
tx = int(x // self.tile_size * self.tile_size)
ty = int(y // self.tile_size * self.tile_size)
bbox = (tx, ty, tx + self.tile_size, ty + self.tile_size)
cache_file = Path(self.cache_dir) / (
f"wms_{self.layer}_{bbox[0]}_{bbox[1]}_{bbox[2]}_{bbox[3]}"
f"_{self.tile_size}_{self.resolution}"
f".{FORMAT_ENDINGS[self.format]}"
)
if not os.path.exists(cache_file):
res = self.wms.getmap(
layers=[self.layer],
styles=[self.style],
srs=self.crs,
bbox=bbox,
size=(self.resolution, self.resolution),
format=self.format,
)
with open(cache_file, "wb") as fp:
fp.write(res.read())
image = Image.open(cache_file)
self.cached_image = image.load()
def get_pixels(
self, lons: List[float], lats: List[float]
) -> List[Tuple[float, float, float]]:
points = [None] * len(lons)
tiles = [None] * len(lons)
for i in range(len(lons)):
x, y = self.trans.transform(lons[i], lats[i])
points[i] = (x, y)
tx = int(x // self.tile_size * self.tile_size)
ty = int(y // self.tile_size * self.tile_size)
tiles[i] = (tx, ty)
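        # Sort point indices by their tile so each WMS tile is fetched and
        # loaded only once per call.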
order = list(range(len(lons)))
order.sort(key=lambda i: tiles[i])
prev_tile = None
out = [None] * len(lons)
for i in order:
tile = tiles[i]
if tile != prev_tile:
self.load_tile(*tile)
prev_tile = tile
x, y = points[i]
px = round((x - tile[0]) / self.tile_size * (self.resolution - 1))
py = round(
(1.0 - (y - tile[1]) / self.tile_size) * (self.resolution - 1)
)
out[i] = self.cached_image[px, py]
return out
| 0
| 0
| 0
| 2,768
| 0
| 0
| 0
| 23
| 155
|
1acaf21ff5c98fb66692384b82ca684a87d4e348
| 5,004
|
py
|
Python
|
mail/gmailapi.py
|
prabin-acharya/mail-Gmail
|
b39bfbd48fedcd3e2a101cd0d2d4c3302faa233d
|
[
"MIT"
] | 1
|
2021-08-08T04:02:32.000Z
|
2021-08-08T04:02:32.000Z
|
mail/gmailapi.py
|
prabin-acharya/mail-Gmail
|
b39bfbd48fedcd3e2a101cd0d2d4c3302faa233d
|
[
"MIT"
] | null | null | null |
mail/gmailapi.py
|
prabin-acharya/mail-Gmail
|
b39bfbd48fedcd3e2a101cd0d2d4c3302faa233d
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/gmail.modify']
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('gmail', 'v1', credentials=creds)
| 31.872611
| 118
| 0.607314
|
from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import time
from email.mime.text import MIMEText
from .models import Email
import base64
import email
import json
import datetime
import pytz
import re
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/gmail.modify']
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('gmail', 'v1', credentials=creds)
def data_encoder(text):
if len(text)>0:
message = base64.urlsafe_b64decode(text)
message = str(message, 'utf-8')
message = email.message_from_string(message)
return message
def readMessage(content)->str:
message = None
if "data" in content['payload']['body']:
message = content['payload']['body']['data']
message = data_encoder(message)
elif "data" in content['payload']['parts'][0]['body']:
message = content['payload']['parts'][0]['body']['data']
message = data_encoder(message)
else:
print("body has no data.")
return message
def get_inbox_gmails():
# Call the Gmail API
results = service.users().messages().list(userId='me',labelIds=["INBOX"],q="is:unread category:primary").execute()
messages = results.get('messages', [])
for message in messages:
save_mail(message)
def send_gmail(recipient, subject, body):
mail_from = service.users().getProfile(userId='me').execute()['emailAddress']
mail_to = recipient
mail_subject = subject
mail_body = body
mail = MIMEText(mail_body)
mail['to'] = mail_to
mail['from'] = mail_from
mail['subject'] = mail_subject
raw = base64.urlsafe_b64encode(mail.as_bytes())
raw = raw.decode()
body = {'raw': raw}
try:
mail = (service.users().messages().send(userId='me', body=body).execute())
print("Your mail has been sent")
    except Exception as error:
        print("An error occurred. Mail not sent.")
def get_sent_gmails():
results = service.users().messages().list(userId='me',labelIds=["SENT"]).execute()
messages = results.get('messages', [])
for message in messages[:5]:
save_mail(message)
def save_mail(message):
mail = service.users().messages().get(userId='me', id=message['id'], format="full").execute()
headers=mail["payload"]["headers"]
user = service.users().getProfile(userId='me').execute()['emailAddress']
gmail_id = message['id']
for i in headers:
if i["name"] == "From" or i["name"] == "from":
sender = i["value"]
sender_email = re.search('<(.+)>', sender)
if sender_email:
sender_email = sender_email.group(1)
else:
sender_email = sender
elif i["name"] == "To" or i["name"] == "to":
recipients = i["value"]
recipients_email = re.search('<(.+)>', recipients)
if recipients_email:
recipients_email = recipients_email.group(1)
else:
recipients_email = recipients
elif i["name"] == "Subject" or i["name"] == "subject":
subject = i["value"]
elif i["name"] == "Date" or i["name"] == "date":
date = i["value"]
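            # The Date header arrives in several formats; try each in turn.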
try:
date = datetime.datetime.strptime(date, '%a, %d %b %Y %X %Z')
except:
try:
date = datetime.datetime.strptime(date, '%a, %d %b %Y %X %z')
except:
try:
date = datetime.datetime.strptime(date, '%a, %d %b %Y %X %z (%Z)')
except:
date = date[:-6].strip()
date = datetime.datetime.strptime(date, '%a, %d %b %Y %X %z')
body = readMessage(mail)
mail2 = Email(
user = user,
gmail_id = gmail_id,
sender = sender,
sender_email = sender_email,
recipients = recipients,
recipients_email = recipients_email,
subject = subject,
body = body,
timestamp = date
)
mail2.save()
| 0
| 0
| 0
| 0
| 0
| 3,540
| 0
| -46
| 339
|
3c5b5a01aea276ed55213cc1efedac91d26ae1c8
| 1,580
|
py
|
Python
|
diagrams/ibm/blockchain.py
|
houmam/diagrams
|
eaf3e98304014e847c347bfae19bbfb3fe91abb2
|
[
"MIT"
] | 17,037
|
2020-02-03T01:30:30.000Z
|
2022-03-31T18:09:15.000Z
|
diagrams/ibm/blockchain.py
|
loftwah/diagrams
|
e45804b48d5360fe5bae1b785db6527db5a57d16
|
[
"MIT"
] | 529
|
2020-02-03T10:43:41.000Z
|
2022-03-31T17:33:08.000Z
|
diagrams/ibm/blockchain.py
|
loftwah/diagrams
|
e45804b48d5360fe5bae1b785db6527db5a57d16
|
[
"MIT"
] | 1,068
|
2020-02-05T11:54:29.000Z
|
2022-03-30T23:28:55.000Z
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
# Aliases
| 17.173913
| 68
| 0.728481
|
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _IBM
class _Blockchain(_IBM):
_type = "blockchain"
_icon_dir = "resources/ibm/blockchain"
class BlockchainDeveloper(_Blockchain):
_icon = "blockchain-developer.png"
class Blockchain(_Blockchain):
_icon = "blockchain.png"
class CertificateAuthority(_Blockchain):
_icon = "certificate-authority.png"
class ClientApplication(_Blockchain):
_icon = "client-application.png"
class Communication(_Blockchain):
_icon = "communication.png"
class Consensus(_Blockchain):
_icon = "consensus.png"
class EventListener(_Blockchain):
_icon = "event-listener.png"
class Event(_Blockchain):
_icon = "event.png"
class ExistingEnterpriseSystems(_Blockchain):
_icon = "existing-enterprise-systems.png"
class HyperledgerFabric(_Blockchain):
_icon = "hyperledger-fabric.png"
class KeyManagement(_Blockchain):
_icon = "key-management.png"
class Ledger(_Blockchain):
_icon = "ledger.png"
class MembershipServicesProviderApi(_Blockchain):
_icon = "membership-services-provider-api.png"
class Membership(_Blockchain):
_icon = "membership.png"
class MessageBus(_Blockchain):
_icon = "message-bus.png"
class Node(_Blockchain):
_icon = "node.png"
class Services(_Blockchain):
_icon = "services.png"
class SmartContract(_Blockchain):
_icon = "smart-contract.png"
class TransactionManager(_Blockchain):
_icon = "transaction-manager.png"
class Wallet(_Blockchain):
_icon = "wallet.png"
# Aliases
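# Usage sketch (comments only; assumes the public `diagrams` API with its
# Diagram context manager and `>>` edge operator):
#
#   from diagrams import Diagram
#   from diagrams.ibm.blockchain import ClientApplication, SmartContract, Ledger
#
#   with Diagram("IBM blockchain sketch", show=False):
#       ClientApplication("app") >> SmartContract("contract") >> Ledger("ledger")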
| 0
| 0
| 0
| 975
| 0
| 0
| 0
| -3
| 506
|
f353ecb9975a775e9ee105aa31a7468ed23f0c58
| 1,634
|
py
|
Python
|
sp/test_remap.py
|
crepuscularlight/SemesterProject
|
acfb219ca315d912b76bb581b932aaf48090fa94
|
[
"MIT"
] | null | null | null |
sp/test_remap.py
|
crepuscularlight/SemesterProject
|
acfb219ca315d912b76bb581b932aaf48090fa94
|
[
"MIT"
] | null | null | null |
sp/test_remap.py
|
crepuscularlight/SemesterProject
|
acfb219ca315d912b76bb581b932aaf48090fa94
|
[
"MIT"
] | null | null | null |
_base_='../swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py'
dataset_type='CocoDataset'
prefix='../coco-annotator/datasets/test/'
classes=('plasticbottle','alu can','box')
# classes=('',)
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=3),
mask_head=dict(num_classes=3)))
# train_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
# dict(type='Resize', img_scale=(128,128), keep_ratio=True),
# dict(type='RandomFlip', flip_ratio=0.5),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='DefaultFormatBundle'),
# dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
# # ]
# train1=dict(
# type=dataset_type,
# classes=classes,
# ann_file=['data/own/test-1.json'],
# img_prefix=prefix,
# pipeline=train_pipeline
# )
# train2=dict(
# type=dataset_type,
# classes=classes,
# ann_file=['data/own/ann_map_to_1.json'],
# img_prefix=prefix,
# pipeline=train_pipeline
# )
data=dict(
train=dict(
type=dataset_type,
classes=classes,
ann_file=['data/own/test-1.json','data/own/ann_map_to_1.json'],
img_prefix=prefix
),
# train=[train1,train2],
val=dict(
type=dataset_type,
classes=classes,
ann_file='data/own/ann_map_to_1.json',
img_prefix=prefix
),
test=dict(
type=dataset_type,
classes=classes,
ann_file='data/own/ann_map_to_1.json',
img_prefix=prefix
)
)
| 29.178571
| 79
| 0.612607
|
_base_='../swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py'
dataset_type='CocoDataset'
prefix='../coco-annotator/datasets/test/'
classes=('plasticbottle','alu can','box')
# classes=('',)
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=3),
mask_head=dict(num_classes=3)))
# train_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
# dict(type='Resize', img_scale=(128,128), keep_ratio=True),
# dict(type='RandomFlip', flip_ratio=0.5),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='DefaultFormatBundle'),
# dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
# # ]
# train1=dict(
# type=dataset_type,
# classes=classes,
# ann_file=['data/own/test-1.json'],
# img_prefix=prefix,
# pipeline=train_pipeline
# )
# train2=dict(
# type=dataset_type,
# classes=classes,
# ann_file=['data/own/ann_map_to_1.json'],
# img_prefix=prefix,
# pipeline=train_pipeline
# )
data=dict(
train=dict(
type=dataset_type,
classes=classes,
ann_file=['data/own/test-1.json','data/own/ann_map_to_1.json'],
img_prefix=prefix
),
# train=[train1,train2],
val=dict(
type=dataset_type,
classes=classes,
ann_file='data/own/ann_map_to_1.json',
img_prefix=prefix
),
test=dict(
type=dataset_type,
classes=classes,
ann_file='data/own/ann_map_to_1.json',
img_prefix=prefix
)
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cd903e7ade80030c34ceee4d669a0b45dddb9daa
| 5,234
|
py
|
Python
|
flickipedia/mysqlio.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2016-03-11T09:40:19.000Z
|
2016-03-11T09:40:19.000Z
|
flickipedia/mysqlio.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2015-02-27T02:23:19.000Z
|
2015-02-27T02:23:19.000Z
|
flickipedia/mysqlio.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Handle MySQL I/O via sqlalchemy engine and ORM
"""
| 28.601093
| 88
| 0.526557
|
"""
Handle MySQL I/O via sqlalchemy engine and ORM
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flickipedia.config import schema
from flickipedia.config import log, settings
class DataIOMySQL(object):
""" Class implementing data IO for MySQL. Utilizes sqlalchemy [1].
Database and table schemas will be stored in schema. Modifications
to this schema will be persisted with sync
[1] http://docs.sqlalchemy.org
"""
DEFAULTS = {
'dialect': 'mysql',
'driver': '',
'host': 'localhost',
'port': 3306,
'db': settings.__mysql_db__,
'user': settings.__mysql_user__,
'pwrd': settings.__mysql_pass__,
}
def __init__(self, **kwargs):
super(DataIOMySQL, self).__init__()
self.engine = None
self.sess = None
for key in self.DEFAULTS.keys():
            if key in kwargs:
setattr(self, key, kwargs[key])
else:
setattr(self, key, self.DEFAULTS[key])
    def connect(self, verbose=False):
""" dialect+driver://username:password@host:port/database """
if self.driver:
connect_str = '{0}+{1}://{2}:{3}@{4}/{5}'.format(
self.dialect,
self.driver,
self.user,
self.pwrd,
self.host,
self.db,
)
else:
connect_str = '{0}://{1}:{2}@{3}/{4}'.format(
self.dialect,
self.user,
self.pwrd,
self.host,
self.db,
)
        if verbose:
            log.info('Establishing connection to "%s://%s@%s/%s"' % (
self.dialect,
self.user,
self.host,
self.db
))
self.engine = create_engine(connect_str)
self.make_session()
def connect_lite(self):
""" Use an in-memory db """
self.engine = create_engine('sqlite://')
self.make_session()
def make_session(self):
""" Create a session """
Session = sessionmaker()
Session.configure(bind=self.engine)
self.sess = Session()
@property
def session(self):
return self.sess
def create_table(self, obj_name):
"""
Method for table creation
:param name: schema object name
:return: boolean indicating status
"""
if hasattr(schema, obj_name):
getattr(schema, obj_name).__table__.create(bind=self.engine)
return True
else:
log.error('Schema object not found for "%s"' % obj_name)
return False
def drop_table(self, obj_name):
"""
Method to drop creation
:param name: schema object name
:return: boolean indicating status
"""
if hasattr(schema, obj_name):
getattr(schema, obj_name).__table__.drop(bind=self.engine)
return True
else:
return False
def fetch_all_rows(self, obj_name):
"""
Method to extract all rows from database.
:param name: object to persist
:return: row list from table
"""
obj = getattr(schema, obj_name)
return self.session.query(obj, obj.name).all()
def fetch_row(self, tbl, col, value):
"""
        Fetch rows where a given column matches a value
        :param tbl: str, table name
        :param col: str, column name
        :param value: *, value on which to filter
"""
schema_obj = getattr(schema, tbl)
try:
return self.session.query(schema_obj).filter(
getattr(schema_obj, col) == value)
except Exception as e:
            log.error('Couldn\'t filter row: "%s"' % str(e))
return []
def insert(self, obj_name, **kwargs):
"""
Method to insert rows in database
:param name: object to persist
:param **kwargs: field values
:return: boolean indicating status of action
"""
if not self.session:
log.error('No session')
return False
try:
log.info('Attempting to insert row in schema "%s": "%s"' % (
obj_name, str([key + ':' + str(kwargs[key])[:100] for key in kwargs])))
self.session.add(getattr(schema, obj_name)(**kwargs))
self.session.commit()
return True
except Exception as e:
            log.error('Failed to insert row: "%s"' % str(e))
return False
def delete(self, qry_obj):
"""
Method to delete rows from database
:param qry_obj: object to delete
:return: boolean indicating status of action
"""
if not self.session:
log.error('No session')
return False
try:
self.session.delete(qry_obj)
self.session.commit()
return True
except Exception as e:
            log.error('Failed to delete row "%s": "%s"' % (str(qry_obj), str(e)))
return False
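# Usage sketch (comments only; 'Article' is a hypothetical mapped class assumed
# to live in flickipedia.config.schema and to define a `name` column):
#
#   io = DataIOMySQL()        # or DataIOMySQL(host='db.example', port=3306)
#   io.connect_lite()         # in-memory SQLite engine, handy for tests
#   io.create_table('Article')
#   io.insert('Article', name='Flickipedia')
#   rows = io.fetch_all_rows('Article')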
| 0
| 36
| 0
| 4,957
| 0
| 0
| 0
| 72
| 113
|
4af99f0e08de844feaa37c0def95f861de377265
| 1,158
|
py
|
Python
|
pepdb/tasks/migrations/0025_auto_20171022_0208.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 7
|
2015-12-21T03:52:46.000Z
|
2020-07-24T19:17:23.000Z
|
pepdb/tasks/migrations/0025_auto_20171022_0208.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 12
|
2016-03-05T18:11:05.000Z
|
2021-06-17T20:20:03.000Z
|
pepdb/tasks/migrations/0025_auto_20171022_0208.py
|
dchaplinsky/pep.org.ua
|
8633a65fb657d7f04dbdb12eb8ae705fa6be67e3
|
[
"MIT"
] | 4
|
2016-07-17T20:19:38.000Z
|
2021-03-23T12:47:20.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-21 23:08
from __future__ import unicode_literals
| 27.571429
| 118
| 0.632988
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-21 23:08
from __future__ import unicode_literals
from django.db import migrations
def count_connections(p):
return p.person2company_set.count() + p.from_persons.count() + p.person2country_set.count() + p.to_persons.count()
def delete_stuck_orphans(apps, schema_editor):
Person = apps.get_model("core", "Person")
PersonDeduplication = apps.get_model("tasks", "PersonDeduplication")
for pd in PersonDeduplication.objects.filter(status="m"):
try:
p1 = Person.objects.get(pk=pd.person1_id)
p2 = Person.objects.get(pk=pd.person2_id)
if not count_connections(p1):
p1.delete()
if not count_connections(p2):
p2.delete()
if count_connections(p1) and count_connections(p2):
pd.applied = False
pd.save()
except Person.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [
('tasks', '0024_auto_20171020_0121'),
]
operations = [
migrations.RunPython(delete_stuck_orphans)
]
| 0
| 0
| 0
| 167
| 0
| 772
| 0
| 11
| 92
|
ce94a6e9468ca1ec3c62b98a42a762f04ebc1840
| 182
|
py
|
Python
|
tests/test_patient.py
|
genghisken/python-intermediate-inflammation
|
dc16cfb5824a713e8881dba1116f607793dd5f4c
|
[
"MIT"
] | null | null | null |
tests/test_patient.py
|
genghisken/python-intermediate-inflammation
|
dc16cfb5824a713e8881dba1116f607793dd5f4c
|
[
"MIT"
] | 20
|
2021-12-10T10:36:32.000Z
|
2021-12-10T12:46:34.000Z
|
code/poetry_project/tests/test_patient.py
|
SABS-R3/software-engineering-day4
|
d73cc72786fceb236cd1ec33e900e482fbad08d4
|
[
"CC-BY-4.0"
] | 1
|
2021-12-10T11:54:57.000Z
|
2021-12-10T11:54:57.000Z
|
"""Tests for the Patient model."""
| 16.545455
| 43
| 0.659341
|
"""Tests for the Patient model."""
def test_create_patient():
from inflammation.models import Patient
name = 'Alice'
p = Patient(name=name)
assert p.name == name
| 0
| 0
| 0
| 0
| 0
| 123
| 0
| 0
| 23
|
270ceac1e816bd1486bca6f8ee6231afb3d168fa
| 6,299
|
py
|
Python
|
PT2022_0412_1716_simpler_keys.py
|
O8pen/PhraseTranslate
|
62e657d1e58ab36df27f181f51410840526e939f
|
[
"Apache-2.0"
] | null | null | null |
PT2022_0412_1716_simpler_keys.py
|
O8pen/PhraseTranslate
|
62e657d1e58ab36df27f181f51410840526e939f
|
[
"Apache-2.0"
] | null | null | null |
PT2022_0412_1716_simpler_keys.py
|
O8pen/PhraseTranslate
|
62e657d1e58ab36df27f181f51410840526e939f
|
[
"Apache-2.0"
] | null | null | null |
# Python 3.7.9
# pip install clipboard
# pip install pywin32
# pip install pyautogui
# pip install pynput
# Google chrome Keyboard Shortcuts for Google Translate https://chrome.google.com/webstore/detail/keyboard-shortcuts-for-go/akjhnbnjanndggbcegmdggfjjclohjpo
# alt+j listen google translate
# Google chrome Dark Reader https://chrome.google.com/webstore/detail/dark-reader/eimadpbcbfnmbkopoojfekhnkhdbieeh
# Microsoft edge 110% zoom - https://www.phrasereader.com/
# Google chrome 125% zoom - https://translate.google.com/
from pynput.keyboard import Listener
next_x = 612
next_y = 562
prev_x = 359
prev_y = 562
translate_text_x = 1356
translate_text_y = 352
translate_blank_x = 1392
translate_blank_y = 222
text = ""
x = []
hasbeencaptured = False
last_key = 0
was_pressed_next = False
was_pressed_prev = False
was_pressed_one = False
was_pressed_two = False
was_pressed_three = False
was_pressed_four = False
was_pressed_allwords = False
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
| 27.627193
| 156
| 0.589459
|
# Python 3.7.9
# pip install clipboard
# pip install pywin32
# pip install pyautogui
# pip install pynput
# Google chrome Keyboard Shortcuts for Google Translate https://chrome.google.com/webstore/detail/keyboard-shortcuts-for-go/akjhnbnjanndggbcegmdggfjjclohjpo
# alt+j listen google translate
# Google chrome Dark Reader https://chrome.google.com/webstore/detail/dark-reader/eimadpbcbfnmbkopoojfekhnkhdbieeh
# Microsoft edge 110% zoom - https://www.phrasereader.com/
# Google chrome 125% zoom - https://translate.google.com/
from clipboard import copy, paste
from win32api import SetCursorPos, mouse_event
from win32con import MOUSEEVENTF_LEFTDOWN, MOUSEEVENTF_LEFTUP
from time import sleep
from pyautogui import hotkey
from pynput.keyboard import Listener, Key
next_x = 612
next_y = 562
prev_x = 359
prev_y = 562
translate_text_x = 1356
translate_text_y = 352
translate_blank_x = 1392
translate_blank_y = 222
text = ""
x = []
hasbeencaptured = False
last_key = 0
was_pressed_next = False
was_pressed_prev = False
was_pressed_one = False
was_pressed_two = False
was_pressed_three = False
was_pressed_four = False
was_pressed_allwords = False
def on_press(key):
global last_key
global was_pressed_next
global was_pressed_prev
global was_pressed_one
global was_pressed_two
global was_pressed_three
global was_pressed_four
global was_pressed_allwords
# hasattr(key, 'vk')
# print("Key pressed: {0}".format(key))
# print(key.vk)
if hasattr(key, 'vk') and key.vk == 101: # Numpad 5 (Next Button)
if was_pressed_next == False:
was_pressed_next = True
last_key = 101
nextbutton()
elif hasattr(key, 'vk') and key.vk == 100: # Numpad 4 (Prev button)
if was_pressed_prev == False:
was_pressed_prev = True
last_key = 100
prevbutton()
elif hasattr(key, 'vk') and key.vk == 96: # Numpad 0 (Listen all words)
if was_pressed_allwords == False:
was_pressed_allwords = True
if last_key == 96:
hotkey('alt', 'j')
else:
last_key = 96
capture_faster()
copy(text)
playsound()
elif hasattr(key, 'vk') and key.vk == 97: # Numpad 1 (Listen Word[1])
if was_pressed_one == False:
was_pressed_one = True
if last_key == 97:
hotkey('alt', 'j')
else:
last_key = 97
capture_faster()
if(len(x) >= 1):
copy(x[0])
playsound()
elif hasattr(key, 'vk') and key.vk == 98: # Numpad 2 (Listen Word[2])
if was_pressed_two == False:
was_pressed_two = True
if last_key == 98:
hotkey('alt', 'j')
else:
last_key = 98
capture_faster()
if(len(x) >= 2):
copy(x[1])
playsound()
elif hasattr(key, 'vk') and key.vk == 99: # Numpad 3 (Listen Word[3])
if was_pressed_three == False:
was_pressed_three = True
if last_key == 99:
hotkey('alt', 'j')
else:
last_key = 99
capture_faster()
if(len(x) >= 3):
copy(x[2])
playsound()
elif hasattr(key, 'vk') and key.vk == 102: # Numpad 6 (Listen Word[4])
if was_pressed_four == False:
was_pressed_four = True
if last_key == 102:
hotkey('alt', 'j')
else:
last_key = 102
capture_faster()
if(len(x) >= 4):
copy(x[3])
playsound()
def on_release(key):
global was_pressed_next
global was_pressed_prev
global was_pressed_allwords
global was_pressed_one
global was_pressed_two
global was_pressed_three
global was_pressed_four
if hasattr(key, 'vk') and key.vk == 101: # Numpad 5 (Next Button)
was_pressed_next = False
elif hasattr(key, 'vk') and key.vk == 100: # Numpad 4 (Prev button)
was_pressed_prev = False
elif hasattr(key, 'vk') and key.vk == 96: # Numpad 0 (Listen all words)
was_pressed_allwords = False
elif hasattr(key, 'vk') and key.vk == 97: # Numpad 1 (Listen Word[1])
was_pressed_one = False
elif hasattr(key, 'vk') and key.vk == 98: # Numpad 2 (Listen Word[2])
was_pressed_two = False
elif hasattr(key, 'vk') and key.vk == 99: # Numpad 3 (Listen Word[3])
was_pressed_three = False
elif hasattr(key, 'vk') and key.vk == 102: # Numpad 6 (Listen Word[4])
was_pressed_four = False
def click(x, y):
SetCursorPos((x, y))
mouse_event(MOUSEEVENTF_LEFTDOWN, 0, 0)
sleep(0.05)
mouse_event(MOUSEEVENTF_LEFTUP, 0, 0)
def nextbutton():
global hasbeencaptured
global next_x
global next_y
click(next_x, next_y)
hotkey('ctrl', 'a')
sleep(0.05)
hotkey('ctrl', 'c')
hasbeencaptured = False
def prevbutton():
global hasbeencaptured
global prev_x
global prev_y
click(prev_x, prev_y)
hotkey('ctrl', 'a')
sleep(0.05)
hotkey('ctrl', 'c')
hasbeencaptured = False
def playsound():
global translate_text_x
global translate_text_y
global translate_blank_x
global translate_blank_y
click(translate_text_x, translate_text_y)
hotkey('ctrl', 'a')
sleep(0.1)
hotkey('ctrl', 'v')
sleep(0.05)
hotkey('alt', 'j')
sleep(0.55)
click(translate_blank_x, translate_blank_y)
def capture_faster():
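    # Parse the phrase currently on the clipboard: drop the leading marker,
    # trim to the first line, strip punctuation, lower-case it and split it
    # into words. Runs only once per captured phrase.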
global text
global x
global hasbeencaptured
if hasbeencaptured == False:
text = paste()
text = text[2:]
endNumber = text.find('\n')-1
text = text[0:endNumber]
punctuations = '''!()[]{};:'"\,<>—./?@#$%^&*‘_~\n'''
no_punct = ""
for char in text:
if char not in punctuations:
no_punct = no_punct + char
text = no_punct.lower()
x = text.split(' ')
hasbeencaptured = True
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
| 6
| 0
| 0
| 0
| 0
| 4,884
| 0
| 90
| 272
|
77826738beb692c3294e0414b44a66d2c7706884
| 1,019
|
py
|
Python
|
Python/SwapNodesInPairs.py
|
TonnyL/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 205
|
2017-11-16T08:38:46.000Z
|
2022-03-06T05:50:03.000Z
|
Python/SwapNodesInPairs.py
|
santosh241/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 3
|
2018-04-10T10:17:52.000Z
|
2020-12-11T08:00:09.000Z
|
Python/SwapNodesInPairs.py
|
santosh241/Windary
|
39f85cdedaaf5b85f7ce842ecef975301fc974cf
|
[
"MIT"
] | 28
|
2018-04-10T06:42:42.000Z
|
2021-09-14T14:15:39.000Z
|
# -*- coding: UTF-8 -*-
#
# Given a linked list, swap every two adjacent nodes and return its head.
#
# For example,
# Given 1->2->3->4, you should return the list as 2->1->4->3.
#
# Your algorithm should use only constant space. You may not modify the values in the list, only nodes itself can be changed.
#
# Python, Python3 all accepted.
| 24.261905
| 125
| 0.56526
|
# -*- coding: UTF-8 -*-
#
# Given a linked list, swap every two adjacent nodes and return its head.
#
# For example,
# Given 1->2->3->4, you should return the list as 2->1->4->3.
#
# Your algorithm should use only constant space. You may not modify the values in the list, only nodes itself can be changed.
#
# Python, Python3 all accepted.
class SwapNodesInPairs:
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if head is None or head.next is None:
return head
pre = head
nxt = pre.next
while pre is not None and nxt is not None:
tmp = nxt.val
nxt.val = pre.val
pre.val = tmp
pre = nxt.next
if pre is not None:
nxt = pre.next
return head
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __eq__(self, other):
return self.val == other.val and self.next == other.next
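# Quick self-check (a sketch; builds the 1->2->3->4 list from the problem
# statement above and prints the swapped order):
if __name__ == '__main__':
    nodes = [ListNode(i) for i in range(1, 5)]
    for first, second in zip(nodes, nodes[1:]):
        first.next = second
    head = SwapNodesInPairs().swapPairs(nodes[0])
    values = []
    while head is not None:
        values.append(head.val)
        head = head.next
    print(values)  # expected: [2, 1, 4, 3]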
| 0
| 0
| 0
| 630
| 0
| 0
| 0
| 0
| 46
|
5bf1138dbd9cc41844dc6aff07cb5e59592dbe1a
| 7,238
|
py
|
Python
|
vaelib/avb.py
|
rnagumo/vaelib
|
9505a62e07f539df1a94f1ac7e9ada694df62844
|
[
"MIT"
] | 1
|
2021-11-12T14:25:05.000Z
|
2021-11-12T14:25:05.000Z
|
vaelib/avb.py
|
rnagumo/vaelib
|
9505a62e07f539df1a94f1ac7e9ada694df62844
|
[
"MIT"
] | null | null | null |
vaelib/avb.py
|
rnagumo/vaelib
|
9505a62e07f539df1a94f1ac7e9ada694df62844
|
[
"MIT"
] | 1
|
2021-12-30T12:30:53.000Z
|
2021-12-30T12:30:53.000Z
|
"""Adversarial Variational Bayes (AVB).
Adversarial Variational Bayes: Unifying Variational Autoencoders and Generative
Adversarial Networks
http://arxiv.org/abs/1701.04722
Ref)
https://github.com/gdikov/adversarial-variational-bayes
http://seiya-kumada.blogspot.com/2018/07/adversarial-variational-bayes.html
https://github.com/LMescheder/AdversarialVariationalBayes
https://nbviewer.jupyter.org/github/hayashiyus/Thermal-VAE/blob/master/adversarial%20variational%20bayes%20toy%20example-cyclical-annealing-MNIST-898-4000.ipynb
"""
| 28.496063
| 160
| 0.553606
|
"""Adversarial Variational Bayes (AVB).
Adversarial Variational Bayes: Unifying Variational Autoencoders and Generative
Adversarial Networks
http://arxiv.org/abs/1701.04722
Ref)
https://github.com/gdikov/adversarial-variational-bayes
http://seiya-kumada.blogspot.com/2018/07/adversarial-variational-bayes.html
https://github.com/LMescheder/AdversarialVariationalBayes
https://nbviewer.jupyter.org/github/hayashiyus/Thermal-VAE/blob/master/adversarial%20variational%20bayes%20toy%20example-cyclical-annealing-MNIST-898-4000.ipynb
"""
from typing import Dict, Iterator, Optional, Tuple
import torch
from torch import Tensor, nn
from .base import BaseVAE, nll_bernoulli
class Encoder(nn.Module):
"""Encoder q(z|x, e).
Args:
in_channels (int): Channel size of inputs.
z_dim (int): Dimension size of latents.
e_dim (int): Dimension size of noises.
"""
def __init__(self, in_channels: int, z_dim: int, e_dim: int) -> None:
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
)
self.fc_x = nn.Sequential(
nn.Linear(1024, 256),
nn.ReLU(),
nn.Linear(256, z_dim),
nn.ReLU(),
)
self.fc_e = nn.Sequential(
nn.Linear(e_dim, z_dim),
nn.ReLU(),
)
self.fc = nn.Linear(z_dim * 2, z_dim)
def forward(self, x: Tensor, e: Tensor) -> Tensor:
"""Encodes z given x, e.
Args:
x (torch.Tensor): Observations, size `(b, c, h, w)`.
e (torch.Tensor): Noises, size `(b, e)`.
Returns:
z (torch.Tensor): Encoded latents, size `(b, z)`.
"""
h_x = self.conv(x)
h_x = h_x.view(-1, 1024)
h_x = self.fc_x(h_x)
h_e = self.fc_e(e)
z = self.fc(torch.cat([h_x, h_e], dim=1))
return z
class Decoder(nn.Module):
"""Decoder p(x|z).
Args:
in_channels (int): Channel size of inputs.
z_dim (int): Dimension size of latents.
"""
def __init__(self, in_channels: int, z_dim: int) -> None:
super().__init__()
self.fc = nn.Sequential(
nn.Linear(z_dim, 256),
nn.ReLU(),
nn.Linear(256, 1024),
nn.ReLU(),
)
self.deconv = nn.Sequential(
nn.ConvTranspose2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, in_channels, 4, stride=2, padding=1),
nn.Sigmoid(),
)
def forward(self, z: Tensor) -> Tensor:
"""Encodes z given x.
Args:
z (torch.Tensor): Latents, size `(b, z)`.
Returns:
probs (torch.Tensor): Decoded observations, size `(b, c, h, w)`.
"""
h = self.fc(z)
h = h.view(-1, 64, 4, 4)
probs = self.deconv(h)
return probs
class Discriminator(nn.Module):
"""Discriminator T(x, z).
Args:
in_channels (int): Channel size of inputs.
z_dim (int): Dimension size of latents.
"""
def __init__(self, in_channels: int, z_dim: int) -> None:
super().__init__()
self.disc_x = nn.Sequential(
nn.Conv2d(in_channels, 32, 4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(32, 32, 4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(32, 64, 4, stride=2, padding=1),
nn.LeakyReLU(),
nn.Conv2d(64, 64, 4, stride=2, padding=1),
nn.LeakyReLU(),
)
self.fc_x = nn.Linear(1024, 256)
self.disc_z = nn.Sequential(
nn.Linear(z_dim, 512),
nn.LeakyReLU(),
nn.Linear(512, 512),
nn.LeakyReLU(),
nn.Linear(512, 256),
nn.LeakyReLU(),
)
self.fc = nn.Linear(512, 1)
def forward(self, x: Tensor, z: Tensor) -> Tensor:
"""Discriminate p(x)p(z) from p(x)q(z|x).
Args:
x (torch.Tensor): Observations, size `(b, c, h, w)`.
z (torch.Tensor): Latents, size `(b, z)`.
Returns:
logits (torch.Tensor): Logits, size `(b, 1)`.
"""
h_x = self.disc_x(x)
h_x = self.fc_x(h_x.view(-1, 1024))
h_z = self.disc_z(z)
logits = self.fc(torch.cat([h_x, h_z], dim=1))
return logits
class AVB(BaseVAE):
"""Adversarial Variational Bayes.
Args:
in_channels (int, optional): Channel size of inputs.
z_dim (int, optional): Dimension size of latents.
e_dim (int, optional): Dimension size of noises.
"""
def __init__(self, in_channels: int = 3, z_dim: int = 10, e_dim: int = 10) -> None:
super().__init__()
self.z_dim = z_dim
self.e_dim = e_dim
self.encoder = Encoder(in_channels, z_dim, e_dim)
self.decoder = Decoder(in_channels, z_dim)
self.discriminator = Discriminator(in_channels, z_dim)
self.bce_loss = nn.BCEWithLogitsLoss(reduction="none")
self.p_mu: Tensor
self.p_var: Tensor
self.register_buffer("p_mu", torch.zeros(1, z_dim))
self.register_buffer("p_var", torch.ones(1, z_dim))
def inference(
self, x: Tensor, y: Optional[Tensor] = None, beta: float = 1.0
) -> Tuple[Tuple[Tensor, ...], Dict[str, Tensor]]:
batch = x.size(0)
e_mu = x.new_zeros((batch, self.e_dim))
e_var = x.new_ones((batch, self.e_dim))
e = e_mu + e_var ** 0.5 * torch.randn_like(e_var)
z_mu = x.new_zeros((batch, self.z_dim))
z_var = x.new_ones((batch, self.z_dim))
z_p = z_mu + z_var ** 0.5 * torch.randn_like(z_var)
z_q = self.encoder(x, e)
recon = self.decoder(z_q)
logits = self.discriminator(x, z_q)
logits = beta * logits.sum(dim=1)
ce_loss = nll_bernoulli(x, recon, reduce=False)
ce_loss = ce_loss.sum(dim=[1, 2, 3])
log_d_q = self.bce_loss(self.discriminator(x, z_q.detach()), z_q.new_ones((batch, 1)))
log_d_p = self.bce_loss(self.discriminator(x, z_p), z_p.new_zeros((batch, 1)))
loss_d = (log_d_q + log_d_p).sum(dim=1)
loss_dict = {
"loss": logits + ce_loss,
"ce_loss": ce_loss,
"logits": logits,
"loss_d": loss_d,
}
return (recon, z_q), loss_dict
def sample(self, batch_size: int = 1, y: Optional[Tensor] = None) -> Tensor:
mu = self.p_mu.repeat(batch_size, 1)
var = self.p_var.repeat(batch_size, 1)
z = mu + var ** 0.5 * torch.randn_like(var)
x = self.decoder(z)
return x
def adversarial_parameters(self) -> Optional[Iterator]:
return self.discriminator.parameters()
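# Smoke-test sketch (assumes 3x64x64 inputs, inferred from the four stride-2
# convolutions and the 64 * 4 * 4 = 1024 fully connected layer):
if __name__ == "__main__":
    model = AVB(in_channels=3, z_dim=10, e_dim=10)
    x = torch.rand(8, 3, 64, 64)
    (recon, z_q), losses = model.inference(x)
    print(recon.shape, z_q.shape, sorted(losses.keys()))
    print(model.sample(batch_size=4).shape)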
| 0
| 0
| 0
| 6,470
| 0
| 0
| 0
| 46
| 183
|
989d84ba9d1966b892115c10525a944d80912f4e
| 4,884
|
py
|
Python
|
boards/apollo2_evb/examples/multi_boot_secure_sample/generate_secureboot_assets.py
|
wher0001/AmbiqSuiteSDK
|
e280cbde3e366509da6768ab95471782a05d2371
|
[
"BSD-3-Clause"
] | 25
|
2019-09-26T18:30:40.000Z
|
2022-01-21T07:42:04.000Z
|
boards/apollo2_evb/examples/multi_boot_secure_sample/generate_secureboot_assets.py
|
vaxradius/AmbiqSuite-R2.4.2
|
0ffd4a67ec6b63512f56556c40fe6ee4ded1a569
|
[
"BSD-3-Clause"
] | 23
|
2020-01-20T17:25:02.000Z
|
2021-11-16T21:06:42.000Z
|
boards/apollo2_evb/examples/multi_boot_secure_sample/generate_secureboot_assets.py
|
vaxradius/AmbiqSuite-R2.4.2
|
0ffd4a67ec6b63512f56556c40fe6ee4ded1a569
|
[
"BSD-3-Clause"
] | 23
|
2020-04-04T18:35:35.000Z
|
2022-03-15T07:34:02.000Z
|
#!/usr/bin/env python3
import argparse
# This key table has to match the one in bootloader
keyTbl = [0xDEADBEEF, 0xAAAAAAAA, 0x11111111, 0x00000000, 0xFFFFFFFF, 0x55555555, 0xA5A5A5A5, 0x66666666]
#******************************************************************************
#
# Main function
#
#******************************************************************************
#******************************************************************************
#
# Turn a 32-bit number into a series of bytes for transmission.
#
# This command will split a 32-bit integer into an array of bytes, ordered
# LSB-first for transmission over the UART.
#
#******************************************************************************
#******************************************************************************
#
# Extract a word from a byte array
#
#******************************************************************************
#******************************************************************************
#
# CRC function that matches the CRC used by the Apollo bootloader.
#
#******************************************************************************
poly32 = 0x1EDC6F41
#******************************************************************************
#
# Main program flow
#
#******************************************************************************
if __name__ == '__main__':
parser = argparse.ArgumentParser(description =
'Secure Image generation utility for Apollo or Apollo2')
parser.add_argument('binfile',
help = 'Binary file to program into the target device')
parser.add_argument('keyidxVal', default=0, type=int, help = 'encryption key index')
parser.add_argument('protectionVal', default=0, help = 'Image Protection Value (hex)')
parser.add_argument('encimagefile', help = 'Destination file for Encrypted image')
parser.add_argument('sectrailerfile', help = 'Destination file for security trailer')
args = parser.parse_args()
main()
| 33.452055
| 115
| 0.546683
|
#!/usr/bin/env python3
import argparse
import sys
import os
# This key table has to match the one in bootloader
keyTbl = [0xDEADBEEF, 0xAAAAAAAA, 0x11111111, 0x00000000, 0xFFFFFFFF, 0x55555555, 0xA5A5A5A5, 0x66666666]
#******************************************************************************
#
# Main function
#
#******************************************************************************
def main():
# Read the binary file from the command line.
with open(args.binfile, mode='rb') as binfile:
clear_application= binfile.read()
print('Loading Clear application {} bytes from {}...'.format(len(clear_application), args.binfile), flush=True)
plaintext = pad_to_block_size(clear_application, 4)
ivVal = word_from_bytes(os.urandom(4), 0)
print("Initialization Vector")
print(hex(ivVal))
application = encrypt_app(args.keyidxVal, plaintext, ivVal)
trailer = sec_trailer(args.keyidxVal, plaintext, ivVal, int(args.protectionVal, 0))
print('Saving encrypted image {} bytes to {}...'.format(len(application), args.encimagefile), flush=True)
with open(args.encimagefile, mode='wb') as encimagefile:
encimagebytearray = bytearray(application)
encimagefile.write(encimagebytearray)
print('Saving security trailer {} bytes to {}...'.format(len(trailer), args.sectrailerfile), flush=True)
with open(args.sectrailerfile, mode='wb') as sectrailerfile:
trailerbytearray = bytearray(trailer)
sectrailerfile.write(trailerbytearray)
print('Done.')
#******************************************************************************
#
# Turn a 32-bit number into a series of bytes for transmission.
#
# This command will split a 32-bit integer into an array of bytes, ordered
# LSB-first for transmission over the UART.
#
#******************************************************************************
def int_to_bytes(n):
A = [n & 0xFF,
(n >> 8) & 0xFF,
(n >> 16) & 0xFF,
(n >> 24) & 0xFF]
return A
#******************************************************************************
#
# Extract a word from a byte array
#
#******************************************************************************
def word_from_bytes(B, n):
return (B[n] + (B[n + 1] << 8) + (B[n + 2] << 16) + (B[n + 3] << 24))
#******************************************************************************
#
# CRC function that matches the CRC used by the Apollo bootloader.
#
#******************************************************************************
poly32 = 0x1EDC6F41
def crc32(L):
rem = 0
for b in L:
rem = rem ^ (b << 24)
for i in range(8):
if rem & 0x80000000:
rem = ((rem << 1) ^ poly32)
else:
rem = (rem << 1)
rem = rem & 0xFFFFFFFF
return rem
def pad_to_block_size(text, block_size):
text_length = len(text)
amount_to_pad = block_size - (text_length % block_size)
if amount_to_pad == 0:
amount_to_pad = block_size
for i in range(0, amount_to_pad, 1):
text += bytes(chr(amount_to_pad), 'ascii')
return text
def encrypt_app(keyidx, clear_app, iv):
key32 = keyTbl[keyidx]
applen = len(clear_app)
enc_app = []
for i in range(0, applen, 4):
word = word_from_bytes(clear_app, i)
word = (word ^ iv) ^ key32
iv = word
enc_app.extend(int_to_bytes(word))
return enc_app
def sec_trailer(keyidx, clear_app, iv, protection):
key32 = keyTbl[keyidx]
secTrailer = []
secTrailer.extend(int_to_bytes(keyidx))
secTrailer.extend(int_to_bytes(protection))
applen = len(clear_app)
secTrailer.extend(int_to_bytes(applen))
crc = crc32(clear_app)
sig = key32 ^ crc
secTrailer.extend(int_to_bytes(sig))
secTrailer.extend(int_to_bytes(iv))
# Trailer Signature
secTrailerSig = crc32(secTrailer) ^ key32
secTrailer.extend(int_to_bytes(secTrailerSig))
return secTrailer
#******************************************************************************
#
# Main program flow
#
#******************************************************************************
if __name__ == '__main__':
parser = argparse.ArgumentParser(description =
'Secure Image generation utility for Apollo or Apollo2')
parser.add_argument('binfile',
help = 'Binary file to program into the target device')
parser.add_argument('keyidxVal', default=0, type=int, help = 'encryption key index')
parser.add_argument('protectionVal', default=0, help = 'Image Protection Value (hex)')
parser.add_argument('encimagefile', help = 'Destination file for Encrypted image')
parser.add_argument('sectrailerfile', help = 'Destination file for security trailer')
args = parser.parse_args()
main()
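# Example invocation (illustrative file names; the key index must be 0-7 and
# the protection value is parsed as a hex/int literal):
#
#   python3 generate_secureboot_assets.py app.bin 2 0x1 app_enc.bin app_trailer.bin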
| 0
| 0
| 0
| 0
| 0
| 2,644
| 0
| -23
| 201
|
0496b97a9bf1fb8fa8146228a03e548726184666
| 1,303
|
py
|
Python
|
backend/api/views.py
|
trib3/django-vue-vuetify-toy
|
b73fe31acf989b63511bf1779695912257c88cf2
|
[
"MIT"
] | null | null | null |
backend/api/views.py
|
trib3/django-vue-vuetify-toy
|
b73fe31acf989b63511bf1779695912257c88cf2
|
[
"MIT"
] | null | null | null |
backend/api/views.py
|
trib3/django-vue-vuetify-toy
|
b73fe31acf989b63511bf1779695912257c88cf2
|
[
"MIT"
] | null | null | null |
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from django.db.models import Count, Sum
from django.db.models.functions import Coalesce
from backend.api.models import Profile, ProfileDisplayFields, PostAggregateFields
from django.http import JsonResponse
from django.http import HttpRequest
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name="index.html"))
def profiles(request: HttpRequest) -> JsonResponse:
"""
Data about profiles and their posts
:param request: Request from the client
:return: JsonResponse containing a list of dictionaries that
represent profiles and their posts.
EX:
[
{
"name": "lifeoftanyamarie",
"thumbnail": "thumbnail.com",
"followers": 90900,
"post_count": 2,
"likes": 4310
},...
]
"""
fields = [
display.value for display in [*ProfileDisplayFields, *PostAggregateFields]
]
profiles_qs = (
Profile.objects.all()
.annotate(
post_count=Coalesce(Count("post"), 0),
likes=Coalesce(Sum("post__likes"), 0),
)
.values(*fields)
)
return JsonResponse(list(profiles_qs), safe=False)
| 30.302326
| 82
| 0.653876
|
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from django.db.models import Count, Sum
from django.db.models.functions import Coalesce
from backend.api.models import Profile, ProfileDisplayFields, PostAggregateFields
from django.http import JsonResponse
from django.http import HttpRequest
# Serve Vue Application
index_view = never_cache(TemplateView.as_view(template_name="index.html"))
def profiles(request: HttpRequest) -> JsonResponse:
"""
Data about profiles and their posts
:param request: Request from the client
:return: JsonResponse containing a list of dictionaries that
represent profiles and their posts.
EX:
[
{
"name": "lifeoftanyamarie",
"thumbnail": "thumbnail.com",
"followers": 90900,
"post_count": 2,
"likes": 4310
},...
]
"""
fields = [
display.value for display in [*ProfileDisplayFields, *PostAggregateFields]
]
profiles_qs = (
Profile.objects.all()
.annotate(
post_count=Coalesce(Count("post"), 0),
likes=Coalesce(Sum("post__likes"), 0),
)
.values(*fields)
)
return JsonResponse(list(profiles_qs), safe=False)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
512587114336c35d6dc9c508ffa136085e46b053
| 2,295
|
py
|
Python
|
submission/id/models.py
|
simonprast/wopi-engine
|
b3f59782659c8be42f4064bce5281afd391833be
|
[
"BSD-Source-Code"
] | null | null | null |
submission/id/models.py
|
simonprast/wopi-engine
|
b3f59782659c8be42f4064bce5281afd391833be
|
[
"BSD-Source-Code"
] | null | null | null |
submission/id/models.py
|
simonprast/wopi-engine
|
b3f59782659c8be42f4064bce5281afd391833be
|
[
"BSD-Source-Code"
] | null | null | null |
#
# Created on Wed Nov 18 2020
#
# Copyright (c) 2020 - Simon Prast
#
| 25.786517
| 71
| 0.651852
|
#
# Created on Wed Nov 18 2020
#
# Copyright (c) 2020 - Simon Prast
#
import os
import uuid
from django.conf import settings
from django.db import models
from user.models import User
class IDSubmissionManager(models.Manager):
def create_submission(self, submitter, document, document_back):
id_submission = IDSubmission(
submitter=submitter,
document=document,
document_back=document_back
)
id_submission.save()
return id_submission
def create_path(instance, filename):
folder = 'ids/' + str(uuid.uuid4())
os.makedirs(os.path.join(settings.MEDIA_ROOT, folder))
return os.path.join(folder, filename)
class IDSubmission(models.Model):
submitter = models.ForeignKey(
User, on_delete=models.SET_NULL, blank=True, null=True
)
# Will be saved to settings.MEDIA_ROOT (francy.media) + /ids/
document = models.ImageField(
upload_to=create_path
)
document_back = models.ImageField(
upload_to=create_path, blank=True, null=True
)
verified = models.BooleanField(default=False)
denied = models.BooleanField(default=False)
latest = models.BooleanField(default=True)
objects = IDSubmissionManager()
REQUIRED_FIELDS = []
def save(self, *args, **kwargs):
IDSubmission.objects.filter(
submitter=self.submitter, latest=True).update(latest=False)
self.latest = True
super(IDSubmission, self).save()
class Meta:
verbose_name = 'ID Submission'
def __str__(self):
return 'Ausweis von ' + str(self.submitter) + \
' (verified: ' + str(self.verified) + \
', latest: ' + str(self.latest) + ')'
class IDToken(models.Model):
user = models.ForeignKey(
User, on_delete=models.CASCADE, null=True, blank=True
)
token = models.UUIDField(
default=uuid.uuid4, null=True, blank=True
)
created_at = models.DateTimeField(
auto_now_add=True, null=True, blank=True
)
called = models.BooleanField(
default=False, null=True, blank=True
)
uploaded = models.BooleanField(
default=False, null=True, blank=True
)
expired = models.BooleanField(
default=False, null=True, blank=True
)
| 0
| 0
| 0
| 1,856
| 0
| 156
| 0
| 3
| 205
|
54229d5bb24d7ad6c282137584e0947395e03605
| 418
|
py
|
Python
|
Session_01/py101/10_classes.py
|
weighanchor4414/DigitalWorldWorkshop2020
|
9eca3a789e5532680ab032c20fe892bdbd47b891
|
[
"MIT"
] | 9
|
2020-06-05T17:01:23.000Z
|
2022-03-16T19:55:50.000Z
|
Session_01/py101/10_classes.py
|
weighanchor4414/DigitalWorldWorkshop2020
|
9eca3a789e5532680ab032c20fe892bdbd47b891
|
[
"MIT"
] | null | null | null |
Session_01/py101/10_classes.py
|
weighanchor4414/DigitalWorldWorkshop2020
|
9eca3a789e5532680ab032c20fe892bdbd47b891
|
[
"MIT"
] | 2
|
2020-02-20T16:48:35.000Z
|
2020-03-18T14:36:04.000Z
|
p1 = People("Maria", 1999)
print(p1.name)
print(p1.birthYear)
print(p1.age)
p1.pillar = "Architecture and Sustainable Design (ASD)"
print(f"{p1.name} is {p1.age} years old, and she is majored in {p1.pillar}")
| 23.222222
| 76
| 0.645933
|
class People:
def __init__(self, name, birthYear):
self.name = name
self.birthYear = birthYear
self.age = 2020 - birthYear
self.height = None
self.pillar = None
p1 = People("Maria", 1999)
print(p1.name)
print(p1.birthYear)
print(p1.age)
p1.pillar = "Architecture and Sustainable Design (ASD)"
print(f"{p1.name} is {p1.age} years old, and she is majored in {p1.pillar}")
| 0
| 0
| 0
| 183
| 0
| 0
| 0
| 0
| 22
|
1f913af634f48374288edbe27e053cffc84d41af
| 4,845
|
py
|
Python
|
tests/functional/commands/test_list_command.py
|
aimar1986bupt/orion
|
6d217af1f9002aa671f8a3260a687c540ca5336d
|
[
"BSD-3-Clause"
] | 4
|
2019-09-02T19:41:04.000Z
|
2020-04-07T13:05:47.000Z
|
tests/functional/commands/test_list_command.py
|
aimar1986bupt/orion
|
6d217af1f9002aa671f8a3260a687c540ca5336d
|
[
"BSD-3-Clause"
] | 2
|
2018-06-26T19:17:09.000Z
|
2022-02-23T13:40:04.000Z
|
tests/functional/commands/test_list_command.py
|
aimar1986bupt/orion
|
6d217af1f9002aa671f8a3260a687c540ca5336d
|
[
"BSD-3-Clause"
] | 2
|
2019-08-26T11:36:47.000Z
|
2020-04-07T13:05:48.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Perform a functional test of the list command."""
import os
import orion.core.cli
def test_no_exp(monkeypatch, clean_db, capsys):
"""Test that nothing is printed when there are no experiments."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == ""
def test_single_exp(clean_db, one_experiment, capsys):
"""Test that the name of the experiment is printed when there is one experiment."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-v1\n"
def test_no_version_backward_compatible(clean_db, one_experiment_no_version, capsys):
"""Test status with no experiments."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-no-version-v1\n"
def test_broken_refers(clean_db, broken_refers, capsys):
"""Test that experiment without refers dict can be handled properly."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-v1\n"
def test_two_exp(capsys, clean_db, two_experiments):
"""Test that experiment and child are printed."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_double_exp-v1
test_double_exp_child-v1
"""
def test_three_exp(capsys, clean_db, three_experiments):
"""Test that experiment, child and grand-child are printed."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_double_exp-v1
test_double_exp_child-v1
test_single_exp-v1
"""
def test_no_exp_name(clean_db, three_experiments, monkeypatch, capsys):
"""Test that nothing is printed when there are no experiments with a given name."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'I don\'t exist'])
captured = capsys.readouterr().out
assert captured == ""
def test_exp_name(clean_db, three_experiments, monkeypatch, capsys):
"""Test that only the specified experiment is printed."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'test_single_exp'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-v1\n"
def test_exp_name_with_child(clean_db, three_experiments, monkeypatch, capsys):
"""Test that only the specified experiment is printed, and with its child."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'test_double_exp'])
captured = capsys.readouterr().out
assert captured == """\
test_double_exp-v1
test_double_exp_child-v1
"""
def test_exp_name_child(clean_db, three_experiments, monkeypatch, capsys):
"""Test that only the specified child experiment is printed."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'test_double_exp_child'])
captured = capsys.readouterr().out
assert captured == " test_double_exp_child-v1\n"
def test_exp_same_name(clean_db, two_experiments_same_name, monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_single_exp-v1
test_single_exp-v2
"""
def test_exp_family_same_name(clean_db, three_experiments_family_same_name, monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed
even when one of them has a child.
"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_single_exp-v2
test_single_exp-v1
test_single_exp_child-v1
"""
def test_exp_family_branch_same_name(clean_db, three_experiments_branch_same_name,
monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed
even when last one has a child.
"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_single_exp-v1
test_single_exp-v2
test_single_exp_child-v1
"""
| 30.664557
| 100
| 0.685449
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Perform a functional test of the list command."""
import os
import orion.core.cli
def test_no_exp(monkeypatch, clean_db, capsys):
"""Test that nothing is printed when there are no experiments."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == ""
def test_single_exp(clean_db, one_experiment, capsys):
"""Test that the name of the experiment is printed when there is one experiment."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-v1\n"
def test_no_version_backward_compatible(clean_db, one_experiment_no_version, capsys):
"""Test status with no experiments."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-no-version-v1\n"
def test_broken_refers(clean_db, broken_refers, capsys):
"""Test that experiment without refers dict can be handled properly."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-v1\n"
def test_two_exp(capsys, clean_db, two_experiments):
"""Test that experiment and child are printed."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_double_exp-v1┐
└test_double_exp_child-v1
"""
def test_three_exp(capsys, clean_db, three_experiments):
"""Test that experiment, child and grand-child are printed."""
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_double_exp-v1┐
└test_double_exp_child-v1
test_single_exp-v1
"""
def test_no_exp_name(clean_db, three_experiments, monkeypatch, capsys):
"""Test that nothing is printed when there are no experiments with a given name."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'I don\'t exist'])
captured = capsys.readouterr().out
assert captured == ""
def test_exp_name(clean_db, three_experiments, monkeypatch, capsys):
"""Test that only the specified experiment is printed."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'test_single_exp'])
captured = capsys.readouterr().out
assert captured == " test_single_exp-v1\n"
def test_exp_name_with_child(clean_db, three_experiments, monkeypatch, capsys):
"""Test that only the specified experiment is printed, and with its child."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'test_double_exp'])
captured = capsys.readouterr().out
assert captured == """\
test_double_exp-v1┐
└test_double_exp_child-v1
"""
def test_exp_name_child(clean_db, three_experiments, monkeypatch, capsys):
"""Test that only the specified child experiment is printed."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list', '--name', 'test_double_exp_child'])
captured = capsys.readouterr().out
assert captured == " test_double_exp_child-v1\n"
def test_exp_same_name(clean_db, two_experiments_same_name, monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed."""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_single_exp-v1┐
└test_single_exp-v2
"""
def test_exp_family_same_name(clean_db, three_experiments_family_same_name, monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed
even when one of them has a child.
"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
┌test_single_exp-v2
test_single_exp-v1┤
└test_single_exp_child-v1
"""
def test_exp_family_branch_same_name(clean_db, three_experiments_branch_same_name,
monkeypatch, capsys):
"""Test that two experiments with the same name and different versions are correctly printed
even when last one has a child.
"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
orion.core.cli.main(['list'])
captured = capsys.readouterr().out
assert captured == """\
test_single_exp-v1┐
└test_single_exp-v2┐
└test_single_exp_child-v1
"""
| 45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
94f35cf52b9ed17eeeb19e3cea45eb8e5993057c
| 6,441
|
py
|
Python
|
video_test.py
|
SteveSZF/Traffic-Lane-Detection
|
8217808178cdf2d655d02632eb71c543d39f5258
|
[
"MIT"
] | 2
|
2019-10-08T08:52:43.000Z
|
2019-10-08T08:55:37.000Z
|
video_test.py
|
SteveSZF/Traffic-Lane-Detection
|
8217808178cdf2d655d02632eb71c543d39f5258
|
[
"MIT"
] | null | null | null |
video_test.py
|
SteveSZF/Traffic-Lane-Detection
|
8217808178cdf2d655d02632eb71c543d39f5258
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
main()
| 43.816327
| 164
| 0.604409
|
import argparse
import os
import numpy as np
from tqdm import tqdm
from mypath import Path
from dataloaders import make_data_loader
from modeling.sync_batchnorm.replicate import patch_replication_callback
from modeling.erfnet_road import *
from utils.loss import SegmentationLosses
from utils.calculate_weights import calculate_weigths_labels
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils.LossWithUncertainty import LossWithUncertainty
from dataloaders.utils import decode_segmap
class Test(object):
def __init__(self, args):
self.args = args
# Define Dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True}
self.test_loader, self.nclass_pixel, self.nclass_scene = make_data_loader(args, **kwargs)
# Define network
if args.checkname == 'erfnet':
            model = ERFNet(num_classes_pixel=self.nclass_pixel, num_classes_scene=self.nclass_scene, multitask=self.args.multitask)
elif args.checkname == 'resnet':
model = DeepLab(num_classes=self.nclass_pixel, backbone = 'resnet', output_stride=16)
elif args.checkname == 'mobilenet':
model = DeepLab(num_classes=self.nclass_pixel, backbone = 'mobilenet', output_stride=16)
self.model = model
# Using cuda
if args.cuda:
self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
self.best_pred = 0.0
if args.resume is not None:
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'" .format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
def write_test(self):
self.model.eval()
#self.evaluator.reset()
tbar = tqdm(self.test_loader, desc='\r')
saved_index = 0
for i, sample in enumerate(tbar):
image = sample['image']
if self.args.cuda:
image = image.cuda()
with torch.no_grad():
output, output_road = self.model(image)
                if output_road is not None:
pass
label_masks = torch.max(output, 1)[1].detach().cpu().numpy()
image = image.detach().cpu().numpy().transpose(0, 2, 3, 1)
#image = image.detach().cpu().numpy()
#targets = target.detach().cpu().numpy()
#print(targets.shape)
for idx, label_mask in enumerate(label_masks):
decode_segmap(label_mask, dataset=self.args.dataset, saved_path = self.args.saved_path + "/%(idx)05d.png" % {'idx':saved_index}, image = image[idx])
saved_index += 1
def main():
parser = argparse.ArgumentParser(description="PyTorch Lane Detection")
parser.add_argument('--dataset', type=str, default='bdd100k',
choices=['bdd100k'],
help='dataset name (default: bdd100k)')
parser.add_argument('--workers', type=int, default=4,
metavar='N', help='dataloader threads')
parser.add_argument('--base-w', type=int, default=960,
help='base image width')
parser.add_argument('--base-h', type=int, default=640,
help='base image height')
parser.add_argument('--crop-w', type=int, default=640,
help='crop image width')
parser.add_argument('--crop-h', type=int, default=480,
help='crop image height')
parser.add_argument('--output-w', type=int, default=640,
help='output image width')
parser.add_argument('--output-h', type=int, default=480,
help='output image height')
parser.add_argument('--multitask', type=bool, default=False,
help='whether to do multi-task (default: auto)')
parser.add_argument('--batch-size', type=int, default=None,
metavar='N', help='input batch size for \
test (default: auto)')
parser.add_argument('--no-cuda', action='store_true', default=
False, help='disables CUDA training')
parser.add_argument('--gpu-ids', type=str, default='0',
help='use which gpu to train, must be a \
comma-separated list of integers only (default=0)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# checking point
parser.add_argument('--checkname', type=str, default=None,
help='set the checkpoint name')
parser.add_argument('--resume', type=str, default=None,
help='put the path to resuming file if needed')
parser.add_argument('--write-val', action='store_true', default=False,
help='store val rgb results')
parser.add_argument('--video', type=str, default=None,
help='video segmentation only for write-val')
parser.add_argument('--saved-path', type=str, default=None,
help='path for saving segmentation result')
args = parser.parse_args()
if not os.path.exists(args.saved_path):
os.makedirs(args.saved_path)
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
try:
args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]
except ValueError:
raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')
if args.batch_size is None:
args.batch_size = 4 * len(args.gpu_ids)
print(args)
torch.manual_seed(args.seed)
tester = Test(args)
tester.write_test()
if __name__ == "__main__":
main()
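# A hypothetical invocation sketch (the paths and checkpoint name below are illustrative, not taken
# from the repository); in practice --checkname, --resume and --saved-path need to be supplied on
# top of the defaults defined above:
#
#   python video_test.py --dataset bdd100k --checkname erfnet \
#       --resume ./run/bdd100k/erfnet/model_best.pth.tar \
#       --saved-path ./results --gpu-ids 0 --batch-size 4
#
# write_test() then saves one decoded segmentation map per frame as ./results/00000.png,
# ./results/00001.png, and so on.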
| 0
| 0
| 0
| 2,702
| 0
| 3,052
| 0
| 250
| 399
|
b72f0aa5b11153c3e11b4251b59096cbdc84677d
| 128
|
py
|
Python
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_0/_pkg0_1_0_0_0/_mod0_1_0_0_0_3.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_0/_pkg0_1_0_0_0/_mod0_1_0_0_0_3.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_1/_pkg0_1_0/_pkg0_1_0_0/_pkg0_1_0_0_0/_mod0_1_0_0_0_3.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
name0_1_0_0_0_3_0 = None
name0_1_0_0_0_3_1 = None
name0_1_0_0_0_3_2 = None
name0_1_0_0_0_3_3 = None
name0_1_0_0_0_3_4 = None
| 14.222222
| 24
| 0.820313
|
name0_1_0_0_0_3_0 = None
name0_1_0_0_0_3_1 = None
name0_1_0_0_0_3_2 = None
name0_1_0_0_0_3_3 = None
name0_1_0_0_0_3_4 = None
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5bc3308252b8c656c9f2d85675cb4f58fd8d48c6
| 1,440
|
py
|
Python
|
covertCSVtoData.py
|
kobe41999/ASUS_ECG
|
0e20ccc92ade8130fe4a8ace3c6ef2e910631376
|
[
"MIT"
] | null | null | null |
covertCSVtoData.py
|
kobe41999/ASUS_ECG
|
0e20ccc92ade8130fe4a8ace3c6ef2e910631376
|
[
"MIT"
] | null | null | null |
covertCSVtoData.py
|
kobe41999/ASUS_ECG
|
0e20ccc92ade8130fe4a8ace3c6ef2e910631376
|
[
"MIT"
] | null | null | null |
import csv
import pandas as pd
from sklearn import preprocessing
import numpy as np
if __name__ == '__main__':
df = pd.read_csv('./JsonToCSV/data0126.csv')
ecgList = []
recordLen = 10000
for i in range(len(df.ECG)):
ecgList.append(changeToList(df.ECG[i].split(" ")))
for j in range(len(ecgList)):
if recordLen > len(ecgList[j]):
recordLen = len(ecgList[j])
numOfRow = []
for k in range(recordLen - 1):
numOfRow.append(k)
with open('try0126.csv', 'w', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(numOfRow)
for j in range(len(ecgList)):
            # Standardization
            # Min_Max_Scaler = preprocessing.MinMaxScaler(feature_range=(-5, 5))  # set the scaling range bounds
            # MinMax_Data = Min_Max_Scaler.fit_transform(ecgList[j])  # Data is the raw input
# # npa = np.asarray(ecgList[j], dtype=np.float32)
# # norm = np.linalg.norm(npa)
# # normal_array = npa / norm
X = preprocessing.scale(ecgList[j])
final = np.round(X, 4)
writer.writerow(final[0:(recordLen - 1)])
| 27.692308
| 94
| 0.584722
|
import csv
import config as C
import pandas as pd
from sklearn import preprocessing
import numpy as np
def changeToList(data):
dataList = []
first = data[0].replace("['", "")
dataList.append(first)
for i in range(len(data) - 3):
dataList.append(data[i + 1])
last = data[len(data) - 1].replace("']", "")
dataList.append(last)
return dataList
if __name__ == '__main__':
df = pd.read_csv('./JsonToCSV/data0126.csv')
ecgList = []
recordLen = 10000
for i in range(len(df.ECG)):
ecgList.append(changeToList(df.ECG[i].split(" ")))
for j in range(len(ecgList)):
if recordLen > len(ecgList[j]):
recordLen = len(ecgList[j])
numOfRow = []
for k in range(recordLen - 1):
numOfRow.append(k)
with open('try0126.csv', 'w', newline='') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(numOfRow)
for j in range(len(ecgList)):
            # Standardization
            # Min_Max_Scaler = preprocessing.MinMaxScaler(feature_range=(-5, 5))  # set the scaling range bounds
            # MinMax_Data = Min_Max_Scaler.fit_transform(ecgList[j])  # Data is the raw input
# # npa = np.asarray(ecgList[j], dtype=np.float32)
# # norm = np.linalg.norm(npa)
# # normal_array = npa / norm
X = preprocessing.scale(ecgList[j])
final = np.round(X, 4)
writer.writerow(final[0:(recordLen - 1)])
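# A small standalone illustration (not part of the original script) of what the standardization
# step above does: sklearn's preprocessing.scale shifts each record to zero mean and unit variance,
# which is why the MinMaxScaler variant is left commented out.
#
#   >>> from sklearn import preprocessing
#   >>> import numpy as np
#   >>> np.round(preprocessing.scale([1.0, 2.0, 3.0, 4.0]), 4)
#   array([-1.3416, -0.4472,  0.4472,  1.3416])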
| 60
| 0
| 0
| 0
| 0
| 255
| 0
| -3
| 45
|
0f98aaa91b5b977fd6d211f7d9569c79ce941321
| 597
|
py
|
Python
|
Challenges/Quartiles.py
|
adarsh2104/Hacker-Rank-Days-of-Statistics
|
30a1c56dc69ae0a98c09e5075f9b6dd0b747e0f9
|
[
"MIT"
] | 2
|
2021-02-26T14:28:08.000Z
|
2021-02-26T18:51:51.000Z
|
Challenges/Quartiles.py
|
adarsh2104/Hacker-Rank-Days-of-Statistics
|
30a1c56dc69ae0a98c09e5075f9b6dd0b747e0f9
|
[
"MIT"
] | null | null | null |
Challenges/Quartiles.py
|
adarsh2104/Hacker-Rank-Days-of-Statistics
|
30a1c56dc69ae0a98c09e5075f9b6dd0b747e0f9
|
[
"MIT"
] | null | null | null |
# Github : https://github.com/adarsh2104
# HR-Profile: https://www.hackerrank.com/adarsh_2104
# Challenge : https://www.hackerrank.com/challenges/s10-quartiles
# Max Score : 30
n = input()
input_array = sorted([int(x) for x in input().split()])
print(find_median(input_array[:len(input_array)//2]))
print(find_median(input_array))
print(find_median(input_array[len(input_array) // 2 + len(input_array) % 2:]))
| 28.428571
| 78
| 0.658291
|
# Github : https://github.com/adarsh2104
# HR-Profile: https://www.hackerrank.com/adarsh_2104
# Challenge : https://www.hackerrank.com/challenges/s10-quartiles
# Max Score : 30
def find_median(array):
if len(array) % 2 == 1:
return array[len(array) // 2]
else:
return (array[len(array) // 2] + array[len(array) // 2 - 1]) // 2
n = input()
input_array = sorted([int(x) for x in input().split()])
print(find_median(input_array[:len(input_array)//2]))
print(find_median(input_array))
print(find_median(input_array[len(input_array) // 2 + len(input_array) % 2:]))
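# Worked example (illustrative input): for n = 9 and the values "3 7 8 5 12 13 14 21 18",
# the sorted array is [3, 5, 7, 8, 12, 13, 14, 18, 21], so the script prints
#   find_median([3, 5, 7, 8])      -> (5 + 7) // 2   = 6    (Q1, lower half)
#   find_median(whole array)       -> 12                    (Q2, median)
#   find_median([13, 14, 18, 21])  -> (14 + 18) // 2 = 16   (Q3, upper half)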
| 0
| 0
| 0
| 0
| 0
| 152
| 0
| 0
| 23
|
586635bed9aefd8fbc1c66989b9458a4ab61adfe
| 1,945
|
py
|
Python
|
corehq/apps/export/det/base.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/apps/export/det/base.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/apps/export/det/base.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
TITLE_ROW = [
'Source Field',
'Field',
'Map Via',
'Data Source',
'Filter Name',
'Filter Value',
'Table Name',
'Format Via',
]
| 24.935897
| 83
| 0.521337
|
import attr
from couchexport.export import export_raw
from couchexport.models import Format
TITLE_ROW = [
'Source Field',
'Field',
'Map Via',
'Data Source',
'Filter Name',
'Filter Value',
'Table Name',
'Format Via',
]
@attr.s
class DETConfig:
name = attr.ib()
tables = attr.ib(factory=list)
@property
def table_names(self):
return [t.name for t in self.tables]
def get_table(self, name):
filtered_tables = [t for t in self.tables if t.name == name]
assert len(filtered_tables) == 1
return filtered_tables[0]
def export_to_file(self, output_file):
header_sheets = []
data_sheets = []
for table in self.tables:
header_sheets.append((table.name, TITLE_ROW))
data_sheets.append((table.name, list(table.get_sheet_data())))
export_raw(header_sheets, data_sheets, output_file, format=Format.XLS_2007)
@attr.s
class DETTable:
name = attr.ib()
source = attr.ib()
rows = attr.ib(factory=list)
filter_name = attr.ib(default='')
filter_value = attr.ib(default='')
def get_sheet_data(self):
if not self.rows:
return
else:
for i, row in enumerate(self.rows):
if i == 0:
# the first row also contains the source/filter data
yield [
row.source_field,
row.field,
row.map_via,
self.source,
self.filter_name,
self.filter_value,
]
else:
yield [
row.source_field,
row.field,
row.map_via,
]
@attr.s
class DETRow:
source_field = attr.ib()
field = attr.ib()
map_via = attr.ib(default='')
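# A minimal usage sketch (assuming the module can be imported; the table, source and field names
# below are purely illustrative, not taken from a real export configuration):
#
#   table = DETTable(name='cases', source='case', filter_name='type', filter_value='person',
#                    rows=[DETRow(source_field='id', field='case_id'),
#                          DETRow(source_field='name', field='case_name', map_via='str2text')])
#   list(table.get_sheet_data())
#   # -> [['id', 'case_id', '', 'case', 'type', 'person'],
#   #     ['name', 'case_name', 'str2text']]
#
#   config = DETConfig(name='demo-export', tables=[table])
#   config.export_to_file('demo-export.xlsx')  # one sheet per table, each headed by TITLE_ROW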
| 0
| 1,621
| 0
| 0
| 0
| 0
| 0
| 26
| 136
|
87eb59f5bcf3a945fa6fe34538a2552cbcaa1241
| 2,241
|
py
|
Python
|
wayne/trend_generators/visit_trends.py
|
ucl-exoplanets/wayne
|
48fd07588cbbab6f5a32038455e36d7fc6b89625
|
[
"MIT"
] | 7
|
2017-05-30T09:01:50.000Z
|
2019-04-05T05:46:23.000Z
|
wayne/trend_generators/visit_trends.py
|
ucl-exoplanets/wayne
|
48fd07588cbbab6f5a32038455e36d7fc6b89625
|
[
"MIT"
] | 1
|
2018-06-07T17:31:19.000Z
|
2018-06-07T19:38:27.000Z
|
wayne/trend_generators/visit_trends.py
|
ucl-exoplanets/wayne
|
48fd07588cbbab6f5a32038455e36d7fc6b89625
|
[
"MIT"
] | 2
|
2018-04-30T23:16:22.000Z
|
2020-09-30T18:12:47.000Z
|
""" Handles visit long trends (scaling factors) applied to the observation. The
classic cases are the `hook' and long term ramp
"""
import numpy as np
def gen_orbit_start_times_per_exp(time_array, obs_start_index):
"""Generates t0, the time of an orbit for each orbit so it can vectorised
i.e for each element time_array there will be a matching element in t_0 giving the
orbit start time.
"""
obs_index = obs_start_index[:]
obs_index.append(len(time_array))
t_0 = np.zeros(len(time_array))
for i in xrange(len(obs_index) - 1):
t_0[obs_index[i]:obs_index[i + 1]] = time_array[obs_start_index[i]]
return t_0
| 30.283784
| 86
| 0.671129
|
""" Handles visit long trends (scaling factors) applied to the observation. The
classic cases are the `hook' and long term ramp
"""
import abc
import numpy as np
class BaseVisitTrend(object):
""" Visit trends take input the visit planner output and generate a
scaling factor that will be multiplied per exposure.
They must implement the method `_gen_scaling_factors` which outputs
a list of scaling factors, one per exposure
"""
__metaclass__ = abc.ABCMeta
def __init__(self, visit_plan, coeffs=None):
self.visit_plan = visit_plan
self.coeffs = coeffs
self.scale_factors = self._gen_scaling_factors(visit_plan, coeffs)
@abc.abstractmethod
def _gen_scaling_factors(self, visit_plan, coeffs):
pass
def get_scale_factor(self, exp_num):
""" Returns the scale factor for the exposure number `exp_num`."""
return self.scale_factors[exp_num]
class HookAndLongTermRamp(BaseVisitTrend):
def _gen_scaling_factors(self, visit_plan, coeffs):
t = visit_plan['exp_start_times']
t_0 = gen_orbit_start_times_per_exp(t, visit_plan['orbit_start_index'])
ramp = self.ramp_model(t, t_0, *coeffs)
return ramp
@staticmethod
def ramp_model(t, t_0, a1, b1, b2, to):
""" Combined hook and long term ramp model
:param t: time_array
:param t_0: array of orbit start times (per exposure)
:param a1: linear ramp gradient
:param b1: exponential hook coeff1
:param b2: exponential hook coeff2
:return: ramp_model
"""
t = np.array(t) # wipes units if any
ramp = (1 - a1 * (t - to)) * (1 - b1 * np.exp(-b2 * (t - t_0)))
return ramp
def gen_orbit_start_times_per_exp(time_array, obs_start_index):
"""Generates t0, the time of an orbit for each orbit so it can vectorised
i.e for each element time_array there will be a matching element in t_0 giving the
orbit start time.
"""
obs_index = obs_start_index[:]
obs_index.append(len(time_array))
t_0 = np.zeros(len(time_array))
for i in xrange(len(obs_index) - 1):
t_0[obs_index[i]:obs_index[i + 1]] = time_array[obs_start_index[i]]
return t_0
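# A self-contained numeric sketch of the ramp model above (the coefficients are illustrative
# values, not fitted parameters for any instrument):
#
#   import numpy as np
#   t = np.linspace(0.0, 0.2, 5)        # exposure start times (days)
#   t_0 = np.zeros_like(t)              # a single orbit starting at t = 0
#   a1, b1, b2, to = 0.01, 0.002, 200.0, 0.0
#   ramp = (1 - a1 * (t - to)) * (1 - b1 * np.exp(-b2 * (t - t_0)))
#   # ramp holds the per-exposure scaling factors: the exponential "hook" recovers within each
#   # orbit while the linear term gives the slow visit-long drift.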
| 0
| 551
| 0
| 973
| 0
| 0
| 0
| -11
| 69
|
20b20caa6fbb670cc141c57bc10a431f41d617b3
| 14,975
|
py
|
Python
|
ietf/meeting/migrations/0011_ietf92_meetings.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2021-11-20T03:40:40.000Z
|
2021-11-20T03:40:42.000Z
|
ietf/meeting/migrations/0011_ietf92_meetings.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ietf/meeting/migrations/0011_ietf92_meetings.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
| 76.403061
| 152
| 0.482204
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations
def backfill_92_other_meetings(apps, schema_editor):
Meeting = apps.get_model('meeting', 'Meeting')
Schedule = apps.get_model('meeting', 'Schedule')
ScheduledSession = apps.get_model('meeting', 'ScheduledSession')
Room = apps.get_model('meeting', 'Room')
Session = apps.get_model('meeting', 'Session')
Group = apps.get_model('group', 'Group')
Person = apps.get_model('person', 'Person')
ietf92 = Meeting.objects.filter(number=92).first()
if not ietf92:
print "IETF92 not found, no data changed"
else:
# Clear out one orphaned ill-configured Session object
qs = Session.objects.filter(meeting__number=92,name__icontains='beverage break').exclude(type_id='break')
if qs.count()==1:
qs.delete()
agenda92 = Schedule.objects.get(meeting=ietf92,pk=ietf92.agenda.pk)
map_existing = {
'Regency Ballroom': 'Lounge',
'Garden Terrace Level': 'Meet and Greet',
'Royal': 'Breakout 1',
'Continental': 'Breakout 2',
'Far East': 'Breakout 3',
'Oak ': 'Breakout 4',
'Parisian': 'Breakout 5',
'Venetian': 'Breakout 6',
'Gold': 'Breakout 7',
'International': 'Breakout 8',
'Brasserie': 'Terminal Room',
'State': 'Office #3 (Secretariat Office)',
'French': 'Meeting Room #2 (IESG Meeting Room)',
}
for name,functional_name in map_existing.items():
Room.objects.filter(meeting__number=92,name=name).update(functional_name=functional_name)
regency = Room.objects.get(meeting=ietf92,name='Regency Ballroom')
garden = Room.objects.get(meeting=ietf92,name='Garden Terrace Level')
royal = Room.objects.get(meeting=ietf92,name='Royal')
continental = Room.objects.get(meeting=ietf92,name='Continental')
far_east = Room.objects.get(meeting=ietf92,name='Far East')
oak = Room.objects.get(meeting=ietf92,name='Oak ')
#parisian = Room.objects.get(meeting=ietf92,name='Parisian')
#venetian = Room.objects.get(meeting=ietf92,name='Venetian')
#gold = Room.objects.get(meeting=ietf92,name='Gold')
#international = Room.objects.get(meeting=ietf92,name='International')
brasserie = Room.objects.get(meeting=ietf92,name='Brasserie')
state = Room.objects.get(meeting=ietf92,name='State')
#french = Room.objects.get(meeting=ietf92,name='French')
executive = Room.objects.create(meeting=ietf92,name='Executive',functional_name='Meeting Room #4 (IAOC/IAD)',capacity=20)
regency_foyer = Room.objects.create(meeting=ietf92,name='Regency Foyer',functional_name='Registration',capacity=1200)
florentine = Room.objects.create(meeting=ietf92,name='Florentine',functional_name='Meeting Room #1 (IAB)', capacity=40)
pavilion = Room.objects.create(meeting=ietf92,name='Pavilion',functional_name='Meeting Room #6', capacity=80)
terrace = Room.objects.create(meeting=ietf92,name='Terrace',functional_name='Meeting Room #7', capacity=80)
panorama = Room.objects.create(meeting=ietf92,name='Panorama',functional_name='Companion Reception', capacity=200)
regency.session_types.add('offagenda')
pavilion.session_types.add('offagenda')
pavilion.session_types.add('lead')
garden.session_types.add('lead')
panorama.session_types.add('offagenda')
executive.session_types.add('lead')
executive.session_types.add('offagenda')
regency_foyer.session_types.add('offagenda')
oak.session_types.add('offagenda')
continental.session_types.add('offagenda')
state.session_types.add('offagenda')
florentine.session_types.add('offagenda')
terrace.session_types.add('lead')
terrace.session_types.add('offagenda')
far_east.session_types.add('offagenda')
brasserie.session_types.add('offagenda')
royal.session_types.add('offagenda')
iesg = Group.objects.get(acronym='iesg')
iab = Group.objects.get(acronym='iab')
iaoc = Group.objects.get(acronym='iaoc')
secr = Group.objects.get(acronym='secretariat')
system = Person.objects.get(name='(System)')
for d, h, m, duration, type_id, groups, room, slotname, label in [
( 20, 13, 0, 480, 'offagenda', [secr], brasserie, 'Setup', 'Hackathon: Setup'),
( 20, 8, 0, 540, 'offagenda', [secr], executive, 'Meeting', 'DNS OARC Meeting'),
( 21, 8, 0, 540, 'offagenda', [secr], executive, 'Meeting', 'DNS OARC Meeting'),
( 22, 12, 0, 720, 'offagenda', [secr], brasserie, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 22, 11, 0, 480, 'offagenda', [secr], regency_foyer, 'T-Shirt Distribution', 'T-shirt Distribution'),
( 22, 19, 0, 120, 'offagenda', [secr], state, 'Meeting', 'CJK Generation Panel coordination informal meeting'),
( 22, 19, 0, 120, 'offagenda', [iab], florentine, 'Meeting', 'IAB PrivSec program'),
( 22, 8, 30, 90, 'lead', [iesg], pavilion, 'Breakfast', None),
( 22, 9, 0, 150, 'lead', [iesg], pavilion, 'Meeting', None),
( 22, 11, 30, 150, 'lead', [iab], pavilion, 'Lunch', 'IAB Lunch with the IESG'),
( 22, 11, 30, 150, 'lead', [iesg], pavilion, 'Lunch', 'IESG Lunch with the IAB'),
( 22, 14, 0, 180, 'lead', [iab], pavilion, 'Meeting', None),
( 22, 9, 0, 480, 'offagenda', [secr], terrace, 'Meeting', 'RootOPS'),
( 22, 16, 30, 60, 'offagenda', [secr], panorama, 'Reception', "Companion's Reception"), # Should this appear on agenda?
( 22, 21, 0, 180, 'lead', [secr], garden, 'Gathering', 'AMS/IESG/IAB/IAOC Gathering'),
( 22, 9, 0, 480, 'offagenda', [secr], royal, 'ICNRG', 'ICNRG'),
( 22, 19, 0, 180, 'offagenda', [secr], royal, 'Meeting', 'Huawei'),
( 22, 12, 30, 240, 'offagenda', [secr], continental, 'Meeting', 'Verisign ROA Workshop'),
( 22, 15, 15, 165, 'offagenda', [secr], far_east, 'Meeting', 'RSSAC'),
( 22, 9, 0, 150, 'offagenda', [secr], oak, 'Meeting', 'Ericsson'),
( 23, 0, 0, 1440, 'offagenda', [secr], brasserie, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 23, 8, 0, 600, 'offagenda', [secr], regency_foyer, 'T-Shirt Distribution', 'T-shirt Distribution'),
( 23, 0, 0, 1440, 'offagenda', [secr], regency, 'Lounge', 'Lounge'),
( 23, 11, 30, 180, 'offagenda', [secr], executive, 'Lunch', 'ICANN Lunch'),
( 23, 7, 0, 120, 'lead', [iesg], pavilion, 'Breakfast', 'IESG Breakfast with the IAB'),
( 23, 7, 0, 120, 'lead', [iab], pavilion, 'Breakfast', 'IAB Breakfast with the IESG'),
( 23, 11, 30, 90, 'offagenda', [secr], pavilion, 'Meeting', 'OPS Directorate Meeting'),
( 23, 19, 0, 120, 'offagenda', [secr], pavilion, 'Meeting', 'ACE'),
( 23, 7, 30, 90, 'offagenda', [secr], terrace, 'Meeting', 'NRO ECG'),
( 23, 11, 30, 90, 'offagenda', [secr], terrace, 'Meeting', 'IETF/3GPP Meeting'),
( 23, 19, 0, 120, 'offagenda', [secr], terrace, 'Meeting', 'I2NSF'),
( 23, 18, 50, 60, 'offagenda', [secr], royal, 'Meeting', 'Captive Portal Bar BOF'),
( 24, 0, 0, 1440, 'offagenda', [secr], brasserie, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 24, 8, 0, 600, 'offagenda', [secr], regency_foyer, 'T-Shirt Distribution', 'T-shirt Distribution'),
( 24, 0, 0, 1440, 'offagenda', [secr], regency, 'Lounge', 'Lounge'),
( 24, 11, 30, 90, 'offagenda', [secr], state, 'Meeting', 'HIAPS'),
( 24, 16, 30, 120, 'offagenda', [secr], state, 'Meeting', 'PDF Draft Review'),
( 24, 7, 0, 120, 'lead', [iesg], pavilion, 'Breakfast', None),
( 24, 11, 30, 90, 'offagenda', [secr], pavilion, 'Meeting', 'SECdir Meeting'),
( 24, 7, 0, 120, 'lead', [iab], terrace, 'Breakfast', None),
( 24, 9, 0, 120, 'offagenda', [secr], terrace, 'Meeting', 'ICNN DRZK Design Team'),
( 24, 11, 30, 90, 'offagenda', [secr], terrace, 'Lunch', 'RSAG/ISEB Lunch'),
( 24, 13, 0, 120, 'offagenda', [secr], terrace, 'Meeting', 'SACM'),
( 24, 15, 0, 90, 'offagenda', [secr], terrace, 'Meeting', 'RSOC Meeting'),
( 24, 17, 30, 60, 'offagenda', [secr], terrace, 'Meeting', 'SACM'),
( 24, 11, 30, 90, 'offagenda', [secr], royal, 'Meeting', 'IoT Directorate'),
( 25, 0, 0, 1440, 'offagenda', [secr], brasserie, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 25, 8, 0, 600, 'offagenda', [secr], regency_foyer, 'T-Shirt Distribution', 'T-shirt Distribution'),
( 25, 0, 0, 1440, 'offagenda', [secr], regency, 'Lounge', 'Lounge'),
( 25, 8, 0, 60, 'offagenda', [secr], state, 'Meeting', 'SFC Control Plane Offline Discussion'),
( 25, 19, 0, 240, 'offagenda', [secr], state, 'Meeting', 'WWG'),
( 25, 8, 0, 60, 'offagenda', [secr], florentine, 'Meeting', 'IAB Name Resolution'),
( 25, 6, 45, 135, 'lead', [iaoc], executive, 'Breakfast', None),
( 25, 11, 30, 90, 'offagenda', [secr], pavilion, 'Meeting', 'RMCAT'),
( 25, 19, 0, 120, 'offagenda', [secr], pavilion, 'Meeting', 'I2NSF'),
( 25, 8, 0, 60, 'offagenda', [secr], terrace, 'Meeting', 'IETF/IEEE 802 Coordination'),
( 25, 11, 30, 90, 'offagenda', [secr], terrace, 'Lunch', 'RFC Editor Lunch'),
( 25, 19, 30, 120, 'offagenda', [secr], terrace, 'Dinner', 'SSAC Dinner'),
( 26, 0, 0, 1440, 'offagenda', [secr], brasserie, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 26, 8, 0, 600, 'offagenda', [secr], regency_foyer, 'T-Shirt Distribution', 'T-shirt Distribution'),
( 26, 0, 0, 1440, 'offagenda', [secr], regency, 'Lounge', 'Lounge'),
( 26, 7, 30, 90, 'offagenda', [secr], state, 'Breakfast', 'EDU Team Breakfast'),
( 26, 14, 0, 120, 'offagenda', [secr], state, 'Meeting', 'JJB'),
( 26, 11, 30, 90, 'offagenda', [secr], florentine, 'Meeting', 'IAB Liaison Oversight'),
( 26, 18, 0, 150, 'offagenda', [secr], pavilion, 'Meeting', '6LO Security Discussion'),
( 26, 7, 0, 120, 'lead', [iab], terrace, 'Breakfast', None),
( 26, 17, 40, 60, 'offagenda', [secr], terrace, 'Meeting', 'SACM'),
( 26, 19, 30, 150, 'offagenda', [secr], royal, 'Meeting', 'Lavabit'),
( 27, 0, 0, 900, 'offagenda', [secr], brasserie, 'Terminal Room', 'Terminal Room Open to Attendees'),
( 27, 7, 30, 90, 'offagenda', [secr], executive, 'Meeting', 'Post-Con with Ray'),
( 27, 7, 30, 75, 'offagenda', [secr], state, 'Breakfast', 'Gen-art'),
( 27, 13, 30, 90, 'lead', [iab], pavilion, 'Lunch', 'IAB Lunch with the IESG'),
( 27, 13, 30, 90, 'lead', [iesg], pavilion, 'Lunch', 'IESG Lunch with the IAB'),
]:
ts = ietf92.timeslot_set.create(type_id=type_id, name=slotname,
time=datetime.datetime(2015,3,d,h,m,0),
duration=datetime.timedelta(minutes=duration),
location=room,show_location=(type_id not in ['lead','offagenda']))
for group in groups:
session = ietf92.session_set.create(name= label or "%s %s"%(group.acronym.upper(),slotname),
group=group, attendees=25,
requested=datetime.datetime(2014,11,1,0,0,0),
requested_by=system, status_id='sched',type_id=type_id)
ScheduledSession.objects.create(schedule=agenda92, timeslot=ts, session=session)
class Migration(migrations.Migration):
dependencies = [
('meeting', '0010_auto_20150501_0732'),
('name', '0004_auto_20150318_1140'),
('group', '0004_auto_20150430_0847'),
('person', '0004_auto_20150308_0440'),
]
operations = [
migrations.RunPython(backfill_92_other_meetings)
]
| 0
| 0
| 0
| 320
| 0
| 14,493
| 0
| 5
| 91
|
5d8f36982b929c47137cde1f262689332f36b121
| 26,755
|
py
|
Python
|
pipelines/create_trackhub_for_project.py
|
PRIDE-Toolsuite/trackhub-creator
|
ade2cfafeaad95088664caecacb783b501c170aa
|
[
"Apache-2.0"
] | null | null | null |
pipelines/create_trackhub_for_project.py
|
PRIDE-Toolsuite/trackhub-creator
|
ade2cfafeaad95088664caecacb783b501c170aa
|
[
"Apache-2.0"
] | null | null | null |
pipelines/create_trackhub_for_project.py
|
PRIDE-Toolsuite/trackhub-creator
|
ade2cfafeaad95088664caecacb783b501c170aa
|
[
"Apache-2.0"
] | null | null | null |
#
# Author: Manuel Bernal Llinares
# Project: trackhub-creator
# Timestamp : 07-09-2017 11:24
# ---
# 2017 Manuel Bernal Llinares <[email protected]>
# All rights reserved.
#
"""
This pipeline creates a trackhub for a PRIDE project, based on the information provided via a JSON-formatted file, as
can be seen in this sample:
{
"trackHubName" : "PXD000625",
"trackHubShortLabel" : "<a href=\"http://www.ebi.ac.uk/pride/archive/projects/PXD000625\">PXD000625</a> - Hepatoc...",
"trackHubLongLabel" : "Experimental design For the label-free ...",
"trackHubType" : "PROTEOMICS",
"trackHubEmail" : "[email protected]",
"trackHubInternalAbsolutePath" : "...",
"trackhubCreationReportFilePath": "...",
"trackMaps" : [ {
"trackName" : "PXD000625_10090_Original",
"trackShortLabel" : "<a href=\"http://www.ebi.ac.uk/pride/archive/projects/PXD000625\">PXD000625</a> - Mus musc...",
"trackLongLabel" : "Experimental design For the label-free proteome analysis 17 mice were used composed of 5 ...",
"trackSpecies" : "10090",
"pogoFile" : "..."
} ]
}
"""
# App imports
import ensembl.service
# Globals
__configuration_file = None
__pipeline_arguments = None
__pipeline_director = None
# Pipeline properties access
# Models for dealing with the data file that describes the project
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| 50.196998
| 120
| 0.676808
|
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 07-09-2017 11:24
# ---
# © 2017 Manuel Bernal Llinares <[email protected]>
# All rights reserved.
#
"""
This pipeline creates a trackhub for a PRIDE project, based on the information provided via a JSON-formatted file, as
can be seen in this sample:
{
"trackHubName" : "PXD000625",
"trackHubShortLabel" : "<a href=\"http://www.ebi.ac.uk/pride/archive/projects/PXD000625\">PXD000625</a> - Hepatoc...",
"trackHubLongLabel" : "Experimental design For the label-free ...",
"trackHubType" : "PROTEOMICS",
"trackHubEmail" : "[email protected]",
"trackHubInternalAbsolutePath" : "...",
"trackhubCreationReportFilePath": "...",
"trackMaps" : [ {
"trackName" : "PXD000625_10090_Original",
"trackShortLabel" : "<a href=\"http://www.ebi.ac.uk/pride/archive/projects/PXD000625\">PXD000625</a> - Mus musc...",
"trackLongLabel" : "Experimental design For the label-free proteome analysis 17 mice were used composed of 5 ...",
"trackSpecies" : "10090",
"pogoFile" : "..."
} ]
}
"""
import os
import json
import time
# App imports
import config_manager
import ensembl.service
import ensembl.data_downloader
import trackhub.models as trackhubs
import toolbox.general as general_toolbox
from parallel.models import ParallelRunnerManagerFactory
from parallel.exceptions import NoMoreAliveRunnersException
from pogo.models import PogoRunnerFactory
from pipelines.template_pipeline import TrackhubCreationPogoBasedDirector, DirectorConfigurationManager
# Globals
__configuration_file = None
__pipeline_arguments = None
__pipeline_director = None
# Pipeline properties access
def set_configuration_file(config_file):
global __configuration_file
if __configuration_file is None:
__configuration_file = config_file
return __configuration_file
def set_pipeline_arguments(pipeline_arguments):
global __pipeline_arguments
if __pipeline_arguments is None:
__pipeline_arguments = pipeline_arguments
return __pipeline_arguments
def get_pipeline_director():
global __pipeline_director
if __pipeline_director is None:
__pipeline_director = TrackhubCreatorForProject(config_manager.read_config_from_file(__configuration_file),
__configuration_file,
__pipeline_arguments)
return __pipeline_director
class ConfigManager(DirectorConfigurationManager):
# Command Line Arguments for this pipeline look like
# # This is a JSON formatted file that contains all the relevant information needed for processing the project
# # data and create its trackhub
# project_data_file=project_data.json
# Command Line Argument keys
_CONFIG_COMMAND_LINE_ARGUMENT_KEY_PROJECT_DATA_FILE = 'project_data_file'
def __init__(self, configuration_object, configuration_file, pipeline_arguments):
super(ConfigManager, self).__init__(configuration_object, configuration_file, pipeline_arguments)
# Lazy Process command line arguments
self.__pipeline_arguments_object = None
self.__running_mode = None
def _get_allowed_configuration_keys(self):
return {self._CONFIG_COMMAND_LINE_ARGUMENT_KEY_PROJECT_DATA_FILE}
def get_project_data_file_path(self):
return self._get_value_for_pipeline_argument_key(self._CONFIG_COMMAND_LINE_ARGUMENT_KEY_PROJECT_DATA_FILE)
def get_file_path_trackhub_creation_report(self):
return os.path.join(config_manager.get_app_config_manager().get_session_working_dir(),
"trackhub_creation.report")
def get_project_description_url(self):
# TODO - This could be made configurable in the future
return "docs/index.html"
# Models for dealing with the data file that describes the project
class ProjectTrackDescriptor:
"""
This class models the tracks that are defined in the given project under the "trackMaps" section
"""
# Project Data File keys relative to every TrackMap object
_PROJECT_DATA_FILE_KEY_TRACK_NAME = 'trackName'
_PROJECT_DATA_FILE_KEY_TRACK_SHORT_LABEL = 'trackShortLabel'
_PROJECT_DATA_FILE_KEY_TRACK_LONG_LABEL = 'trackLongLabel'
_PROJECT_DATA_FILE_KEY_TRACK_SPECIES = 'trackSpecies'
_PROJECT_DATA_FILE_KEY_TRACK_POGO_FILE_PATH = 'pogoFile'
def __init__(self, project_track_descriptor_object):
self.__project_track_descriptor_object = project_track_descriptor_object
def _get_value_for_key(self, key, default=""):
if self.__project_track_descriptor_object and (key in self.__project_track_descriptor_object):
return self.__project_track_descriptor_object[key]
return default
def get_track_name(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACK_NAME)
def get_track_short_label(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACK_SHORT_LABEL)
def get_track_long_label(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACK_LONG_LABEL)
def get_track_species(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACK_SPECIES)
def get_track_file_path_pogo(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACK_POGO_FILE_PATH)
class ProjectTrackhubDescriptor:
"""
    This class models the trackhub as described by the given project description data; see the sample project
    description information at the top of this module.
"""
# Project Data File keys
_PROJECT_DATA_FILE_KEY_TRACKHUB_NAME = 'trackHubName'
_PROJECT_DATA_FILE_KEY_TRACKHUB_SHORT_LABEL = 'trackHubShortLabel'
_PROJECT_DATA_FILE_KEY_TRACKHUB_LONG_LABEL = 'trackHubLongLabel'
_PROJECT_DATA_FILE_KEY_TRACKHUB_HUB_TYPE = 'trackHubType'
_PROJECT_DATA_FILE_KEY_TRACKHUB_EMAIL = 'trackHubEmail'
_PROJECT_DATA_FILE_KEY_TRACKHUB_INTERNAL_ABSOLUTE_PATH = 'trackHubInternalAbsolutePath'
_PROJECT_DATA_FILE_KEY_TRACKHUB_REPORT_FILE = 'trackhubCreationReportFilePath'
_PROJECT_DATA_FILE_KEY_TRACKHUB_SECTION_TRACKMAPS = 'trackMaps'
def __init__(self, project_data_file_path):
self.__project_data_file_path = project_data_file_path
self.__project_data_object = None
self.__project_tracks_descriptors = None
def _get_project_data_object(self):
if not self.__project_data_object:
self.__project_data_object = general_toolbox.read_json(self.__project_data_file_path)
return self.__project_data_object
def _get_value_for_key(self, key, default=""):
# TODO - I should start thinking about refactoring this out
if key in self._get_project_data_object():
return self._get_project_data_object()[key]
return default
def get_trackhub_name(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_NAME,
os.path.basename(self.__project_data_file_path))
def get_trackhub_short_label(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_SHORT_LABEL,
"--- NO SHORT LABEL HAS BEEN DEFINED FOR THIS TRACKHUB ---")
def get_trackhub_long_label(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_LONG_LABEL,
"--- NO LONG LABEL HAS BEEN DEFINED FOR THIS TRACKHUB ---")
def get_trackhub_hub_type(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_HUB_TYPE,
"PROTEOMICS")
def get_trackhub_email(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_EMAIL,
"[email protected]")
def get_trackhub_destination_path(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_INTERNAL_ABSOLUTE_PATH)
def get_trackhub_project_defined_tracks(self):
if not self.__project_tracks_descriptors:
# Default value is an empty list of tracks
self.__project_tracks_descriptors = []
data_file_project_track_description_objects = \
self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_SECTION_TRACKMAPS)
if data_file_project_track_description_objects:
self.__project_tracks_descriptors = \
[ProjectTrackDescriptor(data_file_project_track_description_object)
for data_file_project_track_description_object
in data_file_project_track_description_objects]
return self.__project_tracks_descriptors
def get_trackhub_report_file_path(self):
return self._get_value_for_key(self._PROJECT_DATA_FILE_KEY_TRACKHUB_REPORT_FILE)
class PipelineResult:
"""
This class models the pipeline report that will be made available at the end of the pipeline execution
"""
_VALUE_STATUS_SUCCESS = 'SUCCESS'
_VALUE_STATUS_ERROR = 'ERROR'
_VALUE_STATUS_WARNING = 'WARNING'
def __init__(self):
self.status = self._VALUE_STATUS_SUCCESS
self.error_messages = []
self.success_messages = []
self.warning_messages = []
self.hub_descriptor_file_path = ""
# Absolute file path to the folder that represents the running session of the pipeline
self.file_path_pipeline_session = ""
# Absolute file path to the log files that belong to the running session of the pipeline
self.file_path_log_files = []
# Ensembl Release used for creating the trackhub
self.ensembl_release = ""
def set_status_error(self):
self.status = self._VALUE_STATUS_ERROR
def add_error_message(self, error_message):
"""
Adds an error message to the pipeline report. As this report is the final word on how the pipeline performed,
the first error message that is set will set the status of the pipeline as 'failed'
:param error_message: error message
:return: no return value
"""
# This is the report on the final result from running the pipeline
self.set_status_error()
self.error_messages.append(error_message)
def add_success_message(self, success_message):
"""
This will add messages to the pipeline report, but it doesn't change its status.
:param success_message: message to add
:return: no return value
"""
self.success_messages.append(success_message)
def add_warning_message(self, warning_message):
"""
This will add warning messages to the pipeline report, setting the status to 'WARNING' if it wasn't in 'ERROR'
status.
:param warning_message: warning message to add
:return: no return value
"""
self.warning_messages.append(warning_message)
if self.status != self._VALUE_STATUS_ERROR:
self.status = self._VALUE_STATUS_WARNING
def add_log_files(self, log_files):
"""
        Add all the log files produced by the pipeline to its final report
:param log_files: a list of log files to add
:return: no return value
"""
self.file_path_log_files.extend(log_files)
def __str__(self):
return json.dumps({'status': self.status,
'success_messages': self.success_messages,
'warning_messages': self.warning_messages,
'error_messages': self.error_messages,
'hub_descriptor_file_path': self.hub_descriptor_file_path,
'ensembl_release': self.ensembl_release,
'pipeline_session_working_dir': self.file_path_pipeline_session,
'log_files': self.file_path_log_files})
class TrackhubCreatorForProject(TrackhubCreationPogoBasedDirector):
"""
Given a project description file that contains the information specified at the beginning of this module, this
pipeline creates a trackhub for all the project defined tracks
"""
def __init__(self, configuration_object, configuration_file, pipeline_arguments):
runner_id = "{}-{}".format(__name__, time.time())
super().__init__(runner_id)
self.__config_manager = ConfigManager(configuration_object, configuration_file, pipeline_arguments)
self.__project_trackhub_descriptor = None
# Only the valid project tracks will be processed for being included in the trackhub
self.__valid_project_tracks = None
self.__indexed_project_tracks_by_taxonomy_id = None
# Pipeline result object
self.__pipeline_result_object = PipelineResult()
self.__trackhub_descriptor = None
self.__trackhub_exporter = None
def __get_valid_project_tracks(self):
"""
        This helper creates a list of valid trackhub tracks from the given project, i.e. tracks that meet this criterion:
- Its taxonomy ID is available on Ensembl
The list of valid tracks is cached, so it won't change between multiple calls
:return: a list of valid trackhub tracks for the given project
"""
if not self.__valid_project_tracks:
self.__valid_project_tracks = []
ensembl_service = ensembl.service.get_service()
for project_track_descriptor in self.__project_trackhub_descriptor.get_trackhub_project_defined_tracks():
if ensembl_service.get_species_data_service().get_species_entry_for_taxonomy_id(
project_track_descriptor.get_track_species()):
self.__valid_project_tracks.append(project_track_descriptor)
else:
self.__pipeline_result_object \
.add_warning_message("MISSING Taxonomy #{} on Ensembl"
.format(project_track_descriptor.get_track_species()))
return self.__valid_project_tracks
def __get_index_project_track_for_taxonomy_id(self):
"""
Get the project tracks indexed by taxonomy id
:return: map (taxonomy_id, project_track)
"""
if not self.__indexed_project_tracks_by_taxonomy_id:
self.__indexed_project_tracks_by_taxonomy_id = {}
self._get_logger().debug("Indexing #{} valid project tracks".format(len(self.__get_valid_project_tracks())))
for project_track in self.__get_valid_project_tracks():
if project_track.get_track_species() in self.__indexed_project_tracks_by_taxonomy_id:
self._get_logger() \
.error("ERROR DUPLICATED TAXONOMY indexing project track '{}', "
"another project track, '{}' is in the index - SKIP -"
.format(project_track.get_track_name(),
self.__indexed_project_tracks_by_taxonomy_id[
project_track.get_track_species()].get_track_name()))
continue
self.__indexed_project_tracks_by_taxonomy_id[project_track.get_track_species()] = project_track
self._get_logger().debug("Project track '{}' indexed with taxonomy ID '{}'"
.format(project_track.get_track_name(),
project_track.get_track_species()))
return self.__indexed_project_tracks_by_taxonomy_id
def __get_project_track_for_taxonomy_id(self, taxonomy_id):
if taxonomy_id in self.__get_index_project_track_for_taxonomy_id():
return self.__get_index_project_track_for_taxonomy_id()[taxonomy_id]
# I know, we should never return None
return None
def _before(self):
# Set Pipeline Session working directory
self.__pipeline_result_object.file_path_pipeline_session = \
config_manager.get_app_config_manager().get_session_working_dir()
# Add this pipeline session log files to the final report
self.__pipeline_result_object.add_log_files(config_manager.get_app_config_manager().get_session_log_files())
# Add information about the Ensembl Release being used
self.__pipeline_result_object.ensembl_release = str(ensembl.service.get_service().get_release_number())
if self.__config_manager.get_project_data_file_path():
self._get_logger().info("Reading Project Trackhub Descriptor from file at '{}'"
.format(self.__config_manager.get_project_data_file_path()))
self.__project_trackhub_descriptor = \
ProjectTrackhubDescriptor(self.__config_manager.get_project_data_file_path())
# Check that the destination folder exists
if not os.path.isdir(self.__project_trackhub_descriptor.get_trackhub_destination_path()):
error_message = "Trackhub destination path NOT VALID, '{}'" \
.format(self.__project_trackhub_descriptor.get_trackhub_destination_path())
self._get_logger().error(error_message)
self.__pipeline_result_object.add_error_message(error_message)
self.set_pipeline_status_fail()
return False
# Check valid project tracks
if not self.__get_valid_project_tracks():
# It makes no sense to go ahead if this project has no valid tracks
error_message = "Project Trackhub contains NO VALID TRACKS"
self._get_logger().error(error_message)
self.__pipeline_result_object.add_error_message(error_message)
self.set_pipeline_status_fail()
return False
return True
error_message = "INVALID / MISSING Project Trackhub Descriptor file, '{}'" \
.format(self.__config_manager.get_project_data_file_path())
self._get_logger().error(error_message)
self.__pipeline_result_object.add_error_message(error_message)
self.set_pipeline_status_fail()
return False
# Helpers
# Override
def _get_pogo_results_for_input_data(self):
# TODO - Needs to be extended for abstracting from results files from '-mm' parameter use
# This is a map (project_track_descriptor, PogoRunResult)
pogo_run_results = {}
parallel_run_manager = ParallelRunnerManagerFactory.get_parallel_runner_manager()
for project_track in self.__get_valid_project_tracks():
pogo_input_file_path = project_track.get_track_file_path_pogo()
pogo_protein_sequence_file_path = \
self._get_pogo_protein_sequence_file_path_for_taxonomy(project_track.get_track_species())
pogo_gtf_file_path = self._get_pogo_gtf_file_path_for_taxonomy(project_track.get_track_species())
parallel_run_manager.add_runner(PogoRunnerFactory.get_pogo_runner(project_track.get_track_species(),
pogo_input_file_path,
pogo_protein_sequence_file_path,
pogo_gtf_file_path))
# Run PoGo with '-mm 1'
parallel_run_manager.add_runner(PogoRunnerFactory.get_pogo_runner(project_track.get_track_species(),
pogo_input_file_path,
pogo_protein_sequence_file_path,
pogo_gtf_file_path,
'1'))
self._get_logger().debug("Running PoGo for #{} Project Tracks".format(len(self.__get_valid_project_tracks())))
parallel_run_manager.start_runners()
self._get_logger().debug("Processing PoGo runners results")
try:
while True:
pogo_runner = parallel_run_manager.get_next_finished_runner()
if not pogo_runner.is_success():
message = "PoGo FAILED running on file '{}', taxonomy #{} - SKIPPING its results" \
.format(pogo_runner.pogo_input_file, pogo_runner.ncbi_taxonomy_id)
self._get_logger().error(message)
self.__pipeline_result_object.add_warning_message(message)
continue
if pogo_runner.ncbi_taxonomy_id not in pogo_run_results:
pogo_run_results[pogo_runner.ncbi_taxonomy_id] = []
self._get_logger().info("PoGo SUCCESS for taxonomy '{}', input file '{}'"
.format(pogo_runner.ncbi_taxonomy_id,
pogo_runner.pogo_input_file))
# Every taxonomy now has a list of PoGo run results
pogo_run_results[pogo_runner.ncbi_taxonomy_id].append(pogo_runner.get_pogo_run_result())
except NoMoreAliveRunnersException as e:
self._get_logger().debug("All PoGo runners results collected!")
if len(pogo_run_results) == 0:
message = "ALL PoGo files FAILED for this project!!!"
self._get_logger().error(message)
self.__pipeline_result_object.add_error_message(message)
self.set_pipeline_status_fail()
return pogo_run_results
# Override
def _get_trackhub_descriptor(self):
if not self.__trackhub_descriptor:
# TODO - This iteration has no description URL for the project trackhub, we should include it in the project
# TODO - input json file the pipeline gets as a parameter
self.__trackhub_descriptor = \
trackhubs.TrackHub(self.__project_trackhub_descriptor.get_trackhub_name(),
self.__project_trackhub_descriptor.get_trackhub_short_label(),
self.__project_trackhub_descriptor.get_trackhub_long_label(),
self.__project_trackhub_descriptor.get_trackhub_email(),
self.__config_manager.get_project_description_url())
return self.__trackhub_descriptor
# Override
def _get_trackhub_track_for_taxonomy_id(self, taxonomy_id, pogo_run_result):
# Default values
trackhub_track_title = "- NOT PROVIDED -"
trackhub_track_short_label = "- NOT PROVIDED -"
trackhub_track_long_label = "- NOT PROVIDED -"
# Fill in the project trackhub track information if found
project_track = self.__get_project_track_for_taxonomy_id(taxonomy_id)
if project_track:
trackhub_track_title = project_track.get_track_name()
trackhub_track_short_label = project_track.get_track_short_label()
trackhub_track_long_label = project_track.get_track_long_label()
trackhub_track_title = "{} {}"\
.format(trackhub_track_title,
self._get_trackhub_track_name_modifiers_based_on_pogo_run(pogo_run_result))
return trackhubs.BaseTrack(trackhub_track_title,
trackhub_track_short_label,
trackhub_track_long_label)
# Override
def _get_trackhub_exporter(self):
if not self.__trackhub_exporter:
self._get_logger().info("Default trackhub exporter - 'TrackHubLocalFilesystemExporter'")
self.__trackhub_exporter = trackhubs.TrackHubLocalFilesystemExporter()
return self.__trackhub_exporter
# Override
def _prepare_trackhub_destination_folder(self, trackhub_exporter):
self._get_logger().info("Trackhub destination folder ---> '{}'"
.format(self.__project_trackhub_descriptor.get_trackhub_destination_path()))
trackhub_exporter.track_hub_destination_folder = \
self.__project_trackhub_descriptor.get_trackhub_destination_path()
def _run_pipeline(self):
if not self.is_pipeline_status_ok():
error_message = "--- ABORT Pipeline Execution ---, the previous stage failed"
self._get_logger().warning(error_message)
self.__pipeline_result_object.add_error_message(error_message)
return False
# Use default trackhub creation workflow
try:
self._create_trackhub()
except Exception as e:
# I know this is too generic but, for this iteration of the software it is completely fine
self.__pipeline_result_object.add_error_message(str(e))
self.set_pipeline_status_fail()
return False
# Fill in the pipeline report
self.__pipeline_result_object.hub_descriptor_file_path = \
self._get_trackhub_exporter() \
.export_summary \
.track_hub_descriptor_file_path
for message in self._get_trackhub_exporter().export_summary.warnings:
self.__pipeline_result_object.add_warning_message(message)
for message in self._get_trackhub_exporter().export_summary.errors:
self.__pipeline_result_object.add_error_message(message)
if self._get_trackhub_exporter().export_summary.errors:
self.set_pipeline_status_fail()
return True
def _after(self):
"""
Dump to a file the pipeline report
:return: no return value
"""
if not self.is_pipeline_status_ok():
self._get_logger().warning("This Pipeline is finishing with NON-OK status.")
report_files = [self.__config_manager.get_file_path_trackhub_creation_report()]
if self.__project_trackhub_descriptor \
and self.__project_trackhub_descriptor.get_trackhub_report_file_path():
report_files.append(self.__project_trackhub_descriptor.get_trackhub_report_file_path())
for report_file in report_files:
self._get_logger().info("Dumping Pipeline Report to '{}'".format(report_file))
with open(report_file, 'w') as f:
f.write(str(self.__pipeline_result_object))
return True
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| 16
| 0
| 0
| 23,944
| 0
| 717
| 0
| 186
| 425
|
b76df0159986f1a2e79043c17c75ba6fb06ea156
| 8,516
|
py
|
Python
|
tests/extractors/test_protein.py
|
KalinNonchev/kipoiseq
|
38d1134885e401198acd3883286dc55627cf12a6
|
[
"MIT"
] | 2
|
2019-12-16T17:13:04.000Z
|
2021-07-29T12:05:47.000Z
|
tests/extractors/test_protein.py
|
KalinNonchev/kipoiseq
|
38d1134885e401198acd3883286dc55627cf12a6
|
[
"MIT"
] | 117
|
2020-04-22T12:46:45.000Z
|
2021-08-02T04:40:58.000Z
|
tests/extractors/test_protein.py
|
KalinNonchev/kipoiseq
|
38d1134885e401198acd3883286dc55627cf12a6
|
[
"MIT"
] | null | null | null |
from kipoiseq.dataclasses import Interval
gtf_file = 'tests/data/sample_1_protein.gtf'
fasta_file = 'tests/data/demo_dna_seq.fa'
transcript_id = 'enst_test1'
vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'
intervals = [
Interval('22', 580, 596, strand='+', attrs={'tag': 'cds_end_NF'}),
Interval('22', 597, 610, strand='+', attrs={'tag': 'cds_end_NF'})
]
# TODO: write test for with sample_id
# TODO: add for all proteins.pep.all.fa
| 31.308824
| 99
| 0.705965
|
import pytest
from pytest_mock import mocker
import pandas as pd
from kipoiseq.transforms.functional import translate, rc_dna
from kipoiseq.dataclasses import Interval, Variant
from kipoiseq.extractors.protein import cut_transcript_seq, gtf_row2interval, \
CDSFetcher, TranscriptSeqExtractor, ProteinSeqExtractor, \
ProteinVCFSeqExtractor, SingleSeqProteinVCFSeqExtractor, \
SingleVariantProteinVCFSeqExtractor
gtf_file = 'tests/data/sample_1_protein.gtf'
fasta_file = 'tests/data/demo_dna_seq.fa'
transcript_id = 'enst_test1'
vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'
intervals = [
Interval('22', 580, 596, strand='+', attrs={'tag': 'cds_end_NF'}),
Interval('22', 597, 610, strand='+', attrs={'tag': 'cds_end_NF'})
]
def test_cut_seq():
seq = 'ATCGATG'
seq = cut_transcript_seq(seq, 'cds_end_NF')
assert len(seq) == 6
seq = 'ATCGATG'
seq = cut_transcript_seq(seq, 'cds_end_NF,cds_start_NF')
assert len(seq) == 3
seq = 'ATCGATG'
seq = cut_transcript_seq(seq, 'cds_start_NF')
assert len(seq) == 9
seq = 'ATCGATG'
seq = cut_transcript_seq(seq, 'no_tag')
assert len(seq) == 3
def test_gtf_row2interval():
row = pd.Series({
'Chromosome': '22',
'Start': 10,
'End': 20,
'Strand': '-',
'tag': 'cds_end_NF'
})
expected_interval = Interval(chrom='22', start=10,
end=20, name='', strand='-', attrs={'tag': 'cds_end_NF'})
assert gtf_row2interval(row) == expected_interval
def test_CDSFetcher__read_cds():
cds = CDSFetcher._read_cds(gtf_file, duplicate_attr=True)
assert cds.shape[0] == 7
assert cds.iloc[0].Chromosome == '22'
assert cds.iloc[0].Start == 598
assert cds.iloc[0].End == 3050
assert cds.iloc[3].Start == 3
assert cds.iloc[3].End == 300
@pytest.fixture
def cds_fetcher():
return CDSFetcher(gtf_file)
def test_CDSFetcher__len__(cds_fetcher):
assert len(cds_fetcher) == 3
def test_CDSFetcher_get_cds(cds_fetcher):
intervals = cds_fetcher.get_cds(transcript_id)
    assert intervals[0] == Interval(chrom='22', start=598,
                                    end=3196, name='', strand='+')
# TODO: Improve testcase with adding transcript with 2 cds
@pytest.fixture
def transcript_seq_extractor():
return TranscriptSeqExtractor(gtf_file, fasta_file)
def test_get_protein_seq(transcript_seq_extractor):
transcript_id = 'enst_test2'
seq = transcript_seq_extractor.get_protein_seq(transcript_id)
txt_file = 'tests/data/Output_singleSeq_vcf_enst_test2.txt'
expected_seq = open(txt_file).readline()
assert seq[1:] == expected_seq[1:] # no expected mutation here
def test_TranscriptSeqExtractor_prepare_seq():
seqs = ['ATCGATG']
assert 'ATCGAT' == TranscriptSeqExtractor._prepare_seq(
seqs, '+', 'cds_end_NF')
assert 'CATCGA' == TranscriptSeqExtractor._prepare_seq(
seqs, '-', 'cds_end_NF')
def test_TranscriptSeqExtractor_get_seq(transcript_seq_extractor):
seq = transcript_seq_extractor.get_seq(transcript_id)
assert len(seq) == 3196 - 598
def test_TranscriptSeqExtractor_get_item(transcript_seq_extractor):
assert transcript_seq_extractor[0] == transcript_seq_extractor.get_seq(
transcript_id)
@pytest.fixture
def protein_seq_extractor():
return ProteinSeqExtractor(gtf_file, fasta_file)
def test_ProteinSeqExtractor_prepare_seq(protein_seq_extractor):
seqs = ['ATCGATG']
pro_seq = protein_seq_extractor._prepare_seq(seqs, '+', 'cds_end_NF')
assert pro_seq == 'ID'
pro_seq = protein_seq_extractor._prepare_seq(seqs, '-', 'cds_end_NF')
assert pro_seq == 'HR'
def test_ProteinVCFSeqExtractor__unstrand():
unstrand_intervals = ProteinVCFSeqExtractor._unstrand(intervals)
assert all(i.strand == '.' for i in unstrand_intervals)
# TODO: write a test with sample_id
@pytest.fixture
def protein_vcf_seq(mocker):
extractor = ProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)
extractor.extract_query = mocker.MagicMock(
return_value=iter((['ATC', 'GATG'], ['CATC', 'GAT'])))
return extractor
def test_ProteinVCFSeqExtractor_extract_cds(protein_vcf_seq):
protein_seqs = list(protein_vcf_seq.extract_cds(intervals))
assert protein_seqs[0] == 'ID'
assert protein_seqs[1] == 'HR'
query = list(protein_vcf_seq.extract_query
.call_args[0][0].variant_intervals)
variants = list(query[0][0])
assert len(variants) == 1
assert variants[0].pos == 596
interval = query[0][1]
assert interval.start == 580
variants = list(query[1][0])
assert len(variants) == 1
assert variants[0].pos == 598
interval = query[1][1]
assert interval.start == 597
def test_ProteinVCFSeqExtractor_extract(protein_vcf_seq):
transcript_id = 'enst_test2'
protein_seqs = list(protein_vcf_seq.extract(transcript_id))
assert protein_seqs[0] == 'HR'
assert protein_seqs[1] == 'ID'
@pytest.fixture
def single_seq_protein():
vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'
return SingleSeqProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)
def test_SingleSeqProteinVCFSeqExtractor_extract(single_seq_protein, transcript_seq_extractor):
transcript_id = 'enst_test2'
seq = single_seq_protein.extract(transcript_id)
txt_file = 'tests/data/Output_singleSeq_vcf_enst_test2.txt'
expected_seq = open(txt_file).readline()
assert seq == expected_seq
vcf_file = 'tests/data/singleVar_vcf_enst_test1_diff_type_of_variants.vcf.gz'
transcript_id = 'enst_test1'
single_seq_protein = SingleSeqProteinVCFSeqExtractor(
gtf_file, fasta_file, vcf_file)
seq = single_seq_protein.extract(transcript_id)
ref_seq = transcript_seq_extractor.get_protein_seq(transcript_id)
assert len(seq) == len(ref_seq)
count = diff_between_two_seq(seq, ref_seq)
assert count == 1, 'Expected diff of 1 AA, but it was: '+str(count)
vcf_file = 'tests/data/singleSeq_vcf_enst_test2.vcf.gz'
single_seq_protein = SingleSeqProteinVCFSeqExtractor(
gtf_file, fasta_file, vcf_file)
seq = list(single_seq_protein.extract_all())
assert len(seq) == 0
@pytest.fixture
def single_variant_seq():
vcf_file = 'tests/data/singleVar_vcf_enst_test2.vcf.gz'
return SingleVariantProteinVCFSeqExtractor(gtf_file, fasta_file, vcf_file)
def diff_between_two_seq(seq1, seq2):
count = 0
for i in range(len(seq1)):
if seq1[i] != seq2[i]:
count += 1
return count
def test_SingleVariantProteinVCFSeqExtractor_extract(single_variant_seq, transcript_seq_extractor):
transcript_id = 'enst_test2'
seqs = list(single_variant_seq.extract(transcript_id))
txt_file = 'tests/data/Output_singleVar_vcf_enst_test2.txt'
expected_seq = open(txt_file).read().splitlines()
assert seqs[0] == expected_seq[0]
assert seqs[1] == expected_seq[1]
assert seqs[2] == expected_seq[2]
seqs = list(single_variant_seq.extract_all())
counter = 0
for tr_id, t_id_seqs in seqs:
t_id_seqs = list(t_id_seqs)
counter += len(t_id_seqs)
for i, seq in enumerate(t_id_seqs):
assert seq == expected_seq[i]
assert tr_id == 'enst_test2'
assert counter == 3, 'Number of variants in vcf 3, but # of seq was: ' + \
str(counter)
transcript_id = ['enst_test2', 'enst_test1']
seqs = single_variant_seq.extract_list(transcript_id)
for tr_id, t_id_seqs in seqs:
assert tr_id in ['enst_test2', 'enst_test1'], tr_id
vcf_file = 'tests/data/singleVar_vcf_enst_test1_diff_type_of_variants.vcf.gz'
transcript_id = 'enst_test1'
single_var_protein = SingleVariantProteinVCFSeqExtractor(
gtf_file, fasta_file, vcf_file)
seqs = list(single_var_protein.extract(transcript_id))
ref_seq = transcript_seq_extractor.get_protein_seq(transcript_id)
assert len(seqs) == 1
for seq in seqs:
assert len(seq) == len(ref_seq)
count = diff_between_two_seq(seq, ref_seq)
assert count == 1, 'Expected diff of 1 AA, but it was: '+str(count)
vcf_file = 'tests/data/singleSeq_vcf_enst_test2.vcf.gz'
single_var_protein = SingleVariantProteinVCFSeqExtractor(
gtf_file, fasta_file, vcf_file)
length = 0
seqs = list(single_var_protein.extract_all())
for t_id in seqs:
length = len(list(t_id))
assert length == 0
# TODO: add for all proteins.pep.all.fa
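The four assertions in test_cut_seq above fully pin down how the CDS tags are expected to change a length-7 sequence; the sketch below is a toy reimplementation that satisfies exactly those assertions (it is not the kipoiseq implementation, whose details may differ).
def cut_transcript_seq_sketch(seq, tag):
    """Toy version consistent only with the assertions in test_cut_seq above."""
    incomplete_end = 'cds_end_NF' in tag
    incomplete_start = 'cds_start_NF' in tag
    if incomplete_end and not incomplete_start:
        # drop the trailing bases of the incomplete final codon: 7 -> 6
        return seq[:len(seq) - len(seq) % 3]
    if incomplete_start and not incomplete_end:
        # pad the incomplete first codon with 'N': 7 -> 9
        return 'N' * (-len(seq) % 3) + seq
    # both ends incomplete, or an untagged sequence whose length is not a
    # multiple of 3: collapse to a single unknown codon
    return 'NNN'
assert len(cut_transcript_seq_sketch('ATCGATG', 'cds_end_NF')) == 6
assert len(cut_transcript_seq_sketch('ATCGATG', 'cds_end_NF,cds_start_NF')) == 3
assert len(cut_transcript_seq_sketch('ATCGATG', 'cds_start_NF')) == 9
assert len(cut_transcript_seq_sketch('ATCGATG', 'no_tag')) == 3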
| 0
| 743
| 0
| 0
| 0
| 6,400
| 0
| 271
| 616
|
eed5699e06d3cac61b4a945b53a1004046c608f3
| 1,026
|
py
|
Python
|
task3/task3.py
|
ksmirenko/ml-homework
|
a5e558352ffc332ad5e40526dda21f205718a203
|
[
"MIT"
] | 1
|
2020-08-05T08:06:33.000Z
|
2020-08-05T08:06:33.000Z
|
task3/task3.py
|
ksmirenko/ml-homework
|
a5e558352ffc332ad5e40526dda21f205718a203
|
[
"MIT"
] | null | null | null |
task3/task3.py
|
ksmirenko/ml-homework
|
a5e558352ffc332ad5e40526dda21f205718a203
|
[
"MIT"
] | null | null | null |
# Works when launched from terminal
# noinspection PyUnresolvedReferences
input_image_file = 'lena.jpg'
output_image_prefix = 'out_lena'
n_clusters = [2, 3, 5]
max_iterations = 100
launch_count = 3
main()
| 27.72973
| 104
| 0.692008
|
from PIL import Image
import numpy as np
# Works when launched from terminal
# noinspection PyUnresolvedReferences
from k_means import k_means
input_image_file = 'lena.jpg'
output_image_prefix = 'out_lena'
n_clusters = [2, 3, 5]
max_iterations = 100
launch_count = 3
def main():
# Read input image
image = np.array(Image.open(input_image_file))
X = image.reshape((image.shape[0] * image.shape[1], image.shape[2]))
for k in n_clusters:
print(f"{k} clusters")
# 'Compress' image using K-means
centroids, clustered = k_means(X, k=k, max_iterations=max_iterations, launch_count=launch_count)
new_X = np.array([centroids[cluster_index] for cluster_index in clustered])
new_X = new_X.astype(np.uint8)
# Write output image
new_image = new_X.reshape(image.shape)
output_image_name = f"{output_image_prefix}_{k}.jpg"
Image.fromarray(new_image).save(output_image_name)
print(f"Saved {output_image_name}")
print("Done.")
main()
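The script above relies on a local k_means module that is not part of this record; the following sketch shows one plausible implementation of the assumed interface, k_means(X, k, max_iterations, launch_count) -> (centroids, cluster_index_per_row), with random restarts keeping the lowest-inertia run. It is illustrative only, not the author's code.
import numpy as np
def k_means(X, k, max_iterations=100, launch_count=1):
    """Plain NumPy Lloyd's algorithm with several random restarts."""
    best_centroids, best_labels, best_inertia = None, None, np.inf
    for _ in range(launch_count):
        rng = np.random.default_rng()
        centroids = X[rng.choice(len(X), size=k, replace=False)].astype(float)
        for _ in range(max_iterations):
            # assign every row to its nearest centroid
            dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
            labels = dists.argmin(axis=1)
            new_centroids = np.array([
                X[labels == j].mean(axis=0) if np.any(labels == j) else centroids[j]
                for j in range(k)])
            if np.allclose(new_centroids, centroids):
                break
            centroids = new_centroids
        inertia = ((X - centroids[labels]) ** 2).sum()
        if inertia < best_inertia:
            best_centroids, best_labels, best_inertia = centroids, labels, inertia
    return best_centroids, best_labels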
| 0
| 0
| 0
| 0
| 0
| 724
| 0
| 3
| 89
|
4029817dc33967552ae1824d14039c95b823fc6b
| 12,289
|
py
|
Python
|
neural_guided_symbolic_regression/models/mcts.py
|
egonrian/google-research
|
8177adbe9ca0d7e5a9463b54581fe6dd27be0974
|
[
"Apache-2.0"
] | 3
|
2021-01-18T04:46:49.000Z
|
2021-03-05T09:21:40.000Z
|
neural_guided_symbolic_regression/models/mcts.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 7
|
2021-11-10T19:44:38.000Z
|
2022-02-10T06:48:39.000Z
|
neural_guided_symbolic_regression/models/mcts.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 4
|
2021-02-08T10:25:45.000Z
|
2021-04-17T14:46:26.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find expression by Monte Carlo Tree Search guided by neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 37.58104
| 85
| 0.714948
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find expression by Monte Carlo Tree Search guided by neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from neural_guided_symbolic_regression.mcts import policies
from neural_guided_symbolic_regression.mcts import rewards
from neural_guided_symbolic_regression.mcts import states
from neural_guided_symbolic_regression.models import metrics
from neural_guided_symbolic_regression.models import partial_sequence_model_generator
class NeuralProductionRuleAppendPolicy(policies.PolicyBase):
"""Appends a valid production rule on existing list of production rules.
The probabilities of the actions will be determined by the partial sequence
model.
"""
def __init__(self, sess, grammar, max_length, symbolic_properties_dict):
"""Initializer.
Args:
sess: tf.Session, the session contains the trained model to predict next
production rule from input partial sequence. If None, each step will
be selected randomly.
grammar: arithmetic_grammar.Grammar object.
max_length: Integer, the max length of production rule sequence.
symbolic_properties_dict: Dict, the keys are the symbolic properties used
as conditions. Values are the corresponding desired values of the
symbolic properties.
"""
self._sess = sess
self._grammar = grammar
self._max_length = max_length
conditions = {}
if symbolic_properties_dict is not None:
conditions.update({
key: np.array([value], dtype=np.float32)
for key, value in symbolic_properties_dict.iteritems()
})
self._conditions = conditions
def get_new_states_probs(self, state):
"""Gets new state from current state by appending a valid production rule.
Args:
state: A mcts.states.ProductionRulesState object. Contains a list of
nltk.grammar.Production objects in attribute
production_rules_sequence.
Returns:
new_states: A list of next states. Each state is a result from apply an
action in the instance attribute actions to the input state.
action_probs: A float numpy array with shape [num_actions,]. The
probability of each action in the class attribute actions.
Raises:
TypeError: If input state is not states.ProductionRulesState object.
"""
if not isinstance(state, states.ProductionRulesState):
      raise TypeError('Input state should be an instance of '
'states.ProductionRulesState but got %s' % type(state))
production_rules_sequence = state.production_rules_sequence
if len(production_rules_sequence) > self._max_length:
      # Do not allow the length of the production rule sequence to exceed _max_length.
# All nan probabilities will stop the rollout in MCTS.
masked_probabilities = [np.nan] * self._grammar.num_production_rules
else:
masked_probabilities = (
partial_sequence_model_generator.get_masked_probabilities_from_model(
sess=self._sess,
max_length=self._max_length,
partial_sequence=[self._grammar.prod_rule_to_index[str(prod_rule)]
for prod_rule in production_rules_sequence],
next_production_rule_mask=self._grammar.masks[
self._grammar.lhs_to_index[state.stack_peek()]],
conditions=self._conditions))
new_states = []
action_probs = []
for probability, production_rule in zip(
masked_probabilities, self._grammar.prod_rules):
if state.is_valid_to_append(production_rule):
new_state = state.copy()
new_state.append_production_rule(production_rule)
new_states.append(new_state)
action_probs.append(probability)
else:
new_states.append(None)
action_probs.append(np.nan)
action_probs = np.asarray(action_probs)
action_probs /= np.nansum(action_probs)
return new_states, action_probs
class LeadingPowers(rewards.RewardBase):
"""Computes reward for univariate expression only on leading powers.
This reward measures a univariate expression by whether this expression
satisfies the desired leading powers at 0 and infinity.
reward = -abs(leading power difference at 0)
    - abs(leading power difference at infinity)
"""
def __init__(
self,
leading_at_0,
leading_at_inf,
variable_symbol='x',
post_transformer=None,
allow_nonterminal=False,
default_value=None):
"""Initializer.
Args:
leading_at_0: Float, desired leading power at 0.
leading_at_inf: Float, desired leading power at inf.
variable_symbol: String, the symbol of variable in function expression.
      post_transformer: Callable. This function takes one float number and
        outputs a float number as the transformed value of the input. It is
        used to post-transform the reward evaluated on a state. Default None,
        no post-transformation will be applied.
allow_nonterminal: Boolean, if False, ValueError will be raised when
list of symbols to evaluate contains non-terminal symbol and
default_value is None. Default False.
default_value: Float, if allow_nonterminal is False and non-terminal
symbol exists, instead of raising a ValueError, return default_value
as the reward value.
"""
super(LeadingPowers, self).__init__(
post_transformer=post_transformer,
allow_nonterminal=allow_nonterminal,
default_value=default_value)
self._leading_at_0 = leading_at_0
self._leading_at_inf = leading_at_inf
self._variable_symbol = variable_symbol
def get_leading_power_error(self, state):
"""Gets the leading power error.
The leading power error is defined as
abs(leading power difference at 0) + abs(leading power difference at inf).
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float.
"""
true_leading_at_0, true_leading_at_inf = (
metrics.evaluate_leading_powers_at_0_inf(
expression_string=state.get_expression(),
symbol=self._variable_symbol))
return (abs(true_leading_at_0 - self._leading_at_0)
+ abs(true_leading_at_inf - self._leading_at_inf))
def _evaluate(self, state):
"""Evaluates the reward from input state.
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float, the reward of the current state.
"""
leading_power_error = self.get_leading_power_error(state)
if np.isfinite(leading_power_error):
return -float(leading_power_error)
else:
return self._default_value
class NumericalPointsAndLeadingPowers(LeadingPowers):
"""Computes reward for univariate expression with leading powers and values.
  This reward measures a univariate expression in two aspects:
1. The mean square error of numerical values defined by input_values and
output_values.
2. Whether this expression satisfies the desired leading powers at 0 and
infinity.
hard_penalty_default_value decides whether to use soft or hard penalty when
the expression does not match the desired leading powers.
Soft penalty
reward = (
-(root mean square error)
- abs(leading power difference at 0)
- abs(leading power difference at infinity))
Hard penalty
If leading power at 0 and infinity are both correct
reward = -(root mean square error)
Otherwise reward = hard_penalty_default_value
If include_leading_powers is False, the reward is just
-(root mean square error).
"""
def __init__(
self,
input_values,
output_values,
leading_at_0,
leading_at_inf,
hard_penalty_default_value=None,
variable_symbol='x',
include_leading_powers=True,
post_transformer=None,
allow_nonterminal=False,
default_value=None):
"""Initializer.
Args:
input_values: Numpy array with shape [num_input_values]. List of input
values to univariate function.
output_values: Numpy array with shape [num_output_values]. List of output
values from the univariate function.
leading_at_0: Float, desired leading power at 0.
leading_at_inf: Float, desired leading power at inf.
hard_penalty_default_value: Float, the default value for hard penalty.
Default None, the reward will be computed by soft penalty instead of
hard penalty.
variable_symbol: String, the symbol of variable in function expression.
include_leading_powers: Boolean, whether to include leading powers in
reward.
      post_transformer: Callable. This function takes one float number and
        outputs a float number as the transformed value of the input. It is
        used to post-transform the reward evaluated on a state. Default None,
        no post-transformation will be applied.
allow_nonterminal: Boolean, if False, ValueError will be raised when
list of symbols to evaluate contains non-terminal symbol and
default_value is None. Default False.
default_value: Float, if allow_nonterminal is False and non-terminal
symbol exists, instead of raising a ValueError, return default_value
as the reward value.
"""
super(NumericalPointsAndLeadingPowers, self).__init__(
leading_at_0=leading_at_0,
leading_at_inf=leading_at_inf,
variable_symbol=variable_symbol,
post_transformer=post_transformer,
allow_nonterminal=allow_nonterminal,
default_value=default_value)
self._input_values = input_values
self._output_values = output_values
self._include_leading_powers = include_leading_powers
self._hard_penalty_default_value = hard_penalty_default_value
def get_input_values_rmse(self, state):
"""Evaluates root mean square error on input_values.
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float.
"""
expression_output_values = metrics.evaluate_expression(
expression_string=state.get_expression(),
grids=self._input_values,
symbol=self._variable_symbol)
return np.sqrt(
np.mean((expression_output_values - self._output_values) ** 2))
def _evaluate(self, state):
"""Evaluates the reward from input state.
Args:
state: mcts.states.StateBase object. Records all the information of
expression.
Returns:
Float, the reward of the current state.
"""
input_values_rmse = self.get_input_values_rmse(state)
if not self._include_leading_powers:
if np.isfinite(input_values_rmse):
return -input_values_rmse
else:
return self._default_value
    # NOTE(leeley): If computing the leading power fails
    # (timeout or sympy ValueError), or a function in symbolic_properties
    # returns nan (for example, 1 / (x - x)), leading_power_error will not be
    # finite and the default / hard-penalty value is returned below.
leading_power_error = self.get_leading_power_error(state)
if self._hard_penalty_default_value is None:
# Soft penalty.
if np.isfinite(leading_power_error):
return -input_values_rmse - leading_power_error
else:
return self._default_value
else:
# Hard penalty.
if (np.isfinite(leading_power_error)
and np.isclose(leading_power_error, 0)):
return -input_values_rmse
else:
return self._hard_penalty_default_value
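The soft and hard penalty variants documented in NumericalPointsAndLeadingPowers above come down to a few lines of arithmetic; the standalone sketch below restates them outside the class, with made-up numbers, purely to make the two formulas concrete.
import numpy as np
def reward_sketch(rmse, leading_power_error, hard_penalty_default=None, default=-100.0):
    # Soft penalty: subtract the leading-power mismatch from the RMSE term.
    if hard_penalty_default is None:
        if np.isfinite(leading_power_error):
            return -rmse - leading_power_error
        return default
    # Hard penalty: only a perfect leading-power match earns the RMSE reward.
    if np.isfinite(leading_power_error) and np.isclose(leading_power_error, 0):
        return -rmse
    return hard_penalty_default
print(reward_sketch(rmse=0.5, leading_power_error=2.0))                               # -2.5 (soft)
print(reward_sketch(rmse=0.5, leading_power_error=2.0, hard_penalty_default=-10.0))   # -10.0 (hard, mismatch)
print(reward_sketch(rmse=0.5, leading_power_error=0.0, hard_penalty_default=-10.0))   # -0.5 (hard, match)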
| 0
| 0
| 0
| 11,078
| 0
| 0
| 0
| 211
| 203
|
c84726272f5ccce872fa9f251d5064eaed566127
| 1,302
|
py
|
Python
|
Graded/G3/slam/solution/__init__.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Graded/G3/slam/solution/__init__.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
Graded/G3/slam/solution/__init__.py
|
chrstrom/TTK4250
|
f453c3a59597d3fe6cff7d35b790689919798b94
|
[
"Unlicense"
] | null | null | null |
from .pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xed\x00\x00\x00\x00\x00\x00\x18\x3d\x71\xc5\x03\x9e\x68\x9a\xa0\x37\x72\x21\xef\xad\x8a\xf4\x10\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x8c\x82\x42\x16\x77\xe5\x90\x93\xcb\xad\x1f\x2f\x25\x62\x6c\xf5\x02\xd8\xd5\xa2\x5e\x70\x77\xac\xd7\x78\x2f\xbe\x60\x40\x8f\x2b\x57\x02\x4f\xa0\x4f\xb9\x5f\x3f\x67\x56\x7c\x8c\x15\x95\x26\xdf\xaf\x5d\x30\xf2\xbc\x4b\x06\x6d\x66\x77\x1d\xf1\xd6\x67\x18\x5f\xe5\x7f\x4a\x8d\x4e\x82\x97\x42\x19\xfa\xff\x42\xe3\x1b\xe7\xa1\x36\x46\x2b\x63\x0b\x2b\x4a\x53\x6e\x1b\x06\xf1\x8d\xc9\xf5\x16\x5c\xcd\xd0\xc8\xd3\xaf\x08\x86\x5e\x20\xc7\xad\x33\x4a\x8c\x06\x71\x4d\x9a\x1e\xbe\xa7\xe8\x08\x3f\xf1\x6b\x6e\x54\x4e\x6f\x4b\xe3\x3b\x98\x9a\x2a\x3a\x01\xfa\x52\xc3\xf6\x64\x3c\xeb\xa6\xbf\x4c\xb6\x5e\xf4\x59\x40\xd3\xb9\x02\x01\x63\x0f\xa8\x5a\x9f\x60\x26\xc4\xdc\xa6\xb6\xe6\xf8\xac\xea\xaa\x04\xa4\x23\x1a\x50\xb2\x67\x91\xf9\xee\xed\xbc\x35\x18\xff\x1f\x5a\xab\x0b\xbe\x95\xc6\x72\x12\x2d\x31\xf9\x4a\x52\x60\x1f\x42\x0f\x5d\xcc\xf1\x4c\xa0\xed\xc5\x2b\x49\x68\x71\xa4\x0f\x7b\x76\x16\x50\xe6\xdb\x83\xd7\x2f\xc4\x57\xc7\x12\x02\x30\xc8\xef\xe8\x38\xf6', 2)
| 434
| 1,243
| 0.754992
|
from .pytransform import pyarmor_runtime
pyarmor_runtime()
__pyarmor__(__name__, __file__, b'\x50\x59\x41\x52\x4d\x4f\x52\x00\x00\x03\x09\x00\x61\x0d\x0d\x0a\x08\x2d\xa0\x01\x00\x00\x00\x00\x01\x00\x00\x00\x40\x00\x00\x00\xed\x00\x00\x00\x00\x00\x00\x18\x3d\x71\xc5\x03\x9e\x68\x9a\xa0\x37\x72\x21\xef\xad\x8a\xf4\x10\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x8c\x82\x42\x16\x77\xe5\x90\x93\xcb\xad\x1f\x2f\x25\x62\x6c\xf5\x02\xd8\xd5\xa2\x5e\x70\x77\xac\xd7\x78\x2f\xbe\x60\x40\x8f\x2b\x57\x02\x4f\xa0\x4f\xb9\x5f\x3f\x67\x56\x7c\x8c\x15\x95\x26\xdf\xaf\x5d\x30\xf2\xbc\x4b\x06\x6d\x66\x77\x1d\xf1\xd6\x67\x18\x5f\xe5\x7f\x4a\x8d\x4e\x82\x97\x42\x19\xfa\xff\x42\xe3\x1b\xe7\xa1\x36\x46\x2b\x63\x0b\x2b\x4a\x53\x6e\x1b\x06\xf1\x8d\xc9\xf5\x16\x5c\xcd\xd0\xc8\xd3\xaf\x08\x86\x5e\x20\xc7\xad\x33\x4a\x8c\x06\x71\x4d\x9a\x1e\xbe\xa7\xe8\x08\x3f\xf1\x6b\x6e\x54\x4e\x6f\x4b\xe3\x3b\x98\x9a\x2a\x3a\x01\xfa\x52\xc3\xf6\x64\x3c\xeb\xa6\xbf\x4c\xb6\x5e\xf4\x59\x40\xd3\xb9\x02\x01\x63\x0f\xa8\x5a\x9f\x60\x26\xc4\xdc\xa6\xb6\xe6\xf8\xac\xea\xaa\x04\xa4\x23\x1a\x50\xb2\x67\x91\xf9\xee\xed\xbc\x35\x18\xff\x1f\x5a\xab\x0b\xbe\x95\xc6\x72\x12\x2d\x31\xf9\x4a\x52\x60\x1f\x42\x0f\x5d\xcc\xf1\x4c\xa0\xed\xc5\x2b\x49\x68\x71\xa4\x0f\x7b\x76\x16\x50\xe6\xdb\x83\xd7\x2f\xc4\x57\xc7\x12\x02\x30\xc8\xef\xe8\x38\xf6', 2)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1ca24822ceacb59afa74f32fca7fe5d5d075a42c
| 5,532
|
py
|
Python
|
tests/unit/modules/test_mine.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 1
|
2022-02-09T06:40:14.000Z
|
2022-02-09T06:40:14.000Z
|
tests/unit/modules/test_mine.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/modules/test_mine.py
|
xiaowei582648206/saltx
|
1d17b030b973ce5422e0fbe7e17c98c7ca91c49b
|
[
"Apache-2.0"
] | 4
|
2020-11-04T06:28:05.000Z
|
2022-02-09T10:54:49.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Rupesh Tare <[email protected]>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
# Import Salt Libs
import salt.modules.mine as mine
| 44.256
| 99
| 0.357014
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Rupesh Tare <[email protected]>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.mine as mine
@skipIf(NO_MOCK, NO_MOCK_REASON)
class MineTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.mine
'''
def setup_loader_modules(self):
return {mine: {}}
def test_get_docker(self):
'''
Test for Get all mine data for 'docker.ps' and run an
aggregation.
'''
ps_response = {
'localhost': {
'host': {
'interfaces': {
'docker0': {
'hwaddr': '88:99:00:00:99:99',
'inet': [{'address': '172.17.42.1',
'broadcast': None,
'label': 'docker0',
'netmask': '255.255.0.0'}],
'inet6': [{'address': 'ffff::eeee:aaaa:bbbb:8888',
'prefixlen': '64'}],
'up': True},
'eth0': {'hwaddr': '88:99:00:99:99:99',
'inet': [{'address': '192.168.0.1',
'broadcast': '192.168.0.255',
'label': 'eth0',
'netmask': '255.255.255.0'}],
'inet6': [{'address':
'ffff::aaaa:aaaa:bbbb:8888',
'prefixlen': '64'}],
'up': True},
}},
'abcdefhjhi1234567899': { # container Id
                'Ports': [{'IP': '0.0.0.0',  # we bind on every interface
'PrivatePort': 80,
'PublicPort': 80,
'Type': 'tcp'}],
'Image': 'image:latest',
'Info': {'Id': 'abcdefhjhi1234567899'},
},
}}
with patch.object(mine, 'get', return_value=ps_response):
ret = mine.get_docker()
# Sort ifaces since that will change between py2 and py3
ret['image:latest']['ipv4'][80] = sorted(ret['image:latest']['ipv4'][80])
self.assertEqual(ret,
{'image:latest': {
'ipv4': {80: sorted([
'172.17.42.1:80',
'192.168.0.1:80',
])}}})
def test_get_docker_with_container_id(self):
'''
Test for Get all mine data for 'docker.ps' and run an
aggregation.
'''
ps_response = {
'localhost': {
'host': {
'interfaces': {
'docker0': {
'hwaddr': '88:99:00:00:99:99',
'inet': [{'address': '172.17.42.1',
'broadcast': None,
'label': 'docker0',
'netmask': '255.255.0.0'}],
'inet6': [{'address': 'ffff::eeee:aaaa:bbbb:8888',
'prefixlen': '64'}],
'up': True},
'eth0': {'hwaddr': '88:99:00:99:99:99',
'inet': [{'address': '192.168.0.1',
'broadcast': '192.168.0.255',
'label': 'eth0',
'netmask': '255.255.255.0'}],
'inet6': [{'address':
'ffff::aaaa:aaaa:bbbb:8888',
'prefixlen': '64'}],
'up': True},
}},
'abcdefhjhi1234567899': { # container Id
                'Ports': [{'IP': '0.0.0.0',  # we bind on every interface
'PrivatePort': 80,
'PublicPort': 80,
'Type': 'tcp'}],
'Image': 'image:latest',
'Info': {'Id': 'abcdefhjhi1234567899'},
},
}}
with patch.object(mine, 'get', return_value=ps_response):
ret = mine.get_docker(with_container_id=True)
# Sort ifaces since that will change between py2 and py3
ret['image:latest']['ipv4'][80] = sorted(ret['image:latest']['ipv4'][80])
self.assertEqual(ret,
{'image:latest': {
'ipv4': {80: sorted([
('172.17.42.1:80', 'abcdefhjhi1234567899'),
('192.168.0.1:80', 'abcdefhjhi1234567899'),
])}}})
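Both test cases above check the same aggregation rule: a container port published on 0.0.0.0 is expanded into one 'ip:port' entry per host IPv4 address. The snippet below is a minimal standalone illustration of that rule (not salt's mine.get_docker), reusing the addresses from the fixture.
host_ips = ['172.17.42.1', '192.168.0.1']   # docker0 and eth0 from the fixture
port = {'IP': '0.0.0.0', 'PublicPort': 80, 'Type': 'tcp'}
# A 0.0.0.0 binding is reachable on every host interface, so it fans out:
if port['IP'] == '0.0.0.0':
    endpoints = ['{0}:{1}'.format(ip, port['PublicPort']) for ip in host_ips]
else:
    endpoints = ['{0}:{1}'.format(port['IP'], port['PublicPort'])]
assert sorted(endpoints) == ['172.17.42.1:80', '192.168.0.1:80']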
| 0
| 5,100
| 0
| 0
| 0
| 0
| 0
| 115
| 89
|
261d9ad0af2c41868951b0b120d7fd4d4af8e62d
| 13,215
|
py
|
Python
|
metrics/custom_losses.py
|
tbuikr/fastMRI
|
4395380bbcddefe0bcfea76a2790e0d978009dea
|
[
"MIT"
] | 2
|
2019-12-09T04:57:57.000Z
|
2020-02-24T18:04:12.000Z
|
metrics/custom_losses.py
|
tbuikr/fastMRI
|
4395380bbcddefe0bcfea76a2790e0d978009dea
|
[
"MIT"
] | null | null | null |
metrics/custom_losses.py
|
tbuikr/fastMRI
|
4395380bbcddefe0bcfea76a2790e0d978009dea
|
[
"MIT"
] | null | null | null |
#import models.networks as networks
# class CSSIM(nn.Module): # Complementary SSIM
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# return 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
# sigma=self.sigma, reduction=self.reduction)
# class CSSIM(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# input = input.unsqueeze(1)
# target = target.unsqueeze(1)
# ssim_value = ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
# return ssim_value #+ self.l1_weight * l1_loss
## Combination loss for SRRaGAN
# Define GAN loss: [vanilla | lsgan | wgan-gp]
| 37.225352
| 139
| 0.62308
|
import math
import numbers
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from metrics.ssim import ssim
from metrics.tv_loss import TVLoss
#import models.networks as networks
from metrics.my_ssim import ssim_loss
# class CSSIM(nn.Module): # Complementary SSIM
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# return 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size,
# sigma=self.sigma, reduction=self.reduction)
# class CSSIM(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
# def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
# super().__init__()
# self.max_val = default_range
# self.filter_size = filter_size
# self.k1 = k1
# self.k2 = k2
# self.sigma = sigma
# self.reduction = reduction
# def forward(self, input, target, max_val=None):
# max_val = self.max_val if max_val is None else max_val
# input = input.unsqueeze(1)
# target = target.unsqueeze(1)
# ssim_value = ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
# return ssim_value #+ self.l1_weight * l1_loss
class CSSIM(nn.Module): # Complementary SSIM
def __init__(self, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
super().__init__()
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
input = input.unsqueeze(1)
        print(input.max())
target = target.unsqueeze(1)
return 1- ssim_loss(input, target, max_val=max_val, filter_size=self.filter_size, k1=self.k1, k2=self.k2,
sigma=self.sigma, reduction=self.reduction)
class L1CSSIM(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean'):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = F.l1_loss(input, target, reduction=self.reduction)
return cssim + self.l1_weight * l1_loss
class L1CSSIMTV(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
self.tvloss_weight = tvloss_weight
self.p = p
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = F.l1_loss(input, target, reduction=self.reduction)
tv_loss = TVLoss(input, self.tvloss_weight, self.p)
return cssim + self.l1_weight * l1_loss + tv_loss
class C1CSSIMTV(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
self.tvloss_weight = tvloss_weight
self.p = p
self.cham = CharbonnierLoss()
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = self.cham(input, target)
tv_loss = TVLoss(input, self.tvloss_weight, self.p)
return cssim + self.l1_weight * l1_loss + tv_loss
class ECSSIMTV(nn.Module): # Replace this with a system of summing losses in Model Trainer later on.
def __init__(self, l1_weight, default_range=1, filter_size=11, k1=0.01, k2=0.03, sigma=1.5, reduction='mean', tvloss_weight=1e-4, p=2):
super().__init__()
self.l1_weight = l1_weight
self.max_val = default_range
self.filter_size = filter_size
self.k1 = k1
self.k2 = k2
self.sigma = sigma
self.reduction = reduction
self.tvloss_weight = tvloss_weight
self.p = p
self.ElasticLoss = ElasticLoss()
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
l1_loss = self.ElasticLoss(input, target)
tv_loss = TVLoss(input, self.tvloss_weight, self.p)
return cssim + self.l1_weight * l1_loss + tv_loss, cssim, tv_loss
## Combination loss for SRRaGAN
class SRRaGAN(nn.Module):
def __init__(self, elastic_weight = 1):
super().__init__()
self.cri_pix = ElasticLoss().to(self.device) # Pixel Loss
self.cri_fea = ElasticLoss().to(self.device) # Feature Loss
self.netF = networks.define_F(opt, use_bn=False).to(self.device)
def forward(self, input, target, max_val=None):
max_val = self.max_val if max_val is None else max_val
cssim = 1 - ssim(input, target, max_val=max_val, filter_size=self.filter_size, sigma=self.sigma, reduction=self.reduction)
return
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-6):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
b, c, h, w = y.size()
diff = x - y
loss = torch.sum(torch.sqrt(diff * diff + self.eps))
return loss/(c*b*h*w)
def LoG(imgHF):
weight = [
[0, 0, 1, 0, 0],
[0, 1, 2, 1, 0],
[1, 2, -16, 2, 1],
[0, 1, 2, 1, 0],
[0, 0, 1, 0, 0]
]
weight = np.array(weight)
weight_np = np.zeros((1, 1, 5, 5))
weight_np[0, 0, :, :] = weight
weight_np = np.repeat(weight_np, imgHF.shape[1], axis=1)
weight_np = np.repeat(weight_np, imgHF.shape[0], axis=0)
weight = torch.from_numpy(weight_np).type(torch.FloatTensor).to('cuda:0')
return nn.functional.conv2d(imgHF, weight, padding=1)
class GaussianSmoothing(nn.Module):
def __init__(self, channels, kernel_size=15, sigma=3, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - mean) / std) ** 2 / 2)
kernel = kernel / torch.sum(kernel)
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
def forward(self, input):
return self.conv(input, weight=self.weight, groups=self.groups)
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan-gp':
def wgan_loss(input, target):
# target is boolean
return -1 * input.mean() if target else input.mean()
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \
grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
class HFENL1Loss(nn.Module):
def __init__(self):
super(HFENL1Loss, self).__init__()
def forward(self, input, target):
c = input.shape[1]
smoothing = GaussianSmoothing(c, 5, 1)
smoothing = smoothing.to('cuda:0')
input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')
target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')
input_smooth = smoothing(input_smooth)
target_smooth = smoothing(target_smooth)
return torch.abs(LoG(input_smooth-target_smooth)).sum()
class HFENL2Loss(nn.Module):
def __init__(self):
super(HFENL2Loss, self).__init__()
def forward(self, input, target):
c = input.shape[1]
smoothing = GaussianSmoothing(c, 5, 1)
smoothing = smoothing.to('cuda:0')
input_smooth = nn.functional.pad(input, (2, 2, 2, 2), mode='reflect')
target_smooth = nn.functional.pad(target, (2, 2, 2, 2), mode='reflect')
input_smooth = smoothing(input_smooth)
target_smooth = smoothing(target_smooth)
return torch.sum(torch.pow((LoG(input_smooth-target_smooth)), 2))
class ElasticLoss(nn.Module):
def __init__(self, a=0.2): #a=0.5 default
super(ElasticLoss, self).__init__()
self.alpha = torch.FloatTensor([a, 1 - a]).to('cuda:0')
def forward(self, input, target):
if not isinstance(input, tuple):
input = (input,)
for i in range(len(input)):
l2 = nn.functional.mse_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[0])
l1 = nn.functional.l1_loss(input[i].squeeze(), target.squeeze()).mul(self.alpha[1])
loss = l1 + l2
return loss
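As a usage note for the file above: CharbonnierLoss is the one loss here that needs nothing beyond torch itself (the SSIM/TV variants additionally require the metrics package and, as written, a CUDA device). A minimal smoke test, assuming the CharbonnierLoss class defined above is in scope:
import torch
# CharbonnierLoss as defined above: a smooth L1 penalty, averaged per element.
criterion = CharbonnierLoss(eps=1e-6)
pred = torch.rand(2, 1, 8, 8)
target = torch.rand(2, 1, 8, 8)
loss = criterion(pred, target)
print(loss.item())   # a small positive scalar, roughly the mean |pred - target|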
| 0
| 0
| 0
| 10,588
| 0
| 515
| 0
| 37
| 461
|
36e99d037172fee82cbe9e3275f4053561bba8c8
| 1,808
|
py
|
Python
|
tests/integration_tests/data_steward/gcloud/gcs_client_test.py
|
dcarbone/curation
|
68f9ba9466646d73509d424567b64566856fb8e8
|
[
"MIT"
] | 1
|
2019-03-18T18:22:41.000Z
|
2019-03-18T18:22:41.000Z
|
tests/integration_tests/data_steward/gcloud/gcs_client_test.py
|
nishanthpp93/curation
|
ac9f38b2f4580ae806121dd929293159132c7d2a
|
[
"MIT"
] | null | null | null |
tests/integration_tests/data_steward/gcloud/gcs_client_test.py
|
nishanthpp93/curation
|
ac9f38b2f4580ae806121dd929293159132c7d2a
|
[
"MIT"
] | 1
|
2021-09-16T14:25:19.000Z
|
2021-09-16T14:25:19.000Z
|
"""
Test the Google Cloud Storage Client and associated helper functions
"""
# Python stl imports
# Project imports
# Third-party imports
| 29.639344
| 79
| 0.606195
|
"""
Test the Google Cloud Storage Client and associated helper functions
"""
# Python stl imports
import os
import unittest
# Project imports
from gcloud.gcs import StorageClient
# Third-party imports
class GcsClientTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.client = StorageClient()
self.bucket_name: str = os.environ.get('BUCKET_NAME_FAKE')
self.prefix: str = 'prefix'
self.data: bytes = b'bytes'
self.sub_prefixes: tuple = (f'{self.prefix}/a', f'{self.prefix}/b',
f'{self.prefix}/c', f'{self.prefix}/d')
def test_empty_bucket(self):
self.client.empty_bucket(self.bucket_name)
self._stage_bucket()
self.client.empty_bucket(self.bucket_name)
actual = self.client.list_blobs(self.bucket_name)
expected: list = []
self.assertCountEqual(actual, expected)
def test_list_sub_prefixes(self):
self.client.empty_bucket(self.bucket_name)
self._stage_bucket()
items = self.client.list_sub_prefixes(self.bucket_name, self.prefix)
self.assertEqual(len(self.sub_prefixes), len(items))
for item in items:
self.assertIn(item[:-1], self.sub_prefixes)
self.client.empty_bucket(self.bucket_name)
def _stage_bucket(self):
bucket = self.client.bucket(self.bucket_name)
for sub_prefix in self.sub_prefixes:
bucket.blob(f'{sub_prefix}/obj.txt').upload_from_string(self.data)
def tearDown(self):
self.client.empty_bucket(self.bucket_name)
| 0
| 204
| 0
| 1,377
| 0
| 0
| 0
| -3
| 89
|
e52b2e1c1ea59fa64c3206672451d1ca75882b8f
| 297
|
py
|
Python
|
Contacts/urls.py
|
simonescob/Agendadj
|
badd90f3fce0950aa151840f7015c68632c7a203
|
[
"MIT"
] | null | null | null |
Contacts/urls.py
|
simonescob/Agendadj
|
badd90f3fce0950aa151840f7015c68632c7a203
|
[
"MIT"
] | null | null | null |
Contacts/urls.py
|
simonescob/Agendadj
|
badd90f3fce0950aa151840f7015c68632c7a203
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="home"),
path('all', views.index),
path('create', views.create, name="create"),
path('delete/<int:contact_id>', views.delete, name="delete"),
path('edit/<int:contact_id>', views.edit, name="edit"),
]
| 29.7
| 62
| 0.680135
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name="home"),
path('all', views.index),
path('create', views.create, name="create"),
path('delete/<int:contact_id>', views.delete, name="delete"),
path('edit/<int:contact_id>', views.edit, name="edit"),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c30132e2c9779826c7032440043cdd50a86109e8
| 146
|
py
|
Python
|
src/dummy.py
|
ashesh705/master-of-coin
|
8ce253cd1c73005856c896a155ef25804d95d02f
|
[
"MIT"
] | null | null | null |
src/dummy.py
|
ashesh705/master-of-coin
|
8ce253cd1c73005856c896a155ef25804d95d02f
|
[
"MIT"
] | null | null | null |
src/dummy.py
|
ashesh705/master-of-coin
|
8ce253cd1c73005856c896a155ef25804d95d02f
|
[
"MIT"
] | null | null | null |
""" Dummy source code to initialize repo"""
from typing import Literal
def dummy() -> Literal[True]:
"""Dummy function"""
return True
| 14.6
| 43
| 0.657534
|
""" Dummy source code to initialize repo"""
from typing import Literal
def dummy() -> Literal[True]:
"""Dummy function"""
return True
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
202dc2c2ce0978019f7627c8b0b1ddd47cb141d2
| 160
|
py
|
Python
|
module.py
|
ShveczovaKS/8lab2k
|
1c58ec07c8a7fa5ed9807a7751315131f2e361f0
|
[
"MIT"
] | null | null | null |
module.py
|
ShveczovaKS/8lab2k
|
1c58ec07c8a7fa5ed9807a7751315131f2e361f0
|
[
"MIT"
] | null | null | null |
module.py
|
ShveczovaKS/8lab2k
|
1c58ec07c8a7fa5ed9807a7751315131f2e361f0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
| 12.307692
| 24
| 0.4875
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def get_func(tag):
def func(s):
group = tag, s
return group
return func
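get_func above is a small closure factory: it fixes a tag and returns a function that pairs that tag with whatever string it is later given. A one-line usage example:
h1 = get_func('h1')
assert h1('Title') == ('h1', 'Title')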
| 0
| 0
| 0
| 0
| 0
| 82
| 0
| 0
| 25
|
0ba659f60c6cbb8e70fbe2ade949ed0726b3d12f
| 680
|
py
|
Python
|
crabageprediction/venv/Lib/site-packages/fontTools/misc/cython.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 38,667
|
2015-01-01T00:15:34.000Z
|
2022-03-31T22:57:03.000Z
|
crabageprediction/venv/Lib/site-packages/fontTools/misc/cython.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 1,599
|
2016-09-27T09:07:36.000Z
|
2022-03-31T23:04:51.000Z
|
crabageprediction/venv/Lib/site-packages/fontTools/misc/cython.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 11,269
|
2015-01-01T08:41:17.000Z
|
2022-03-31T16:12:52.000Z
|
""" Exports a no-op 'cython' namespace similar to
https://github.com/cython/cython/blob/master/Cython/Shadow.py
This allows optionally compiling @cython decorated functions
(when cython is available at build time), or running the same code
as pure Python, without a runtime dependency on the cython module.
We only define the symbols that we use. E.g. see fontTools.cu2qu
"""
compiled = False
for name in ("double", "complex", "int"):
globals()[name] = None
for name in ("cfunc", "inline"):
globals()[name] = _empty_decorator
locals = lambda **_: _empty_decorator
returns = lambda _: _empty_decorator
| 26.153846
| 64
| 0.739706
|
""" Exports a no-op 'cython' namespace similar to
https://github.com/cython/cython/blob/master/Cython/Shadow.py
This allows optionally compiling @cython decorated functions
(when cython is available at build time), or running the same code
as pure Python, without a runtime dependency on the cython module.
We only define the symbols that we use. E.g. see fontTools.cu2qu
"""
from types import SimpleNamespace
def _empty_decorator(x):
return x
compiled = False
for name in ("double", "complex", "int"):
globals()[name] = None
for name in ("cfunc", "inline"):
globals()[name] = _empty_decorator
locals = lambda **_: _empty_decorator
returns = lambda _: _empty_decorator
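A short usage sketch for the shadow namespace above: code decorated through it runs as ordinary Python, and the annotations only take effect if the file is later compiled with the real Cython. The function below is hypothetical, not from fontTools, and the import line assumes fontTools is installed.
from fontTools.misc import cython
@cython.cfunc
@cython.inline
@cython.locals(x=cython.double, y=cython.double)
@cython.returns(cython.double)
def midpoint(x, y):
    # under the no-op shadow module this is just a plain Python function
    return (x + y) / 2.0
print(midpoint(1.0, 3.0))   # 2.0
print(cython.compiled)      # False when the pure-python shadow is in use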
| 0
| 0
| 0
| 0
| 0
| 16
| 0
| 12
| 46
|
ab9929ed3ea92aae5e1281a1b168bffd87818815
| 2,172
|
py
|
Python
|
appengine/swarming/server/acl.py
|
pombreda/swarming
|
c70f311f3db8f25752c793a0d7b36cf537d95580
|
[
"Apache-2.0"
] | null | null | null |
appengine/swarming/server/acl.py
|
pombreda/swarming
|
c70f311f3db8f25752c793a0d7b36cf537d95580
|
[
"Apache-2.0"
] | null | null | null |
appengine/swarming/server/acl.py
|
pombreda/swarming
|
c70f311f3db8f25752c793a0d7b36cf537d95580
|
[
"Apache-2.0"
] | 1
|
2021-12-06T03:37:36.000Z
|
2021-12-06T03:37:36.000Z
|
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Defines access groups."""
from components import auth
from components import utils
# Names of groups.
# See https://code.google.com/p/swarming/wiki/SwarmingAccessGroups for each
# level.
ADMINS_GROUP = 'swarming-admins'
BOTS_GROUP = 'swarming-bots'
PRIVILEGED_USERS_GROUP = 'swarming-privileged-users'
USERS_GROUP = 'swarming-users'
def is_bot_or_admin():
"""Returns True if current user can execute user-side and bot-side calls."""
return is_bot() or is_admin()
def get_user_type():
"""Returns a string describing the current access control for the user."""
if is_admin():
return 'admin'
if is_privileged_user():
return 'privileged user'
if is_user():
return 'user'
if is_bot():
return 'bot'
return 'unknown user'
def bootstrap_dev_server_acls():
"""Adds localhost to IP whitelist and Swarming groups."""
assert utils.is_local_dev_server()
if auth.is_replica():
return
bots = auth.bootstrap_loopback_ips()
auth.bootstrap_group(BOTS_GROUP, bots, 'Swarming bots')
auth.bootstrap_group(USERS_GROUP, bots, 'Swarming users')
# Add a swarming admin. [email protected] is used in
# server_smoke_test.py
admin = auth.Identity(auth.IDENTITY_USER, '[email protected]')
auth.bootstrap_group(ADMINS_GROUP, [admin], 'Swarming administrators')
# Add an instance admin (for easier manual testing when running dev server).
auth.bootstrap_group(
auth.ADMIN_GROUP,
[auth.Identity(auth.IDENTITY_USER, '[email protected]')],
'Users that can manage groups')
| 26.487805
| 78
| 0.743094
|
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Defines access groups."""
from components import auth
from components import utils
# Names of groups.
# See https://code.google.com/p/swarming/wiki/SwarmingAccessGroups for each
# level.
ADMINS_GROUP = 'swarming-admins'
BOTS_GROUP = 'swarming-bots'
PRIVILEGED_USERS_GROUP = 'swarming-privileged-users'
USERS_GROUP = 'swarming-users'
def is_admin():
return auth.is_group_member(ADMINS_GROUP) or auth.is_admin()
def is_bot():
return auth.is_group_member(BOTS_GROUP) or is_admin()
def is_privileged_user():
return auth.is_group_member(PRIVILEGED_USERS_GROUP) or is_admin()
def is_user():
return auth.is_group_member(USERS_GROUP) or is_privileged_user()
def is_bot_or_user():
return is_bot() or is_user()
def is_bot_or_privileged_user():
return is_bot() or is_privileged_user()
def is_bot_or_admin():
"""Returns True if current user can execute user-side and bot-side calls."""
return is_bot() or is_admin()
def get_user_type():
"""Returns a string describing the current access control for the user."""
if is_admin():
return 'admin'
if is_privileged_user():
return 'privileged user'
if is_user():
return 'user'
if is_bot():
return 'bot'
return 'unknown user'
def bootstrap_dev_server_acls():
"""Adds localhost to IP whitelist and Swarming groups."""
assert utils.is_local_dev_server()
if auth.is_replica():
return
bots = auth.bootstrap_loopback_ips()
auth.bootstrap_group(BOTS_GROUP, bots, 'Swarming bots')
auth.bootstrap_group(USERS_GROUP, bots, 'Swarming users')
# Add a swarming admin. [email protected] is used in
# server_smoke_test.py
admin = auth.Identity(auth.IDENTITY_USER, '[email protected]')
auth.bootstrap_group(ADMINS_GROUP, [admin], 'Swarming administrators')
# Add an instance admin (for easier manual testing when running dev server).
auth.bootstrap_group(
auth.ADMIN_GROUP,
[auth.Identity(auth.IDENTITY_USER, '[email protected]')],
'Users that can manage groups')
| 0
| 0
| 0
| 0
| 0
| 321
| 0
| 0
| 138
|
f75d5caf71f40f210458b85e2a678429b8d45bdb
| 1,715
|
py
|
Python
|
suricata-4.1.4/suricata-update/suricata/update/commands/removesource.py
|
runtest007/dpdk_surcata_4.1.1
|
5abf91f483b418b5d9c2dd410b5c850d6ed95c5f
|
[
"MIT"
] | 77
|
2019-06-17T07:05:07.000Z
|
2022-03-07T03:26:27.000Z
|
suricata-4.1.4/suricata-update/suricata/update/commands/removesource.py
|
clockdad/DPDK_SURICATA-4_1_1
|
974cc9eb54b0b1ab90eff12a95617e3e293b77d3
|
[
"MIT"
] | 22
|
2019-07-18T02:32:10.000Z
|
2022-03-24T03:39:11.000Z
|
suricata-4.1.4/suricata-update/suricata/update/commands/removesource.py
|
clockdad/DPDK_SURICATA-4_1_1
|
974cc9eb54b0b1ab90eff12a95617e3e293b77d3
|
[
"MIT"
] | 49
|
2019-06-18T03:31:56.000Z
|
2022-03-13T05:23:10.000Z
|
# Copyright (C) 2017 Open Information Security Foundation
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import print_function
import logging
logger = logging.getLogger()
| 34.3
| 73
| 0.749271
|
# Copyright (C) 2017 Open Information Security Foundation
#
# You can copy, redistribute or modify this Program under the terms of
# the GNU General Public License version 2 as published by the Free
# Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# version 2 along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
from __future__ import print_function
import os
import logging
from suricata.update import config
from suricata.update import sources
logger = logging.getLogger()
def register(parser):
parser.add_argument("name")
parser.set_defaults(func=remove_source)
def remove_source():
name = config.args().name
enabled_source_filename = sources.get_enabled_source_filename(name)
if os.path.exists(enabled_source_filename):
logger.debug("Deleting file %s.", enabled_source_filename)
os.remove(enabled_source_filename)
logger.info("Source %s removed, previously enabled.", name)
return 0
disabled_source_filename = sources.get_disabled_source_filename(name)
if os.path.exists(disabled_source_filename):
logger.debug("Deleting file %s.", disabled_source_filename)
os.remove(disabled_source_filename)
logger.info("Source %s removed, previously disabled.", name)
return 0
logger.warning("Source %s does not exist.", name)
return 1
| 0
| 0
| 0
| 0
| 0
| 811
| 0
| 15
| 114
|
3ca045d0b4c2187471f92b0e5fdbef4d90523a1c
| 936
|
py
|
Python
|
blogs/admin.py
|
AgnosticMe/phleeb
|
48f85048d2db5d16d243feee2f84a961682a0f4d
|
[
"MIT"
] | null | null | null |
blogs/admin.py
|
AgnosticMe/phleeb
|
48f85048d2db5d16d243feee2f84a961682a0f4d
|
[
"MIT"
] | null | null | null |
blogs/admin.py
|
AgnosticMe/phleeb
|
48f85048d2db5d16d243feee2f84a961682a0f4d
|
[
"MIT"
] | null | null | null |
# Register your models here.
| 22.829268
| 63
| 0.673077
|
from django.contrib import admin
from .models import Blog, Category, Tag, Comment
# Register your models here.
@admin.register(Blog)
class AdminBlog(admin.ModelAdmin):
list_display = ['title', 'publishing_date']
list_display_links = ['title', 'publishing_date']
list_filter = ['publishing_date', 'category', 'tag']
search_fields = ['title', 'content']
class Meta:
model = Blog
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
list_display = ['title']
search_fields = ['title']
class Meta:
model = Category
@admin.register(Tag)
class AdminTag(admin.ModelAdmin):
list_display = ['title']
search_fields = ['title']
class Meta:
model = Tag
@admin.register(Comment)
class AdminComment(admin.ModelAdmin):
search_fields = ['name', 'email', 'content', 'blog__title']
list_filter = ['publishing_date']
class Meta:
model = Comment
| 0
| 730
| 0
| 0
| 0
| 0
| 0
| 38
| 135
|
7eab3c278fcfc42d13e8ea1b8a894c6d62712411
| 7,206
|
py
|
Python
|
zcode/inout/tests/test_inout_core.py
|
lzkelley/zcode
|
55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba
|
[
"MIT"
] | 1
|
2021-02-11T03:24:55.000Z
|
2021-02-11T03:24:55.000Z
|
zcode/inout/tests/test_inout_core.py
|
lzkelley/zcode
|
55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba
|
[
"MIT"
] | null | null | null |
zcode/inout/tests/test_inout_core.py
|
lzkelley/zcode
|
55a63693fe3ad744957d7ce2d74fb4c8e09ea8ba
|
[
"MIT"
] | null | null | null |
"""Test methods for `inout_core.py`.
Can be run with:
$ nosetests zcode/inout/tests/test_inout_core.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import run_module_suite
# Run all methods as if with `nosetests ...`
if __name__ == "__main__":
run_module_suite()
| 37.53125
| 95
| 0.600056
|
"""Test methods for `inout_core.py`.
Can be run with:
$ nosetests zcode/inout/tests/test_inout_core.py
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import warnings
import shutil
from numpy.testing import run_module_suite
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal
class TestInoutCore(object):
@classmethod
def setup_class(cls):
cls.fname_npz = '_test_inout_core_testfile.npz'
cls.fname_npz_subdir = os.path.join('./subdir', cls.fname_npz)
cls.test_dir_0 = '_test_inout_core_dir'
cls.test_file_0 = '_test_filename.txt'
cls._kill_test_files()
@classmethod
def teardown_class(cls):
cls._kill_test_files()
@classmethod
def _kill_test_files(cls):
# Remove created directories
if os.path.exists(cls.test_dir_0):
print("removing '{}'".format(cls.test_dir_0))
shutil.rmtree(cls.test_dir_0)
# Remove created files
if os.path.exists(cls.fname_npz_subdir):
print("removing '{}'".format(cls.fname_npz_subdir))
os.remove(cls.fname_npz_subdir)
tname = os.path.dirname(cls.fname_npz_subdir)
print("Removing '{}'".format(tname))
os.rmdir(tname)
if os.path.exists(cls.fname_npz):
print("removing '{}'".format(cls.fname_npz))
os.remove(cls.fname_npz)
if os.path.exists(cls.test_file_0):
print("removing '{}'".format(cls.test_file_0))
os.remove(cls.test_file_0)
return
def test_dictToNPZ_npzToDict(self):
fname = self.fname_npz
fname_subdir = self.fname_npz_subdir
from zcode.inout.inout_core import npzToDict, dictToNPZ
# Create a test dictionary to save
subdata = {'a': 'a', 'b': 'abc', 'c': np.arange(4)}
data = {'one': np.array(1), 'two': np.array(2, dtype=np.uint64), 'three': subdata}
# Try saving
dictToNPZ(data, fname)
assert_true(os.path.exists(fname))
# Try Loading
loaded = npzToDict(fname)
for key, item in data.items():
print("key = ", key)
print("\t", type(loaded[key]), repr(loaded[key]))
print("\t", type(item), repr(item))
# Look at internal dictionary separately
if type(item) is not dict and type(loaded[key]) is not dict:
assert_true(np.array_equal(loaded[key], item))
assert_equal(type(loaded[key]), type(item))
# Check internal dictionary
subloaded = loaded['three']
print("Subloaded keys = ", subloaded.keys())
for key, item in subdata.items():
print("key = ", key)
print("\t", subloaded[key])
print("\t", item)
assert_true(np.array_equal(subloaded[key], item))
assert_equal(type(subloaded[key]), type(item))
# Make sure subdirectories are created if needed
dictToNPZ(data, fname_subdir)
assert_true(os.path.exists(fname_subdir))
def test_modify_exists_files(self):
fdir = self.test_dir_0
fname = self.test_file_0
num_files = 4
max_files = 20 # This must be between [11, 100]
from zcode.inout.inout_core import modify_exists, modify_filename
# Create test directory if needed, store boolean whether to later remove it.
if not os.path.exists(fdir):
os.makedirs(fdir)
# Create test filename
fname = os.path.join(fdir, fname)
        # Make sure it doesn't already exist
if os.path.exists(fname):
raise RuntimeError("Test filename '{}' already exists.".format(fname))
# Create files that should *not* interfere with 'modify_exists'
# 'modify_exists' should only look for 2-digit appended numbers
fname_distract_1 = modify_filename(fname, append='_6')
fname_distract_2 = modify_filename(fname, append='_123')
print("Interference filenames = '{}', '{}'".format(fname_distract_1, fname_distract_2))
for ff in [fname_distract_1, fname_distract_2]:
open(ff, 'a')
# Test that filenames are appropriately modified
# ----------------------------------------------
print("fname = '{}'".format(fname))
for ii in range(num_files):
new_name = modify_exists(fname, max=max_files)
print(ii, "new_name = ", new_name)
assert_false(os.path.exists(new_name))
# Create file
open(new_name, 'a')
if ii == 0:
intended_name = str(fname)
else:
intended_name = modify_filename(fname, append="_{:02d}".format(ii-1))
print("\tshould be = ", intended_name)
assert_true(os.path.exists(intended_name))
if not os.path.exists(new_name):
raise RuntimeError("New file should have been created '{}'.".format(new_name))
        # Make sure filenames don't exceed the maximum, and that a warning is raised
with warnings.catch_warnings(record=True) as ww:
assert_equal(modify_exists(fname, max=num_files-1), None)
assert_true(len(ww) > 0)
def test_modify_exists_dirs(self):
fdir = self.test_dir_0
num_files = 4
max_files = 20 # This must be between [11, 100]
from zcode.inout.inout_core import modify_exists, modify_filename
# Make sure directory doesn't initially exist
if os.path.exists(fdir) and os.path.isdir(fdir):
shutil.rmtree(fdir)
'''
# Create files that should *not* interfere with 'modify_exists'
# 'modify_exists' should only look for 2-digit appended numbers
fname_distract_1 = modify_filename(fname, append='_6')
fname_distract_2 = modify_filename(fname, append='_123')
print("Interference filenames = '{}', '{}'".format(fname_distract_1, fname_distract_2))
for ff in [fname_distract_1, fname_distract_2]:
open(ff, 'a')
'''
# Test that filenames are appropriately modified
# ----------------------------------------------
print("fname = '{}'".format(fdir))
created = []
for ii in range(num_files):
new_name = modify_exists(fdir, max=max_files)
print(ii, "new_name = ", new_name)
assert_false(os.path.exists(new_name))
# Create directory
os.makedirs(new_name)
created.append(new_name)
if ii == 0:
intended_name = str(fdir)
else:
intended_name = modify_filename(fdir, append="_{:02d}".format(ii-1))
print("\tshould be = ", intended_name)
assert_true(os.path.exists(intended_name))
if not os.path.exists(new_name):
raise RuntimeError("New file should have been created '{}'.".format(new_name))
# Cleanup
for fdir in created:
shutil.rmtree(fdir)
return
# Run all methods as if with `nosetests ...`
if __name__ == "__main__":
run_module_suite()
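# A minimal save/load sketch for the helpers exercised by the tests above, assuming
# the `zcode` package is importable; the file name and payload below are illustrative only.
import numpy as np
from zcode.inout.inout_core import dictToNPZ, npzToDict
demo_data = {"x": np.arange(3), "meta": {"label": "demo"}}
dictToNPZ(demo_data, "_demo_inout_core.npz")
print(npzToDict("_demo_inout_core.npz")["meta"])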
| 0
| 1,140
| 0
| 5,583
| 0
| 0
| 0
| 12
| 134
|
fb8a1c89bff42274aadcbbd4e1333face551763d
| 46
|
py
|
Python
|
Lecture2/name.py
|
EusebioSimango/CS50s-Web-Programming-With-Python-And-Javascript
|
80df8834c1db8cc28b72d5393ff9aa340c069b57
|
[
"MIT"
] | null | null | null |
Lecture2/name.py
|
EusebioSimango/CS50s-Web-Programming-With-Python-And-Javascript
|
80df8834c1db8cc28b72d5393ff9aa340c069b57
|
[
"MIT"
] | null | null | null |
Lecture2/name.py
|
EusebioSimango/CS50s-Web-Programming-With-Python-And-Javascript
|
80df8834c1db8cc28b72d5393ff9aa340c069b57
|
[
"MIT"
] | null | null | null |
name = input("Mame: ")
print(f"Hello, {name}")
| 23
| 23
| 0.608696
|
name = input("Mame: ")
print(f"Hello, {name}")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f510ea13d343357dd5eae6dc2035a2c37e918c43
| 2,733
|
py
|
Python
|
tasksapi/models/validators.py
|
mwiens91/saltant
|
9e72175a896f5859ada304ad3ae4d84dfc3834db
|
[
"MIT"
] | 3
|
2018-12-08T01:18:29.000Z
|
2018-12-14T23:18:42.000Z
|
tasksapi/models/validators.py
|
saltant-org/saltant
|
db498a1186fc74221f8214ad1819dd03bf4b08ac
|
[
"MIT"
] | 3
|
2019-05-23T07:43:13.000Z
|
2021-06-10T20:46:53.000Z
|
tasksapi/models/validators.py
|
saltant-org/saltant
|
db498a1186fc74221f8214ad1819dd03bf4b08ac
|
[
"MIT"
] | 2
|
2019-03-13T22:31:09.000Z
|
2019-05-03T00:18:30.000Z
|
"""Contains validators for task models."""
def task_instance_args_are_valid(instance, fill_missing_args=False):
"""Determines whether a task instance's arguments are valid.
    The arguments are valid if the instance's arguments include all of
    its task type's required arguments (but not necessarily the
    arguments for which a default value exists).
    Args:
instance: A task instance instance. (Yikes!)
fill_missing_args: A boolean determining whether to fill in any
missing arguments in the instance with default values.
Returns:
A tuple containing a boolean and a string, where the boolean
signals whether the arguments are valid and the string explains
why, in the case that the boolean is False (otherwise it's an
empty string).
"""
# Validate an instance's args against its required args.
task_type_required_args = instance.task_type.required_arguments
task_type_default_vals = (
instance.task_type.required_arguments_default_values
)
instance_arg_keys = instance.arguments.keys()
for required_arg in task_type_required_args:
# Check if the required argument is provided
if required_arg not in instance_arg_keys:
# Required argument not provided. Check if default argument
# value exists.
if required_arg not in task_type_default_vals:
# No default exists
return (
False,
"required argument '%s' not provided!" % required_arg,
)
# Fill in the default value if we're told to
if fill_missing_args:
instance.arguments[required_arg] = task_type_default_vals[
required_arg
]
# Valid
return (True, "")
def task_type_args_are_valid(instance):
"""Determines whether a task type's argument fields are valid.
The argument fields are valid if the argument keys in the
required_arguments_default_values field are a subset of its required
arguments.
Arg:
instance: A task type instance.
Returns:
A tuple containing a boolean and a string, where the boolean
signals whether the arguments are valid and the string explains
why, in the case that the boolean is False (otherwise it's an
empty string).
"""
# Ensure that the default arguments form a subset of the required
# arguments
if not set(instance.required_arguments_default_values.keys()).issubset(
set(instance.required_arguments)
):
return (False, "default arguments not a subset of required arguments")
# Valid
return (True, "")
| 36.932432
| 78
| 0.668496
|
"""Contains validators for task models."""
def task_instance_args_are_valid(instance, fill_missing_args=False):
"""Determines whether a task instance's arguments are valid.
    The arguments are valid if the instance's arguments include all of
    its task type's required arguments (but not necessarily the
    arguments for which a default value exists).
    Args:
instance: A task instance instance. (Yikes!)
fill_missing_args: A boolean determining whether to fill in any
missing arguments in the instance with default values.
Returns:
A tuple containing a boolean and a string, where the boolean
signals whether the arguments are valid and the string explains
why, in the case that the boolean is False (otherwise it's an
empty string).
"""
# Validate an instance's args against its required args.
task_type_required_args = instance.task_type.required_arguments
task_type_default_vals = (
instance.task_type.required_arguments_default_values
)
instance_arg_keys = instance.arguments.keys()
for required_arg in task_type_required_args:
# Check if the required argument is provided
if required_arg not in instance_arg_keys:
# Required argument not provided. Check if default argument
# value exists.
if required_arg not in task_type_default_vals:
# No default exists
return (
False,
"required argument '%s' not provided!" % required_arg,
)
# Fill in the default value if we're told to
if fill_missing_args:
instance.arguments[required_arg] = task_type_default_vals[
required_arg
]
# Valid
return (True, "")
def task_type_args_are_valid(instance):
"""Determines whether a task type's argument fields are valid.
The argument fields are valid if the argument keys in the
required_arguments_default_values field are a subset of its required
arguments.
Arg:
instance: A task type instance.
Returns:
A tuple containing a boolean and a string, where the boolean
signals whether the arguments are valid and the string explains
why, in the case that the boolean is False (otherwise it's an
empty string).
"""
# Ensure that the default arguments form a subset of the required
# arguments
if not set(instance.required_arguments_default_values.keys()).issubset(
set(instance.required_arguments)
):
return (False, "default arguments not a subset of required arguments")
# Valid
return (True, "")
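# A minimal usage sketch for the validators above; they only read attributes, so the
# SimpleNamespace stand-ins and the argument names "alpha" and "beta" are illustrative assumptions.
from types import SimpleNamespace
demo_task_type = SimpleNamespace(
    required_arguments=["alpha", "beta"],
    required_arguments_default_values={"beta": 2},
)
demo_instance = SimpleNamespace(task_type=demo_task_type, arguments={"alpha": 1})
print(task_type_args_are_valid(demo_task_type))  # (True, '')
print(task_instance_args_are_valid(demo_instance, fill_missing_args=True))  # (True, '') with "beta" filled in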
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b18e463d5d6fb71fac9aa5f7d292312fb8ee31b9
| 4,954
|
py
|
Python
|
semantic_segmentation/keras_metrics.py
|
Jason-Khan/ubdvss
|
76cabfa642af1f659920de32827ea6c3fe008588
|
[
"Apache-2.0"
] | 13
|
2020-01-20T13:22:47.000Z
|
2021-11-12T07:35:36.000Z
|
semantic_segmentation/keras_metrics.py
|
Jason-Khan/ubdvss
|
76cabfa642af1f659920de32827ea6c3fe008588
|
[
"Apache-2.0"
] | 3
|
2020-09-09T13:19:11.000Z
|
2020-11-15T10:52:23.000Z
|
semantic_segmentation/keras_metrics.py
|
Jason-Khan/ubdvss
|
76cabfa642af1f659920de32827ea6c3fe008588
|
[
"Apache-2.0"
] | 5
|
2020-06-01T16:26:07.000Z
|
2022-03-08T02:00:45.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) ABBYY (BIT Software), 1993 - 2019. All rights reserved.
"""
Various metrics for Keras computed during training
"""
import keras.backend as K
import tensorflow as tf
from semantic_segmentation.losses import get_losses
def confusion_matrix(true, pred, weights):
"""
    Confusion matrix for binary classification
:param true:
:param pred:
:param weights:
:return: tp, tn, fp, fn - confusion matrix
"""
equal = K.equal(true, pred)
tp = tf.logical_and(equal, K.equal(true, 1))
tn = tf.logical_and(equal, K.equal(true, 0))
fp = tf.logical_and(tf.logical_not(equal), K.equal(pred, 1))
fn = tf.logical_and(tf.logical_not(equal), K.equal(pred, 0))
tp = calculate_sum(tp)
tn = calculate_sum(tn)
fp = calculate_sum(fp)
fn = calculate_sum(fn)
return tp, tn, fp, fn
def detection_pixel_acc(y_true, y_pred):
"""
    Computes per-pixel detection accuracy
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return _acc(detection_true, detection_pred)
def detection_pixel_precision(y_true, y_pred):
"""
    Computes per-pixel detection precision
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return precision(detection_true, detection_pred)
def detection_pixel_recall(y_true, y_pred):
"""
    Computes per-pixel detection recall
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return recall(detection_true, detection_pred)
def detection_pixel_f1(y_true, y_pred):
"""
    Computes the per-pixel detection F1 score
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return f1(detection_true, detection_pred)
def classification_pixel_acc(y_true, y_pred):
"""
    Computes per-pixel classification accuracy
    evaluated only where y_true > 0, i.e. where some object is present
:param y_true:
:param y_pred:
:return:
"""
mask = K.cast(y_true > 0, tf.float32)
labels = K.cast((y_true - 1) * mask, tf.int64)
class_p = tf.nn.softmax(y_pred[..., 1:], axis=-1)
predictions = tf.argmax(class_p, axis=-1)
return _acc(labels, predictions, weights=mask)
def get_all_metrics(classification_mode=False):
"""
    Returns the list of all metrics
    :param classification_mode:
:return:
"""
all_metrics = [
detection_pixel_acc,
detection_pixel_precision,
detection_pixel_recall,
detection_pixel_f1
]
if classification_mode:
all_metrics.append(classification_pixel_acc)
all_metrics += get_losses(classification_mode)
return all_metrics
| 25.802083
| 74
| 0.66411
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) ABBYY (BIT Software), 1993 - 2019. All rights reserved.
"""
Various metrics for Keras computed during training
"""
import functools
import keras.backend as K
import tensorflow as tf
from semantic_segmentation.losses import get_losses
def _squeeze_single_dims(*args):
return [tf.squeeze(arg) for arg in args]
def _metric_wrapper(metric_fn):
@functools.wraps(metric_fn)
def metric_fn_wrapped(true, pred, weights=None):
if weights is None:
weights = tf.ones_like(true, tf.float32)
_true, _pred, _weights = _squeeze_single_dims(true, pred, weights)
metric_value = metric_fn(_true, _pred, _weights)
return metric_value
return metric_fn_wrapped
@_metric_wrapper
def _acc(true, pred, weights):
equal = K.cast(K.equal(true, pred), tf.float32)
return K.sum(equal * weights) / K.maximum(1., K.sum(weights))
def confusion_matrix(true, pred, weights):
"""
    Confusion matrix for binary classification
:param true:
:param pred:
:param weights:
:return: tp, tn, fp, fn - confusion matrix
"""
equal = K.equal(true, pred)
def calculate_sum(metric):
m = K.cast(metric, tf.float32)
return K.sum(m * weights)
tp = tf.logical_and(equal, K.equal(true, 1))
tn = tf.logical_and(equal, K.equal(true, 0))
fp = tf.logical_and(tf.logical_not(equal), K.equal(pred, 1))
fn = tf.logical_and(tf.logical_not(equal), K.equal(pred, 0))
tp = calculate_sum(tp)
tn = calculate_sum(tn)
fp = calculate_sum(fp)
fn = calculate_sum(fn)
return tp, tn, fp, fn
@_metric_wrapper
def precision(true, pred, weights):
"""
    Computes precision, taking the weights into account
:param true:
:param pred:
:param weights:
:return:
"""
tp, tn, fp, fn = confusion_matrix(true, pred, weights)
return tp / K.maximum(1., tp + fp)
@_metric_wrapper
def recall(true, pred, weights):
"""
    Computes recall, taking the weights into account
:param true:
:param pred:
:param weights:
:return:
"""
tp, tn, fp, fn = confusion_matrix(true, pred, weights)
return tp / K.maximum(1., tp + fn)
@_metric_wrapper
def f1(true, pred, weights):
"""
    Computes the F1 score, taking the weights into account
:param true:
:param pred:
:param weights:
:return:
"""
tp, tn, fp, fn = confusion_matrix(true, pred, weights)
precision = tp / K.maximum(1., tp + fp)
recall = tp / K.maximum(1., tp + fn)
return tf.cond(K.not_equal(precision + recall, 0.),
lambda: 2. * precision * recall / (precision + recall),
lambda: 0.)
def _get_detection_labels(y_true, y_pred):
detection_true = K.cast(K.greater(y_true, 0), tf.int32)
detection_pred = K.cast(K.greater(y_pred[..., 0], 0), tf.int32)
return detection_true, detection_pred
def detection_pixel_acc(y_true, y_pred):
"""
    Computes per-pixel detection accuracy
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return _acc(detection_true, detection_pred)
def detection_pixel_precision(y_true, y_pred):
"""
    Computes per-pixel detection precision
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return precision(detection_true, detection_pred)
def detection_pixel_recall(y_true, y_pred):
"""
    Computes per-pixel detection recall
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return recall(detection_true, detection_pred)
def detection_pixel_f1(y_true, y_pred):
"""
    Computes the per-pixel detection F1 score
:param y_true:
:param y_pred:
:return:
"""
detection_true, detection_pred = _get_detection_labels(y_true, y_pred)
return f1(detection_true, detection_pred)
def classification_pixel_acc(y_true, y_pred):
"""
    Computes per-pixel classification accuracy
    evaluated only where y_true > 0, i.e. where some object is present
:param y_true:
:param y_pred:
:return:
"""
mask = K.cast(y_true > 0, tf.float32)
labels = K.cast((y_true - 1) * mask, tf.int64)
class_p = tf.nn.softmax(y_pred[..., 1:], axis=-1)
predictions = tf.argmax(class_p, axis=-1)
return _acc(labels, predictions, weights=mask)
def get_all_metrics(classification_mode=False):
"""
    Returns the list of all metrics
:param classification_mode:
:return:
"""
all_metrics = [
detection_pixel_acc,
detection_pixel_precision,
detection_pixel_recall,
detection_pixel_f1
]
if classification_mode:
all_metrics.append(classification_pixel_acc)
all_metrics += get_losses(classification_mode)
return all_metrics
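# Illustrative wiring sketch: the metric list built above is normally handed to Keras at
# compile time. The model and loss names in the commented line are assumptions, not part of this module.
training_metrics = get_all_metrics(classification_mode=True)
# model.compile(optimizer="adam", loss=segmentation_loss, metrics=training_metrics)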
| 738
| 1,326
| 0
| 0
| 0
| 391
| 0
| -5
| 211
|
a675dc6e47d5ff70decc3601d63c4681223ee3d8
| 2,722
|
py
|
Python
|
datazie/model/LinearModel.py
|
amozie/amozie
|
fb7c16ce537bc5567f9c87cfc22c564a4dffc4ef
|
[
"Apache-2.0"
] | null | null | null |
datazie/model/LinearModel.py
|
amozie/amozie
|
fb7c16ce537bc5567f9c87cfc22c564a4dffc4ef
|
[
"Apache-2.0"
] | null | null | null |
datazie/model/LinearModel.py
|
amozie/amozie
|
fb7c16ce537bc5567f9c87cfc22c564a4dffc4ef
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
"""
import numpy as np
model_sm = 'sm'
if __name__ == '__main__':
x = np.linspace(0, 10, 21)
y = 3*x + 2
y += np.random.randn(x.size)
lm = LinearModel(y, x)
lm.fit()
lm.summary()
print(lm.predict())
lm.plot()
| 26.950495
| 72
| 0.559882
|
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %(username)s
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
import statsmodels.api as sm
from statsmodels.sandbox.regression.predstd import wls_prediction_std
model_sm = 'sm'
class ModelException(Exception):
def __init__(self, message):
super().__init__(self)
self.message = message
class LinearModel():
def __init__(self, y, x=None, model=model_sm, add_constant=None):
self.model = model
self.add_constant = add_constant
self.__features = None
if x is None:
x = np.arange(y.size)
self.x = self.__pretreat_params(x)
self.y = y
def __pretreat_params(self, x):
if not isinstance(x, np.ndarray):
x = np.array(x)
if not self.__features:
if 1 == x.ndim:
self.__features = 1
elif 2 == x.ndim:
self.__features = x.shape[1]
else:
raise ModelException('dimension of x is error')
if 2 != x.ndim:
x = x.reshape(-1, self.__features)
if self.add_constant is None:
if model_sm == self.model:
x = self.__add_constant(x)
elif self.add_constant:
x = self.__add_constant(x)
return x
def __add_constant(self, x):
        # When there is only one sample, sm.add_constant has a bug and returns the original array without adding the constant
if 1 == x.shape[0]:
return np.concatenate((np.ones((x.shape[0], 1)), x), axis=1)
else:
return sm.add_constant(x)
def __fit_sm(self):
self.res = sm.OLS(self.y, self.x).fit()
def fit(self):
if model_sm == self.model:
self.__fit_sm()
def predict(self, x=None, alpha=0.05):
if x is not None:
x = self.__pretreat_params(x)
ret = [self.res.predict(x)]
ret.extend(wls_prediction_std(self.res, exog=x, alpha=alpha))
return np.array(ret).T
def summary(self):
print(self.res.summary())
def plot(self):
fig = plt.figure()
fig.suptitle('LINEAR MODEL')
ax = plt.subplot(111)
y_prd = self.predict(alpha=0.1)
ax.plot(self.x[:, 1], self.y, '*', label='sample')
ax.plot(self.x[:, 1], y_prd[:, 0], label='predict')
ax.plot(self.x[:, 1], y_prd[:, 2], 'r--', label='std')
ax.plot(self.x[:, 1], y_prd[:, 3], 'r--', label='std')
plt.show()
if __name__ == '__main__':
x = np.linspace(0, 10, 21)
y = 3*x + 2
y += np.random.randn(x.size)
lm = LinearModel(y, x)
lm.fit()
lm.summary()
print(lm.predict())
lm.plot()
| 60
| 0
| 0
| 2,130
| 0
| 0
| 0
| 74
| 179
|
b67a3f691f33184cbbf6898ebf7144756349476d
| 4,888
|
py
|
Python
|
utilities.py
|
rc1035/directed-probe-matching
|
c724096672e778202d9e8ed197cdf7395ea1d211
|
[
"MIT"
] | 10
|
2017-08-16T12:16:52.000Z
|
2022-02-26T05:09:39.000Z
|
utilities.py
|
d15c0/directed-probe-matching
|
c724096672e778202d9e8ed197cdf7395ea1d211
|
[
"MIT"
] | 1
|
2019-07-10T12:00:00.000Z
|
2019-07-10T12:00:00.000Z
|
utilities.py
|
d15c0/directed-probe-matching
|
c724096672e778202d9e8ed197cdf7395ea1d211
|
[
"MIT"
] | 4
|
2017-11-30T11:01:06.000Z
|
2019-11-03T23:39:40.000Z
|
#!/usr/bin/env python3.6
"""Refactored utility functions."""
__author__ = "Richard Cosgrove"
from collections import defaultdict
import gzip
from itertools import combinations
from datetime import timedelta
import json
import os
def export_compressed_json(dict_item, file_name):
"""Export gzip compressed JSON.
(For Uni dataset compressed size is ~10% of uncompressed.)
:param dict_item: Dictionary to dump as JSON.
:param file_name: Name of file to be written e.g. dict.json.gz
"""
# Use lowest level of compression for fast speed.
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with gzip.open(file_name, mode="wt", compresslevel=1) as f:
json.dump(dict_item, f, separators=(',', ':'))
def import_compressed_json(file_name):
"""Import gzip compressed JSON.
:param file_name: Name of file to be read e.g. dict.json.gz
:returns: JSON as a dictionary.
"""
with gzip.open(file_name, mode="rt") as f:
return json.load(f)
def match_tokens_with_same_ssid_set(token_to_probes):
"""Split into clusters that share the SAME set of SSIDs probed for.
:param token_to_probes: Dictionary with token keys and probe values
:returns: Dictionary with SSID set keys and token values
"""
ssid_set_to_tokens = defaultdict(set)
token_to_ssid_set = {}
for token, probes in token_to_probes.items():
ssid_set = set()
for probe in probes:
if probe["ssid"] == 0:
# Ignore broadcast probes.
continue
ssid_set.add(probe["ssid"])
if len(ssid_set) < 2:
# Ignore sets with cardinality less than X
# due to high rate of false positives.
continue
# Cluster token with any tokens that share the same SSID set.
ssid_set_to_tokens[frozenset(ssid_set)].add(token)
token_to_ssid_set[token] = frozenset(ssid_set)
# Sanity check: Assert that no token has been matched more than once.
tokens = [t for tokens in list(ssid_set_to_tokens.values()) for t in tokens]
assert(len(tokens) == len(set(tokens)))
return (ssid_set_to_tokens, token_to_ssid_set)
def validate_clusters(clusters, token_to_probes):
"""Validate the correctness of a clustering.
:param clusters: An iterable of clusters, where each cluster is a list of tokens.
:returns: Dictionary of binary classifier results
"""
token_to_mac = import_compressed_json("int/token_to_mac.json.gz")
# Use a binary Classification
true_positives, false_positives = 0, 0
num_of_clusters = 0
mac_to_timestamps = defaultdict(list)
for cluster in clusters:
num_of_clusters += 1
for pair in combinations(cluster, r=2):
if token_to_mac[pair[0]] == token_to_mac[pair[1]]:
true_positives += 1
mac = token_to_mac[pair[0]]
t1_timestamps = [float(p["timestamp"]) for p in token_to_probes[pair[0]]]
t2_timestamps = [float(p["timestamp"]) for p in token_to_probes[pair[1]]]
mac_to_timestamps[mac] += t1_timestamps
mac_to_timestamps[mac] += t2_timestamps
else:
false_positives += 1
greater_than = 0
lengths = []
for mac, timestamps in mac_to_timestamps.items():
length = timedelta(seconds=max(timestamps)) - timedelta(seconds=min(timestamps))
if length > timedelta(hours=12):
greater_than += 1
lengths.append(length)
import statistics
mid = statistics.median(lengths)
# Total number of valid pairs and invalid pairs have been
# pre-computed in randomiseTokens.py ...
# So we can easily calculate the negatives by subtracting the positives.
actual_combos = import_compressed_json("int/valid_combinations.json.gz")
true_negatives = actual_combos["invalid_pairs"] - false_positives
false_negatives = actual_combos["valid_pairs"] - true_positives
# Sanity checks
assert(true_positives + false_positives + true_negatives + false_negatives == actual_combos["total_pairs"])
assert(true_positives + false_negatives == actual_combos["valid_pairs"])
assert(false_positives + true_negatives == actual_combos["invalid_pairs"])
true_positive_rate = (true_positives / (float(true_positives + false_negatives)))
false_positive_rate = (false_positives / (float(false_positives + true_negatives)))
accuracy = (true_positives + true_negatives) / float(actual_combos["total_pairs"])
return {
"tp": true_positives,
"fp": false_positives,
"tn": true_negatives,
"fn": false_negatives,
"tpr": true_positive_rate,
"fpr": false_positive_rate,
"accuracy": accuracy,
"clusters": num_of_clusters,
"macs": greater_than,
"median": mid
}
| 36.75188
| 112
| 0.672054
|
#!/usr/bin/env python3.6
"""Refactored utility functions."""
__author__ = "Richard Cosgrove"
from collections import defaultdict
import gzip
from itertools import combinations
from datetime import datetime, timedelta
import json
import os
def export_compressed_json(dict_item, file_name):
"""Export gzip compressed JSON.
(For Uni dataset compressed size is ~10% of uncompressed.)
:param dict_item: Dictionary to dump as JSON.
:param file_name: Name of file to be written e.g. dict.json.gz
"""
# Use lowest level of compression for fast speed.
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with gzip.open(file_name, mode="wt", compresslevel=1) as f:
json.dump(dict_item, f, separators=(',', ':'))
def import_compressed_json(file_name):
"""Import gzip compressed JSON.
:param file_name: Name of file to be read e.g. dict.json.gz
:returns: JSON as a dictionary.
"""
with gzip.open(file_name, mode="rt") as f:
return json.load(f)
def match_tokens_with_same_ssid_set(token_to_probes):
"""Split into clusters that share the SAME set of SSIDs probed for.
:param token_to_probes: Dictionary with token keys and probe values
:returns: Dictionary with SSID set keys and token values
"""
ssid_set_to_tokens = defaultdict(set)
token_to_ssid_set = {}
for token, probes in token_to_probes.items():
ssid_set = set()
for probe in probes:
if probe["ssid"] == 0:
# Ignore broadcast probes.
continue
ssid_set.add(probe["ssid"])
if len(ssid_set) < 2:
# Ignore sets with cardinality less than X
# due to high rate of false positives.
continue
# Cluster token with any tokens that share the same SSID set.
ssid_set_to_tokens[frozenset(ssid_set)].add(token)
token_to_ssid_set[token] = frozenset(ssid_set)
# Sanity check: Assert that no token has been matched more than once.
tokens = [t for tokens in list(ssid_set_to_tokens.values()) for t in tokens]
assert(len(tokens) == len(set(tokens)))
return (ssid_set_to_tokens, token_to_ssid_set)
def validate_clusters(clusters, token_to_probes):
"""Validate the correctness of a clustering.
:param clusters: An iterable of clusters, where each cluster is a list of tokens.
:returns: Dictionary of binary classifier results
"""
token_to_mac = import_compressed_json("int/token_to_mac.json.gz")
# Use a binary Classification
true_positives, false_positives = 0, 0
num_of_clusters = 0
mac_to_timestamps = defaultdict(list)
for cluster in clusters:
num_of_clusters += 1
for pair in combinations(cluster, r=2):
if token_to_mac[pair[0]] == token_to_mac[pair[1]]:
true_positives += 1
mac = token_to_mac[pair[0]]
t1_timestamps = [float(p["timestamp"]) for p in token_to_probes[pair[0]]]
t2_timestamps = [float(p["timestamp"]) for p in token_to_probes[pair[1]]]
mac_to_timestamps[mac] += t1_timestamps
mac_to_timestamps[mac] += t2_timestamps
else:
false_positives += 1
greater_than = 0
lengths = []
for mac, timestamps in mac_to_timestamps.items():
length = timedelta(seconds=max(timestamps)) - timedelta(seconds=min(timestamps))
if length > timedelta(hours=12):
greater_than += 1
lengths.append(length)
import statistics
mid = statistics.median(lengths)
# Total number of valid pairs and invalid pairs have been
# pre-computed in randomiseTokens.py ...
# So we can easily calculate the negatives by subtracting the positives.
actual_combos = import_compressed_json("int/valid_combinations.json.gz")
true_negatives = actual_combos["invalid_pairs"] - false_positives
false_negatives = actual_combos["valid_pairs"] - true_positives
# Sanity checks
assert(true_positives + false_positives + true_negatives + false_negatives == actual_combos["total_pairs"])
assert(true_positives + false_negatives == actual_combos["valid_pairs"])
assert(false_positives + true_negatives == actual_combos["invalid_pairs"])
true_positive_rate = (true_positives / (float(true_positives + false_negatives)))
false_positive_rate = (false_positives / (float(false_positives + true_negatives)))
accuracy = (true_positives + true_negatives) / float(actual_combos["total_pairs"])
return {
"tp": true_positives,
"fp": false_positives,
"tn": true_negatives,
"fn": false_negatives,
"tpr": true_positive_rate,
"fpr": false_positive_rate,
"accuracy": accuracy,
"clusters": num_of_clusters,
"macs": greater_than,
"median": mid
}
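# A minimal round-trip sketch for the compressed-JSON helpers above; the token and probe
# payload are made-up values, and the file path follows the module's "int/" layout.
demo_probes = {"token-1": [{"ssid": "eduroam", "timestamp": "1500000000.0"}]}
export_compressed_json(demo_probes, "int/example_probes.json.gz")
assert import_compressed_json("int/example_probes.json.gz") == demo_probes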
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0
|
0e899a0c6dc84c26eed43123ac191ce6e094f3ae
| 3,464
|
py
|
Python
|
nautobot/users/tests/test_models.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 384
|
2021-02-24T01:40:40.000Z
|
2022-03-30T10:30:59.000Z
|
nautobot/users/tests/test_models.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 1,067
|
2021-02-24T00:58:08.000Z
|
2022-03-31T23:38:23.000Z
|
nautobot/users/tests/test_models.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 128
|
2021-02-24T02:45:16.000Z
|
2022-03-20T18:48:36.000Z
|
from django.contrib.auth import get_user_model
# Use the proper swappable User model
User = get_user_model()
| 33.631068
| 87
| 0.566397
|
from django.contrib.auth import get_user_model
from django.test import TestCase
# Use the proper swappable User model
User = get_user_model()
class UserConfigTest(TestCase):
def setUp(self):
user = User.objects.create_user(username="testuser")
user.config_data = {
"a": True,
"b": {
"foo": 101,
"bar": 102,
},
"c": {
"foo": {
"x": 201,
},
"bar": {
"y": 202,
},
"baz": {
"z": 203,
},
},
}
user.save()
self.user = user
def test_get(self):
# Retrieve root and nested values
self.assertEqual(self.user.get_config("a"), True)
self.assertEqual(self.user.get_config("b.foo"), 101)
self.assertEqual(self.user.get_config("c.baz.z"), 203)
# Invalid values should return None
self.assertIsNone(self.user.get_config("invalid"))
self.assertIsNone(self.user.get_config("a.invalid"))
self.assertIsNone(self.user.get_config("b.foo.invalid"))
self.assertIsNone(self.user.get_config("b.foo.x.invalid"))
# Invalid values with a provided default should return the default
self.assertEqual(self.user.get_config("invalid", "DEFAULT"), "DEFAULT")
self.assertEqual(self.user.get_config("a.invalid", "DEFAULT"), "DEFAULT")
self.assertEqual(self.user.get_config("b.foo.invalid", "DEFAULT"), "DEFAULT")
self.assertEqual(self.user.get_config("b.foo.x.invalid", "DEFAULT"), "DEFAULT")
def test_all(self):
flattened_data = {
"a": True,
"b.foo": 101,
"b.bar": 102,
"c.foo.x": 201,
"c.bar.y": 202,
"c.baz.z": 203,
}
# Retrieve a flattened dictionary containing all config data
self.assertEqual(self.user.all_config(), flattened_data)
def test_set(self):
# Overwrite existing values
self.user.set_config("a", "abc")
self.user.set_config("c.foo.x", "abc")
self.assertEqual(self.user.config_data["a"], "abc")
self.assertEqual(self.user.config_data["c"]["foo"]["x"], "abc")
# Create new values
self.user.set_config("d", "abc")
self.user.set_config("b.baz", "abc")
self.assertEqual(self.user.config_data["d"], "abc")
self.assertEqual(self.user.config_data["b"]["baz"], "abc")
# Set a value and commit to the database
self.user.set_config("a", "def", commit=True)
self.user.refresh_from_db()
self.assertEqual(self.user.config_data["a"], "def")
# Attempt to change a branch node to a leaf node
with self.assertRaises(TypeError):
self.user.set_config("b", 1)
# Attempt to change a leaf node to a branch node
with self.assertRaises(TypeError):
self.user.set_config("a.x", 1)
def test_clear(self):
# Clear existing values
self.user.clear_config("a")
self.user.clear_config("b.foo")
self.assertTrue("a" not in self.user.config_data)
self.assertTrue("foo" not in self.user.config_data["b"])
self.assertEqual(self.user.config_data["b"]["bar"], 102)
# Clear a non-existing value; should fail silently
self.user.clear_config("invalid")
| 0
| 0
| 0
| 3,296
| 0
| 0
| 0
| 11
| 45
|
58ff00ab9dd53405c8606240357d386f8a7c8414
| 3,823
|
py
|
Python
|
lib/googlecloudsdk/api_lib/sql/instances.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/sql/instances.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/api_lib/sql/instances.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
_POSTGRES_DATABASE_VERSION_PREFIX = 'POSTGRES'
| 34.754545
| 79
| 0.757782
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for sql instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from apitools.base.py import list_pager
from googlecloudsdk.api_lib.sql import api_util
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
_POSTGRES_DATABASE_VERSION_PREFIX = 'POSTGRES'
class _BaseInstances(object):
"""Common utility functions for sql instances."""
@staticmethod
def GetDatabaseInstances(limit=None, batch_size=None):
"""Gets SQL instances in a given project.
Modifies current state of an individual instance to 'STOPPED' if
activationPolicy is 'NEVER'.
Args:
limit: int, The maximum number of records to yield. None if all available
records should be yielded.
batch_size: int, The number of items to retrieve per request.
Returns:
List of yielded sql_messages.DatabaseInstance instances.
"""
client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
sql_client = client.sql_client
sql_messages = client.sql_messages
project_id = properties.VALUES.core.project.Get(required=True)
params = {}
if limit is not None:
params['limit'] = limit
if batch_size is not None:
params['batch_size'] = batch_size
yielded = list_pager.YieldFromList(
sql_client.instances,
sql_messages.SqlInstancesListRequest(project=project_id), **params)
def YieldInstancesWithAModifiedState():
for result in yielded:
# TODO(b/63139112): Investigate impact of instances without settings.
if result.settings and result.settings.activationPolicy == 'NEVER':
result.state = 'STOPPED'
yield result
return YieldInstancesWithAModifiedState()
@staticmethod
def PrintAndConfirmAuthorizedNetworksOverwrite():
console_io.PromptContinue(
message='When adding a new IP address to authorized networks, '
'make sure to also include any IP addresses that have already been '
'authorized. Otherwise, they will be overwritten and de-authorized.',
default=True,
cancel_on_no=True)
@staticmethod
def IsPostgresDatabaseVersion(database_version):
"""Returns a boolean indicating if the database version is Postgres."""
return _POSTGRES_DATABASE_VERSION_PREFIX in database_version
class InstancesV1Beta3(_BaseInstances):
"""Common utility functions for sql instances V1Beta3."""
@staticmethod
def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
instance_resource.project = instance_ref.project
instance_resource.instance = instance_ref.instance
@staticmethod
def AddBackupConfigToSettings(settings, backup_config):
settings.backupConfiguration = [backup_config]
class InstancesV1Beta4(_BaseInstances):
"""Common utility functions for sql instances V1Beta4."""
@staticmethod
def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
instance_resource.project = instance_ref.project
instance_resource.name = instance_ref.instance
@staticmethod
def AddBackupConfigToSettings(settings, backup_config):
settings.backupConfiguration = backup_config
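# Quick illustrative check of the Postgres helper above; the version strings are example
# values only, not an exhaustive list of Cloud SQL database versions.
assert InstancesV1Beta4.IsPostgresDatabaseVersion("POSTGRES_9_6")
assert not InstancesV1Beta4.IsPostgresDatabaseVersion("MYSQL_5_7")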
| 0
| 2,374
| 0
| 391
| 0
| 0
| 0
| 94
| 158
|
a6e3fb81075849bc5006590462c1692cddcd0b28
| 1,912
|
py
|
Python
|
catalog/general/catalog_logger.py
|
eoss-cloud/madxxx_catalog_api
|
ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d
|
[
"MIT"
] | null | null | null |
catalog/general/catalog_logger.py
|
eoss-cloud/madxxx_catalog_api
|
ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d
|
[
"MIT"
] | null | null | null |
catalog/general/catalog_logger.py
|
eoss-cloud/madxxx_catalog_api
|
ef37374a36129de4f0a6fe5dd46b5bc2e2f01d1d
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
""" EOSS catalog system
Custom logger
Default configuration file within this directory is used to control logging behaviour; can be overwritten with LOGGING_CONF which points to
local logging configuration
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "[email protected]"
__status__ = "Production"
import logging
from logging.config import fileConfig
import os
from utilities import read_OS_var
try: # Python 2.7+
from logging import NullHandler
except ImportError:  # Python < 2.7 lacks logging.NullHandler; provide a minimal shim
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
if read_OS_var('LOGGING_CONF', mandatory=False) is None:
path = os.path.dirname(__file__)
log_config_file = os.path.join(path, 'logging.ini')
else:
log_config_file = read_OS_var('LOGGING_CONF', mandatory=False)
fileConfig(log_config_file)
logger = logging.getLogger()
logger.addHandler(NullHandler())
logging.getLogger(__name__).addHandler(NullHandler())
# Configure default logger to do nothing
notificator = logging.getLogger('EOSS:notification')
heartbeat_log = logging.getLogger('EOSS:heartbeat')
tracer_log = logging.getLogger('EOSS:tracer')
CALL = 41
START = 42
BEATING = 43
STOP = 44
STROKE = 45
HEALTH = 46
logging.addLevelName(CALL, 'CALL')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(STROKE, 'STROKE')
logging.addLevelName(HEALTH, 'HEALTH')
logging.addLevelName(START, 'START BEAT')
logging.addLevelName(STOP, 'STOP BEAT')
# 3rd party logger configuration
logging.getLogger('boto3.resources.action').setLevel(logging.WARNING)
logging.getLogger('botocore.vendored.requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
| 26.191781
| 140
| 0.764644
|
#-*- coding: utf-8 -*-
""" EOSS catalog system
Custom logger
Default configuration file within this directory is used to control logging behaviour; can be overwritten with LOGGING_CONF which points to
local logging configuration
"""
__author__ = "Thilo Wehrmann, Steffen Gebhardt"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["Thilo Wehrmann", "Steffen Gebhardt"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Thilo Wehrmann"
__email__ = "[email protected]"
__status__ = "Production"
import logging
from logging.config import fileConfig
import os
from utilities import read_OS_var
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
if read_OS_var('LOGGING_CONF', mandatory=False) == None:
path = os.path.dirname(__file__)
log_config_file = os.path.join(path, 'logging.ini')
else:
log_config_file = read_OS_var('LOGGING_CONF', mandatory=False)
fileConfig(log_config_file)
logger = logging.getLogger()
logger.addHandler(NullHandler())
logging.getLogger(__name__).addHandler(NullHandler())
# Configure default logger to do nothing
notificator = logging.getLogger('EOSS:notification')
heartbeat_log = logging.getLogger('EOSS:heartbeat')
tracer_log = logging.getLogger('EOSS:tracer')
CALL = 41
START = 42
BEATING = 43
STOP = 44
STROKE = 45
HEALTH = 46
logging.addLevelName(CALL, 'CALL')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(BEATING, 'BEATING')
logging.addLevelName(STROKE, 'STROKE')
logging.addLevelName(HEALTH, 'HEALTH')
logging.addLevelName(START, 'START BEAT')
logging.addLevelName(STOP, 'STOP BEAT')
# 3rd party logger configuration
logging.getLogger('boto3.resources.action').setLevel(logging.WARNING)
logging.getLogger('botocore.vendored.requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)
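# Illustrative sketch of emitting records at the custom levels defined above, assuming the
# module imported successfully (i.e. a logging.ini was found); the messages are examples.
notificator.log(START, "heartbeat started")
heartbeat_log.log(BEATING, "worker alive")
tracer_log.log(STOP, "heartbeat stopped")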
| 0
| 0
| 0
| 63
| 0
| 0
| 0
| 0
| 26
|
25781249bb36750915e0251ce1e74e198d0fa28a
| 8,973
|
py
|
Python
|
deeplearning/ml4pl/models/batch.py
|
Zacharias030/ProGraML
|
cd99d2c5362acd0b24ee224492bb3e8c4d4736fb
|
[
"Apache-2.0"
] | null | null | null |
deeplearning/ml4pl/models/batch.py
|
Zacharias030/ProGraML
|
cd99d2c5362acd0b24ee224492bb3e8c4d4736fb
|
[
"Apache-2.0"
] | 2
|
2020-07-27T08:22:06.000Z
|
2020-07-30T17:34:35.000Z
|
deeplearning/ml4pl/models/batch.py
|
Zacharias030/ProGraML
|
cd99d2c5362acd0b24ee224492bb3e8c4d4736fb
|
[
"Apache-2.0"
] | 1
|
2020-06-05T04:58:13.000Z
|
2020-06-05T04:58:13.000Z
|
# Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
def EmptyBatch() -> Data:
"""Construct an empty batch."""
return Data(graph_ids=[], data=None)
def EndOfBatches() -> Data:
"""Construct a 'end of batches' marker."""
return Data(graph_ids=[], data=None, end_of_batches=True)
| 30.110738
| 78
| 0.701995
|
# Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains TODO: one line summary.
TODO: Detailed explanation of the file.
"""
from typing import Any
from typing import Iterable
from typing import List
from typing import NamedTuple
from typing import Optional
import numpy as np
import sklearn.metrics
from labm8.py import app
FLAGS = app.FLAGS
app.DEFINE_string(
"batch_scores_averaging_method",
"weighted",
"Selects the averaging method to use when computing recall/precision/F1 "
"scores. See <https://scikit-learn.org/stable/modules/generated/sklearn"
".metrics.f1_score.html>",
)
class Data(NamedTuple):
"""The model data for a batch."""
graph_ids: List[int]
data: Any
# A flag used to mark that this batch is the end of an iterable sequences of
# batches.
end_of_batches: bool = False
@property
def graph_count(self) -> int:
return len(self.graph_ids)
def EmptyBatch() -> Data:
"""Construct an empty batch."""
return Data(graph_ids=[], data=None)
def EndOfBatches() -> Data:
"""Construct a 'end of batches' marker."""
return Data(graph_ids=[], data=None, end_of_batches=True)
class BatchIterator(NamedTuple):
"""A batch iterator"""
batches: Iterable[Data]
# The total number of graphs in all of the batches.
graph_count: int
class Results(NamedTuple):
"""The results of running a batch through a model.
Don't instantiate this tuple directly, use Results.Create().
"""
targets: np.array
predictions: np.array
# The number of model iterations to compute the final results. This is used
# by iterative models such as message passing networks.
iteration_count: int
# For iterative models, this indicates whether the state of the model at
# iteration_count had converged on a solution.
model_converged: bool
# The learning rate and loss of models, if applicable.
learning_rate: Optional[float]
loss: Optional[float]
# Batch-level average performance metrics.
accuracy: float
precision: float
recall: float
f1: float
@property
def has_learning_rate(self) -> bool:
return self.learning_rate is not None
@property
def has_loss(self) -> bool:
return self.loss is not None
@property
def target_count(self) -> int:
"""Get the number of targets in the batch.
For graph-level classifiers, this will be equal to Data.graph_count, else
it's equal to the batch node count.
"""
return self.targets.shape[1]
def __repr__(self) -> str:
return (
f"accuracy={self.accuracy:.2%}%, "
f"precision={self.precision:.3f}, "
f"recall={self.recall:.3f}, "
f"f1={self.f1:.3f}"
)
def __eq__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy == rhs.accuracy
def __gt__(self, rhs: "Results"):
"""Compare batch results."""
return self.accuracy > rhs.accuracy
@classmethod
def Create(
cls,
targets: np.array,
predictions: np.array,
iteration_count: int = 1,
model_converged: bool = True,
learning_rate: Optional[float] = None,
loss: Optional[float] = None,
):
"""Construct a results instance from 1-hot targets and predictions.
This is the preferred means of construct a Results instance, which takes
care of evaluating all of the metrics for you. The behavior of metrics
calculation is dependent on the --batch_scores_averaging_method flag.
Args:
targets: An array of 1-hot target vectors with
shape (y_count, y_dimensionality), dtype int32.
predictions: An array of 1-hot prediction vectors with
shape (y_count, y_dimensionality), dtype int32.
iteration_count: For iterative models, the number of model iterations to
compute the final result.
model_converged: For iterative models, whether model converged.
learning_rate: The model learning rate, if applicable.
loss: The model loss, if applicable.
Returns:
A Results instance.
"""
if targets.shape != predictions.shape:
raise TypeError(
f"Expected model to produce targets with shape {targets.shape} but "
f"instead received predictions with shape {predictions.shape}"
)
y_dimensionality = targets.shape[1]
if y_dimensionality < 2:
raise TypeError(
f"Expected label dimensionality > 1, received {y_dimensionality}"
)
# Create dense arrays of shape (target_count).
true_y = np.argmax(targets, axis=1)
pred_y = np.argmax(predictions, axis=1)
# NOTE(github.com/ChrisCummins/ProGraML/issues/22): This assumes that
# labels use the values [0,...n).
labels = np.arange(y_dimensionality, dtype=np.int64)
return cls(
targets=targets,
predictions=predictions,
iteration_count=iteration_count,
model_converged=model_converged,
learning_rate=learning_rate,
loss=loss,
accuracy=sklearn.metrics.accuracy_score(true_y, pred_y),
precision=sklearn.metrics.precision_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
recall=sklearn.metrics.recall_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
f1=sklearn.metrics.f1_score(
true_y,
pred_y,
labels=labels,
average=FLAGS.batch_scores_averaging_method,
),
)
class RollingResults:
"""Maintain weighted rolling averages across batches."""
def __init__(self):
self.weight_sum = 0
self.batch_count = 0
self.graph_count = 0
self.target_count = 0
self.weighted_iteration_count_sum = 0
self.weighted_model_converged_sum = 0
self.has_learning_rate = False
self.weighted_learning_rate_sum = 0
self.has_loss = False
self.weighted_loss_sum = 0
self.weighted_accuracy_sum = 0
self.weighted_precision_sum = 0
self.weighted_recall_sum = 0
self.weighted_f1_sum = 0
def Update(
self, data: Data, results: Results, weight: Optional[float] = None
) -> None:
"""Update the rolling results with a new batch.
Args:
data: The batch data used to produce the results.
results: The batch results to update the current state with.
weight: A weight to assign to weighted sums. E.g. to weight results
across all targets, use weight=results.target_count. To weight across
targets, use weight=batch.target_count. To weight across
graphs, use weight=batch.graph_count. By default, weight by target
count.
"""
if weight is None:
weight = results.target_count
self.weight_sum += weight
self.batch_count += 1
self.graph_count += data.graph_count
self.target_count += results.target_count
self.weighted_iteration_count_sum += results.iteration_count * weight
self.weighted_model_converged_sum += (
weight if results.model_converged else 0
)
if results.has_learning_rate:
self.has_learning_rate = True
self.weighted_learning_rate_sum += results.learning_rate * weight
if results.has_loss:
self.has_loss = True
self.weighted_loss_sum += results.loss * weight
self.weighted_accuracy_sum += results.accuracy * weight
self.weighted_precision_sum += results.precision * weight
self.weighted_recall_sum += results.recall * weight
self.weighted_f1_sum += results.f1 * weight
@property
def iteration_count(self) -> float:
return self.weighted_iteration_count_sum / max(self.weight_sum, 1)
@property
def model_converged(self) -> float:
return self.weighted_model_converged_sum / max(self.weight_sum, 1)
@property
def learning_rate(self) -> Optional[float]:
if self.has_learning_rate:
return self.weighted_learning_rate_sum / max(self.weight_sum, 1)
@property
def loss(self) -> Optional[float]:
if self.has_loss:
return self.weighted_loss_sum / max(self.weight_sum, 1)
@property
def accuracy(self) -> float:
return self.weighted_accuracy_sum / max(self.weight_sum, 1)
@property
def precision(self) -> float:
return self.weighted_precision_sum / max(self.weight_sum, 1)
@property
def recall(self) -> float:
return self.weighted_recall_sum / max(self.weight_sum, 1)
@property
def f1(self) -> float:
return self.weighted_f1_sum / max(self.weight_sum, 1)
| 0
| 3,688
| 0
| 3,755
| 0
| 0
| 0
| 21
| 247
|
bd43660e61d12126149a6be149f44586a149537b
| 102
|
py
|
Python
|
python/python-tutorial/10_packages/package_a/subpackage_a/a_module.py
|
wesleyegberto/dojos-languages
|
87170a722efac1247c713daa21cb3fcc39f5c5c1
|
[
"MIT"
] | null | null | null |
python/python-tutorial/10_packages/package_a/subpackage_a/a_module.py
|
wesleyegberto/dojos-languages
|
87170a722efac1247c713daa21cb3fcc39f5c5c1
|
[
"MIT"
] | null | null | null |
python/python-tutorial/10_packages/package_a/subpackage_a/a_module.py
|
wesleyegberto/dojos-languages
|
87170a722efac1247c713daa21cb3fcc39f5c5c1
|
[
"MIT"
] | null | null | null |
# A module inside the package
print("Module: ", __name__)
| 12.75
| 29
| 0.666667
|
# A module inside the package
print("Module: ", __name__)
def do_stuff():
print("Doing stuff")
| 0
| 0
| 0
| 0
| 0
| 19
| 0
| 0
| 23
|
ea15f02ea347cde7a8bc22e6cd2d89594e3df3dd
| 1,096
|
py
|
Python
|
easy/1025-Divisor Game.py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | 2
|
2020-05-08T02:17:17.000Z
|
2020-05-17T04:55:56.000Z
|
easy/1025-Divisor Game.py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | null | null | null |
easy/1025-Divisor Game.py
|
Davidxswang/leetcode
|
d554b7f5228f14c646f726ddb91014a612673e06
|
[
"Apache-2.0"
] | null | null | null |
"""
https://leetcode.com/problems/divisor-game/
Alice and Bob take turns playing a game, with Alice starting first.
Initially, there is a number N on the chalkboard. On each player's turn, that player makes a move consisting of:
Choosing any x with 0 < x < N and N % x == 0.
Replacing the number N on the chalkboard with N - x.
Also, if a player cannot make a move, they lose the game.
Return True if and only if Alice wins the game, assuming both players play optimally.
Example 1:
Input: 2
Output: true
Explanation: Alice chooses 1, and Bob has no more moves.
Example 2:
Input: 3
Output: false
Explanation: Alice chooses 1, Bob chooses 1, and Alice has no more moves.
Note:
1 <= N <= 1000
"""
# time complexity: O(nlogn), space complexity: O(n)
| 25.488372
| 113
| 0.636861
|
"""
https://leetcode.com/problems/divisor-game/
Alice and Bob take turns playing a game, with Alice starting first.
Initially, there is a number N on the chalkboard. On each player's turn, that player makes a move consisting of:
Choosing any x with 0 < x < N and N % x == 0.
Replacing the number N on the chalkboard with N - x.
Also, if a player cannot make a move, they lose the game.
Return True if and only if Alice wins the game, assuming both players play optimally.
Example 1:
Input: 2
Output: true
Explanation: Alice chooses 1, and Bob has no more moves.
Example 2:
Input: 3
Output: false
Explanation: Alice chooses 1, Bob chooses 1, and Alice has no more moves.
Note:
1 <= N <= 1000
"""
# time complexity: O(nlogn), space complexity: O(n)
class Solution:
def divisorGame(self, N: int) -> bool:
flags = [False] * (N+1)
import math
for i in range(2, N+1):
for j in range(1, int(math.sqrt(i))+2):
if i % j == 0 and flags[i-j] == False:
flags[i] = True
break
return flags[N]
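# Quick check of the two examples from the problem statement, using the Solution class above.
demo = Solution()
assert demo.divisorGame(2) is True
assert demo.divisorGame(3) is False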
| 0
| 0
| 0
| 314
| 0
| 0
| 0
| 0
| 23
|
56a51529edb0ee5b8e263f380f1c7725ffa73944
| 4,540
|
py
|
Python
|
evalml/automl/engine/sequential_engine.py
|
BlockchainClimateInstitute/price_microservice
|
11d1cff8965fe1befc997e9da3dc09efceed4579
|
[
"BSD-3-Clause"
] | null | null | null |
evalml/automl/engine/sequential_engine.py
|
BlockchainClimateInstitute/price_microservice
|
11d1cff8965fe1befc997e9da3dc09efceed4579
|
[
"BSD-3-Clause"
] | null | null | null |
evalml/automl/engine/sequential_engine.py
|
BlockchainClimateInstitute/price_microservice
|
11d1cff8965fe1befc997e9da3dc09efceed4579
|
[
"BSD-3-Clause"
] | null | null | null |
from evalml.utils import get_logger
logger = get_logger(__file__)
| 42.830189
| 109
| 0.620485
|
import sys
import traceback
import numpy as np
from evalml.automl.engine import EngineBase
from evalml.exceptions import PipelineScoreError
from evalml.model_family import ModelFamily
from evalml.objectives.utils import get_objective
from evalml.utils import get_logger
logger = get_logger(__file__)
class SequentialEngine(EngineBase):
"""The default engine for the AutoML search. Trains and scores pipelines locally, one after another."""
def evaluate_batch(self, pipelines):
"""Evaluate a batch of pipelines using the current dataset and AutoML state.
Arguments:
pipelines (list(PipelineBase)): A batch of pipelines to be fitted and evaluated.
Returns:
list (int): a list of the new pipeline IDs which were created by the AutoML search.
"""
if self.X_train is None or self.y_train is None:
raise ValueError("Dataset has not been loaded into the engine.")
new_pipeline_ids = []
index = 0
while self._should_continue_callback() and index < len(pipelines):
pipeline = pipelines[index]
self._pre_evaluation_callback(pipeline)
X, y = self.X_train, self.y_train
if pipeline.model_family == ModelFamily.ENSEMBLE:
X, y = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
elif self.ensembling_indices is not None:
training_indices = [i for i in range(len(self.X_train)) if i not in self.ensembling_indices]
X = self.X_train.iloc[training_indices]
y = self.y_train.iloc[training_indices]
evaluation_result = EngineBase.train_and_score_pipeline(pipeline, self.automl, X, y)
new_pipeline_ids.append(self._post_evaluation_callback(pipeline, evaluation_result))
index += 1
return new_pipeline_ids
def train_batch(self, pipelines):
"""Train a batch of pipelines using the current dataset.
Arguments:
pipelines (list(PipelineBase)): A batch of pipelines to fit.
Returns:
dict[str, PipelineBase]: Dict of fitted pipelines keyed by pipeline name.
"""
super().train_batch(pipelines)
fitted_pipelines = {}
for pipeline in pipelines:
try:
fitted_pipeline = EngineBase.train_pipeline(
pipeline, self.X_train, self.y_train,
self.automl.optimize_thresholds,
self.automl.objective
)
fitted_pipelines[fitted_pipeline.name] = fitted_pipeline
except Exception as e:
logger.error(f'Train error for {pipeline.name}: {str(e)}')
tb = traceback.format_tb(sys.exc_info()[2])
logger.error("Traceback:")
logger.error("\n".join(tb))
return fitted_pipelines
def score_batch(self, pipelines, X, y, objectives):
"""Score a batch of pipelines.
Arguments:
pipelines (list(PipelineBase)): A batch of fitted pipelines to score.
X (ww.DataTable, pd.DataFrame): Features to score on.
y (ww.DataTable, pd.DataFrame): Data to score on.
objectives (list(ObjectiveBase), list(str)): Objectives to score on.
Returns:
dict: Dict containing scores for all objectives for all pipelines. Keyed by pipeline name.
"""
super().score_batch(pipelines, X, y, objectives)
scores = {}
objectives = [get_objective(o, return_instance=True) for o in objectives]
for pipeline in pipelines:
try:
scores[pipeline.name] = pipeline.score(X, y, objectives)
except Exception as e:
logger.error(f"Score error for {pipeline.name}: {str(e)}")
if isinstance(e, PipelineScoreError):
nan_scores = {objective: np.nan for objective in e.exceptions}
scores[pipeline.name] = {**nan_scores, **e.scored_successfully}
else:
# Traceback already included in the PipelineScoreError so we only
# need to include it for all other errors
tb = traceback.format_tb(sys.exc_info()[2])
logger.error("Traceback:")
logger.error("\n".join(tb))
scores[pipeline.name] = {objective.name: np.nan for objective in objectives}
return scores
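# Hypothetical helper, not part of the original file. It relies only on the
# public methods defined above; constructing the engine itself (and loading
# X_train/y_train) is handled by the surrounding AutoML search, not shown here.
def fit_and_score_batch(engine, pipelines, X_holdout, y_holdout, objectives):
    fitted = engine.train_batch(pipelines)      # dict: pipeline name -> fitted pipeline
    return engine.score_batch(list(fitted.values()), X_holdout, y_holdout, objectives)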
| 0
| 0
| 0
| 4,213
| 0
| 0
| 0
| 80
| 179
|
ee2569e70a693fb7569365e25bd376b146aaf167
| 877
|
py
|
Python
|
source/read-file/app.py
|
aws-samples/aws-serverless-batch-architecture
|
1672d7623c2a0b6141bf83d019efe3c6c70efd00
|
[
"MIT-0"
] | 14
|
2021-11-12T02:02:46.000Z
|
2022-03-01T23:28:48.000Z
|
source/read-file/app.py
|
aws-samples/aws-serverless-batch-architecture
|
1672d7623c2a0b6141bf83d019efe3c6c70efd00
|
[
"MIT-0"
] | 1
|
2021-11-01T02:56:34.000Z
|
2022-01-17T00:19:53.000Z
|
source/read-file/app.py
|
aws-samples/aws-serverless-batch-architecture
|
1672d7623c2a0b6141bf83d019efe3c6c70efd00
|
[
"MIT-0"
] | 1
|
2022-03-24T13:00:45.000Z
|
2022-03-24T13:00:45.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import s3fs
s3 = s3fs.S3FileSystem(anon=False)
header = [
'uuid',
'country',
'itemType',
'salesChannel',
'orderPriority',
'orderDate',
'region',
'shipDate'
]
| 21.390244
| 78
| 0.58951
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import csv
import s3fs
import os
s3 = s3fs.S3FileSystem(anon=False)
header = [
'uuid',
'country',
'itemType',
'salesChannel',
'orderPriority',
'orderDate',
'region',
'shipDate'
]
def lambda_handler(event, context):
input_file = event['input']['FilePath']
output_data = []
skip_first = 0
with s3.open(input_file, 'r', newline='', encoding='utf-8-sig') as inFile:
file_reader = csv.reader(inFile)
for row in file_reader:
if skip_first == 0:
skip_first = skip_first + 1
continue
new_object = {}
for i in range(len(header)):
new_object[header[i]] = row[i]
output_data.append(new_object)
return output_data
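# Local illustration, not part of the original file: the same row -> dict
# mapping that lambda_handler performs above, driven by an in-memory CSV so it
# runs without S3 access (the column names come from `header` defined above).
def _mapping_smoke_test():
    import io
    sample = (
        'uuid,country,itemType,salesChannel,orderPriority,orderDate,region,shipDate\n'
        '1,US,Fruit,Online,H,2020-01-01,NA,2020-01-05\n'
    )
    rows = list(csv.reader(io.StringIO(sample)))[1:]   # skip the header row
    return [{header[i]: row[i] for i in range(len(header))} for row in rows]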
| 0
| 0
| 0
| 0
| 0
| 535
| 0
| -23
| 67
|
85f1aa282d6853f2f160254093d5add98f8f0f8b
| 5,210
|
py
|
Python
|
modules/differentialLine.py
|
inconvergent/differential-line-cuda
|
07927dff7c3178821776fccd5ad0aa196a3bb858
|
[
"MIT"
] | 21
|
2016-05-22T17:40:02.000Z
|
2022-02-03T11:36:31.000Z
|
modules/differentialLine.py
|
inconvergent/differential-line-cuda
|
07927dff7c3178821776fccd5ad0aa196a3bb858
|
[
"MIT"
] | null | null | null |
modules/differentialLine.py
|
inconvergent/differential-line-cuda
|
07927dff7c3178821776fccd5ad0aa196a3bb858
|
[
"MIT"
] | 2
|
2017-03-17T05:13:16.000Z
|
2021-12-09T02:20:03.000Z
|
# -*- coding: utf-8 -*-
from numpy import pi
TWOPI = pi*2
PI = pi
| 18.876812
| 65
| 0.55739
|
# -*- coding: utf-8 -*-
from numpy import pi
from numpy import zeros
from numpy import sin
from numpy import cos
from numpy import sqrt
from numpy.random import random
from numpy import float32 as npfloat
from numpy import int32 as npint
TWOPI = pi*2
PI = pi
class DifferentialLine(object):
def __init__(
self,
size,
stp,
spring_stp,
reject_stp,
near_rad,
far_rad,
threads = 256,
nmax = 1000000
):
self.itt = 0
self.threads = threads
self.nmax = nmax
self.size = size
self.one = 1.0/size
self.stp = stp
self.spring_stp = spring_stp
self.reject_stp = reject_stp
self.near_rad = near_rad
self.far_rad = far_rad
self.__init()
self.__cuda_init()
def __init(self):
self.num = 0
nz = int(1.0/(2*self.far_rad))
self.nz = nz
self.nz2 = nz**2
nmax = self.nmax
self.xy = zeros((nmax, 2), npfloat)
self.dxy = zeros((nmax, 2), npfloat)
self.tmp = zeros((nmax, 1), npfloat)
self.link_len = zeros((nmax, 2), npfloat)
self.link_curv = zeros((nmax, 2), npfloat)
self.links = zeros((nmax, 2), npint)
zone_map_size = self.nz2*64
self.zone_node = zeros(zone_map_size, npint)
self.zone_num = zeros(self.nz2, npint)
def __cuda_init(self):
import pycuda.autoinit
from .helpers import load_kernel
self.cuda_agg_count = load_kernel(
'modules/cuda/agg_count.cu',
'agg_count',
subs={'_THREADS_': self.threads}
)
self.cuda_agg = load_kernel(
'modules/cuda/agg.cu',
'agg',
subs={'_THREADS_': self.threads}
)
self.cuda_step = load_kernel(
'modules/cuda/step.cu',
'step',
subs={
'_THREADS_': self.threads
}
)
def init_circle(self, n, rad):
from numpy import sort
num = self.num
links = self.links
angles = random(n)*TWOPI
angles = sort(angles)
xx = 0.5 + cos(angles)*rad
yy = 0.5 + sin(angles)*rad
self.xy[num:num+n, 0] = xx
self.xy[num:num+n, 1] = yy
for i in range(num+1, num+n-1):
links[i,0] = i-1
links[i,1] = i+1
links[num,1] = num+1
links[num,0] = num+n-1
links[(num+n-1),1] = num
links[(num+n-1),0] = num+n-2
self.num = num+n
def spawn_normal(self, limit, prob=0.01, t=None):
links = self.links
link_len = self.link_len
xy = self.xy
num = self.num
mask = (random(num)<prob).nonzero()[0]
if len(mask)<1:
return
for i in mask:
b = links[i,1]
l = link_len[i,1]
if l>limit:
newxy = (xy[b,:]+xy[i,:])*0.5
xy[num,:] = newxy
links[i,1] = num
links[num,0] = i
links[num,1] = b
links[b,0] = num
num += 1
self.num = num
def spawn_curl(self, limit, prob=0.01, t=None):
links = self.links
link_len = self.link_len
xy = self.xy
num = self.num
curve = sqrt(self.link_curv[1:num,0])
for i, (r, t) in enumerate(zip(random(num), curve)):
b = links[i,1]
if r>t and link_len[i,1]>limit:
newxy = (xy[b,:]+xy[i,:])*0.5
xy[num,:] = newxy
links[i,1] = num
links[num,0] = i
links[num,1] = b
links[b,0] = num
num += 1
self.num = num
def get_line(self):
from numpy import array
links = self.links
curr = links[0,0]
first = curr
order = [first]
while True:
a = links[curr,0]
b = links[curr,1]
if a != curr:
curr = a
else:
curr = b
order.append(curr)
if curr == first:
order.append(a)
break
return array(order, npint)
def step(self, t=None):
import pycuda.driver as drv
self.itt += 1
num = self.num
xy = self.xy
dxy = self.dxy
tmp = self.tmp
link_len = self.link_len
link_curv = self.link_curv
blocks = num//self.threads + 1
self.zone_num[:] = 0
self.cuda_agg_count(
npint(num),
npint(self.nz),
drv.In(xy[:num,:]),
drv.InOut(self.zone_num),
block=(self.threads,1,1),
grid=(blocks,1)
)
zone_leap = self.zone_num[:].max()
zone_map_size = self.nz2*zone_leap
if zone_map_size>len(self.zone_node):
print('resize, new zone leap: ', zone_map_size*2./self.nz2)
self.zone_node = zeros(zone_map_size*2, npint)
self.zone_num[:] = 0
self.cuda_agg(
npint(num),
npint(self.nz),
npint(zone_leap),
drv.In(xy[:num,:]),
drv.InOut(self.zone_num),
drv.InOut(self.zone_node),
block=(self.threads,1,1),
grid=(blocks,1)
)
self.cuda_step(
npint(num),
npint(self.nz),
npint(zone_leap),
drv.In(xy[:num,:]),
drv.Out(dxy[:num,:]),
drv.Out(tmp[:num,:]),
drv.Out(link_len[:num,:]),
drv.Out(link_curv[:num,:]),
drv.In(self.links[:num,:]),
drv.In(self.zone_num),
drv.In(self.zone_node),
npfloat(self.stp),
npfloat(self.reject_stp),
npfloat(self.spring_stp),
npfloat(self.near_rad),
npfloat(self.far_rad),
block=(self.threads,1,1),
grid=(blocks,1)
)
xy[:num,:] += dxy[:num,:]
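# Hypothetical driver, modelled only on the methods defined above. It assumes a
# working CUDA device, pycuda, and the repo's modules/cuda/*.cu kernels; the
# numeric parameters are illustrative, not the author's settings.
if __name__ == '__main__':
    size = 1000
    dl = DifferentialLine(
        size=size, stp=1.0e-6, spring_stp=0.1, reject_stp=0.1,
        near_rad=3.0 / size, far_rad=30.0 / size, nmax=100000)
    dl.init_circle(100, 0.1)              # seed a closed loop of 100 nodes
    for _ in range(100):
        dl.step()                         # CUDA-accelerated growth step
        dl.spawn_curl(limit=1.5 / size)   # insert nodes where length/curvature allow
    print(dl.num, 'nodes, first indices along the curve:', dl.get_line()[:10])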
| 0
| 0
| 0
| 4,917
| 0
| 0
| 0
| 39
| 178
|
9c9a724dc974172fe6713d50c40457af8df6ee64
| 2,802
|
py
|
Python
|
main.py
|
greenactionstudio/openleadr-python
|
03f2ceb3d9a8a2ffdffb67c99ec116187b9ee063
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
greenactionstudio/openleadr-python
|
03f2ceb3d9a8a2ffdffb67c99ec116187b9ee063
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
greenactionstudio/openleadr-python
|
03f2ceb3d9a8a2ffdffb67c99ec116187b9ee063
|
[
"Apache-2.0"
] | 1
|
2021-11-03T02:36:32.000Z
|
2021-11-03T02:36:32.000Z
|
from flask import Flask
import os
import threading
import nest_asyncio
from openleadr.client import OpenADRClient
from openleadr.utils import report_callback
from openleadr.enums import MEASUREMENTS
nest_asyncio.apply()
client = OpenADRClient(ven_name='myven', vtn_url=os.environ.get('VTN_URL'))
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_STATUS')
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_USAGE', measurement= MEASUREMENTS.POWER_REAL)
app = Flask(__name__)
if __name__ == "__main__":
t1 = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': os.environ.get('PORT') })
t2 = threading.Thread(target=client_run)
t1.start()
t2.start()
t2.join()
| 38.916667
| 120
| 0.745182
|
from logging import debug, exception
from flask import Flask, request
import os
import asyncio
import threading
import ssl
import aiohttp
import nest_asyncio
import json
from openleadr.client import OpenADRClient
from openleadr.utils import report_callback
from openleadr.enums import MEASUREMENTS
nest_asyncio.apply()
client = OpenADRClient(ven_name='myven', vtn_url=os.environ.get('VTN_URL'))
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_STATUS')
client.add_report(report_callback, client.ven_id, report_name = 'TELEMETRY_USAGE', measurement= MEASUREMENTS.POWER_REAL)
app = Flask(__name__)
@app.route('/create_party_registration', methods=['POST', 'GET'])
async def create_party_registration():
await client.create_party_registration(ven_id = client.ven_id, registration_id=client.registration_id)
return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/create_party_registration_while_registered', methods=['POST', 'GET'])
async def create_party_registration_while_registered():
await client.create_party_registration_while_registered()
return {'status': 200, 'body': 'return from the create party registration'}
@app.route('/query_registration', methods=['POST'])
async def query_registration():
await client.query_registration()
return {'status': 200, 'body': 'return from the query registration'}
@app.route('/cancel_party_registration', methods=['POST'])
async def cancel_party_registration():
await client.cancel_party_registration()
return {'status': 200, 'body': 'return from the cancel registration'}
@app.route('/register_reports')
async def register_reports():
if client.reports:
await client.register_reports(client.reports)
return {'status': 200, 'body': 'The VEN has sent register report with metadata.'}
@app.route('/request_event', methods=['POST'])
async def request_event():
response_type, response_payload = await client.request_event()
if response_type == 'oadrDistributeEvent':
if 'events' in response_payload and len(response_payload['events']) > 0:
await client._on_event(response_payload)
return {'status': 200, 'body': 'return from the request event'}
@app.route('/create_opt', methods =['POST'])
async def create_opt():
return await client.create_opt(request.data)
@app.route('/cancel_opt', methods = ['POST'])
async def cancel_opt():
return await client.cancel_opt(request.data)
def client_run():
loop = asyncio.new_event_loop()
loop.create_task(client.run())
loop.run_forever()
if __name__ == "__main__":
t1 = threading.Thread(target=app.run, kwargs={'host': '0.0.0.0', 'port': os.environ.get('PORT') })
t2 = threading.Thread(target=client_run)
t1.start()
t2.start()
t2.join()
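# Hypothetical client-side check, not part of the original file. It is meant to
# be run as a separate process while the app above is serving (PORT assumed to
# be 8080 here) and simply exercises one of the Flask routes defined above:
#
#     import requests
#     print(requests.post('http://localhost:8080/query_registration').status_code)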
| 0
| 1,663
| 0
| 0
| 0
| 90
| 0
| -11
| 317
|
0a7253e54a7ce0b7e58517111d748be3f97a40cb
| 2,816
|
py
|
Python
|
sound_lib/external/pybass_aac.py
|
ctoth/sound_lib
|
0e0544a1f4e5da5bc2e0ee99cd7c5bac9ba934c6
|
[
"MIT"
] | 1
|
2020-09-03T15:35:03.000Z
|
2020-09-03T15:35:03.000Z
|
sound_lib/external/pybass_aac.py
|
ctoth/sound_lib
|
0e0544a1f4e5da5bc2e0ee99cd7c5bac9ba934c6
|
[
"MIT"
] | 2
|
2020-09-25T05:47:44.000Z
|
2021-06-25T15:25:34.000Z
|
sound_lib/external/pybass_aac.py
|
ctoth/sound_lib
|
0e0544a1f4e5da5bc2e0ee99cd7c5bac9ba934c6
|
[
"MIT"
] | 2
|
2020-01-05T16:24:20.000Z
|
2020-09-03T15:35:07.000Z
|
from __future__ import absolute_import
# Copyright(c) Max Kolosov 2009 [email protected]
# http://vosolok2008.narod.ru
# BSD license
__version__ = '0.1'
__versionTime__ = '2009-11-15'
__author__ = 'Max Kolosov <[email protected]>'
__doc__ = '''
pybass_aac.py - is ctypes python module for
BASS_AAC - extension to the BASS audio library that enables the playback
of Advanced Audio Coding and MPEG-4 streams (http://www.maresweb.de).
'''
import ctypes
from . import pybass
from .paths import x86_path, x64_path
import libloader
bass_aac_module = libloader.load_library('bass_aac', x86_path=x86_path, x64_path=x64_path)
func_type = libloader.get_functype()
#Register the plugin with the Bass plugin system.
pybass.BASS_PluginLoad(libloader.find_library_path('bass_aac', x86_path=x86_path, x64_path=x64_path), 0)
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
DOWNLOADPROC = pybass.DOWNLOADPROC
BASS_FILEPROCS = pybass.BASS_FILEPROCS
# Additional BASS_SetConfig options
BASS_CONFIG_MP4_VIDEO = 0x10700 # play the audio from MP4 videos
# Additional tags available from BASS_StreamGetTags (for MP4 files)
BASS_TAG_MP4 = 7 # MP4/iTunes metadata
BASS_AAC_STEREO = 0x400000 # downmatrix to stereo
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_AAC = 0x10b00 # AAC
BASS_CTYPE_STREAM_MP4 = 0x10b01 # MP4
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_AAC_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_AAC_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateURL)(const char *url, DWORD offset, DWORD flags, DOWNLOADPROC *proc, void *user);
BASS_AAC_StreamCreateURL = func_type(HSTREAM, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_ulong, DOWNLOADPROC, ctypes.c_void_p)(('BASS_AAC_StreamCreateURL', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_AAC_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_AAC_StreamCreateFileUser', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_MP4_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_MP4_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_MP4_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_MP4_StreamCreateFileUser', bass_aac_module))
| 52.148148
| 184
| 0.79652
|
from __future__ import absolute_import
# Copyright(c) Max Kolosov 2009 [email protected]
# http://vosolok2008.narod.ru
# BSD license
__version__ = '0.1'
__versionTime__ = '2009-11-15'
__author__ = 'Max Kolosov <[email protected]>'
__doc__ = '''
pybass_aac.py - is ctypes python module for
BASS_AAC - extension to the BASS audio library that enables the playback
of Advanced Audio Coding and MPEG-4 streams (http://www.maresweb.de).
'''
import os, sys, ctypes
from . import pybass
from .paths import x86_path, x64_path
import libloader
bass_aac_module = libloader.load_library('bass_aac', x86_path=x86_path, x64_path=x64_path)
func_type = libloader.get_functype()
#Register the plugin with the Bass plugin system.
pybass.BASS_PluginLoad(libloader.find_library_path('bass_aac', x86_path=x86_path, x64_path=x64_path), 0)
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
DOWNLOADPROC = pybass.DOWNLOADPROC
BASS_FILEPROCS = pybass.BASS_FILEPROCS
# Additional BASS_SetConfig options
BASS_CONFIG_MP4_VIDEO = 0x10700 # play the audio from MP4 videos
# Additional tags available from BASS_StreamGetTags (for MP4 files)
BASS_TAG_MP4 = 7 # MP4/iTunes metadata
BASS_AAC_STEREO = 0x400000 # downmatrix to stereo
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_AAC = 0x10b00 # AAC
BASS_CTYPE_STREAM_MP4 = 0x10b01 # MP4
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_AAC_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_AAC_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateURL)(const char *url, DWORD offset, DWORD flags, DOWNLOADPROC *proc, void *user);
BASS_AAC_StreamCreateURL = func_type(HSTREAM, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_ulong, DOWNLOADPROC, ctypes.c_void_p)(('BASS_AAC_StreamCreateURL', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_AAC_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_AAC_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_AAC_StreamCreateFileUser', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_MP4_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_MP4_StreamCreateFile', bass_aac_module))
#HSTREAM BASSAACDEF(BASS_MP4_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_MP4_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_MP4_StreamCreateFileUser', bass_aac_module))
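# Hypothetical usage sketch, not part of the original file. It assumes the
# standard BASS entry points re-exported by the pybass module imported above
# (BASS_Init / BASS_ChannelPlay) and a local 'sample.aac' file; argument
# conventions should be verified against pybass itself before use:
#
#     if pybass.BASS_Init(-1, 44100, 0, 0, None):
#         stream = BASS_AAC_StreamCreateFile(False, b'sample.aac', 0, 0, 0)
#         if stream:
#             pybass.BASS_ChannelPlay(stream, False)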
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0
|
0a79e9ed00dd7cff3b0787278aa3e51a4698409f
| 31,754
|
py
|
Python
|
src/olympia/addons/views.py
|
tapaswenipathak/addons-server
|
b7085559a754248a8baade399d5a27f2c3e3ca7e
|
[
"BSD-3-Clause"
] | 1
|
2019-08-17T21:17:50.000Z
|
2019-08-17T21:17:50.000Z
|
src/olympia/addons/views.py
|
tapaswenipathak/addons-server
|
b7085559a754248a8baade399d5a27f2c3e3ca7e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/addons/views.py
|
tapaswenipathak/addons-server
|
b7085559a754248a8baade399d5a27f2c3e3ca7e
|
[
"BSD-3-Clause"
] | null | null | null |
import olympia.core.logger
from .decorators import addon_view_factory
from .models import Addon
log = olympia.core.logger.getLogger('z.addons')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_valid_disabled_pending_view = addon_view_factory(
qs=Addon.objects.valid_and_disabled_and_pending)
DEFAULT_FIND_REPLACEMENT_PATH = '/collections/mozilla/featured-add-ons/'
FIND_REPLACEMENT_SRC = 'find-replacement'
| 41.891821
| 79
| 0.653776
|
from collections import OrderedDict
from django import http
from django.db.models import Prefetch
from django.db.transaction import non_atomic_requests
from django.shortcuts import redirect
from django.utils.cache import patch_cache_control
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from elasticsearch_dsl import Q, query, Search
from rest_framework import exceptions, serializers
from rest_framework.decorators import action
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import GenericViewSet
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.amo.models import manual_order
from olympia.amo.urlresolvers import get_outgoing_url
from olympia.api.pagination import ESPageNumberPagination
from olympia.api.permissions import (
AllowAddonAuthor, AllowReadOnlyIfPublic, AllowRelatedObjectPermissions,
AllowReviewer, AllowReviewerUnlisted, AnyOf, GroupPermission)
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.search.filters import (
AddonAppQueryParam, AddonAppVersionQueryParam, AddonAuthorQueryParam,
AddonCategoryQueryParam, AddonGuidQueryParam, AddonTypeQueryParam,
AutoCompleteSortFilter,
ReviewedContentFilter, SearchParameterFilter, SearchQueryFilter,
SortingFilter)
from olympia.translations.query import order_by_translation
from olympia.versions.models import Version
from .decorators import addon_view_factory
from .indexers import AddonIndexer
from .models import Addon, CompatOverride, ReplacementAddon
from .serializers import (
AddonEulaPolicySerializer,
AddonSerializer, AddonSerializerWithUnlistedData, CompatOverrideSerializer,
ESAddonAutoCompleteSerializer, ESAddonSerializer, LanguageToolsSerializer,
ReplacementAddonSerializer, StaticCategorySerializer, VersionSerializer)
from .utils import (
get_addon_recommendations, get_addon_recommendations_invalid,
get_creatured_ids, get_featured_ids, is_outcome_recommended)
log = olympia.core.logger.getLogger('z.addons')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_valid_disabled_pending_view = addon_view_factory(
qs=Addon.objects.valid_and_disabled_and_pending)
class BaseFilter(object):
"""
Filters help generate querysets for add-on listings.
You have to define ``opts`` on the subclass as a sequence of (key, title)
pairs. The key is used in GET parameters and the title can be used in the
view.
The chosen filter field is combined with the ``base`` queryset using
the ``key`` found in request.GET. ``default`` should be a key in ``opts``
that's used if nothing good is found in request.GET.
"""
def __init__(self, request, base, key, default, model=Addon):
self.opts_dict = dict(self.opts)
self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
self.request = request
self.base_queryset = base
self.key = key
self.model = model
self.field, self.title = self.options(self.request, key, default)
self.qs = self.filter(self.field)
def options(self, request, key, default):
"""Get the (option, title) pair we want according to the request."""
if key in request.GET and (request.GET[key] in self.opts_dict or
request.GET[key] in self.extras_dict):
opt = request.GET[key]
else:
opt = default
if opt in self.opts_dict:
title = self.opts_dict[opt]
else:
title = self.extras_dict[opt]
return opt, title
def all(self):
"""Get a full mapping of {option: queryset}."""
return dict((field, self.filter(field)) for field in dict(self.opts))
def filter(self, field):
"""Get the queryset for the given field."""
return getattr(self, 'filter_{0}'.format(field))()
def filter_featured(self):
ids = self.model.featured_random(self.request.APP, self.request.LANG)
return manual_order(self.base_queryset, ids, 'addons.id')
def filter_free(self):
if self.model == Addon:
return self.base_queryset.top_free(self.request.APP, listed=False)
else:
return self.base_queryset.top_free(listed=False)
def filter_paid(self):
if self.model == Addon:
return self.base_queryset.top_paid(self.request.APP, listed=False)
else:
return self.base_queryset.top_paid(listed=False)
def filter_popular(self):
return self.base_queryset.order_by('-weekly_downloads')
def filter_downloads(self):
return self.filter_popular()
def filter_users(self):
return self.base_queryset.order_by('-average_daily_users')
def filter_created(self):
return self.base_queryset.order_by('-created')
def filter_updated(self):
return self.base_queryset.order_by('-last_updated')
def filter_rating(self):
return self.base_queryset.order_by('-bayesian_rating')
def filter_hotness(self):
return self.base_queryset.order_by('-hotness')
def filter_name(self):
return order_by_translation(self.base_queryset.all(), 'name')
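# Hypothetical illustration, not in the original file, of the BaseFilter
# contract described in its docstring: a subclass only declares `opts`, and the
# option chosen from request.GET is dispatched to the filter_* methods above.
class _ExampleHomeFilter(BaseFilter):
    opts = (('downloads', 'Weekly Downloads'),
            ('rating', 'Top Rated'))
# e.g. in a view:
#     f = _ExampleHomeFilter(request, Addon.objects.public(),
#                            key='sort', default='downloads')
#     f.qs is the sorted queryset and f.title the human-readable title.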
DEFAULT_FIND_REPLACEMENT_PATH = '/collections/mozilla/featured-add-ons/'
FIND_REPLACEMENT_SRC = 'find-replacement'
def find_replacement_addon(request):
guid = request.GET.get('guid')
if not guid:
raise http.Http404
try:
replacement = ReplacementAddon.objects.get(guid=guid)
path = replacement.path
except ReplacementAddon.DoesNotExist:
path = DEFAULT_FIND_REPLACEMENT_PATH
else:
if replacement.has_external_url():
# It's an external URL:
return redirect(get_outgoing_url(path))
replace_url = '%s%s?src=%s' % (
('/' if not path.startswith('/') else ''), path, FIND_REPLACEMENT_SRC)
return redirect(replace_url, permanent=False)
class AddonViewSet(RetrieveModelMixin, GenericViewSet):
permission_classes = [
AnyOf(AllowReadOnlyIfPublic, AllowAddonAuthor,
AllowReviewer, AllowReviewerUnlisted),
]
serializer_class = AddonSerializer
serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData
lookup_value_regex = '[^/]+' # Allow '.' for email-like guids.
def get_queryset(self):
"""Return queryset to be used for the view."""
# Special case: admins - and only admins - can see deleted add-ons.
# This is handled outside a permission class because that condition
# would pollute all other classes otherwise.
if (self.request.user.is_authenticated and
acl.action_allowed(self.request,
amo.permissions.ADDONS_VIEW_DELETED)):
return Addon.unfiltered.all()
# Permission classes disallow access to non-public/unlisted add-ons
# unless logged in as a reviewer/addon owner/admin, so we don't have to
# filter the base queryset here.
return Addon.objects.all()
def get_serializer_class(self):
# Override serializer to use serializer_class_with_unlisted_data if
# we are allowed to access unlisted data.
obj = getattr(self, 'instance')
request = self.request
if (acl.check_unlisted_addons_reviewer(request) or
(obj and request.user.is_authenticated and
obj.authors.filter(pk=request.user.pk).exists())):
return self.serializer_class_with_unlisted_data
return self.serializer_class
def get_lookup_field(self, identifier):
return Addon.get_lookup_field(identifier)
def get_object(self):
identifier = self.kwargs.get('pk')
self.lookup_field = self.get_lookup_field(identifier)
self.kwargs[self.lookup_field] = identifier
self.instance = super(AddonViewSet, self).get_object()
return self.instance
def check_object_permissions(self, request, obj):
"""
Check if the request should be permitted for a given object.
Raises an appropriate exception if the request is not permitted.
Calls DRF implementation, but adds `is_disabled_by_developer` to the
exception being thrown so that clients can tell the difference between
a 401/403 returned because an add-on has been disabled by their
developer or something else.
"""
try:
super(AddonViewSet, self).check_object_permissions(request, obj)
except exceptions.APIException as exc:
exc.detail = {
'detail': exc.detail,
'is_disabled_by_developer': obj.disabled_by_user,
'is_disabled_by_mozilla': obj.status == amo.STATUS_DISABLED,
}
raise exc
@action(detail=True)
def eula_policy(self, request, pk=None):
obj = self.get_object()
serializer = AddonEulaPolicySerializer(
obj, context=self.get_serializer_context())
return Response(serializer.data)
class AddonChildMixin(object):
"""Mixin containing method to retrieve the parent add-on object."""
def get_addon_object(self, permission_classes=None, lookup='addon_pk'):
"""Return the parent Addon object using the URL parameter passed
to the view.
        `permission_classes` can be passed to change which permission classes
        the parent viewset will use when loading the Addon object; otherwise
        AddonViewSet.permission_classes will be used."""
if hasattr(self, 'addon_object'):
return self.addon_object
if permission_classes is None:
permission_classes = AddonViewSet.permission_classes
self.addon_object = AddonViewSet(
request=self.request, permission_classes=permission_classes,
kwargs={'pk': self.kwargs[lookup]}).get_object()
return self.addon_object
class AddonVersionViewSet(AddonChildMixin, RetrieveModelMixin,
ListModelMixin, GenericViewSet):
# Permissions are always checked against the parent add-on in
# get_addon_object() using AddonViewSet.permission_classes so we don't need
# to set any here. Some extra permission classes are added dynamically
# below in check_permissions() and check_object_permissions() depending on
# what the client is requesting to see.
permission_classes = []
serializer_class = VersionSerializer
def check_permissions(self, request):
requested = self.request.GET.get('filter')
if self.action == 'list':
if requested == 'all_with_deleted':
# To see deleted versions, you need Addons:ViewDeleted.
self.permission_classes = [
GroupPermission(amo.permissions.ADDONS_VIEW_DELETED)]
elif requested == 'all_with_unlisted':
# To see unlisted versions, you need to be add-on author or
# unlisted reviewer.
self.permission_classes = [AnyOf(
AllowReviewerUnlisted, AllowAddonAuthor)]
elif requested == 'all_without_unlisted':
# To see all listed versions (not just public ones) you need to
# be add-on author or reviewer.
self.permission_classes = [AnyOf(
AllowReviewer, AllowReviewerUnlisted, AllowAddonAuthor)]
# When listing, we can't use AllowRelatedObjectPermissions() with
# check_permissions(), because AllowAddonAuthor needs an author to
# do the actual permission check. To work around that, we call
# super + check_object_permission() ourselves, passing down the
# addon object directly.
return super(AddonVersionViewSet, self).check_object_permissions(
request, self.get_addon_object())
super(AddonVersionViewSet, self).check_permissions(request)
def check_object_permissions(self, request, obj):
# If the instance is marked as deleted and the client is not allowed to
# see deleted instances, we want to return a 404, behaving as if it
# does not exist.
if (obj.deleted and
not GroupPermission(amo.permissions.ADDONS_VIEW_DELETED).
has_object_permission(request, self, obj)):
raise http.Http404
if obj.channel == amo.RELEASE_CHANNEL_UNLISTED:
# If the instance is unlisted, only allow unlisted reviewers and
            # authors.
self.permission_classes = [
AllowRelatedObjectPermissions(
'addon', [AnyOf(AllowReviewerUnlisted, AllowAddonAuthor)])
]
elif not obj.is_public():
# If the instance is disabled, only allow reviewers and authors.
self.permission_classes = [
AllowRelatedObjectPermissions(
'addon', [AnyOf(AllowReviewer, AllowAddonAuthor)])
]
super(AddonVersionViewSet, self).check_object_permissions(request, obj)
def get_queryset(self):
"""Return the right base queryset depending on the situation."""
requested = self.request.GET.get('filter')
valid_filters = (
'all_with_deleted',
'all_with_unlisted',
'all_without_unlisted',
)
if requested is not None:
if self.action != 'list':
raise serializers.ValidationError(
'The "filter" parameter is not valid in this context.')
elif requested not in valid_filters:
raise serializers.ValidationError(
'Invalid "filter" parameter specified.')
# By default we restrict to valid, listed versions. Some filtering
# options are available when listing, and in addition, when returning
# a single instance, we don't filter at all.
if requested == 'all_with_deleted' or self.action != 'list':
queryset = Version.unfiltered.all()
elif requested == 'all_with_unlisted':
queryset = Version.objects.all()
elif requested == 'all_without_unlisted':
queryset = Version.objects.filter(
channel=amo.RELEASE_CHANNEL_LISTED)
else:
# By default, we rely on queryset filtering to hide
# non-public/unlisted versions. get_queryset() might override this
# if we are asked to see non-valid, deleted and/or unlisted
# versions explicitly.
queryset = Version.objects.filter(
files__status=amo.STATUS_APPROVED,
channel=amo.RELEASE_CHANNEL_LISTED).distinct()
# Filter with the add-on.
return queryset.filter(addon=self.get_addon_object())
class AddonSearchView(ListAPIView):
authentication_classes = []
filter_backends = [
ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
SortingFilter,
]
pagination_class = ESPageNumberPagination
permission_classes = []
serializer_class = ESAddonSerializer
def get_queryset(self):
qset = Search(
using=amo.search.get_es(),
index=AddonIndexer.get_index_alias(),
doc_type=AddonIndexer.get_doctype_name()).extra(
_source={'excludes': AddonIndexer.hidden_fields}).params(
search_type='dfs_query_then_fetch')
return qset
@classmethod
def as_view(cls, **kwargs):
view = super(AddonSearchView, cls).as_view(**kwargs)
return non_atomic_requests(view)
class AddonAutoCompleteSearchView(AddonSearchView):
pagination_class = None
serializer_class = ESAddonAutoCompleteSerializer
filter_backends = [
ReviewedContentFilter, SearchQueryFilter, SearchParameterFilter,
AutoCompleteSortFilter,
]
def get_queryset(self):
# Minimal set of fields from ES that we need to build our results.
# It's the opposite tactic used by the regular search endpoint, which
# excludes a specific set of fields - because we know that autocomplete
# only needs to return very few things.
included_fields = (
'icon_type', # Needed for icon_url.
'id', # Needed for... id
'is_recommended',
'modified', # Needed for icon_url.
'name_translations', # Needed for... name.
'default_locale', # Needed for translations to work.
'slug', # Needed for url.
'type', # Needed to attach the Persona for icon_url (sadly).
)
qset = (
Search(
using=amo.search.get_es(),
index=AddonIndexer.get_index_alias(),
doc_type=AddonIndexer.get_doctype_name())
.extra(_source={'includes': included_fields}))
return qset
def list(self, request, *args, **kwargs):
# Ignore pagination (slice directly) but do wrap the data in a
# 'results' property to mimic what the search API does.
queryset = self.filter_queryset(self.get_queryset())[:10]
serializer = self.get_serializer(queryset, many=True)
return Response({'results': serializer.data})
class AddonFeaturedView(GenericAPIView):
authentication_classes = []
permission_classes = []
serializer_class = AddonSerializer
# We accept the 'page_size' parameter but we do not allow pagination for
# this endpoint since the order is random.
pagination_class = None
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(queryset, many=True)
# Simulate pagination-like results, without actual pagination.
return Response({'results': serializer.data})
@classmethod
def as_view(cls, **kwargs):
view = super(AddonFeaturedView, cls).as_view(**kwargs)
return non_atomic_requests(view)
def get_queryset(self):
return Addon.objects.valid()
def filter_queryset(self, queryset):
# We can pass the optional lang parameter to either get_creatured_ids()
# or get_featured_ids() below to get locale-specific results in
# addition to the generic ones.
lang = self.request.GET.get('lang')
if 'category' in self.request.GET:
# If a category is passed then the app and type parameters are
# mandatory because we need to find a category in the constants to
# pass to get_creatured_ids(), and category slugs are not unique.
# AddonCategoryQueryParam parses the request parameters for us to
# determine the category.
try:
categories = AddonCategoryQueryParam(self.request).get_value()
except ValueError:
raise exceptions.ParseError(
'Invalid app, category and/or type parameter(s).')
ids = []
for category in categories:
ids.extend(get_creatured_ids(category, lang))
else:
# If no category is passed, only the app parameter is mandatory,
# because get_featured_ids() needs it to find the right collection
# to pick addons from. It can optionally filter by type, so we
# parse request for that as well.
try:
app = AddonAppQueryParam(
self.request).get_object_from_reverse_dict()
types = None
if 'type' in self.request.GET:
types = AddonTypeQueryParam(self.request).get_value()
except ValueError:
raise exceptions.ParseError(
'Invalid app, category and/or type parameter(s).')
ids = get_featured_ids(app, lang=lang, types=types)
# ids is going to be a random list of ids, we just slice it to get
# the number of add-ons that was requested. We do it before calling
# manual_order(), since it'll use the ids as part of a id__in filter.
try:
page_size = int(
self.request.GET.get('page_size', api_settings.PAGE_SIZE))
except ValueError:
raise exceptions.ParseError('Invalid page_size parameter')
ids = ids[:page_size]
return manual_order(queryset, ids, 'addons.id')
class StaticCategoryView(ListAPIView):
authentication_classes = []
pagination_class = None
permission_classes = []
serializer_class = StaticCategorySerializer
def get_queryset(self):
return sorted(CATEGORIES_BY_ID.values(), key=lambda x: x.id)
@classmethod
def as_view(cls, **kwargs):
view = super(StaticCategoryView, cls).as_view(**kwargs)
return non_atomic_requests(view)
def finalize_response(self, request, response, *args, **kwargs):
response = super(StaticCategoryView, self).finalize_response(
request, response, *args, **kwargs)
patch_cache_control(response, max_age=60 * 60 * 6)
return response
class LanguageToolsView(ListAPIView):
authentication_classes = []
pagination_class = None
permission_classes = []
serializer_class = LanguageToolsSerializer
@classmethod
def as_view(cls, **initkwargs):
"""The API is read-only so we can turn off atomic requests."""
return non_atomic_requests(
super(LanguageToolsView, cls).as_view(**initkwargs))
def get_query_params(self):
"""
Parse query parameters that this API supports:
- app (mandatory)
- type (optional)
- appversion (optional, makes type mandatory)
- author (optional)
Can raise ParseError() in case a mandatory parameter is missing or a
parameter is invalid.
Returns a dict containing application (int), types (tuple or None),
appversions (dict or None) and author (string or None).
"""
# app parameter is mandatory when calling this API.
try:
application = AddonAppQueryParam(self.request).get_value()
except ValueError:
raise exceptions.ParseError('Invalid or missing app parameter.')
# appversion parameter is optional.
if AddonAppVersionQueryParam.query_param in self.request.GET:
try:
value = AddonAppVersionQueryParam(self.request).get_values()
appversions = {
'min': value[1],
'max': value[2]
}
except ValueError:
raise exceptions.ParseError('Invalid appversion parameter.')
else:
appversions = None
# type is optional, unless appversion is set. That's because the way
# dicts and language packs have their compatibility info set in the
# database differs, so to make things simpler for us we force clients
# to filter by type if they want appversion filtering.
if AddonTypeQueryParam.query_param in self.request.GET or appversions:
try:
addon_types = tuple(
AddonTypeQueryParam(self.request).get_value())
except ValueError:
raise exceptions.ParseError(
'Invalid or missing type parameter while appversion '
'parameter is set.')
else:
addon_types = (amo.ADDON_LPAPP, amo.ADDON_DICT)
# author is optional. It's a string representing the username(s) we're
# filtering on.
if AddonAuthorQueryParam.query_param in self.request.GET:
author = AddonAuthorQueryParam(self.request).get_value()
else:
author = None
return {
'application': application,
'types': addon_types,
'appversions': appversions,
'author': author,
}
def get_queryset(self):
"""
Return queryset to use for this view, depending on query parameters.
"""
# application, addon_types, appversions
params = self.get_query_params()
if params['types'] == (amo.ADDON_LPAPP,) and params['appversions']:
qs = self.get_language_packs_queryset_with_appversions(
params['application'], params['appversions'])
else:
# appversions filtering only makes sense for language packs only,
# so it's ignored here.
qs = self.get_queryset_base(params['application'], params['types'])
if params['author']:
qs = qs.filter(
addonuser__user__username__in=params['author'],
addonuser__listed=True).distinct()
return qs
def get_queryset_base(self, application, addon_types):
"""
Return base queryset to be used as the starting point in both
get_queryset() and get_language_packs_queryset_with_appversions().
"""
return (
Addon.objects.public()
.filter(appsupport__app=application, type__in=addon_types,
target_locale__isnull=False)
.exclude(target_locale='')
# Deactivate default transforms which fetch a ton of stuff we
# don't need here like authors, previews or current version.
# It would be nice to avoid translations entirely, because the
# translations transformer is going to fetch a lot of translations
# we don't need, but some language packs or dictionaries have
# custom names, so we can't use a generic one for them...
.only_translations()
# Since we're fetching everything with no pagination, might as well
# not order it.
.order_by()
)
def get_language_packs_queryset_with_appversions(
self, application, appversions):
"""
Return queryset to use specifically when requesting language packs
compatible with a given app + versions.
application is an application id, and appversions is a dict with min
and max keys pointing to application versions expressed as ints.
"""
# Base queryset.
qs = self.get_queryset_base(application, (amo.ADDON_LPAPP,))
# Version queryset we'll prefetch once for all results. We need to
# find the ones compatible with the app+appversion requested, and we
# can avoid loading translations by removing transforms and then
# re-applying the default one that takes care of the files and compat
# info.
versions_qs = (
Version.objects
.latest_public_compatible_with(application, appversions)
.no_transforms().transform(Version.transformer))
return (
qs.prefetch_related(Prefetch('versions',
to_attr='compatible_versions',
queryset=versions_qs))
.filter(versions__apps__application=application,
versions__apps__min__version_int__lte=appversions['min'],
versions__apps__max__version_int__gte=appversions['max'],
versions__channel=amo.RELEASE_CHANNEL_LISTED,
versions__files__status=amo.STATUS_APPROVED)
.distinct()
)
@method_decorator(cache_page(60 * 60 * 24))
def dispatch(self, *args, **kwargs):
return super(LanguageToolsView, self).dispatch(*args, **kwargs)
def list(self, request, *args, **kwargs):
# Ignore pagination (return everything) but do wrap the data in a
# 'results' property to mimic what the default implementation of list()
# does in DRF.
queryset = self.filter_queryset(self.get_queryset())
serializer = self.get_serializer(queryset, many=True)
return Response({'results': serializer.data})
class ReplacementAddonView(ListAPIView):
authentication_classes = []
queryset = ReplacementAddon.objects.all()
serializer_class = ReplacementAddonSerializer
class CompatOverrideView(ListAPIView):
"""This view is used by Firefox so it's performance-critical.
    Every Firefox client requests the list of overrides approximately once per day.
    Firefox requests the overrides via a list of GUIDs, which makes caching
    hard because the variety of possible GUID combinations prevents us from
    simply adding some dumb caching and requires us to resolve cache misses.
"""
queryset = CompatOverride.objects.all()
serializer_class = CompatOverrideSerializer
@classmethod
def as_view(cls, **initkwargs):
"""The API is read-only so we can turn off atomic requests."""
return non_atomic_requests(
super(CompatOverrideView, cls).as_view(**initkwargs))
def get_guids(self):
# Use the same Filter we use for AddonSearchView for consistency.
guid_filter = AddonGuidQueryParam(self.request)
return guid_filter.get_value()
def filter_queryset(self, queryset):
guids = self.get_guids()
if not guids:
raise exceptions.ParseError(
'Empty, or no, guid parameter provided.')
# Evaluate the queryset and cast it into a list.
# This will force Django to simply use len(queryset) instead of
# calling .count() on it and avoids an additional COUNT query.
# The amount of GUIDs we should get in real-life won't be paginated
# most of the time so it's safe to simply evaluate the query.
# The advantage here is that we are saving ourselves a `COUNT` query
# and these are expensive.
return list(queryset.filter(guid__in=guids).transform(
CompatOverride.transformer).order_by('-pk'))
class AddonRecommendationView(AddonSearchView):
filter_backends = [ReviewedContentFilter]
ab_outcome = None
fallback_reason = None
pagination_class = None
def get_paginated_response(self, data):
data = data[:4] # taar is only supposed to return 4 anyway.
return Response(OrderedDict([
('outcome', self.ab_outcome),
('fallback_reason', self.fallback_reason),
('page_size', 1),
('page_count', 1),
('count', len(data)),
('next', None),
('previous', None),
('results', data),
]))
def filter_queryset(self, qs):
qs = super(AddonRecommendationView, self).filter_queryset(qs)
guid_param = self.request.GET.get('guid')
taar_enable = self.request.GET.get('recommended', '').lower() == 'true'
guids, self.ab_outcome, self.fallback_reason = (
get_addon_recommendations(guid_param, taar_enable))
results_qs = qs.query(query.Bool(must=[Q('terms', guid=guids)]))
results_qs.execute() # To cache the results.
if results_qs.count() != 4 and is_outcome_recommended(self.ab_outcome):
guids, self.ab_outcome, self.fallback_reason = (
get_addon_recommendations_invalid())
return qs.query(query.Bool(must=[Q('terms', guid=guids)]))
return results_qs
def paginate_queryset(self, queryset):
# We don't need pagination for the fixed number of results.
return queryset
| 0
| 1,135
| 0
| 27,150
| 0
| 590
| 0
| 1,496
| 940
|
3840052143a4c80cb731fce500d7e4cb9f141b98
| 11,603
|
py
|
Python
|
scheduling/methods/k_means_NN_naive.py
|
CORE-Robotics-Lab/Personalized_Neural_Trees
|
3e8dd12fe4fc850be65c96c847eb143ef3bcdc2e
|
[
"MIT"
] | 3
|
2021-05-22T19:25:01.000Z
|
2021-12-01T07:59:56.000Z
|
scheduling/methods/k_means_NN_naive.py
|
CORE-Robotics-Lab/Personalized_Neural_Trees
|
3e8dd12fe4fc850be65c96c847eb143ef3bcdc2e
|
[
"MIT"
] | null | null | null |
scheduling/methods/k_means_NN_naive.py
|
CORE-Robotics-Lab/Personalized_Neural_Trees
|
3e8dd12fe4fc850be65c96c847eb143ef3bcdc2e
|
[
"MIT"
] | null | null | null |
"""
Created by Rohan Paleja on September 23, 2019
Nikolaidis et al. benchmark
"""
import torch
# sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo')
import numpy as np
# sys.path.insert(0, '../')
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
# noinspection PyTypeChecker,PyArgumentList
def main():
"""
entry point for file
:return:
"""
res = []
for i in range(3):
trainer = NNTrain()
trainer.train()
out = trainer.evaluate_on_test_data()
res.append(out)
print(np.mean(res))
print(np.std(res))
if __name__ == '__main__':
main()
| 40.010345
| 171
| 0.620271
|
"""
Created by Rohan Paleja on September 23, 2019
Nikolaidis et al. benchmark
"""
import torch
import torch.nn.functional as F
# sys.path.insert(0, '/home/Anonymous/PycharmProjects/bayesian_prolo')
import numpy as np
import pickle
from torch.autograd import Variable
from utils.naive_utils import load_in_naive_data, find_which_schedule_this_belongs_to
from utils.hri_utils import save_performance_results
from sklearn.cluster import KMeans
from scheduling.methods.train_autoencoder import Autoencoder, AutoEncoderTrain
# sys.path.insert(0, '../')
import itertools
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(0)
np.random.seed(0)
from scheduling.methods.NN_naive import NNSmall
# noinspection PyTypeChecker,PyArgumentList
class NNTrain:
"""
class structure to train the NN for a certain amount of schedules.
This class handles training the NN, evaluating the NN, and saving the results
"""
def __init__(self):
self.num_schedules = 150
self.num_test_schedules = 100
self.total_loss_array = []
self.X_train_naive, self.Y_train_naive, self.schedule_array_train_naive, self.X_test_naive, self.Y_test_naive, self.schedule_array_test_naive = load_in_naive_data(
250,250)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model1 = NNSmall().to(device)
model2 = NNSmall().to(device)
model3 = NNSmall().to(device)
self.models = [model1, model2, model3]
opt1 = torch.optim.Adam(self.models[0].parameters(), lr=.001)
opt2 = torch.optim.Adam(self.models[1].parameters(), lr=.001)
opt3 = torch.optim.Adam(self.models[2].parameters(), lr=.001)
self.optimizers = [opt1, opt2, opt3]
self.when_to_save = 1000
schedule_matrix_load_directory = '/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/results/'+str(self.num_schedules) + 'matrixes.pkl'
self.matrices = pickle.load(open(schedule_matrix_load_directory, "rb"))
self.kmeans_model, self.label = self.cluster_matrices(self.matrices, self.num_schedules)
self.X_train_naive, \
self.Y_train_naive, \
self.schedule_array_train_naive, = self.sample_data(150)
self.X_test_naive, \
self.Y_test_naive, \
self.schedule_array_test_naive, = self.sample_test_data(100)
self.num_test_schedules = 100
def sample_data(self, size):
# return self.X_train_naive[0:size * 20 * 20], \
# self.Y_train_naive[0:size * 20 * 20], \
# self.schedule_array_train_naive[0:size], \
# self.start_of_each_set_twenty_train[0:size * 20]
if size == 250:
set_of_twenty = 0
else:
set_of_twenty = np.random.randint(250-size)
self.sample_min = set_of_twenty * 20
return self.X_train_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.Y_train_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.schedule_array_train_naive[set_of_twenty:set_of_twenty+size]
def sample_test_data(self, size):
# return self.X_train_naive[0:size * 20 * 20], \
# self.Y_train_naive[0:size * 20 * 20], \
# self.schedule_array_train_naive[0:size], \
# self.start_of_each_set_twenty_train[0:size * 20]
if size == 250:
set_of_twenty = 0
else:
set_of_twenty = np.random.randint(250-size)
self.sample_test_min = set_of_twenty * 20
return self.X_test_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.Y_test_naive[set_of_twenty*20:set_of_twenty*20 + size * 20], \
self.schedule_array_test_naive[set_of_twenty:set_of_twenty+size]
@staticmethod
def cluster_matrices(matrices, num_schedules):
"""
clusters the matrix schedules
:param matrices:
:param num_schedules:
:return:
"""
# vectorize each matrix
vectorized_set = []
for i in matrices:
vectorized = i.reshape(20 * 2048, 1)
vectorized_set.append(vectorized)
kmeans = KMeans(n_clusters=3, random_state=0) # random state makes it deterministic
# Fitting the input data
new_set = np.hstack(tuple(vectorized_set)).reshape(num_schedules, 20 * 2048)
kmeans_model = kmeans.fit(np.asarray(new_set))
labels = kmeans_model.predict(np.asarray(new_set))
return kmeans_model, labels
def train(self):
"""
Trains NN.
Randomly samples a schedule and timestep within that schedule, and passes in the corresponding data in an attempt to classify which task was scheduled
:return:
"""
epochs = 200000 * 3
for epoch in range(epochs):
# sample a timestep before the cutoff for cross_validation
rand_timestep_within_sched = np.random.randint(len(self.X_train_naive))
input_nn = self.X_train_naive[rand_timestep_within_sched]
truth_nn = self.Y_train_naive[rand_timestep_within_sched]
which_schedule = find_which_schedule_this_belongs_to(self.schedule_array_train_naive, rand_timestep_within_sched+self.sample_min)
cluster_num = self.label[which_schedule]
# iterate over pairwise comparisons
if torch.cuda.is_available():
input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda()) # change to 5 to increase batch size
truth = Variable(torch.Tensor(np.asarray(truth_nn).reshape(1)).cuda().long())
else:
input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))
truth = Variable(torch.Tensor(np.asarray(truth_nn).reshape(1)).long())
self.optimizers[cluster_num].zero_grad()
output = self.models[cluster_num].forward(input_nn)
loss = F.cross_entropy(output, truth)
loss.backward()
# torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
self.optimizers[cluster_num].step()
self.total_loss_array.append(loss.item())
if epoch % 500 == 499:
print('loss at', epoch, ', total loss (average for each 100, averaged)', np.mean(self.total_loss_array[-100:]))
# self.save_trained_nets(str(epoch))
@staticmethod
def create_iterables():
"""
adds all possible state combinations
:return:
"""
iterables = [[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1], [0, 1],
[0, 1], [0, 1]]
states = []
for t in itertools.product(*iterables):
states.append(t)
return states
def pass_in_embedding_out_state_ID(self, states, binary):
"""
        Pass in a binary embedding, and it'll return the state id
:param states
:param binary:
:return:
"""
binary_as_tuple = tuple(binary)
index = states.index(binary_as_tuple)
return index
def evaluate_on_test_data(self):
"""
Evaluate performance of a trained network.
This is tested on 20% of the data and will be stored in a text file.
:return:
"""
# confusion_matrix = np.zeros((20,20))
autoencoder_class = AutoEncoderTrain(self.num_schedules)
checkpoint = torch.load('/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/models/Autoencoder150.tar')
autoencoder_class.model.load_state_dict(checkpoint['nn_state_dict'])
states = self.create_iterables()
prediction_accuracy = [0, 0]
percentage_accuracy_top1 = []
percentage_accuracy_top3 = []
mean_input = [1.3277743, 0.32837677, 1.4974482, -1.3519306, -0.64621973, 0.10534518, -2.338118, -2.7345326, 1.7558736, -3.0746384, -3.485554]
for i, schedule in enumerate(self.schedule_array_test_naive):
current_schedule_matrix = np.zeros((2048, 20))
for count in range(schedule[0]-self.sample_test_min, schedule[1]-self.sample_test_min + 1):
if current_schedule_matrix.sum() == 0:
cluster_num = self.kmeans_model.predict(current_schedule_matrix.reshape(1, -1))
else:
matrix = np.divide(current_schedule_matrix, current_schedule_matrix.sum())
cluster_num = self.kmeans_model.predict(matrix.reshape(1, -1))
net_input = self.X_test_naive[count]
truth = self.Y_test_naive[count]
if torch.cuda.is_available():
input_nn = Variable(torch.Tensor(np.asarray(net_input).reshape(1, 242)).cuda())
truth = Variable(torch.Tensor(np.asarray(truth).reshape(1)).cuda().long())
else:
input_nn = Variable(torch.Tensor(np.asarray(net_input).reshape(1, 242)))
truth = Variable(torch.Tensor(np.asarray(truth).reshape(1)))
# forward
output = self.models[int(cluster_num)].forward(input_nn)
index = torch.argmax(output).item()
# confusion_matrix[truth][index] += 1
# top 3
_, top_three = torch.topk(output, 3)
if index == truth.item():
prediction_accuracy[0] += 1
if truth.item() in top_three.detach().cpu().tolist()[0]:
prediction_accuracy[1] += 1
# update matrix
embedding_copy = np.zeros((1, 11))
input_element = autoencoder_class.model.forward_only_encoding(input_nn)
for z, each_element in enumerate(mean_input):
if each_element > input_element[0][z].item():
embedding_copy[0][z] = 0
else:
embedding_copy[0][z] = 1
index = self.pass_in_embedding_out_state_ID(states, embedding_copy[0])
action = truth.item()
current_schedule_matrix[index][int(action)] += 1
print('Prediction Accuracy: top1: ', prediction_accuracy[0] / 20, ' top3: ', prediction_accuracy[1] / 20)
print('schedule num:', i)
percentage_accuracy_top1.append(prediction_accuracy[0] / 20)
percentage_accuracy_top3.append(prediction_accuracy[1] / 20)
prediction_accuracy = [0, 0]
print(np.mean(percentage_accuracy_top1))
# save_performance_results(percentage_accuracy_top1, percentage_accuracy_top3, 'kmeans_to_NN_naive')
return np.mean(percentage_accuracy_top1)
def save_trained_nets(self, name):
"""
saves the model
:return:
"""
torch.save({'nn1_state_dict': self.models[0].state_dict(),
'nn2_state_dict': self.models[1].state_dict(),
'nn3_state_dict': self.models[2].state_dict()},
'/home/Anonymous/PycharmProjects/bayesian_prolo/scheduling_env/models/k_means_NN_' + name + '.tar')
def main():
"""
entry point for file
:return:
"""
res = []
for i in range(3):
trainer = NNTrain()
trainer.train()
out = trainer.evaluate_on_test_data()
res.append(out)
print(np.mean(res))
print(np.std(res))
if __name__ == '__main__':
main()
| 0
| 1,117
| 0
| 9,366
| 0
| 0
| 0
| 202
| 220
|
b388f68a9de50b2d68147365e456767b8e775cd2
| 1,167
|
py
|
Python
|
candy_collect.py
|
itiB/poke-scripts
|
e2da1356ee8000c7345682d4c07709481f2044f8
|
[
"MIT"
] | 2
|
2020-02-08T13:55:46.000Z
|
2020-07-21T13:17:26.000Z
|
candy_collect.py
|
itiB/poke-scripts
|
e2da1356ee8000c7345682d4c07709481f2044f8
|
[
"MIT"
] | null | null | null |
candy_collect.py
|
itiB/poke-scripts
|
e2da1356ee8000c7345682d4c07709481f2044f8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import serial
from time import sleep
parser = argparse.ArgumentParser()
parser.add_argument('port')
args = parser.parse_args()
sleep_time = 50
ser = serial.Serial(args.port, 9600)
sleep(3)
send('Button LCLICK', 0.1)
try:
while 1:
candyCorrect()
except KeyboardInterrupt:
send('RELEASE')
ser.close()
| 19.131148
| 51
| 0.552699
|
#!/usr/bin/env python3
import argparse
import serial
import time
from time import sleep
import datetime
parser = argparse.ArgumentParser()
parser.add_argument('port')
args = parser.parse_args()
sleep_time = 50
def send(msg, duration=0):
print(msg)
ser.write(f'{msg}\r\n'.encode('utf-8'))
sleep(duration)
ser.write(b'RELEASE\r\n')
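# Usage sketch (assumes firmware on the serial device that understands the
# 'Button ...', 'LY ...' and 'RELEASE' commands used throughout this script):
#   send('Button A', 0.1)   # hold A for roughly 0.1 s, then release
#   send('LY MIN', 0.5)     # hold the left stick down for roughly 0.5 s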
ser = serial.Serial(args.port, 9600)
def candyCorrect():
send('LY MAX', 0.1)
sleep(0.3)
send('Button A', 0.1)
sleep(1)
send('Button B', 0.1)
sleep(0.8)
send('LY MIN', 0.1)
sleep(0.5)
send('Button A', 0.1)
sleep(0.3)
send('LY MAX', 0.1)
sleep(0.2)
send('Button A', 0.1)
sleep(0.3)
send('Button A', 0.1)
sleep(0.3)
send('Button A', 0.1)
sleep(2.5)
send('Button A', 0.1)
sleep(1.0)
send('Button A', 0.1)
for i in range(0, sleep_time):
sleep(1)
if i % 10 == 0:
            print(f'あと {sleep_time - i}秒 スリープします')  # "Sleeping for {sleep_time - i} more seconds"
sleep(3)
send('Button LCLICK', 0.1)
try:
while 1:
candyCorrect()
except KeyboardInterrupt:
send('RELEASE')
ser.close()
| 30
| 0
| 0
| 0
| 0
| 686
| 0
| -16
| 96
|
a4ef9bd877c250ea0e460a024503d8e819218c76
| 3,683
|
py
|
Python
|
utils_ic.py
|
kkkgabriel/50039Homework3
|
69d8f36f60868cac64bb1d1682eba34a548f3565
|
[
"MIT"
] | null | null | null |
utils_ic.py
|
kkkgabriel/50039Homework3
|
69d8f36f60868cac64bb1d1682eba34a548f3565
|
[
"MIT"
] | null | null | null |
utils_ic.py
|
kkkgabriel/50039Homework3
|
69d8f36f60868cac64bb1d1682eba34a548f3565
|
[
"MIT"
] | null | null | null |
import torch
from torchvision import transforms
from PIL import Image
# Define function to read cat names
# Define function to read data
# Define processing testing image function
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a torch.Tensor with an added batch dimension
'''
# TODO: Process a PIL image for use in a PyTorch model
# Resize and crop image
im = Image.open(image)
preprocess = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
im_tensor = preprocess(im)
im_tensor.unsqueeze_(0)
return im_tensor
# Define prediction function
def predict(image_path, model, topk, device, cat_to_name):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
model.to(device)
model.eval()
# TODO: Implement the code to predict the class from an image file
img = process_image(image_path)
img = img.to(device)
output = model.forward(img)
ps = torch.exp(output)
probs, idxs = ps.topk(topk)
idx_to_class = dict((v,k) for k, v in model.classifier.class_to_idx.items())
classes = [v for k, v in idx_to_class.items() if k in idxs.to('cpu').numpy()]
if cat_to_name:
classes = [cat_to_name[str(i + 1)] for c, i in \
model.classifier.class_to_idx.items() if c in classes]
print('Probabilities:', probs.data.cpu().numpy()[0].tolist())
print('Classes:', classes)
| 41.852273
| 88
| 0.581048
|
import json
import torch
from torchvision import datasets, transforms
from PIL import Image
# Define function to read cat names
def read_jason(filename):
with open(filename, 'r') as f:
cat_to_name = json.load(f)
return cat_to_name
# Define function to read data
def load_data(data_dir):
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Define your transforms for the training, validation, and testing sets
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_valid_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
test_data = datasets.ImageFolder(test_dir, transform=test_valid_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform=test_valid_transforms)
    # Using the image datasets and the transforms, define the dataloaders
trainloader = torch.utils.data.DataLoader(train_data, batch_size=16, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=32)
return trainloader, testloader, validloader, train_data
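# Usage sketch (hypothetical data_dir containing train/valid/test subfolders):
#   trainloader, testloader, validloader, train_data = load_data('flowers')
#   images, labels = next(iter(trainloader))   # images shape: [16, 3, 224, 224]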
# Define processing testing image function
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a torch.Tensor with an added batch dimension
'''
# TODO: Process a PIL image for use in a PyTorch model
# Resize and crop image
im = Image.open(image)
preprocess = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
im_tensor = preprocess(im)
im_tensor.unsqueeze_(0)
return im_tensor
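# e.g. (hypothetical file) process_image('image_06743.jpg') -> tensor of shape [1, 3, 224, 224]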
# Define prediction function
def predict(image_path, model, topk, device, cat_to_name):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
model.to(device)
model.eval()
# TODO: Implement the code to predict the class from an image file
img = process_image(image_path)
img = img.to(device)
output = model.forward(img)
ps = torch.exp(output)
probs, idxs = ps.topk(topk)
idx_to_class = dict((v,k) for k, v in model.classifier.class_to_idx.items())
classes = [v for k, v in idx_to_class.items() if k in idxs.to('cpu').numpy()]
if cat_to_name:
classes = [cat_to_name[str(i + 1)] for c, i in \
model.classifier.class_to_idx.items() if c in classes]
print('Probabilities:', probs.data.cpu().numpy()[0].tolist())
print('Classes:', classes)
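# Usage sketch (hypothetical checkpoint and image path):
#   cat_to_name = read_jason('cat_to_name.json')
#   device = 'cuda' if torch.cuda.is_available() else 'cpu'
#   predict('flowers/test/1/image_06743.jpg', model, 5, device, cat_to_name)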
| 0
| 0
| 0
| 0
| 0
| 1,850
| 0
| 0
| 66
|
7c9ecc3b2e26c62f94ef6497972cb0e25e5a58f9
| 13,851
|
py
|
Python
|
tensorflow/python/tools/freeze_graph_test.py
|
yage99/tensorflow
|
c7fa71b32a3635eb25596ae80d007b41007769c4
|
[
"Apache-2.0"
] | 78
|
2020-08-04T12:36:25.000Z
|
2022-03-25T04:23:40.000Z
|
tensorflow/python/tools/freeze_graph_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/tools/freeze_graph_test.py
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 28
|
2020-02-10T07:03:06.000Z
|
2022-01-12T11:19:20.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import test
if __name__ == "__main__":
test.main()
| 40.264535
| 80
| 0.695401
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
class FreezeGraphTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def _testFreezeGraph(self, saver_write_version):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# We'll create an input graph that has a single variable containing 1.0,
# and that then multiplies it by 2.
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver(write_version=saver_write_version)
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# We save out the graph to disk, and then call the const conversion
# routine.
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
input_saver_def_path = ""
input_binary = False
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
clear_devices = False
freeze_graph.freeze_graph(
input_graph_path,
input_saver_def_path,
input_binary,
checkpoint_path,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph_path,
clear_devices,
"",
"",
"",
checkpoint_version=saver_write_version)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def _createTFExampleString(self, feature_name, feature_value):
"""Create a serialized tensorflow example."""
example = example_pb2.Example()
example.features.feature[feature_name].float_list.value.extend([
feature_value])
return example.SerializeToString()
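    # Illustrative only: _createTFExampleString("feature", 2.0) returns the serialized
    # bytes of an Example whose features["feature"].float_list.value equals [2.0].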
def _writeDummySavedModel(self, path, feature_name, tags):
"""Writes a classifier with two input features to the given path."""
with ops.Graph().as_default():
examples = array_ops.placeholder(dtypes.string, name="input_node")
feature_configs = {
feature_name: parsing_ops.FixedLenFeature(shape=[],
dtype=dtypes.float32),
}
features = parsing_ops.parse_example(examples, feature_configs)
feature = features[feature_name]
variable_node = variables.VariableV1(1.0, name="variable_node")
scores = math_ops.multiply(variable_node, feature, name="output_node")
class_feature = array_ops.fill(array_ops.shape(feature),
"class_%s" % feature_name)
classes = array_ops.transpose(class_feature)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
signature = (
signature_def_utils.classification_signature_def(
examples=examples,
classes=classes,
scores=scores,))
builder = saved_model_builder.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
sess,
tags,
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature,
},
)
builder.save(as_text=True)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV1(self):
self._testFreezeGraph(saver_pb2.SaverDef.V1)
@test_util.run_v1_only("b/120545219")
def testFreezeGraphV2(self):
self._testFreezeGraph(saver_pb2.SaverDef.V2)
def testFreezeMetaGraph(self):
tmp_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
checkpoint_state_name = "checkpoint_state"
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
input_saver_def_path = ""
input_binary = True
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
clear_devices = False
input_meta_graph = checkpoint_path + ".meta"
freeze_graph.freeze_graph(
"", input_saver_def_path, input_binary, checkpoint_path,
output_node_names, restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "", input_meta_graph)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
@parameterized.named_parameters(
("empty_tags_set", "", []),
("default_tags_set", tag_constants.SERVING, [tag_constants.SERVING]))
def testFreezeSavedModel(self, tags_string, tags_list):
tmp_dir = self.get_temp_dir()
saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
feature_name = "feature"
self._writeDummySavedModel(saved_model_dir, feature_name, tags_list)
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
input_saved_model_dir = saved_model_dir
output_node_names = "output_node"
input_binary = False
input_saver_def_path = False
restore_op_name = None
filename_tensor_name = None
clear_devices = False
input_meta_graph = False
checkpoint_path = None
input_graph_filename = None
saved_model_tags = tags_string
freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
input_binary, checkpoint_path, output_node_names,
restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "",
input_meta_graph, input_saved_model_dir,
saved_model_tags)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
if any(u"ParseExampleV2" in node.name for node in output_graph_def.node):
expected_node_count = 10
else:
expected_node_count = 8
self.assertEqual(expected_node_count, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
feature_value = 2.0
example = self._createTFExampleString(feature_name, feature_value)
with session.Session() as sess:
input_node = sess.graph.get_tensor_by_name("input_node:0")
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node, feed_dict={input_node: [example]})
self.assertNear(feature_value, output, 0.00001)
def testSinglePartitionedVariable(self):
"""Ensures partitioned variables fail cleanly with freeze graph."""
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# Create a graph with partition variables. When weights are partitioned into
    # a single partition, the weights variable is followed by an identity ->
# identity (an additional identity node).
partitioner = partitioned_variables.fixed_size_partitioner(1)
with ops.Graph().as_default():
with variable_scope.variable_scope("part", partitioner=partitioner):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros(
(batch_size, height, width, depth), name="input1")
input2 = array_ops.zeros(
(batch_size, height, width, depth), name="input2")
num_nodes = depth
filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
conv = nn.conv2d(
input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
node = math_ops.add(conv, input2, name="test/add")
node = nn.relu6(node, name="test/relu6")
# Save graph and checkpoints.
sess = session.Session()
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# Ensure this graph has partition variables.
self.assertTrue([
tensor.name.split(":")[0]
for op in sess.graph.get_operations()
for tensor in op.values()
if re.search(r"/part_\d+/", tensor.name)
])
# Test freezing graph doesn't make it crash.
output_node_names = "save/restore_all"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
with self.assertRaises(ValueError):
freeze_graph.freeze_graph_with_def_protos(
input_graph_def=sess.graph_def,
input_saver_def=None,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name="save/restore_all", # default value
filename_tensor_name="save/Const:0", # default value
output_graph=output_graph_path,
clear_devices=False,
initializer_nodes="")
if __name__ == "__main__":
test.main()
| 0
| 2,551
| 0
| 9,185
| 0
| 0
| 0
| 615
| 576
|