| column | dtype | observed range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 6 to 1.04M |
| ext | string (classes) | 10 values |
| lang | string (classes) | 1 value |
| max_stars_repo_path | string | lengths 4 to 247 |
| max_stars_repo_name | string | lengths 4 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 368k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 4 to 247 |
| max_issues_repo_name | string | lengths 4 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 4 to 247 |
| max_forks_repo_name | string | lengths 4 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 1 to 1.04M |
| avg_line_length | float64 | 1.53 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | lengths 6 to 1.04M |
| filtered:remove_non_ascii | int64 | 0 to 538k |
| filtered:remove_decorators | int64 | 0 to 917k |
| filtered:remove_async | int64 | 0 to 722k |
| filtered:remove_classes | int64 | -45 to 1M |
| filtered:remove_generators | int64 | 0 to 814k |
| filtered:remove_function_no_docstring | int64 | -102 to 850k |
| filtered:remove_class_no_docstring | int64 | -3 to 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 to 52.4k |
| filtered:remove_delete_markers | int64 | 0 to 59.6k |
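The rows below follow this schema, one row per source file, with the filtered text in `content` and the unfiltered text in `original_content`. As a minimal sketch of how a shard shaped like this might be inspected, assuming a Parquet export and pandas (the filename `shard.parquet` is a placeholder, not part of this dump):

```python
import pandas as pd

# Load one hypothetical shard with the columns listed above.
df = pd.read_parquet("shard.parquet")

# Sanity checks implied by the schema: hexsha is always 40 characters,
# and alphanum_fraction stays within [0, 1].
assert df["hexsha"].str.len().eq(40).all()
assert df["alphanum_fraction"].between(0, 1).all()

# Rough per-row measure of how much text the filtered:* passes removed.
removed_chars = df["original_content"].str.len() - df["content"].str.len()
print(removed_chars.describe())
```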
hexsha: 4d9157319b66bba873f45428ff904995f748f650 | size: 2,149 | ext: py | lang: Python
max_stars_repo: path bib/views.py | name acdh-oeaw/thunau-old | head_hexsha a3023885470e80f7312e43561028398bffd713e0 | licenses ["MIT"] | max_stars_count 1 | stars_event 2021-09-20T12:51:47.000Z to 2021-09-20T12:51:47.000Z
max_issues_repo: path bib/views.py | name acdh-oeaw/cbab | head_hexsha 7cd25f057913dccf85f851e448b1dbc2c5f8d624 | licenses ["MIT"] | max_issues_count 9 | issues_event 2020-02-12T00:19:18.000Z to 2021-12-13T19:46:51.000Z
max_forks_repo: path bib/views.py | name acdh-oeaw/thunau-old | head_hexsha a3023885470e80f7312e43561028398bffd713e0 | licenses ["MIT"] | max_forks_count null | forks_event null to null
content:
from django.shortcuts import render
# following line has to match the settings-file you are using
def sync_zotero(request):
""" renders a simple template with a button to trigger sync_zotero_action function """
return render(request, 'bib/synczotero.html')
avg_line_length: 33.061538 | max_line_length: 91 | alphanum_fraction: 0.590507
original_content:
import requests
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .models import Book
# following line has to match the settings-file you are using
from django.conf import settings
def sync_zotero(request):
""" renders a simple template with a button to trigger sync_zotero_action function """
return render(request, 'bib/synczotero.html')
@login_required
def sync_zotero_action(request):
""" fetches the last n items form zoter and syncs it with the bib entries in defc-db"""
root = "https://api.zotero.org/users/"
params = "{}/collections/{}/items/top?v=3&key={}".format(
settings.Z_USER_ID, settings.Z_COLLECTION, settings.Z_API_KEY)
url = root + params + "&sort=dateModified&limit=25"
books_before = len(Book.objects.all())
try:
r = requests.get(url)
error = "No errors from ZoteroAPI"
except:
error = "aa! errors! The API didn´t response with a proper json-file"
response = r.json()
failed = []
saved = []
for x in response:
try:
x["data"]["creators"][0]
try:
x["data"]["creators"][0]["name"]
name = x["data"]["creators"][0]["name"]
except:
firstname = x["data"]["creators"][0]["firstName"]
lastname = x["data"]["creators"][0]["lastName"]
name = "{}, {}".format(lastname, firstname)
except:
name = "no name provided"
NewBook = Book(
zoterokey=x["data"]["key"], item_type=x["data"]["itemType"],
author=name,
title=x["data"]["title"],
short_title=x["data"]["shortTitle"]
)
try:
NewBook.save()
saved.append(x["data"])
except:
failed.append(x['data'])
books_after = len(Book.objects.all())
context = {}
context["error"] = error
context["saved"] = saved
context["failed"] = failed
context["books_before"] = [books_before]
context["books_after"] = [books_after]
return render(request, 'bib/synczotero_action.html', context)
filtered: remove_non_ascii 2 | remove_decorators 1,724 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 44 | remove_delete_markers 112

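The `sync_zotero_action` view above hides request failures behind bare `except:` clauses (and `r` may be unbound when the request itself fails). A more defensive version of the same Zotero fetch might look like the sketch below; the endpoint and parameter names mirror the code above, but this helper is illustrative and not part of the repository:

```python
import requests

def fetch_zotero_items(user_id, collection, api_key, limit=25):
    """Illustrative only: fetch the most recently modified items of a Zotero collection."""
    url = f"https://api.zotero.org/users/{user_id}/collections/{collection}/items/top"
    params = {"v": 3, "key": api_key, "sort": "dateModified", "limit": limit}
    r = requests.get(url, params=params, timeout=10)
    r.raise_for_status()  # surface HTTP errors instead of swallowing them
    return r.json()       # raises if the body is not valid JSON
```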
hexsha: 0f06c5989c3c2521bfde65ac17818d8908b5e3a6 | size: 229 | ext: py | lang: Python
max_stars_repo: path contextManager/file_handler.py | name terasakisatoshi/pythonCodes | head_hexsha baee095ecee96f6b5ec6431267cdc6c40512a542 | licenses ["MIT"] | max_stars_count null | stars_event null to null
max_issues_repo: path contextManager/file_handler.py | name terasakisatoshi/pythonCodes | head_hexsha baee095ecee96f6b5ec6431267cdc6c40512a542 | licenses ["MIT"] | max_issues_count null | issues_event null to null
max_forks_repo: path contextManager/file_handler.py | name terasakisatoshi/pythonCodes | head_hexsha baee095ecee96f6b5ec6431267cdc6c40512a542 | licenses ["MIT"] | max_forks_count null | forks_event null to null
content:
import os
import subprocess
file = "some_file.txt"
with open(file, 'w') as opened_file:
opened_file.write('Hola!')
subprocess.run(["cat", file])
if os.path.exists(file):
os.remove(file)
assert not os.path.exists(file)
avg_line_length: 17.615385 | max_line_length: 36 | alphanum_fraction: 0.703057
original_content:
import os
import subprocess
file = "some_file.txt"
with open(file, 'w') as opened_file:
opened_file.write('Hola!')
subprocess.run(["cat", file])
if os.path.exists(file):
os.remove(file)
assert not os.path.exists(file)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0

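Given the `contextManager/` directory name, the write/read/delete flow above is the kind of pattern often wrapped in a reusable context manager. A sketch under that assumption (not taken from the repository):

```python
import os
from contextlib import contextmanager

@contextmanager
def temporary_text_file(path, text):
    """Create a text file, hand it to the caller, and remove it afterwards."""
    with open(path, "w") as handle:
        handle.write(text)
    try:
        yield path
    finally:
        if os.path.exists(path):
            os.remove(path)

with temporary_text_file("some_file.txt", "Hola!") as path:
    with open(path) as handle:
        print(handle.read())
```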
hexsha: 1d088d3c2844363762dc3f4b36fd7e8b3eb3c010 | size: 133 | ext: py | lang: Python
max_stars_repo: path task/constants.py | name suvajitsarkar/taskManagement | head_hexsha 0054c20fba8dd8eb3c4c83abdded8fc778a8b62b | licenses ["Apache-2.0"] | max_stars_count null | stars_event null to null
max_issues_repo: path task/constants.py | name suvajitsarkar/taskManagement | head_hexsha 0054c20fba8dd8eb3c4c83abdded8fc778a8b62b | licenses ["Apache-2.0"] | max_issues_count 1 | issues_event 2021-06-10T23:00:14.000Z to 2021-06-10T23:00:14.000Z
max_forks_repo: path task/constants.py | name suvajitsarkar/taskManagement | head_hexsha 0054c20fba8dd8eb3c4c83abdded8fc778a8b62b | licenses ["Apache-2.0"] | max_forks_count null | forks_event null to null
content:
TASK_STAGES = (
('n', 'Not Started'),
('i', 'In Progress'),
('r', 'In Review'),
('d', 'Done'),
)
avg_line_length: 19 | max_line_length: 29 | alphanum_fraction: 0.353383
original_content:
TASK_STAGES = (
('n', 'Not Started'),
('i', 'In Progress'),
('r', 'In Review'),
('d', 'Done'),
)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0

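`TASK_STAGES` follows the Django `choices` convention: a short stored value paired with a human-readable label. A hypothetical model field using it, shown only as an illustration (the `Task` model below is not part of the repository):

```python
from django.db import models

from task.constants import TASK_STAGES

class Task(models.Model):
    stage = models.CharField(max_length=1, choices=TASK_STAGES, default='n')

    def __str__(self):
        # get_stage_display() resolves 'n' -> 'Not Started', and so on.
        return self.get_stage_display()
```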
hexsha: c467b1c943e7b0098db2795f9c7ce4d0d502e039 | size: 2,687 | ext: py | lang: Python
max_stars_repo: path core/helpers.py | name quis/exceptional-review-procedure | head_hexsha adf34fb72b2d3357f4f1da320019f729cc1ae653 | licenses ["MIT"] | max_stars_count null | stars_event null to null
max_issues_repo: path core/helpers.py | name quis/exceptional-review-procedure | head_hexsha adf34fb72b2d3357f4f1da320019f729cc1ae653 | licenses ["MIT"] | max_issues_count null | issues_event null to null
max_forks_repo: path core/helpers.py | name quis/exceptional-review-procedure | head_hexsha adf34fb72b2d3357f4f1da320019f729cc1ae653 | licenses ["MIT"] | max_forks_count null | forks_event null to null
content:
CACHE_KEY_USER = 'wizard-user-cache-key'
# unusual character that is unlikely to be included in each product label
PRODUCT_DELIMITER = ''
avg_line_length: 29.206522 | max_line_length: 115 | alphanum_fraction: 0.717529
original_content:
from urllib.parse import urlencode
import uuid
from formtools.wizard.storage.base import BaseStorage
from formtools.wizard.storage.session import SessionStorage
import requests
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.cache import cache
from django.shortcuts import Http404
CACHE_KEY_USER = 'wizard-user-cache-key'
# unusual character that is unlikely to be included in each product label
PRODUCT_DELIMITER = 'µ'
class NoResetStorage(SessionStorage):
def reset(self):
pass
class CacheStorage(BaseStorage):
is_shared_key = 'is_shared'
def __init__(self, prefix, request=None, file_storage=None):
key = get_user_cache_key(request)
if not key:
key = str(uuid.uuid4())
set_user_cache_key(request=request, key=key)
super().__init__(prefix=f'{prefix}_{key}', request=request, file_storage=file_storage)
self.data = self.load_data()
if not self.data:
self.init_data()
def init_data(self):
super().init_data()
self.extra_data[self.is_shared_key] = False
def load_data(self):
return cache.get(self.prefix)
def update_response(self, response):
super().update_response(response)
cache.set(self.prefix, self.data, timeout=60*60*72) # 72 hours
def mark_shared(self):
self.extra_data[self.is_shared_key] = True
def get_user_cache_key(request):
return request.session.get(CACHE_KEY_USER)
def set_user_cache_key(request, key):
request.session[CACHE_KEY_USER] = key
request.session.modified = True
def load_saved_submission(request, prefix, key):
submission = cache.get(f'wizard_{prefix}_{key}')
if not submission:
raise Http404
elif not submission[CacheStorage.extra_data_key][CacheStorage.is_shared_key]:
raise SuspiciousSession
else:
set_user_cache_key(request, key)
def lookup_commodity_code_by_name(query, page):
return requests.get(settings.COMMODITY_NAME_SEARCH_API_ENDPOINT, {'q': query, 'page': page})
def search_hierarchy(node_id):
# the API needs country code but it will not affect the hierarchy for our use case, so hard-code it
return requests.get(settings.HIERARCHY_BROWSER_LOOKUP_API_ENDPOINT, {'node_id': node_id, 'country_code': 'dj'})
def get_paginator_url(filters, url):
querystring = urlencode({
key: value
for key, value in filters.lists()
if value and key != 'page'
}, doseq=True)
return f'{url}?{querystring}'
def parse_commodities(commodities):
return commodities.split(PRODUCT_DELIMITER) if commodities else []
filtered: remove_non_ascii 2 | remove_decorators 0 | remove_async 0 | remove_classes 907 | remove_generators 0 | remove_function_no_docstring 1,073 | remove_class_no_docstring 0 | remove_unused_imports 150 | remove_delete_markers 407

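As a quick illustration of `get_paginator_url` from `core/helpers.py` above: `filters` is expected to be a Django `QueryDict` (it must expose `.lists()`), and the `page` key is deliberately dropped while the remaining filters are re-encoded. The values below are made up and assume a configured Django environment:

```python
from django.http import QueryDict

from core.helpers import get_paginator_url

filters = QueryDict("q=steel&page=3&country=fr")
print(get_paginator_url(filters, "/search/"))
# -> "/search/?q=steel&country=fr"
```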
hexsha: ba6f4ebec266f13af50b4b48401bae096b80e262 | size: 49,610 | ext: py | lang: Python
max_stars_repo: path hsm_software/sw/rpc_handling.py | name DiamondKeySecurity/HSM | head_hexsha 6b6a0d691a22863411e048c7c211ac63bf9ffaa7 | licenses ["BSD-3-Clause"] | max_stars_count null | stars_event null to null
max_issues_repo: path hsm_software/sw/rpc_handling.py | name DiamondKeySecurity/HSM | head_hexsha 6b6a0d691a22863411e048c7c211ac63bf9ffaa7 | licenses ["BSD-3-Clause"] | max_issues_count null | issues_event null to null
max_forks_repo: path hsm_software/sw/rpc_handling.py | name DiamondKeySecurity/HSM | head_hexsha 6b6a0d691a22863411e048c7c211ac63bf9ffaa7 | licenses ["BSD-3-Clause"] | max_forks_count null | forks_event null to null
content:
#!/usr/bin/env python
# Copyright (c) 2018, 2019 Diamond Key Security, NFP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# - Neither the name of the NORDUnet nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
# import classes from the original cryptech.muxd
# cryptech_muxd has been renamed to cryptech/muxd.py
def rpc_get_int(msg, location):
"Get an int from a location in an RPC message"
return struct.unpack(">L", msg[location:location+4])[0]
def rpc_set_int(msg, data, location):
"Set an int from a location in an RPC message"
return msg[:location] + struct.pack(">L", data) + msg[location+4:]
avg_line_length: 41.724138 | max_line_length: 141 | alphanum_fraction: 0.668373
original_content:
#!/usr/bin/env python
# Copyright (c) 2018, 2019 Diamond Key Security, NFP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# - Neither the name of the NORDUnet nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
import threading
from uuid import UUID
# import classes from the original cryptech.muxd
# cryptech_muxd has been renamed to cryptech/muxd.py
from hsm_tools.cryptech.muxd import logger
from hsm_tools.hsm import CrypTechDeviceState
from settings import HSMSettings
from hsm_tools.cryptech.cryptech.libhal import ContextManagedUnpacker, xdrlib
from hsm_tools.rpc_action import RPCAction
from hsm_tools.cryptech_port import DKS_RPCFunc, DKS_HALKeyType,\
DKS_HALKeyFlag, DKS_HALError
from hsm_tools.threadsafevar import ThreadSafeVariable
from rpc_builder import KeyMatchDetails, RPCpkey_open, RPCKeygen_result
from load_distribution import LoadDistribution
def rpc_get_int(msg, location):
"Get an int from a location in an RPC message"
return struct.unpack(">L", msg[location:location+4])[0]
def rpc_set_int(msg, data, location):
"Set an int from a location in an RPC message"
return msg[:location] + struct.pack(">L", data) + msg[location+4:]
class KeyHandleDetails:
"""Information on the key that a handle points to"""
def __init__(self, rpc_index, uuid):
self.rpc_index = rpc_index
self.uuid = uuid
class KeyOperationData:
def __init__(self, rpc_index, handle, uuid):
self.rpc_index = rpc_index
self.handle = handle
self.device_uuid = uuid
class MuxSession:
"""Simple class for defining the state of a
connection to the load balancer"""
def __init__(self, rpc_index, cache, settings, from_ethernet):
self.cache = cache
# if true, this session was started by a connection from
# outside the HSM and is not trusted
self.from_ethernet = from_ethernet
# should new keys be added to the cache? The synchronizer
# manually adds keys
self.cache_generated_keys = True
# if true, all incoming uuids should be treated as device uuids
self.incoming_uuids_are_device_uuids = False
# complete unencoded request that we're working on
self.current_request = None
# the current rpc_index to use for this session
self.rpc_index = rpc_index
# the index of the rpc that is being used for the
# initializing hash op
self.cur_hashing_index = 0
# dictionary mapping of hash rpc indexes by the hash handle
self.hash_rpcs = {}
# dictionary mapping of key rpc indexes by the key handle
self.key_rpcs = {}
# parameters for the current key operation
self.key_op_data = KeyOperationData(None, None, None)
# should exportable private keys be used for this session?
# Use 's' for PEP8
s = settings.get_setting(HSMSettings.ENABLE_EXPORTABLE_PRIVATE_KEYS)
self.enable_exportable_private_keys = s
class RPCPreprocessor:
"""Able to load balance between multiple rpcs"""
def __init__(self, rpc_list, cache, settings, netiface):
self.cache = cache
self.settings = settings
self.rpc_list = rpc_list
# this is the index of the RPC to use. When set to < 0,
# it will auto set
self.current_rpc = -1
self.sessions = {}
self.sessions_lock = threading.Lock()
self.function_table = {}
self.create_function_table()
self.netiface = netiface
self.hsm_locked = True
self.debug = False
self.tamper_detected = ThreadSafeVariable(False)
# used by load balancing heuristic
self.load_dist = LoadDistribution(len(rpc_list))
self.large_weight = 2**31
self.pkey_op_weight = 1
self.pkey_gen_weight = 100
# used when selecting any rpc, attempts to evenly
# distribute keys across all devices, even when
# only one thread is being used
self.next_any_device = 0
self.next_any_device_uses = 0
self.choose_any_thread_lock = threading.Lock()
def device_count(self):
return len(self.rpc_list)
def get_current_rpc(self):
if(self.current_rpc < 0):
return "auto"
elif (len(self.rpc_list) > self.current_rpc):
return self.rpc_list[self.current_rpc].name
else:
return "INVALID RPC"
def set_current_rpc(self, index):
if(isinstance(index, (int, )) is False):
return "Invalid index. The index must be a valid RPC index."
elif (index > len(self.rpc_list)):
return "Index out of range. The index must be a valid RPC index."
else:
self.current_rpc = index
return "RPC is now: " + self.get_current_rpc()
def create_session(self, client, from_ethernet):
# make sure we have a session for this handle
with (self.sessions_lock):
if(client not in self.sessions):
new_session = MuxSession(self.current_rpc,
self.cache,
self.settings,
from_ethernet)
self.sessions[client] = new_session
def delete_session(self, client):
with (self.sessions_lock):
if(client in self.sessions):
del self.sessions[client]
def make_all_rpc_list(self):
rpc_list = []
for rpc in self.rpc_list:
rpc_list.append(rpc)
return rpc_list
def get_session(self, client):
with (self.sessions_lock):
return self.sessions[client]
def update_device_weight(self, cryptech_device, amount):
try:
self.load_dist.inc(cryptech_device, amount)
except:
pass
def get_cryptech_device_weight(self, cryptech_device):
try:
return self.load_dist.get(cryptech_device)
except:
return self.large_weight
def choose_rpc_from_master_uuid(self, master_uuid):
uuid_dict = self.cache.get_alphas(master_uuid)
result = None
# initialize to a high weight
device_weight = self.large_weight
for key, val in uuid_dict.iteritems():
# for now choose the device with the lowest weight
new_device_weight = self.get_cryptech_device_weight(key)
if (new_device_weight < device_weight):
device_weight = new_device_weight
result = (key, val)
return result
def choose_rpc(self):
"""Simple Heuristic for selecting an alpha RPC channel to use"""
DEVICE_USES_BEFORE_NEXT = 2
device_count = len(self.rpc_list)
with(self.choose_any_thread_lock):
# first try to evenly distribute
self.next_any_device_uses += 1
if(self.next_any_device_uses > DEVICE_USES_BEFORE_NEXT):
self.next_any_device_uses = 0
self.next_any_device += 1
if(self.next_any_device >= device_count):
self.next_any_device = 0
# make sure this has the smallest weight
# If only one process is using the HSM, next_rpc
# will probably be ok, but if multiple processes
# are using the HSM, it's possible that the call
# may try to use a device that's busy
# initialize to weight of device
device_weight = self.get_cryptech_device_weight(self.next_any_device)
for device_index in xrange(device_count):
# if we find a device with a lower weight, use it
if (self.next_any_device != device_index):
new_device_weight = self.get_cryptech_device_weight(device_index)
if (new_device_weight < device_weight):
device_weight = new_device_weight
self.next_any_device = device_index
# reset uses
self.next_any_device_uses = 0
return self.next_any_device
def append_futures(self, futures):
for rpc in self.rpc_list:
futures.append(rpc.serial.rpc_output_loop())
futures.append(rpc.serial.logout_all())
@property
def is_mkm_set(self):
return self.settings.get_setting(HSMSettings.MASTERKEY_SET) == True
def is_rpc_locked(self):
return self.hsm_locked or (not self.cache.is_initialized()) or (not self.is_mkm_set)
def unlock_hsm(self):
self.hsm_locked = False
for rpc in self.rpc_list:
rpc.unlock_port()
def lock_hsm(self):
self.hsm_locked = True
for rpc in self.rpc_list:
rpc.change_state(CrypTechDeviceState.HSMLocked)
def on_tamper_event(self, tamper_object):
new_tamper_state = tamper_object.get_tamper_state()
old_tamper_state = self.tamper_detected.value
if(new_tamper_state != old_tamper_state):
self.tamper_detected.value = new_tamper_state
if(new_tamper_state is True):
self.hsm_locked = True
for rpc in self.rpc_list:
rpc.change_state(CrypTechDeviceState.TAMPER)
else:
self.hsm_locked = True
for rpc in self.rpc_list:
rpc.clear_tamper(CrypTechDeviceState.TAMPER_RESET)
def process_incoming_rpc(self, decoded_request):
# handle the message normally
unpacker = ContextManagedUnpacker(decoded_request)
# get the code of the RPC request
code = unpacker.unpack_uint()
# get the handle which identifies the TCP connection that the
# request came from
client = unpacker.unpack_uint()
# get the session so we know where to put the response and
# which rpc to use
session = self.get_session(client)
# save the current request in the session
session.current_request = decoded_request
# check to see if there's an ongoing tamper event
if (self.tamper_detected.value and session.from_ethernet):
return self.create_error_response(code, client,
DKS_HALError.HAL_ERROR_TAMPER)
# process the RPC request
action = self.function_table[code](code, client, unpacker, session)
# it's possible that the request has been altered so return it
action.request = session.current_request
return action
def create_error_response(self, code, client, hal_error):
# generate complete response
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
response.pack_uint(hal_error)
# TODO log error
return RPCAction(response.get_buffer(), None, None)
def handle_set_rpc(self, code, client, unpacker, session):
"""Special DKS RPC to set the RPC to use for all calls"""
logger.info("RPC code received %s, handle 0x%x",
DKS_RPCFunc.RPC_FUNC_SET_RPC_DEVICE, client)
# get the serial to switch to
rpc_index = unpacker.unpack_uint()
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
if (session.from_ethernet):
# the RPC can not be explicitly set from an outside
# ethernet connection
response.pack_uint(DKS_HALError.HAL_ERROR_FORBIDDEN)
elif (rpc_index < len(self.rpc_list)):
# set the rpc to use for this session
session.rpc_index = rpc_index
response.pack_uint(DKS_HALError.HAL_OK)
else:
response.pack_uint(DKS_HALError.HAL_ERROR_BAD_ARGUMENTS)
unencoded_response = response.get_buffer()
return RPCAction(unencoded_response, None, None)
def handle_enable_cache_keygen(self, code, client, unpacker, session):
"""Special DKS RPC to enable caching of generated keys"""
logger.info("RPC code received %s, handle 0x%x",
DKS_RPCFunc.RPC_FUNC_ENABLE_CACHE_KEYGEN.name, client)
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
if (session.from_ethernet):
# keygen caching can not be explicitly set from
# an ethernet connection
response.pack_uint(DKS_HALError.HAL_ERROR_FORBIDDEN)
else:
response.pack_uint(DKS_HALError.HAL_OK)
unencoded_response = response.get_buffer()
session.cache_generated_keys = True
print('caching enabled')
return RPCAction(unencoded_response, None, None)
def handle_disable_cache_keygen(self, code, client, unpacker, session):
"""Special DKS RPC to disable caching of generated keys"""
logger.info("RPC code received %s, handle 0x%x",
DKS_RPCFunc.RPC_FUNC_DISABLE_CACHE_KEYGEN.name, client)
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
if (session.from_ethernet):
# keygen caching can not be explicitly set from
# an ethernet connection
response.pack_uint(DKS_HALError.HAL_ERROR_FORBIDDEN)
else:
response.pack_uint(DKS_HALError.HAL_OK)
unencoded_response = response.get_buffer()
session.cache_generated_keys = False
print('caching disabled')
return RPCAction(unencoded_response, None, None)
def handle_use_incoming_device_uuids(self, code, client, unpacker, session):
"""Special DKS RPC to enable using incoming device uuids"""
logger.info("RPC code received %s, handle 0x%x",
DKS_RPCFunc.RPC_FUNC_USE_INCOMING_DEVICE_UUIDS.name, client)
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
if (session.from_ethernet):
# using device uuids can not be set from
# an ethernet connection
response.pack_uint(DKS_HALError.HAL_ERROR_FORBIDDEN)
else:
response.pack_uint(DKS_HALError.HAL_OK)
unencoded_response = response.get_buffer()
session.incoming_uuids_are_device_uuids = True
print('accepting incoming device uuids')
return RPCAction(unencoded_response, None, None)
def handle_use_incoming_master_uuids(self, code, client, unpacker, session):
"""Special DKS RPC to enable using incoming master uuids"""
logger.info("RPC code received %s, handle 0x%x",
DKS_RPCFunc.RPC_FUNC_USE_INCOMING_MASTER_UUIDS.name, client)
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
if (session.from_ethernet):
# using device uuids can not be set from
# an ethernet connection
response.pack_uint(DKS_HALError.HAL_ERROR_FORBIDDEN)
else:
response.pack_uint(DKS_HALError.HAL_OK)
unencoded_response = response.get_buffer()
session.incoming_uuids_are_device_uuids = False
print('accepting incoming master uuids')
return RPCAction(unencoded_response, None, None)
def get_response_unpacker(self, unencoded_response):
msg = "".join(unencoded_response)
if not msg:
return None
msg = ContextManagedUnpacker(msg)
# return the unpacker. the first uint is the code followed
# by the client
return msg
def handle_rpc_any(self, code, client, unpacker, session):
"""Can run on any available alpha because this is not alpha specific"""
rpc_index = session.rpc_index if(session.rpc_index >= 0) else self.choose_rpc()
logger.info("any rpc sent to %i", rpc_index)
return RPCAction(None, [self.rpc_list[rpc_index]], None)
def handle_rpc_all(self, code, client, unpacker, session):
"""Must run on all alphas to either to keep PINs synchronized
or because we don't know which alpha we'll need later"""
# if the rpc_index has been set for the session, always use it
if(session.rpc_index >= 0):
return RPCAction(None, [self.rpc_list[session.rpc_index]], None)
rpc_list = self.make_all_rpc_list()
return RPCAction(None, rpc_list, self.callback_rpc_all)
def callback_rpc_all(self, reply_list):
code = None
for reply in reply_list:
unpacker = self.get_response_unpacker(reply)
new_code = unpacker.unpack_int()
# get the client
client = unpacker.unpack_uint()
if(code is not None and new_code != code):
# error, the codes don't match
return self.create_error_response(new_code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
code = new_code
status = unpacker.unpack_uint()
if(status != 0):
# one of the alpha's returned an error so return that error
# TODO log error
return self.create_error_response(code, client, status)
#all of the replies are the same so just return the first one
return RPCAction(reply_list[0], None, None)
def handle_rpc_starthash(self, code, client, unpacker, session):
"""This is the begining of a hash operation. Any RPC can be used."""
# select an RPC to use for this hashing operation
session.cur_hashing_index = session.rpc_index if(session.rpc_index >= 0) else self.choose_rpc()
logger.info("hashing on RPC: %i", session.cur_hashing_index)
return RPCAction(None, [self.rpc_list[session.cur_hashing_index]], self.callback_rpc_starthash)
def callback_rpc_starthash(self, reply_list):
unpacker = self.get_response_unpacker(reply_list[0])
code = unpacker.unpack_uint()
client = unpacker.unpack_uint()
# hashing only happens on one alpha
if(len(reply_list) != 1):
logger.info("callback_rpc_starthash: len(reply_list) != 1")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
result = unpacker.unpack_uint()
if(code != DKS_RPCFunc.RPC_FUNC_HASH_INITIALIZE):
logger.info("callback_rpc_starthash: code != RPCFunc.RPC_FUNC_HASH_INITIALIZE")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
# get the session
session = self.get_session(client)
if(result != DKS_HALError.HAL_OK):
logger.info("callback_rpc_starthash: result != 0")
return self.create_error_response(code, client, result)
handle = unpacker.unpack_uint()
# save the RPC to use for this handle
session.hash_rpcs[handle] = session.cur_hashing_index
return RPCAction(reply_list[0], None, None)
def handle_rpc_hash(self, code, client, unpacker, session):
"""Once a hash has started, we have to continue with it the same RPC"""
# get the handle of the hash operation
handle = unpacker.unpack_uint()
# this handle must be a key
if(handle not in session.hash_rpcs):
logger.info("handle_rpc_hash: handle not in session.hash_rpcs")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_BAD_ARGUMENTS)
return RPCAction(None, [self.rpc_list[session.hash_rpcs[handle]]], None)
def handle_rpc_endhash(self, code, client, unpacker, session):
"""we've finished a hash operation"""
# get the handle of the hash operation
handle = unpacker.unpack_uint()
# this handle must be a key
if(handle not in session.hash_rpcs):
logger.info("handle_rpc_hash: handle not in session.hash_rpcs")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_BAD_ARGUMENTS)
rpc_index = session.hash_rpcs[handle]
# the handle no longer needs to be in the dictionary
del session.hash_rpcs[handle]
return RPCAction(None, [self.rpc_list[rpc_index]], None)
def handle_rpc_usecurrent(self, code, client, unpacker, session):
"""The manually selected RPC must be used"""
rpc_index = session.rpc_index
if(rpc_index < 0):
rpc_index = session.key_op_data.rpc_index
return RPCAction(None, [self.rpc_list[rpc_index]], None)
def handle_rpc_pkeyexport(self, code, client, unpacker, session):
# make sure pkey export has been enabled. Always allow from internal non-ethernet sources
if (session.from_ethernet is False and
self.settings.get_setting(HSMSettings.ENABLE_KEY_EXPORT) is False):
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_FORBIDDEN)
"""The manually selected RPC must be used"""
rpc_index = session.rpc_index
if(rpc_index < 0):
rpc_index = session.key_op_data.rpc_index
return RPCAction(None, [self.rpc_list[rpc_index]], None)
def handle_rpc_pkeyopen(self, code, client, unpacker, session):
# pkcs11 session
session_param = unpacker.unpack_uint()
# uuid
incoming_uuid = UUID(bytes = unpacker.unpack_bytes())
# get the session to use
session = self.get_session(client)
# what type of uuid are we getting?
if(session.incoming_uuids_are_device_uuids):
if(session.rpc_index < 0):
logger.info("handle_rpc_pkeyopen: using device uuid, but device not set")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_IMPOSSIBLE)
device_uuid = incoming_uuid
session.key_op_data.rpc_index = session.rpc_index
else:
# find the device uuid from the master uuid
master_uuid = incoming_uuid
if(session.rpc_index >= 0):
# just use the set rpc_index
session.key_op_data.rpc_index = session.rpc_index
# see if this uuid is on the alpha we are requesting
device_list = self.cache.get_alphas(master_uuid)
if(session.rpc_index not in device_list):
logger.info("handle_rpc_pkeyopen: session.rpc_index not in device_list")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_KEY_NOT_FOUND)
device_uuid = device_list[session.rpc_index]
else:
rpc_uuid_pair = self.choose_rpc_from_master_uuid(master_uuid)
if(rpc_uuid_pair is None):
logger.info("handle_rpc_pkeyopen: rpc_uuid_pair is None")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_KEY_NOT_FOUND)
session.key_op_data.rpc_index = rpc_uuid_pair[0]
device_uuid = rpc_uuid_pair[1]
# recreate with the actual uuid
session.current_request = RPCpkey_open.create(code, client, session_param, device_uuid)
# save data about the key we are opening
session.key_op_data.device_uuid = device_uuid
"""uuid is used to select the RPC with the key and the handle is returned"""
return RPCAction(None, [self.rpc_list[session.key_op_data.rpc_index]], self.callback_rpc_pkeyopen)
def callback_rpc_pkeyopen(self, reply_list):
unpacker = self.get_response_unpacker(reply_list[0])
code = unpacker.unpack_uint()
client = unpacker.unpack_uint()
# hashing only happens on one alpha
if(len(reply_list) != 1):
logger.info("callback_rpc_pkeyopen: len(reply_list) != 1")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
result = unpacker.unpack_uint()
if(code != DKS_RPCFunc.RPC_FUNC_PKEY_OPEN):
logger.info("callback_rpc_pkeyopen: code != RPCFunc.RPC_FUNC_PKEY_OPEN")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
# get the session
session = self.get_session(client)
if(result != 0):
logger.info("callback_rpc_pkeyopen: result != 0")
return self.create_error_response(code, client, result)
session.key_op_data.handle = unpacker.unpack_uint()
# save the RPC to use for this handle
session.key_rpcs[session.key_op_data.handle] = KeyHandleDetails(session.key_op_data.rpc_index, session.key_op_data.device_uuid)
# inform the load balancer that we have an open pkey
self.update_device_weight(session.key_op_data.rpc_index, self.pkey_op_weight)
return RPCAction(reply_list[0], None, None)
def handle_rpc_pkey(self, code, client, unpacker, session):
"""use handle to select RPC"""
# get the handle of the hash operation
handle = unpacker.unpack_uint()
# this handle must be a key
if(handle not in session.key_rpcs):
logger.info("handle_rpc_pkey: handle not in session.key_rpcs")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_BAD_ARGUMENTS)
rpc_index = session.key_rpcs[handle].rpc_index
device_uuid = session.key_rpcs[handle].uuid
# logger.info("Using pkey handle:%i RPC:%i", handle, rpc_index)
session.key_op_data = KeyOperationData(rpc_index, handle, device_uuid)
if (code == DKS_RPCFunc.RPC_FUNC_PKEY_DELETE or
code == DKS_RPCFunc.RPC_FUNC_PKEY_CLOSE):
return RPCAction(None, [self.rpc_list[rpc_index]], self.callback_rpc_close_deletekey)
else:
return RPCAction(None, [self.rpc_list[rpc_index]], None)
def handle_rpc_pkeyload(self, code, client, unpacker, session):
"""use manually selected RPC and get returned uuid and handle"""
# if the session rpc_index has not been set, this must be getting the public key
# rpc_index = session.rpc_index
# if(rpc_index < 0):
# rpc_index = self.choose_rpc() #session.key_op_data.rpc_index
# select an RPC to use for this hashing operation
session.key_op_data.rpc_index = session.rpc_index if(session.rpc_index >= 0) else self.choose_rpc()
logger.info("session.rpc_index == %i session.key_op_data.rpc_index == %i",
session.rpc_index, session.key_op_data.rpc_index)
# consume pkcs11 session id
unpacker.unpack_uint()
# consume der
unpacker.unpack_bytes()
# get flags
session.flags = unpacker.unpack_uint()
if hasattr(session, 'pkey_type'):
# treat as the public version of the last private key generated as this is the standard usage
if(session.pkey_type == DKS_HALKeyType.HAL_KEY_TYPE_RSA_PRIVATE and
session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_PUBLIC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_RSA_PUBLIC
elif(session.pkey_type == DKS_HALKeyType.HAL_KEY_TYPE_EC_PRIVATE and
session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_PUBLIC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_EC_PUBLIC
elif(session.pkey_type == DKS_HALKeyType.HAL_KEY_TYPE_HASHSIG_PRIVATE and
session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_PUBLIC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_HASHSIG_PUBLIC
else:
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_NONE
session.curve = 0
# inform the load balancer that we are doing an expensive key operation
self.update_device_weight(session.key_op_data.rpc_index, self.pkey_gen_weight)
return RPCAction(None, [self.rpc_list[session.key_op_data.rpc_index]], self.callback_rpc_keygen)
def handle_rpc_pkeyimport(self, code, client, unpacker, session):
"""use manually selected RPC and get returned uuid and handle"""
# if the session rpc_index has not been set, this must be getting the public key
# rpc_index = session.rpc_index
# if(rpc_index < 0):
# rpc_index = self.choose_rpc() #session.key_op_data.rpc_index
# select an RPC to use for this hashing operation
session.key_op_data.rpc_index = session.rpc_index if(session.rpc_index >= 0) else self.choose_rpc()
logger.info("session.rpc_index == %i session.key_op_data.rpc_index == %i",
session.rpc_index, session.key_op_data.rpc_index)
# consume pkcs11 session id
unpacker.unpack_uint()
# consume kekek
unpacker.unpack_uint()
# consume pkcs8
unpacker.unpack_bytes()
# consume kek
unpacker.unpack_bytes()
# get flags
session.flags = unpacker.unpack_uint()
if hasattr(session, 'pkey_type'):
# treat as the public version of the last private key generated as this is the standard usage
if(session.pkey_type == DKS_HALKeyType.HAL_KEY_TYPE_RSA_PRIVATE and
session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_PUBLIC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_RSA_PUBLIC
elif(session.pkey_type == DKS_HALKeyType.HAL_KEY_TYPE_EC_PRIVATE and
session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_PUBLIC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_EC_PUBLIC
elif(session.pkey_type == DKS_HALKeyType.HAL_KEY_TYPE_HASHSIG_PRIVATE and
session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_PUBLIC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_HASHSIG_PUBLIC
else:
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_NONE
session.curve = 0
# inform the load balancer that we are doing an expensive key operation
self.update_device_weight(session.key_op_data.rpc_index, self.pkey_gen_weight)
return RPCAction(None, [self.rpc_list[session.key_op_data.rpc_index]], self.callback_rpc_keygen)
def handle_rpc_keygen(self, code, client, unpacker, session):
"""A key has been generated. Returns uuid and handle"""
# consume pkcs11 session id
unpacker.unpack_uint()
# save the key settings
if (code == DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_RSA):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_RSA_PRIVATE
# consume keylen
unpacker.unpack_uint()
# get the exponent because we need the size
exponent = unpacker.unpack_bytes()
# get the location of the flags so we can change if needed
exp_len = len(exponent)
exp_padding = (4 - exp_len % 4) % 4
flag_location = 20 + exp_len + exp_padding
elif (code == DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_EC):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_EC_PRIVATE
# get the location of the flags so we can change if needed
flag_location = 16
elif (code == DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_HASHSIG):
session.pkey_type = DKS_HALKeyType.HAL_KEY_TYPE_HASHSIG_PRIVATE
# get the location of the flags so we can change if needed
flag_location = 24
# get the flags
session.flags = rpc_get_int(session.current_request, flag_location)
# check to see if the rpc has been setup to allow exportable private keys
if ((session.enable_exportable_private_keys == True) and
(session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_USAGE_KEYENCIPHERMENT) == 0 and
(session.flags & DKS_HALKeyFlag.HAL_KEY_FLAG_USAGE_DATAENCIPHERMENT) == 0):
new_flag = session.flags | DKS_HALKeyFlag.HAL_KEY_FLAG_EXPORTABLE
session.current_request = rpc_set_int(session.current_request, new_flag, flag_location)
# sanity check. Make sure we get back what we just set
session.flags = rpc_get_int(session.current_request, flag_location)
logger.info("Key Gen Flags: 0x%X"%session.flags)
# select an RPC to use for this hashing operation
session.key_op_data.rpc_index = session.rpc_index if(session.rpc_index >= 0) else self.choose_rpc()
logger.info("session.rpc_index == %i session.key_op_data.rpc_index == %i",
session.rpc_index, session.key_op_data.rpc_index)
# inform the load balancer that we are doing an expensive key operation
self.update_device_weight(session.key_op_data.rpc_index, self.pkey_gen_weight)
return RPCAction(None, [self.rpc_list[session.key_op_data.rpc_index]], self.callback_rpc_keygen)
def callback_rpc_close_deletekey(self, reply_list):
unpacker = self.get_response_unpacker(reply_list[0])
code = unpacker.unpack_uint()
client = unpacker.unpack_uint()
result = unpacker.unpack_uint()
if(result != DKS_HALError.HAL_OK):
logger.info("callback_rpc_closekey: result != 0")
return self.create_error_response(code, client, result)
# get the session
session = self.get_session(client)
# inform the load balancer that we have closed a key
self.update_device_weight(session.key_op_data.rpc_index, -self.pkey_op_weight)
handle = session.key_op_data.handle
# this handle must be a key
if(handle not in session.key_rpcs):
logger.info("callback_rpc_close_deletekey: handle not in session.key_rpcs")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_BAD_ARGUMENTS)
if (code == DKS_RPCFunc.RPC_FUNC_PKEY_DELETE):
# get the details about the key so we can delete from the cache
keydetails = session.key_rpcs[handle]
uuid = keydetails.uuid
rpc_index = keydetails.rpc_index
session.cache.remove_key_from_alpha(rpc_index, uuid)
# clear data
session.key_rpcs.pop(handle, None)
# the key was closed so we are not working on anything now
session.key_op_data = KeyOperationData(None, None, None)
return RPCAction(reply_list[0], None, None)
def callback_rpc_keygen(self, reply_list):
unpacker = self.get_response_unpacker(reply_list[0])
code = unpacker.unpack_uint()
client = unpacker.unpack_uint()
result = unpacker.unpack_uint()
# get the session
session = self.get_session(client)
# inform the load balancer that we are no longer doing an expensive operation
self.update_device_weight(session.key_op_data.rpc_index, -self.pkey_gen_weight)
# keygen only happens on one alpha
if(len(reply_list) != 1):
logger.info("callback_rpc_keygen: len(reply_list) != 1")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
if (code != DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_EC and
code != DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_RSA and
code != DKS_RPCFunc.RPC_FUNC_PKEY_LOAD and
code != DKS_RPCFunc.RPC_FUNC_PKEY_IMPORT and
code != DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_HASHSIG):
logger.info("callback_rpc_keygen: incorrect code received")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
if(result != DKS_HALError.HAL_OK):
logger.info("callback_rpc_keygen: result != 0")
return self.create_error_response(code, client, result)
# inform the load balancer that we have an open pkey
# keygen automatically opens the key
self.update_device_weight(session.key_op_data.rpc_index, self.pkey_op_weight)
# get the handle
session.key_op_data.handle = unpacker.unpack_uint()
#get the new uuid
device_uuid = UUID(bytes = unpacker.unpack_bytes())
# save the RPC to use for this handle
session.key_rpcs[session.key_op_data.handle] = KeyHandleDetails(session.key_op_data.rpc_index, session.key_op_data.device_uuid)
# add new key to cache
logger.info("Key generated and added to cache RPC:%i UUID:%s Type:%i Flags:%i",
session.key_op_data.rpc_index, session.key_op_data.device_uuid, session.pkey_type, session.flags)
# save the device uuid internally
session.key_op_data.device_uuid = device_uuid
# unless we're caching and using master_uuids, return the device uuid
outgoing_uuid = device_uuid
if (session.cache_generated_keys):
master_uuid = session.cache.add_key_to_alpha(session.key_op_data.rpc_index,
device_uuid,
session.pkey_type,
session.flags)
if (not session.incoming_uuids_are_device_uuids):
# the master_uuid will always be returned to ethernet connections
outgoing_uuid = master_uuid
# generate reply with the outgoing uuid
reply = RPCKeygen_result.create(code, client, result,
session.key_op_data.handle,
outgoing_uuid)
return RPCAction(reply, None, None)
def handle_rpc_pkeymatch(self, code, client, unpacker, session):
"""match on all rpcs and then combine results
incoming UUIDs are master table UUIDs"""
# if the rpc_index has been set for the session, always use it
if(session.incoming_uuids_are_device_uuids):
if(session.rpc_index >= 0):
return RPCAction(None, [self.rpc_list[session.rpc_index]], None)
else:
logger.info("handle_rpc_pkeymatch: using device uuid, but device not set")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_IMPOSSIBLE)
session.keymatch_details = KeyMatchDetails()
# unpack and store key match attributes
session.keymatch_details.unpack(unpacker)
logger.info("pkey_match: result_max = %i, uuid = %s",
session.keymatch_details.result_max, session.keymatch_details.uuid)
# if uuid is none, search RPC 0
# else search starting with the RPC that the uuid is on
if(session.keymatch_details.uuid == KeyMatchDetails.none_uuid):
if(session.rpc_index >= 0):
session.keymatch_details.rpc_index = session.rpc_index
else:
session.keymatch_details.rpc_index = 0
else:
# need to convert master_uuid to device_uuid
if(session.rpc_index >= 0):
device_list = session.cache.get_alphas(session.keymatch_details.uuid)
if (session.rpc_index not in device_list):
logger.info("handle_rpc_pkeyopen: session.rpc_index not in device_list")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_KEY_NOT_FOUND)
session.keymatch_details.rpc_index = session.rpc_index
# need to update the command with the new UUID
session.keymatch_details.uuid = device_list[session.rpc_index]
else:
# find the rpc that this is on
device_to_search = session.cache.get_alpha_lowest_index(session.keymatch_details.uuid)
if(device_to_search is None):
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
session.keymatch_details.rpc_index = device_to_search[0]
# need to update the command with the new UUID
session.keymatch_details.uuid = device_to_search[1]
session.current_request = session.keymatch_details.repack(code, client)
# make sure the rpc_index was set
if(hasattr(session.keymatch_details, 'rpc_index') == False):
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
return RPCAction(None, [self.rpc_list[session.keymatch_details.rpc_index]], self.callback_rpc_pkeymatch)
def callback_rpc_pkeymatch(self, reply_list):
reply = reply_list[0]
logger.info("callback_rpc_pkeymatch")
unpacker = self.get_response_unpacker(reply)
code = unpacker.unpack_uint()
client = unpacker.unpack_uint()
result = unpacker.unpack_uint()
# this should have been called on exactly one alpha
if(len(reply_list) != 1):
logger.info("callback_rpc_pkeymatch: len(reply_list) != 1")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
# get the session
session = self.get_session(client)
if (code != DKS_RPCFunc.RPC_FUNC_PKEY_MATCH):
logger.info("callback_rpc_pkeymatch: code != RPCFunc.RPC_FUNC_PKEY_MATCH")
return self.create_error_response(code, client, DKS_HALError.HAL_ERROR_RPC_TRANSPORT)
if(result != 0):
logger.info("callback_rpc_pkeymatch: result != 0")
return self.create_error_response(code, client, result)
session.keymatch_details.result.code = code
session.keymatch_details.result.client = client
session.keymatch_details.result.result = result
# get the pkcs#11 session
session.keymatch_details.result.session = unpacker.unpack_uint()
# get the count
n = unpacker.unpack_uint()
rpc_index = session.keymatch_details.rpc_index
logger.info("Matching found %i keys", n)
for i in xrange(n):
u = UUID(bytes = unpacker.unpack_bytes())
# convert device UUID to master UUID and if uuid is
# also on a device with a lower index, don't add
master_uuid = session.cache.get_master_uuid(rpc_index, u)
if (master_uuid is not None):
lowest_index = session.cache.get_master_uuid_lowest_index(master_uuid)
if(lowest_index == rpc_index):
session.keymatch_details.result.uuid_list.append(master_uuid)
next_rpc = rpc_index + 1
if (len(session.keymatch_details.result.uuid_list) >= session.keymatch_details.result_max or
next_rpc >= len(self.rpc_list)):
# we've either reach the max or we've searched all devices
result_action = RPCAction(session.keymatch_details.result.build_result_packet(session.keymatch_details.result_max), None, None)
session.keymatch_details = None
return result_action
# we're searching a new alpha so start from 0
session.keymatch_details.rpc_index = next_rpc
session.keymatch_details.uuid = KeyMatchDetails.none_uuid
session.current_request = session.keymatch_details.repack(code, client)
# there may be more matching keys so generate another command
return RPCAction(None, [self.rpc_list[session.keymatch_details.rpc_index]], self.callback_rpc_pkeymatch)
def handle_rpc_getdevice_ip(self, code, client, unpacker, session):
# generate complete response
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
response.pack_uint(DKS_HALError.HAL_OK)
response.pack_bytes(self.netiface.get_ip())
return RPCAction(response.get_buffer(), None, None)
def handle_rpc_getdevice_state(self, code, client, unpacker, session):
# generate complete response
response = xdrlib.Packer()
response.pack_uint(code)
response.pack_uint(client)
response.pack_uint(DKS_HALError.HAL_OK)
response.pack_uint(len(self.rpc_list))
for rpc in self.rpc_list:
response.pack_bytes(str(rpc.state.value))
return RPCAction(response.get_buffer(), None, None)
def create_function_table(self):
"""Use a table to quickly select the method to handle each RPC request"""
self.function_table[DKS_RPCFunc.RPC_FUNC_GET_VERSION] = self.handle_rpc_any
self.function_table[DKS_RPCFunc.RPC_FUNC_GET_RANDOM] = self.handle_rpc_any
self.function_table[DKS_RPCFunc.RPC_FUNC_SET_PIN] = self.handle_rpc_all
self.function_table[DKS_RPCFunc.RPC_FUNC_LOGIN] = self.handle_rpc_all
self.function_table[DKS_RPCFunc.RPC_FUNC_LOGOUT] = self.handle_rpc_all
self.function_table[DKS_RPCFunc.RPC_FUNC_LOGOUT_ALL] = self.handle_rpc_all
self.function_table[DKS_RPCFunc.RPC_FUNC_IS_LOGGED_IN] = self.handle_rpc_all
self.function_table[DKS_RPCFunc.RPC_FUNC_HASH_GET_DIGEST_LEN] = self.handle_rpc_any
self.function_table[DKS_RPCFunc.RPC_FUNC_HASH_GET_DIGEST_ALGORITHM_ID] = self.handle_rpc_any
self.function_table[DKS_RPCFunc.RPC_FUNC_HASH_GET_ALGORITHM] = self.handle_rpc_hash
self.function_table[DKS_RPCFunc.RPC_FUNC_HASH_INITIALIZE] = self.handle_rpc_starthash
self.function_table[DKS_RPCFunc.RPC_FUNC_HASH_UPDATE] = self.handle_rpc_hash
self.function_table[DKS_RPCFunc.RPC_FUNC_HASH_FINALIZE] = self.handle_rpc_endhash
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_LOAD] = self.handle_rpc_pkeyload
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_OPEN] = self.handle_rpc_pkeyopen
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_RSA] = self.handle_rpc_keygen
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_EC] = self.handle_rpc_keygen
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_CLOSE] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_DELETE] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GET_KEY_TYPE] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GET_KEY_CURVE] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GET_KEY_FLAGS] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GET_PUBLIC_KEY_LEN] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GET_PUBLIC_KEY] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_SIGN] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_VERIFY] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_MATCH] = self.handle_rpc_pkeymatch
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_SET_ATTRIBUTES] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GET_ATTRIBUTES] = self.handle_rpc_pkey
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_EXPORT] = self.handle_rpc_pkeyexport
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_IMPORT] = self.handle_rpc_pkeyimport
self.function_table[DKS_RPCFunc.RPC_FUNC_PKEY_GENERATE_HASHSIG] = self.handle_rpc_keygen
self.function_table[DKS_RPCFunc.RPC_FUNC_GET_HSM_STATE] = self.handle_rpc_getdevice_state
self.function_table[DKS_RPCFunc.RPC_FUNC_GET_IP] = self.handle_rpc_getdevice_ip
self.function_table[DKS_RPCFunc.RPC_FUNC_SET_RPC_DEVICE] = self.handle_set_rpc
self.function_table[DKS_RPCFunc.RPC_FUNC_ENABLE_CACHE_KEYGEN] = self.handle_enable_cache_keygen
self.function_table[DKS_RPCFunc.RPC_FUNC_DISABLE_CACHE_KEYGEN] = self.handle_disable_cache_keygen
self.function_table[DKS_RPCFunc.RPC_FUNC_CHECK_TAMPER] = self.handle_rpc_usecurrent
self.function_table[DKS_RPCFunc.RPC_FUNC_USE_INCOMING_DEVICE_UUIDS] = self.handle_use_incoming_device_uuids
self.function_table[DKS_RPCFunc.RPC_FUNC_USE_INCOMING_MASTER_UUIDS] = self.handle_use_incoming_master_uuids
filtered: remove_non_ascii 0 | remove_decorators 90 | remove_async 0 | remove_classes 46,831 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 345 | remove_delete_markers 341

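A tiny round-trip check of the `rpc_get_int` / `rpc_set_int` helpers defined in `rpc_handling.py` above; RPC messages are byte strings holding big-endian 32-bit fields at fixed offsets, so reading back a field that was just written should return the new value (this assumes the two helpers are importable from that module):

```python
import struct

msg = struct.pack(">LLL", 0x11, 0x22, 0x33)  # code, client, one argument
assert rpc_get_int(msg, 4) == 0x22           # read the 32-bit field at byte offset 4
msg = rpc_set_int(msg, 0x99, 4)              # rewrite that field
assert rpc_get_int(msg, 4) == 0x99
```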
hexsha: 9a2f9bbcdef5df4bf28982e51a41ce770ed56584 | size: 745 | ext: py | lang: Python
max_stars_repo: path texttospeak.py | name PoomGamerE/Text-To-Speak-With-Python-Pitch-Supported- | head_hexsha 452a7a9bfaa22306da436832ea8e91185481d9d5 | licenses ["WTFPL"] | max_stars_count 1 | stars_event 2021-12-31T15:50:08.000Z to 2021-12-31T15:50:08.000Z
max_issues_repo: path texttospeak.py | name PoomGamerE/Text-To-Speak-With-Python-Pitch-Supported | head_hexsha 452a7a9bfaa22306da436832ea8e91185481d9d5 | licenses ["WTFPL"] | max_issues_count null | issues_event null to null
max_forks_repo: path texttospeak.py | name PoomGamerE/Text-To-Speak-With-Python-Pitch-Supported | head_hexsha 452a7a9bfaa22306da436832ea8e91185481d9d5 | licenses ["WTFPL"] | max_forks_count 1 | forks_event 2020-05-07T16:07:55.000Z to 2020-05-07T16:07:55.000Z
content:
from gtts import gTTS
from pydub import AudioSegment
AudioSegment.converter = "C:\\ffmpeg\\bin\\ffmpeg.exe"
AudioSegment.ffmpeg = "C:\\ffmpeg\\bin\\ffmpeg.exe"
AudioSegment.ffprobe ="C:\\ffmpeg\\bin\\ffprobe.exe"
from pydub.playback import play
import playsound
tts = gTTS(text='Test Hatsune Miku', lang='en', slow=True)
tts.save("input.mp3")
sound = AudioSegment.from_file('input.mp3', format="mp3")
# Customize
octaves = 0.2
new_sample_rate = int(sound.frame_rate * (2.0 ** octaves))
hipitch_sound = sound._spawn(sound.raw_data, overrides={'frame_rate': new_sample_rate})
hipitch_sound = hipitch_sound.set_frame_rate(44100)
play(hipitch_sound)
hipitch_sound.export("output.mp3", format="mp3")
playsound.playsound("output.mp3", True)
avg_line_length: 25.689655 | max_line_length: 87 | alphanum_fraction: 0.754362
original_content:
from gtts import gTTS
from pydub import AudioSegment
AudioSegment.converter = "C:\\ffmpeg\\bin\\ffmpeg.exe"
AudioSegment.ffmpeg = "C:\\ffmpeg\\bin\\ffmpeg.exe"
AudioSegment.ffprobe ="C:\\ffmpeg\\bin\\ffprobe.exe"
from pydub.playback import play
import playsound
tts = gTTS(text='Test Hatsune Miku', lang='en', slow=True)
tts.save("input.mp3")
sound = AudioSegment.from_file('input.mp3', format="mp3")
# Customize
octaves = 0.2
new_sample_rate = int(sound.frame_rate * (2.0 ** octaves))
hipitch_sound = sound._spawn(sound.raw_data, overrides={'frame_rate': new_sample_rate})
hipitch_sound = hipitch_sound.set_frame_rate(44100)
play(hipitch_sound)
hipitch_sound.export("output.mp3", format="mp3")
playsound.playsound("output.mp3", True)
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0

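The pitch trick in `texttospeak.py` works by overriding `frame_rate` (which raises pitch and speed together) and then resampling back to 44.1 kHz. A hypothetical helper that shifts by musical semitones instead of raw octaves, following the same pattern; it is not part of the original script:

```python
def shift_pitch(sound, semitones, target_rate=44100):
    """Shift a pydub AudioSegment by the given number of semitones (speed changes too)."""
    new_rate = int(sound.frame_rate * (2.0 ** (semitones / 12.0)))
    shifted = sound._spawn(sound.raw_data, overrides={"frame_rate": new_rate})
    return shifted.set_frame_rate(target_rate)
```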
hexsha: d9ad6c19f7ef674e649f00e8e4202cdc498e8bfb | size: 3,721 | ext: py | lang: Python
max_stars_repo: path __init__.py | name krisgesling/device-initiated-interaction-skill | head_hexsha e961aeadc3af1c5c3d10c0bb90e392cbe4ecb54a | licenses ["Apache-2.0"] | max_stars_count null | stars_event null to null
max_issues_repo: path __init__.py | name krisgesling/device-initiated-interaction-skill | head_hexsha e961aeadc3af1c5c3d10c0bb90e392cbe4ecb54a | licenses ["Apache-2.0"] | max_issues_count null | issues_event null to null
max_forks_repo: path __init__.py | name krisgesling/device-initiated-interaction-skill | head_hexsha e961aeadc3af1c5c3d10c0bb90e392cbe4ecb54a | licenses ["Apache-2.0"] | max_forks_count null | forks_event null to null
content:
API_ENDPOINT = "https://example.com/api"
API_KEY = "XXXXXXXXXXXXXXXXX"
MINUTES = 60 # seconds
HOURS = 60 * MINUTES
avg_line_length: 41.344444 | max_line_length: 145 | alphanum_fraction: 0.689331
original_content:
from datetime import datetime
from time import sleep
import requests
from mycroft import MycroftSkill, intent_handler
from mycroft.audio import wait_while_speaking
from mycroft.util.format import nice_duration, TimeResolution
API_ENDPOINT = "https://example.com/api"
API_KEY = "XXXXXXXXXXXXXXXXX"
MINUTES = 60 # seconds
HOURS = 60 * MINUTES
class DeviceInitiatedInteraction(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
self.proning_event = "Proning protocol"
def initialize(self):
self.settings_change_callback = self.on_settings_changed
def on_settings_changed(self):
""" Callback triggered when Skill settings are modified at
home.mycroft.ai/skills """
self.log.info("Trigger actions when settings are updated")
@intent_handler('interaction.initiated.device.intent')
def handle_interaction_initiated_device(self, message):
# Ensure any previously scheduled events are cancelled
self.cancel_scheduled_event(self.proning_event)
# Schedule the proning protocol every two hours, starting immediately
# https://mycroft-core.readthedocs.io/en/latest/source/mycroft.html?highlight=schedule#mycroft.MycroftSkill.schedule_repeating_event
self.schedule_repeating_event(self.proning_protocol,
datetime.now(), 2 * HOURS,
name=self.proning_event)
self.speak_dialog('interaction.initiated.device')
def proning_protocol(self):
self.speak_dialog('step.one')
# Pause for duration of Text to Speech
wait_while_speaking()
# Pause an additional period for patient to complete step
sleep(10)
# This method checks for a range of standard yes/no responses eg "yeah"
response = self.ask_yesno('confirm')
if response == 'yes':
self.speak_dialog('step.two')
# Schedule non-recurring patient check in 15 minutes
# Note: event time can be in seconds or a datetime
# https://mycroft-core.readthedocs.io/en/latest/source/mycroft.html?highlight=schedule#mycroft.MycroftSkill.schedule_event
self.schedule_event(self.check_on_patient, 30)
def check_on_patient(self):
# Ask user a question and record the response.
# Can also validate response and provide guidance if it fails validation
# https://mycroft-core.readthedocs.io/en/latest/source/mycroft.html#mycroft.MycroftSkill.get_response
response = self.get_response('get.feedback')
self.send_patient_response(response)
def send_patient_response(self, response):
# Standard POST request
data = { 'api_key': API_KEY, 'patient_response':response }
r = requests.post( url=API_ENDPOINT, data=data )
self.log.info("Sent feedback: {}".format(response))
@intent_handler('when.is.next.protocol.intent')
def handle_when_next(self, message):
# Get the time remaining before our scheduled event
# https://mycroft-core.readthedocs.io/en/latest/source/mycroft.html?highlight=get%20event#mycroft.MycroftSkill.get_scheduled_event_status
secs_remaining = self.get_scheduled_event_status(self.proning_event)
if secs_remaining:
# nice_duration formats our time remaining into a speakable format
# the resolution rounds this to the nearest minute
self.speak_dialog('time.remaining',
{ 'duration': nice_duration(secs_remaining,
resolution=TimeResolution.MINUTES) })
def create_skill():
return DeviceInitiatedInteraction()
| 0
| 1,394
| 0
| 1,898
| 0
| 38
| 0
| 94
| 180
|
88d3e7a0d8ea1590d06630c3b732f4ca8621ff36
| 319
|
py
|
Python
|
code/array_absurdity/array_absurdity.py
|
rafalmierzwiak/yearn
|
4f173f9c326bc742d90de26ad23af500713cd6a1
|
[
"Unlicense"
] | 1
|
2021-04-06T17:39:28.000Z
|
2021-04-06T17:39:28.000Z
|
code/array_absurdity/array_absurdity.py
|
rafalmierzwiak/yearn
|
4f173f9c326bc742d90de26ad23af500713cd6a1
|
[
"Unlicense"
] | null | null | null |
code/array_absurdity/array_absurdity.py
|
rafalmierzwiak/yearn
|
4f173f9c326bc742d90de26ad23af500713cd6a1
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
from sys import argv
with open(argv[1]) as f:
for line in f:
length, numbers = line.rstrip("\n").split(";")
duplicates = {}
for n in numbers.split(","):
if n in duplicates:
print(n)
break
duplicates[n] = True
| 21.266667
| 54
| 0.492163
|
#!/usr/bin/env python3
from sys import argv
with open(argv[1]) as f:
for line in f:
length, numbers = line.rstrip("\n").split(";")
duplicates = {}
for n in numbers.split(","):
if n in duplicates:
print(n)
break
duplicates[n] = True
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a2ef113d54b817448e697c5115dc69a977e0e9aa
| 107
|
py
|
Python
|
1. Python/2. Flow of Control/14.multiple_of_10.py
|
theparitoshkumar/Data-Structures-Algorithms-using-python
|
445b9dee56bca637f21267114cc1686d333ea4c4
|
[
"Apache-2.0"
] | 1
|
2021-12-05T18:02:15.000Z
|
2021-12-05T18:02:15.000Z
|
1. Python/2. Flow of Control/14.multiple_of_10.py
|
theparitoshkumar/Data-Structures-Algorithms-using-python
|
445b9dee56bca637f21267114cc1686d333ea4c4
|
[
"Apache-2.0"
] | null | null | null |
1. Python/2. Flow of Control/14.multiple_of_10.py
|
theparitoshkumar/Data-Structures-Algorithms-using-python
|
445b9dee56bca637f21267114cc1686d333ea4c4
|
[
"Apache-2.0"
] | null | null | null |
#Print multiples of 10 for numbers in a given range
for num in range(5):
if num > 0:
print(num * 10)
| 21.4
| 51
| 0.663551
|
#Print multiples of 10 for numbers in a given range
for num in range(5):
if num > 0:
print(num * 10)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dc7d4c67f2d423199d46c7b7ac45785d73c591e2
| 412
|
py
|
Python
|
SpaceHabitRPG/Tests/ForTestHelpers/Test_SpaceUnitTest.py
|
joelliusp/SpaceHabit
|
5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5
|
[
"MIT"
] | null | null | null |
SpaceHabitRPG/Tests/ForTestHelpers/Test_SpaceUnitTest.py
|
joelliusp/SpaceHabit
|
5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5
|
[
"MIT"
] | 13
|
2016-07-19T04:13:20.000Z
|
2016-08-17T06:06:47.000Z
|
SpaceHabitRPG/Tests/ForTestHelpers/Test_SpaceUnitTest.py
|
joelliusp/SpaceHabit
|
5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
unittest.main()
| 29.428571
| 71
| 0.696602
|
from SpaceUnitTest import SpaceUnitTest
class Test_SpaceUnitTest(SpaceUnitTest):
def test_assertBool(self):
t1 = None
t2 = 6
self.assertRaises(AssertionError, lambda :self.assertFalse(t1))
self.assertRaises(AssertionError, lambda :self.assertTrue(t2))
self.assertNotEqual(t1,False)
self.assertNotEqual(t2,True)
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
| 301
| 0
| 0
| 0
| 18
| 45
|
78ecea3a7cd48bf6116ebbd160f009bb753182bc
| 1,288
|
py
|
Python
|
mpltex/mpltex.py
|
bluesquall/mpltex
|
b4794c601ac085fe3b89b4fdb46eb92ae06a3e15
|
[
"MIT"
] | 1
|
2021-12-01T15:21:39.000Z
|
2021-12-01T15:21:39.000Z
|
mpltex/mpltex.py
|
bluesquall/mpltex
|
b4794c601ac085fe3b89b4fdb46eb92ae06a3e15
|
[
"MIT"
] | null | null | null |
mpltex/mpltex.py
|
bluesquall/mpltex
|
b4794c601ac085fe3b89b4fdb46eb92ae06a3e15
|
[
"MIT"
] | null | null | null |
"""
mpltex
======
"""
golden_ratio = (5**.5-1.0)/2.0
packages = ['amsmath', 'amssymb', 'amsfonts', 'amsbsy', 'bm']
usepackages = [r'\usepackage{{{0}}}'.format(pkg) for pkg in packages]
def get_rcParams(fig_width_pt = 232.0, scale = 1.0, dpi = 600):
"""TODO: write a description
Parameters
----------
fig_width_pt: float (232.0)
Width of the figure in points.
Use LaTeX \showthe\linewidth to figure out what number to use.
232.0 for two-column IEEEtran articles
YYYY for MIT/WHOI thesis figures
"""
fig_width = pt_to_in(fig_width_pt) # width in inches
fig_height = fig_width * golden_ratio # height in inches
fig_size = [fig_width * scale, fig_height * scale]
params = { 'axes.labelsize': 10,
'text.fontsize': 8,
'legend.fontsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'font.family': 'serif',
'font.serif': 'Times',
'text.usetex': True,
'text.latex.preamble': usepackages,
'figure.figsize': fig_size,
'figure.dpi': dpi}
return params
| 27.404255
| 70
| 0.559006
|
"""
mpltex
======
"""
golden_ratio = (5**.5-1.0)/2.0
packages = ['amsmath', 'amssymb', 'amsfonts', 'amsbsy', 'bm']
usepackages = [r'\usepackage{{{0}}}'.format(pkg) for pkg in packages]
def pt_to_in(pt):
return pt / 72.27
def pt_to_mm(pt):
raise NotImplementedError
def get_rcParams(fig_width_pt = 232.0, scale = 1.0, dpi = 600):
"""TODO: write a description
Parameters
----------
fig_width_pt: float (232.0)
Width of the figure in points.
Use LaTeX \showthe\linewidth to figure out what number to use.
232.0 for two-column IEEEtran articles
YYYY for MIT/WHOI thesis figures
"""
fig_width = pt_to_in(fig_width_pt) # width in inches
fig_height = fig_width * golden_ratio # height in inches
fig_size = [fig_width * scale, fig_height * scale]
params = { 'axes.labelsize': 10,
'text.fontsize': 8,
'legend.fontsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'font.family': 'serif',
'font.serif': 'Times',
'text.usetex': True,
'text.latex.preamble': usepackages,
'figure.figsize': fig_size,
'figure.dpi': dpi}
return params
| 0
| 0
| 0
| 0
| 0
| 44
| 0
| 0
| 46
|
a2361bc10b2d2184556a466dee04a4a83a83384d
| 4,167
|
py
|
Python
|
garam.py
|
albus137/csp_python_tests
|
952b68ea4dc22b1f87a40bfa9a54bf17d305e235
|
[
"MIT"
] | null | null | null |
garam.py
|
albus137/csp_python_tests
|
952b68ea4dc22b1f87a40bfa9a54bf17d305e235
|
[
"MIT"
] | null | null | null |
garam.py
|
albus137/csp_python_tests
|
952b68ea4dc22b1f87a40bfa9a54bf17d305e235
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pyexcel as pe
from time import time
# create the problem object
problem = Problem(RecursiveBacktrackingSolver())
# import the Garam grid
grid = pe.get_array(file_name='csp_grid.ods')
n_lines, n_cols = len(grid), len(grid[0])
# display the Garam grid nicely
print('Initial grid:')
print()
for i in range(n_lines):
for j in range(n_cols):
if grid[i][j] == '':
print(' ', end='')
else:
print(grid[i][j], end='')
print()
# add the variables to the problem
for i in range(n_lines):
for j in range(n_cols):
if grid[i][j] == '?':
domain = range(0, 10)
elif type(grid[i][j]) == int:
domain = [grid[i][j]]
problem.addVariable((i, j), domain)
# create the "coor grid" (grid whose cases are tuples containing the cases coordinates)
grid_coor = [[(i, j) for j in range(n_cols)] for i in range(n_lines)]
# set the first case of each constraint
h_constraints_origins = [[0, 0], [0, 8],
[2, 4],
[5, 0], [5, 8],
[9, 0], [9, 8],
[11, 4],
[14, 0], [14, 8]]
v_constraints_origins = [[0, 0], [9, 0],
[5, 2],
[0, 4], [9, 4],
[0, 8], [9, 8],
[5, 10],
[0, 12], [9, 12]]
# get the horizontal constraints
h_constraints = []
for origin in h_constraints_origins:
i_origin, j_origin = origin
constraint = grid_coor[i_origin][j_origin:j_origin+5]
h_constraints.append(constraint)
# get the vertical constraints
v_constraints = []
for k in range(len(v_constraints_origins)):
i_origin, j_origin = v_constraints_origins[k]
if k == 2 or k == 7:
nb_cases = 5
else:
nb_cases = 6
constraint = [line[j_origin] for line in grid_coor[i_origin:i_origin+nb_cases]]
v_constraints.append(constraint)
# add the constraints to the problem
constraints = h_constraints + v_constraints
for constraint in constraints:
# get the operation type (+, - or *)
i, j = constraint[1]
op = grid[i][j]
if len(constraint) == 5:
if op == '+':
constraint_function = lambda a, b, c: a+b == c
elif op == '-':
constraint_function = lambda a, b, c: a-b == c
elif op == '*':
constraint_function = lambda a, b, c: a*b == c
problem.addConstraint(constraint_function, (constraint[0], constraint[2], constraint[4]))
print('{}{}{}={}'.format(grid[constraint[0][0]][constraint[0][1]],
op,
grid[constraint[2][0]][constraint[2][1]],
grid[constraint[4][0]][constraint[4][1]]))
elif len(constraint) == 6:
if op == '+':
constraint_function = lambda a, b, c, d: a+b == c*10+d
elif op == '-':
constraint_function = lambda a, b, c, d: a-b == c*10+d
elif op == '*':
constraint_function = lambda a, b, c, d: a*b == c*10+d
problem.addConstraint(constraint_function, (constraint[0], constraint[2], constraint[4], constraint[5]))
print('{}{}{}={}{}'.format(grid[constraint[0][0]][constraint[0][1]],
op,
grid[constraint[2][0]][constraint[2][1]],
grid[constraint[4][0]][constraint[4][1]],
grid[constraint[5][0]][constraint[5][1]]))
print()
print('Solving the problem...')
# solve the problem
start = time()
solution = problem.getSolution()
end = time()
print('Elapsed time: {:.0f} s'.format(end-start))
# display the solution
print()
print('Solved grid:')
print()
for i in range(n_lines):
for j in range(n_cols):
if grid[i][j] == '?' or type(grid[i][j]) == int:
print(solution[(i, j)], end='')
elif grid[i][j] == '':
print(' ', end='')
elif type(grid[i][j]) == str:
print(grid[i][j], end='')
print()
| 31.330827
| 112
| 0.521718
|
#!/usr/bin/env python3
from constraint import *
import pyexcel as pe
from time import time
# create the problem object
problem = Problem(RecursiveBacktrackingSolver())
# import the Garam grid
grid = pe.get_array(file_name='csp_grid.ods')
n_lines, n_cols = len(grid), len(grid[0])
# display the Garam grid nicely
print('Initial grid:')
print()
for i in range(n_lines):
for j in range(n_cols):
if grid[i][j] == '':
print(' ', end='')
else:
print(grid[i][j], end='')
print()
# add the variables to the problem
for i in range(n_lines):
for j in range(n_cols):
if grid[i][j] == '?':
domain = range(0, 10)
elif type(grid[i][j]) == int:
domain = [grid[i][j]]
problem.addVariable((i, j), domain)
# create the "coor grid" (grid whose cases are tuples containing the cases coordinates)
grid_coor = [[(i, j) for j in range(n_cols)] for i in range(n_lines)]
# set the first case of each constraint
h_constraints_origins = [[0, 0], [0, 8],
[2, 4],
[5, 0], [5, 8],
[9, 0], [9, 8],
[11, 4],
[14, 0], [14, 8]]
v_constraints_origins = [[0, 0], [9, 0],
[5, 2],
[0, 4], [9, 4],
[0, 8], [9, 8],
[5, 10],
[0, 12], [9, 12]]
# get the horizontal constraints
h_constraints = []
for origin in h_constraints_origins:
i_origin, j_origin = origin
constraint = grid_coor[i_origin][j_origin:j_origin+5]
h_constraints.append(constraint)
# get the vertical constraints
v_constraints = []
for k in range(len(v_constraints_origins)):
i_origin, j_origin = v_constraints_origins[k]
if k == 2 or k == 7:
nb_cases = 5
else:
nb_cases = 6
constraint = [line[j_origin] for line in grid_coor[i_origin:i_origin+nb_cases]]
v_constraints.append(constraint)
# add the constraints to the problem
constraints = h_constraints + v_constraints
for constraint in constraints:
# get the operation type (+, - or *)
i, j = constraint[1]
op = grid[i][j]
if len(constraint) == 5:
if op == '+':
constraint_function = lambda a, b, c: a+b == c
elif op == '-':
constraint_function = lambda a, b, c: a-b == c
elif op == '*':
constraint_function = lambda a, b, c: a*b == c
problem.addConstraint(constraint_function, (constraint[0], constraint[2], constraint[4]))
print('{}{}{}={}'.format(grid[constraint[0][0]][constraint[0][1]],
op,
grid[constraint[2][0]][constraint[2][1]],
grid[constraint[4][0]][constraint[4][1]]))
elif len(constraint) == 6:
if op == '+':
constraint_function = lambda a, b, c, d: a+b == c*10+d
elif op == '-':
constraint_function = lambda a, b, c, d: a-b == c*10+d
elif op == '*':
constraint_function = lambda a, b, c, d: a*b == c*10+d
problem.addConstraint(constraint_function, (constraint[0], constraint[2], constraint[4], constraint[5]))
print('{}{}{}={}{}'.format(grid[constraint[0][0]][constraint[0][1]],
op,
grid[constraint[2][0]][constraint[2][1]],
grid[constraint[4][0]][constraint[4][1]],
grid[constraint[5][0]][constraint[5][1]]))
print()
print('Solving the problem...')
# solve the problem
start = time()
solution = problem.getSolution()
end = time()
print('Elapsed time: {:.0f} s'.format(end-start))
# display the solution
print()
print('Solved grid:')
print()
for i in range(n_lines):
for j in range(n_cols):
if grid[i][j] == '?' or type(grid[i][j]) == int:
print(solution[(i, j)], end='')
elif grid[i][j] == '':
print(' ', end='')
elif type(grid[i][j]) == str:
print(grid[i][j], end='')
print()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 23
|
60a0943dd24d4687989fb3460f5ca8fc22c02667
| 1,045
|
py
|
Python
|
venv/0713.py
|
JungHwan-Park21/python_workspace
|
7463724d6b9955bd594d8538c9636de7748f7ece
|
[
"MIT"
] | null | null | null |
venv/0713.py
|
JungHwan-Park21/python_workspace
|
7463724d6b9955bd594d8538c9636de7748f7ece
|
[
"MIT"
] | null | null | null |
venv/0713.py
|
JungHwan-Park21/python_workspace
|
7463724d6b9955bd594d8538c9636de7748f7ece
|
[
"MIT"
] | null | null | null |
# data = list(range(1, 46))
#
# import random
#
#
# def random_pop(data):
# number = random.randint(0, len(data) - 1)
# return data.pop(number)
#
#
# for i in range(1, 7):
# print(random_pop(data))
##
# import webbrowser
#
# url = "https://coastwatch.pfeg.noaa.gov/erddap/griddap/erdMH1chlamday.geotif?chlorophyll%5B(2021-11-16T00:00:00Z)%5D%5B(-57.97917):(-66.68751)%5D%5B(-63.64583):(-54.9375)%5D&.draw=surface&.vars=longitude%7Clatitude%7Cchlorophyll&.colorBar=%7C%7C%7C%7C%7C&.bgColor=0xffccccff"
#
# # urllib.request.urlretrieve(imgURL, "E:/test/image1.tif")
#
# webbrowser.open(url)
#
# import webbrowser
#
# url = "https://www.naver.com"
#
# webbrowser.open(url)
##
from datetime import date
##
start_date = date(2021, 1,1)
end_date = date(2021,12,31)
##
for single_date in daterange(start_date, end_date):
a = (single_date.strftime("%Y-%m-%d"))
print(a)
| 23.75
| 277
| 0.68134
|
# data = list(range(1, 46))
#
# import random
#
#
# def random_pop(data):
# number = random.randint(0, len(data) - 1)
# return data.pop(number)
#
#
# for i in range(1, 7):
# print(random_pop(data))
##
# import webbrowser
#
# url = "https://coastwatch.pfeg.noaa.gov/erddap/griddap/erdMH1chlamday.geotif?chlorophyll%5B(2021-11-16T00:00:00Z)%5D%5B(-57.97917):(-66.68751)%5D%5B(-63.64583):(-54.9375)%5D&.draw=surface&.vars=longitude%7Clatitude%7Cchlorophyll&.colorBar=%7C%7C%7C%7C%7C&.bgColor=0xffccccff"
#
# # urllib.request.urlretrieve(imgURL, "E:/test/image1.tif")
#
# webbrowser.open(url)
#
# import webbrowser
#
# url = "https://www.naver.com"
#
# webbrowser.open(url)
## Date iteration
from datetime import date, timedelta
## Set the start and end dates
start_date = date(2021, 1,1)
end_date = date(2021,12,31)
##
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
for single_date in daterange(start_date, end_date):
a = (single_date.strftime("%Y-%m-%d"))
print(a)
| 45
| 0
| 0
| 0
| 110
| 0
| 0
| 11
| 22
|
86b2b9be4c907cd7e31be2fd93d8800322a78cf9
| 739
|
py
|
Python
|
wagtaildemo/settings/production.py
|
caputomarcos/wagtaildemo
|
08cca1dbc44bb836c0923ff2720a8a3195f4042b
|
[
"BSD-3-Clause"
] | 3
|
2015-12-03T21:34:59.000Z
|
2017-08-12T16:53:33.000Z
|
wagtaildemo/settings/production.py
|
caputomarcos/wagtaildemo
|
08cca1dbc44bb836c0923ff2720a8a3195f4042b
|
[
"BSD-3-Clause"
] | null | null | null |
wagtaildemo/settings/production.py
|
caputomarcos/wagtaildemo
|
08cca1dbc44bb836c0923ff2720a8a3195f4042b
|
[
"BSD-3-Clause"
] | 4
|
2015-12-03T21:35:01.000Z
|
2020-11-14T09:29:29.000Z
|
DEBUG = False
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
'INDEX': 'wagtaildemo'
}
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
'KEY_PREFIX': 'wagtaildemo',
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
}
}
# Use the cached template loader
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
try:
except ImportError:
pass
| 19.447368
| 80
| 0.618403
|
from .base import *
DEBUG = False
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
'INDEX': 'wagtaildemo'
}
}
CACHES = {
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': '127.0.0.1:6379',
'KEY_PREFIX': 'wagtaildemo',
'OPTIONS': {
'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
}
}
}
# Use the cached template loader
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
try:
from .local import *
except ImportError:
pass
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -3
| 45
|
cacd68ed453b920081ba00b225a82efbfcad337e
| 590
|
py
|
Python
|
algorithms/decisiontree/decisiontree.classification.py
|
Marcnuth/DataScienceInterestGroup
|
cae73aa9f6ab1588b565492225a1086b93e121f6
|
[
"Apache-2.0"
] | null | null | null |
algorithms/decisiontree/decisiontree.classification.py
|
Marcnuth/DataScienceInterestGroup
|
cae73aa9f6ab1588b565492225a1086b93e121f6
|
[
"Apache-2.0"
] | null | null | null |
algorithms/decisiontree/decisiontree.classification.py
|
Marcnuth/DataScienceInterestGroup
|
cae73aa9f6ab1588b565492225a1086b93e121f6
|
[
"Apache-2.0"
] | null | null | null |
from sklearn import tree
from sklearn.datasets import load_iris
from sklearn.externals.six import StringIO
import pydot
from IPython.display import Image
if __name__ == '__main__':
iris = load_iris()
print predictByDTC(iris.data, iris.target, iris.data[0:,])
| 22.692308
| 62
| 0.713559
|
from sklearn import tree
from sklearn.datasets import load_iris
from sklearn.externals.six import StringIO
import pydot
from IPython.display import Image
def predictByDTC(X,Y,test):
clf = tree.DecisionTreeClassifier().fit(X, Y)
#Visualize decision tree
#dot_data = StringIO()
#tree.export_graphviz(clf, out_file=dot_data)
#graph = pydot.graph_from_dot_data(dot_data.getvalue())
#graph.write_pdf("dt.pdf")
return clf.predict(test)
if __name__ == '__main__':
iris = load_iris()
print predictByDTC(iris.data, iris.target, iris.data[0:,])
| 0
| 0
| 0
| 0
| 0
| 286
| 0
| 0
| 23
|
8718ddc93fcc419c4999d37a997527ee8f0ed08c
| 846
|
py
|
Python
|
ktts.py
|
Jeyadevanjd/ktts
|
3e6aef7546abfbdf08b533b757e8353aae9caa7e
|
[
"MIT"
] | 1
|
2020-05-15T08:21:42.000Z
|
2020-05-15T08:21:42.000Z
|
ktts.py
|
Jeyadevanjd/ktts
|
3e6aef7546abfbdf08b533b757e8353aae9caa7e
|
[
"MIT"
] | null | null | null |
ktts.py
|
Jeyadevanjd/ktts
|
3e6aef7546abfbdf08b533b757e8353aae9caa7e
|
[
"MIT"
] | null | null | null |
import os
from configparser import ConfigParser
configr = ConfigParser()
pp = os.getcwd()
config = ConfigParser()
val = []
path = pp
para = path + "/det.ini"
print(para)
val2 = []
config.read(para)
ad = config['installation']['installation']
print(pp)
if ad == "False":
    pas = input("please enter your password for root permission:")
ll = "echo " + pas + " |" + " sudo -S -k apt-get install flite"
if ad == "False":
print("starting")
os.system(ll)
config['installation'] = {
"installation" : "true"
}
with open(para, 'w') as f:
config.write(f)
print("done")
#say("a quick brown fox jumps over the lazy dog")
| 24.882353
| 84
| 0.605201
|
import os
import sys
from configparser import ConfigParser
configr = ConfigParser()
from subprocess import Popen as pop
pp = os.getcwd()
config = ConfigParser()
val = []
path = pp
para = path + "/det.ini"
print(para)
val2 = []
config.read(para)
ad = config['installation']['installation']
print(pp)
if ad == "False":
    pas = input("please enter your password for root permission:")
ll = "echo " + pas + " |" + " sudo -S -k apt-get install flite"
if ad == "False":
print("starting")
os.system(ll)
config['installation'] = {
"installation" : "true"
}
with open(para, 'w') as f:
config.write(f)
print("done")
def say(x):
print(x)
xr = '"' + x + '"'
exe = "cd " + path + " && " + "flite -voice ./cmu_us_eey.flitevox -t" + " " + xr
os.system(exe)
#say("a quick brown fox jumps over the lazy dog")
| 0
| 0
| 0
| 0
| 0
| 130
| 0
| 3
| 66
|
6d684bef9ff874d142d8e3ac37ec1b10bc574e68
| 675
|
py
|
Python
|
funds/migrations/0008_auto_20211205_0852.py
|
cnlis/lib_books
|
05bed0f9775826e0b1f968a766ddf5c2d1d55f40
|
[
"MIT"
] | null | null | null |
funds/migrations/0008_auto_20211205_0852.py
|
cnlis/lib_books
|
05bed0f9775826e0b1f968a766ddf5c2d1d55f40
|
[
"MIT"
] | null | null | null |
funds/migrations/0008_auto_20211205_0852.py
|
cnlis/lib_books
|
05bed0f9775826e0b1f968a766ddf5c2d1d55f40
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-12-05 08:52
| 23.275862
| 49
| 0.551111
|
# Generated by Django 3.2.9 on 2021-12-05 08:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('funds', '0007_document_type'),
]
operations = [
migrations.RenameField(
model_name='fund',
old_name='fund_count',
new_name='income_count',
),
migrations.AddField(
model_name='fund',
name='outcome_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='fund',
name='pupils',
field=models.IntegerField(default=0),
),
]
| 0
| 0
| 0
| 561
| 0
| 0
| 0
| 19
| 46
|
8ad2d2a254ba090cf3c2576f441bf3cfe9241a0a
| 1,117
|
py
|
Python
|
web/practicas/practica2/parser.py
|
luisjimenez6245/escom
|
a1ae1f988d02f88844f5d29fba75e7cee04998db
|
[
"MIT"
] | null | null | null |
web/practicas/practica2/parser.py
|
luisjimenez6245/escom
|
a1ae1f988d02f88844f5d29fba75e7cee04998db
|
[
"MIT"
] | null | null | null |
web/practicas/practica2/parser.py
|
luisjimenez6245/escom
|
a1ae1f988d02f88844f5d29fba75e7cee04998db
|
[
"MIT"
] | 1
|
2020-03-03T04:16:42.000Z
|
2020-03-03T04:16:42.000Z
|
text = " <option>lvaro Obregn</option>\n" +" <option>Azcapotzalco</option>\n" +" <option>Benito Jurez</option>\n" +" <option>Coyoacn</option>\n" +" <option>Cuajimalpa de Morelos</option>\n" +" <option>Cuauhtmoc</option>\n" +" <option>Gustavo A. Madero</option>\n" +" <option>Iztacalco</option>\n" +" <option>Iztapalapa</option>\n" +" <option>Magdalena Contreras</option>\n" +" <option>Miguel Hidalgo</option>\n" +" <option>Milpa Alta</option>\n" +" <option>Tlhuac</option>\n" +" <option>Tlalpan</option> \n" +" <option>Venustiano Carranza</option>\n" +" <option>Xochimilco</option> "
text = text.replace("<option>", "").replace("</option>", "").replace(" ", "")
items = text.split("\n")
res = ""
for item in items:
res += "<option value=\""+item+"\">" +item+ "</option>\n"
print(res)
| 101.545455
| 909
| 0.45658
|
text = " <option>Álvaro Obregón</option>\n" +" <option>Azcapotzalco</option>\n" +" <option>Benito Juárez</option>\n" +" <option>Coyoacán</option>\n" +" <option>Cuajimalpa de Morelos</option>\n" +" <option>Cuauhtémoc</option>\n" +" <option>Gustavo A. Madero</option>\n" +" <option>Iztacalco</option>\n" +" <option>Iztapalapa</option>\n" +" <option>Magdalena Contreras</option>\n" +" <option>Miguel Hidalgo</option>\n" +" <option>Milpa Alta</option>\n" +" <option>Tláhuac</option>\n" +" <option>Tlalpan</option> \n" +" <option>Venustiano Carranza</option>\n" +" <option>Xochimilco</option> "
text = text.replace("<option>", "").replace("</option>", "").replace(" ", "")
items = text.split("\n")
res = ""
for item in items:
res += "<option value=\""+item+"\">" +item+ "</option>\n"
print(res)
| 12
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
13ffa40792c9a2978d3b11a7f70c68598ebefae1
| 254
|
py
|
Python
|
Aulas/Aula13/tk01.py
|
matheusmenezs/com220
|
d699f00892df1259249ae012aa2a02f63ae0f06f
|
[
"MIT"
] | null | null | null |
Aulas/Aula13/tk01.py
|
matheusmenezs/com220
|
d699f00892df1259249ae012aa2a02f63ae0f06f
|
[
"MIT"
] | null | null | null |
Aulas/Aula13/tk01.py
|
matheusmenezs/com220
|
d699f00892df1259249ae012aa2a02f63ae0f06f
|
[
"MIT"
] | null | null | null |
# tk01.py
main()
| 18.142857
| 45
| 0.590551
|
# tk01.py
import tkinter as tk
def main():
    # Create the window
janela = tk.Tk()
janela.title('Primeira janela')
    # Enter the mainloop, which causes
    # the window to be rendered on the screen
janela.mainloop()
main()
| 0
| 0
| 0
| 0
| 0
| 192
| 0
| -1
| 45
|
1bf61e9ce9b3a3f64582a8b86bf57fcde353dd25
| 3,411
|
py
|
Python
|
stash/database.py
|
fkolacek/Stash
|
58c2e38306a89bf1b1b63d427ac5db9134471cc4
|
[
"MIT"
] | null | null | null |
stash/database.py
|
fkolacek/Stash
|
58c2e38306a89bf1b1b63d427ac5db9134471cc4
|
[
"MIT"
] | null | null | null |
stash/database.py
|
fkolacek/Stash
|
58c2e38306a89bf1b1b63d427ac5db9134471cc4
|
[
"MIT"
] | null | null | null |
#
# Author: Frantisek Kolacek <[email protected]>
# Version: 1.0
#
| 29.66087
| 128
| 0.577543
|
#
# Author: Frantisek Kolacek <[email protected]>
# Version: 1.0
#
import logging
import sqlite3
from datetime import datetime
from .exception import StashDatabaseException
class StashDatabase:
config = None
connection = None
cursor = None
def __init__(self, **config):
try:
self.config = config
self.connection = sqlite3.connect(self.config['name'])
self.connection.row_factory = sqlite3.Row
self.cursor = self.connection.cursor()
self.create_schema()
except sqlite3.Error as e:
raise StashDatabaseException(e) from None
def close(self):
if self.connection:
self.connection.commit()
self.cursor.close()
self.connection.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def create_schema(self):
logging.info('Checking/creating DB schema')
self.query("""CREATE TABLE IF NOT EXISTS repos (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type TEXT NOT NULL,
remote TEXT NOT NULL,
description TEXT,
created TEXT,
updated TEXT,
score INTEGER DEFAULT 0);""")
self.query("""CREATE TABLE IF NOT EXISTS tokens (
id INTEGER PRIMARY KEY,
token TEXT NOT NULL UNIQUE);""")
token = self.config.get('token')
if token and not self.is_token(token):
self.query('INSERT INTO tokens (token) VALUES (?)', [token])
def is_token(self, token):
return self.query_exists('SELECT COUNT(*) FROM tokens WHERE token = ?', [token])
def is_repo(self, id):
return self.query_exists('SELECT COUNT(*) FROM repos WHERE id = ?', [id])
def add_repo(self, name, type, remote, description):
sql = 'INSERT INTO repos (name, type, remote, description, created, score) VALUES (?, ?, ?, ?, ?, ?)'
self.query(sql, [name, type, remote, description, self.get_now(), 0])
def del_repo(self, id):
self.query('DELETE FROM repos WHERE id = ?', [id])
def get_repo(self, id):
if not self.is_repo(id):
return None
result = self.query('SELECT id, name, type, remote, description, created, updated, score FROM repos WHERE id = ?', [id])
return self.dict_from_row(result.fetchone())
def get_repos(self):
repos = []
result = self.query('SELECT id, name, type, remote, description, created, updated, score FROM repos')
for repo in result.fetchall():
repos.append(self.dict_from_row(repo))
return repos
def query(self, sql, args=None):
args = [] if args is None else args
logging.debug('Executing: {} [{}]'.format(sql, ','.join(str(v) for v in args)))
data = self.cursor.execute(sql, args)
self.connection.commit()
return data
def query_exists(self, sql, args=None):
args = [] if args is None else args
return self.query(sql, args).fetchone()[0] > 0
@staticmethod
def get_now():
return datetime.now().strftime('%Y-%m-%d %H:%M:%S')
@staticmethod
def dict_from_row(row):
return dict(zip(row.keys(), row))
| 0
| 133
| 0
| 3,082
| 0
| 0
| 0
| 18
| 113
|
3d9ba12ebbec8fcf3abd7c138d26883b8b57674c
| 1,321
|
py
|
Python
|
Flask/learnflask.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | 1
|
2022-01-14T18:03:42.000Z
|
2022-01-14T18:03:42.000Z
|
Flask/learnflask.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
Flask/learnflask.py
|
subash-kc/2022-01-04-Python
|
5ce51e4265bcd860a4e62423edef6ec9cd1437b4
|
[
"MIT"
] | null | null | null |
"""
Handle only one request one time
not good for developer
Flask is a small and lightweight Python web framework that provides useful tools and features that make creating web applications in Python easier.
It gives developers flexibility and is a more accessible framework for new developers since you can build a
web application quickly using only a single Python file
Flask is a web development framework created in Python language.
This Framework is based on the robust foundation of Jinja2 templates engine and Werkzeug comprehensive WSGI web application library.
"""
# An object of Flask class is our WSGI application
from flask import Flask
# Flask constructor takes the name of current
# module (__name__) as argument
app = Flask(__name__)
# route() function of the Flask class is a
# decorator, tells the application which URL
# should call the associated function
#not a good way to implement below code in flask
app.add_url_rule("/hello", "hello", hello_world1)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=2224) # runs the application
# app.run(host="0.0.0.0", port=2224, debug=True) # DEBUG MODE
| 32.219512
| 147
| 0.761544
|
"""
Handle only one request one time
not good for developer
Flask is a small and lightweight Python web framework that provides useful tools and features that make creating web applications in Python easier.
It gives developers flexibility and is a more accessible framework for new developers since you can build a
web application quickly using only a single Python file
Flask is a web development framework created in Python language.
This Framework is based on the robust foundation of Jinja2 templates engine and Werkzeug comprehensive WSGI web application library.
"""
# An object of Flask class is our WSGI application
from flask import Flask
# Flask constructor takes the name of current
# module (__name__) as argument
app = Flask(__name__)
# route() function of the Flask class is a
# decorator, tells the application which URL
# should call the associated function
@app.route("/")
def hello_world():
return "Hello World"
@app.route("/subash")
def hello_subash():
return "Hello, Namaste, Subash KC"
#not a good way to implement below code in flask
def hello_world1():
return "hello world"
app.add_url_rule("/hello", "hello", hello_world1)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=2224) # runs the application
# app.run(host="0.0.0.0", port=2224, debug=True) # DEBUG MODE
| 0
| 96
| 0
| 0
| 0
| 22
| 0
| 0
| 68
|
734d4660acd1fa981bb20e9452efd630f2edbc0e
| 17,263
|
py
|
Python
|
platformio/platforms/base.py
|
eiginn/platformio
|
33502f82f26e731f5bdc38c1ea6b17d1565dedd3
|
[
"Apache-2.0"
] | 2
|
2020-05-12T15:10:37.000Z
|
2021-07-02T15:41:56.000Z
|
platformio/platforms/base.py
|
eiginn/platformio
|
33502f82f26e731f5bdc38c1ea6b17d1565dedd3
|
[
"Apache-2.0"
] | null | null | null |
platformio/platforms/base.py
|
eiginn/platformio
|
33502f82f26e731f5bdc38c1ea6b17d1565dedd3
|
[
"Apache-2.0"
] | 1
|
2018-11-30T22:34:24.000Z
|
2018-11-30T22:34:24.000Z
|
# Copyright 2014-2016 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PLATFORM_PACKAGES = {
"framework-arduinoavr": [
("Arduino Wiring-based Framework (AVR Core, 1.6)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinosam": [
("Arduino Wiring-based Framework (SAM Core, 1.6)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinoteensy": [
("Arduino Wiring-based Framework",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinomsp430": [
("Arduino Wiring-based Framework (MSP430 Core)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinoespressif": [
("Arduino Wiring-based Framework (ESP8266 Core)",
"https://github.com/esp8266/Arduino")
],
"framework-arduinonordicnrf51": [
("Arduino Wiring-based Framework (RFduino Core)",
"https://github.com/RFduino/RFduino")
],
"framework-energiamsp430": [
("Energia Wiring-based Framework (MSP430 Core)",
"http://energia.nu/reference/")
],
"framework-energiativa": [
("Energia Wiring-based Framework (LM4F Core)",
"http://energia.nu/reference/")
],
"framework-cmsis": [
("Vendor-independent hardware abstraction layer for the Cortex-M "
"processor series",
"http://www.arm.com/products/processors/"
"cortex-m/cortex-microcontroller-software-interface-standard.php")
],
"framework-spl": [
("Standard Peripheral Library for STM32 MCUs",
"http://www.st.com"
"/web/catalog/tools/FM147/CL1794/SC961/SS1743/PF257890")
],
"framework-libopencm3": [
("libOpenCM3 Framework", "http://www.libopencm3.org/")
],
"framework-mbed": [
("mbed Framework", "http://mbed.org")
],
"framework-wiringpi": [
("GPIO Interface library for the Raspberry Pi", "http://wiringpi.com")
],
"sdk-esp8266": [
("ESP8266 SDK", "http://bbs.espressif.com")
],
"ldscripts": [
("Linker Scripts",
"https://sourceware.org/binutils/docs/ld/Scripts.html")
],
"toolchain-atmelavr": [
("avr-gcc", "https://gcc.gnu.org/wiki/avr-gcc"),
("GDB", "http://www.gnu.org/software/gdb/"),
("AVaRICE", "http://avarice.sourceforge.net/"),
("SimulAVR", "http://www.nongnu.org/simulavr/")
],
"toolchain-gccarmnoneeabi": [
("gcc-arm-embedded", "https://launchpad.net/gcc-arm-embedded"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-gccarmlinuxgnueabi": [
("GCC for Linux ARM GNU EABI", "https://gcc.gnu.org"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-gccmingw32": [
("MinGW", "http://www.mingw.org")
],
"toolchain-gcclinux32": [
("GCC for Linux i686", "https://gcc.gnu.org")
],
"toolchain-gcclinux64": [
("GCC for Linux x86_64", "https://gcc.gnu.org")
],
"toolchain-xtensa": [
("xtensa-gcc", "https://github.com/jcmvbkbc/gcc-xtensa"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-timsp430": [
("msp-gcc", "http://sourceforge.net/projects/mspgcc/"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"tool-scons": [
("SCons software construction tool", "http://www.scons.org")
],
"tool-avrdude": [
("AVRDUDE", "http://www.nongnu.org/avrdude/")
],
"tool-micronucleus": [
("Micronucleus", "https://github.com/micronucleus/micronucleus")
],
"tool-bossac": [
("BOSSA CLI", "https://sourceforge.net/projects/b-o-s-s-a/")
],
"tool-openocd": [
("OpenOCD", "http://openocd.org")
],
"tool-stlink": [
("ST-Link", "https://github.com/texane/stlink")
],
"tool-teensy": [
("Teensy Loader", "https://www.pjrc.com/teensy/loader.html")
],
"tool-lm4flash": [
("Flash Programmer", "http://www.ti.com/tool/lmflashprogrammer")
],
"tool-mspdebug": [
("MSPDebug", "http://mspdebug.sourceforge.net/")
],
"tool-esptool": [
("esptool-ck", "https://github.com/igrr/esptool-ck")
],
"tool-rfdloader": [
("rfdloader", "https://github.com/RFduino/RFduino")
],
"tool-mkspiffs": [
("Tool to build and unpack SPIFFS images",
"https://github.com/igrr/mkspiffs")
]
}
| 33.070881
| 78
| 0.573307
|
# Copyright 2014-2016 Ivan Kravets <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from imp import load_source
from multiprocessing import cpu_count
from os.path import isdir, isfile, join
import click
from platformio import app, exception, util
from platformio.app import get_state_item, set_state_item
from platformio.pkgmanager import PackageManager
PLATFORM_PACKAGES = {
"framework-arduinoavr": [
("Arduino Wiring-based Framework (AVR Core, 1.6)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinosam": [
("Arduino Wiring-based Framework (SAM Core, 1.6)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinoteensy": [
("Arduino Wiring-based Framework",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinomsp430": [
("Arduino Wiring-based Framework (MSP430 Core)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinoespressif": [
("Arduino Wiring-based Framework (ESP8266 Core)",
"https://github.com/esp8266/Arduino")
],
"framework-arduinonordicnrf51": [
("Arduino Wiring-based Framework (RFduino Core)",
"https://github.com/RFduino/RFduino")
],
"framework-energiamsp430": [
("Energia Wiring-based Framework (MSP430 Core)",
"http://energia.nu/reference/")
],
"framework-energiativa": [
("Energia Wiring-based Framework (LM4F Core)",
"http://energia.nu/reference/")
],
"framework-cmsis": [
("Vendor-independent hardware abstraction layer for the Cortex-M "
"processor series",
"http://www.arm.com/products/processors/"
"cortex-m/cortex-microcontroller-software-interface-standard.php")
],
"framework-spl": [
("Standard Peripheral Library for STM32 MCUs",
"http://www.st.com"
"/web/catalog/tools/FM147/CL1794/SC961/SS1743/PF257890")
],
"framework-libopencm3": [
("libOpenCM3 Framework", "http://www.libopencm3.org/")
],
"framework-mbed": [
("mbed Framework", "http://mbed.org")
],
"framework-wiringpi": [
("GPIO Interface library for the Raspberry Pi", "http://wiringpi.com")
],
"sdk-esp8266": [
("ESP8266 SDK", "http://bbs.espressif.com")
],
"ldscripts": [
("Linker Scripts",
"https://sourceware.org/binutils/docs/ld/Scripts.html")
],
"toolchain-atmelavr": [
("avr-gcc", "https://gcc.gnu.org/wiki/avr-gcc"),
("GDB", "http://www.gnu.org/software/gdb/"),
("AVaRICE", "http://avarice.sourceforge.net/"),
("SimulAVR", "http://www.nongnu.org/simulavr/")
],
"toolchain-gccarmnoneeabi": [
("gcc-arm-embedded", "https://launchpad.net/gcc-arm-embedded"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-gccarmlinuxgnueabi": [
("GCC for Linux ARM GNU EABI", "https://gcc.gnu.org"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-gccmingw32": [
("MinGW", "http://www.mingw.org")
],
"toolchain-gcclinux32": [
("GCC for Linux i686", "https://gcc.gnu.org")
],
"toolchain-gcclinux64": [
("GCC for Linux x86_64", "https://gcc.gnu.org")
],
"toolchain-xtensa": [
("xtensa-gcc", "https://github.com/jcmvbkbc/gcc-xtensa"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-timsp430": [
("msp-gcc", "http://sourceforge.net/projects/mspgcc/"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"tool-scons": [
("SCons software construction tool", "http://www.scons.org")
],
"tool-avrdude": [
("AVRDUDE", "http://www.nongnu.org/avrdude/")
],
"tool-micronucleus": [
("Micronucleus", "https://github.com/micronucleus/micronucleus")
],
"tool-bossac": [
("BOSSA CLI", "https://sourceforge.net/projects/b-o-s-s-a/")
],
"tool-openocd": [
("OpenOCD", "http://openocd.org")
],
"tool-stlink": [
("ST-Link", "https://github.com/texane/stlink")
],
"tool-teensy": [
("Teensy Loader", "https://www.pjrc.com/teensy/loader.html")
],
"tool-lm4flash": [
("Flash Programmer", "http://www.ti.com/tool/lmflashprogrammer")
],
"tool-mspdebug": [
("MSPDebug", "http://mspdebug.sourceforge.net/")
],
"tool-esptool": [
("esptool-ck", "https://github.com/igrr/esptool-ck")
],
"tool-rfdloader": [
("rfdloader", "https://github.com/RFduino/RFduino")
],
"tool-mkspiffs": [
("Tool to build and unpack SPIFFS images",
"https://github.com/igrr/mkspiffs")
]
}
def get_packages():
return PLATFORM_PACKAGES
class PlatformFactory(object):
@staticmethod
def get_clsname(type_):
return "%s%sPlatform" % (type_.upper()[0], type_.lower()[1:])
@staticmethod
def load_module(type_, path):
module = None
try:
module = load_source(
"platformio.platforms.%s" % type_, path)
except ImportError:
raise exception.UnknownPlatform(type_)
return module
@classmethod
@util.memoized
def _lookup_platforms(cls):
platforms = {}
for d in (util.get_home_dir(), util.get_source_dir()):
pdir = join(d, "platforms")
if not isdir(pdir):
continue
for p in sorted(os.listdir(pdir)):
if (p in ("__init__.py", "base.py") or not
p.endswith(".py")):
continue
type_ = p[:-3]
path = join(pdir, p)
try:
isplatform = hasattr(
cls.load_module(type_, path),
cls.get_clsname(type_)
)
if isplatform:
platforms[type_] = path
except exception.UnknownPlatform:
pass
return platforms
@classmethod
def get_platforms(cls, installed=False):
platforms = cls._lookup_platforms()
if not installed:
return platforms
installed_platforms = {}
for type_ in get_state_item("installed_platforms", []):
if type_ in platforms:
installed_platforms[type_] = platforms[type_]
return installed_platforms
@classmethod
def newPlatform(cls, type_):
platforms = cls.get_platforms()
if type_ not in platforms:
raise exception.UnknownPlatform(type_)
_instance = getattr(
cls.load_module(type_, platforms[type_]),
cls.get_clsname(type_)
)()
assert isinstance(_instance, BasePlatform)
return _instance
class BasePlatform(object):
PACKAGES = {}
LINE_ERROR_RE = re.compile(r"(\s+error|error[:\s]+)", re.I)
def __init__(self):
self._found_error = False
self._last_echo_line = None
# 1 = errors
# 2 = 1 + warnings
# 3 = 2 + others
self._verbose_level = 3
def get_type(self):
return self.__class__.__name__[:-8].lower()
def get_name(self):
return self.get_type().title()
def get_build_script(self):
builtin = join(util.get_source_dir(), "builder", "scripts", "%s.py" %
self.get_type())
if isfile(builtin):
return builtin
raise NotImplementedError()
def get_description(self):
if self.__doc__:
doclines = [l.strip() for l in self.__doc__.splitlines() if
l.strip()]
return " ".join(doclines[:-1]).strip()
else:
raise NotImplementedError()
def get_vendor_url(self):
if self.__doc__ and "http" in self.__doc__:
return self.__doc__[self.__doc__.index("http"):].strip()
else:
raise NotImplementedError()
def is_embedded(self):
for name, opts in self.get_packages().items():
if name == "framework-mbed" or opts.get("alias") == "uploader":
return True
return False
def get_packages(self):
return self.PACKAGES
def get_package_alias(self, pkgname):
return self.PACKAGES[pkgname].get("alias")
def pkg_aliases_to_names(self, aliases):
names = []
for alias in aliases:
name = alias
# lookup by package aliases
for _name, _opts in self.get_packages().items():
if _opts.get("alias") == alias:
name = None
names.append(_name)
# if alias is the right name
if name:
names.append(name)
return names
def get_default_packages(self):
return [k for k, v in self.get_packages().items()
if v.get("default", False)]
def get_installed_packages(self):
pm = PackageManager()
return [n for n in self.get_packages().keys() if pm.is_installed(n)]
def install(self, with_packages=None, without_packages=None,
skip_default_packages=False):
with_packages = set(
self.pkg_aliases_to_names(with_packages or []))
without_packages = set(
self.pkg_aliases_to_names(without_packages or []))
upkgs = with_packages | without_packages
ppkgs = set(self.get_packages().keys())
if not upkgs.issubset(ppkgs):
raise exception.UnknownPackage(", ".join(upkgs - ppkgs))
requirements = []
for name, opts in self.get_packages().items():
if name in without_packages:
continue
elif (name in with_packages or (not skip_default_packages and
opts.get("default"))):
requirements.append(name)
pm = PackageManager()
for name in requirements:
pm.install(name)
# register installed platform
data = get_state_item("installed_platforms", [])
if self.get_type() not in data:
data.append(self.get_type())
set_state_item("installed_platforms", data)
return len(requirements)
def uninstall(self):
platform = self.get_type()
installed_platforms = PlatformFactory.get_platforms(
installed=True).keys()
if platform not in installed_platforms:
raise exception.PlatformNotInstalledYet(platform)
deppkgs = set()
for item in installed_platforms:
if item == platform:
continue
p = PlatformFactory.newPlatform(item)
deppkgs = deppkgs.union(set(p.get_packages().keys()))
pm = PackageManager()
for name in self.get_packages().keys():
if not pm.is_installed(name) or name in deppkgs:
continue
pm.uninstall(name)
# unregister installed platform
installed_platforms.remove(platform)
set_state_item("installed_platforms", installed_platforms)
return True
def update(self):
pm = PackageManager()
for name in self.get_installed_packages():
pm.update(name)
def is_outdated(self):
pm = PackageManager()
obsolated = pm.get_outdated()
return not set(self.get_packages().keys()).isdisjoint(set(obsolated))
def configure_default_packages(self, envoptions, targets):
        # enable used frameworks
for pkg_name in self.pkg_aliases_to_names(["framework"]):
for framework in envoptions.get("framework", "").split(","):
framework = framework.lower().strip()
if not framework:
continue
if framework in pkg_name:
self.PACKAGES[pkg_name]['default'] = True
# append SCons tool
self.PACKAGES['tool-scons'] = {"default": True}
# enable upload tools for upload targets
if any(["upload" in t for t in targets] + ["program" in targets]):
for _name, _opts in self.PACKAGES.iteritems():
if _opts.get("alias") == "uploader":
self.PACKAGES[_name]['default'] = True
elif "uploadlazy" in targets:
# skip all packages, allow only upload tools
self.PACKAGES[_name]['default'] = False
def _install_default_packages(self):
installed_platforms = PlatformFactory.get_platforms(
installed=True).keys()
if (self.get_type() in installed_platforms and
set(self.get_default_packages()) <=
set(self.get_installed_packages())):
return True
if (not app.get_setting("enable_prompts") or
self.get_type() in installed_platforms or
click.confirm(
"The platform '%s' has not been installed yet. "
"Would you like to install it now?" % self.get_type())):
return self.install()
else:
raise exception.PlatformNotInstalledYet(self.get_type())
def run(self, variables, targets, verbose):
assert isinstance(variables, list)
assert isinstance(targets, list)
envoptions = {}
for v in variables:
_name, _value = v.split("=", 1)
envoptions[_name.lower()] = _value
self.configure_default_packages(envoptions, targets)
self._install_default_packages()
self._verbose_level = int(verbose)
if "clean" in targets:
targets.remove("clean")
targets.append("-c")
if "build_script" not in envoptions:
variables.append("BUILD_SCRIPT=%s" % self.get_build_script())
for v in variables:
if not v.startswith("BUILD_SCRIPT="):
continue
_, path = v.split("=", 1)
if not isfile(path):
raise exception.BuildScriptNotFound(path)
# append aliases of the installed packages
installed_packages = PackageManager.get_installed()
for name, options in self.get_packages().items():
if "alias" not in options or name not in installed_packages:
continue
variables.append(
"PIOPACKAGE_%s=%s" % (options['alias'].upper(), name))
self._found_error = False
result = self._run_scons(variables, targets)
assert "returncode" in result
# if self._found_error:
# result['returncode'] = 1
if self._last_echo_line == ".":
click.echo("")
return result
def _run_scons(self, variables, targets):
# pass current PYTHONPATH to SCons
if "PYTHONPATH" in os.environ:
_PYTHONPATH = os.environ.get("PYTHONPATH").split(os.pathsep)
else:
_PYTHONPATH = []
for p in os.sys.path:
if p not in _PYTHONPATH:
_PYTHONPATH.append(p)
os.environ['PYTHONPATH'] = os.pathsep.join(_PYTHONPATH)
result = util.exec_command(
[
os.path.normpath(sys.executable),
join(util.get_home_dir(), "packages", "tool-scons",
"script", "scons"),
"-Q",
"-j %d" % self.get_job_nums(),
"--warn=no-no-parallel-support",
"-f", join(util.get_source_dir(), "builder", "main.py")
] + variables + targets,
stdout=util.AsyncPipe(self.on_run_out),
stderr=util.AsyncPipe(self.on_run_err)
)
return result
def on_run_out(self, line):
self._echo_line(line, level=3)
def on_run_err(self, line):
is_error = self.LINE_ERROR_RE.search(line) is not None
if is_error:
self._found_error = True
self._echo_line(line, level=1 if is_error else 2)
def _echo_line(self, line, level):
assert 1 <= level <= 3
fg = ("red", "yellow", None)[level - 1]
if level == 3 and "is up to date" in line:
fg = "green"
if level > self._verbose_level:
click.secho(".", fg=fg, err=level < 3, nl=False)
self._last_echo_line = "."
return
if self._last_echo_line == ".":
click.echo("")
self._last_echo_line = line
click.secho(line, fg=fg, err=level < 3)
@staticmethod
def get_job_nums():
try:
return cpu_count()
except NotImplementedError:
return 1
| 0
| 2,024
| 0
| 9,717
| 0
| 27
| 0
| 81
| 427
|
ae7911041f957339b7880da7f8a77caa9715da61
| 2,462
|
py
|
Python
|
kedb/files/settings.py
|
salt-formulas/salt-formula-kedb
|
0739d4c8c5278ede0275820b8c5ba810e746c8f1
|
[
"Apache-2.0"
] | null | null | null |
kedb/files/settings.py
|
salt-formulas/salt-formula-kedb
|
0739d4c8c5278ede0275820b8c5ba810e746c8f1
|
[
"Apache-2.0"
] | null | null | null |
kedb/files/settings.py
|
salt-formulas/salt-formula-kedb
|
0739d4c8c5278ede0275820b8c5ba810e746c8f1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
{%- set app = pillar.kedb.server %}
from os.path import join, dirname, abspath, normpath
DATABASES = {
'default': {
{%- if app.database.engine == 'mysql' %}
'ENGINE': 'django.db.backends.mysql',
'PORT': '3306',
'OPTIONS': {'init_command': 'SET storage_engine=INNODB,character_set_connection=utf8,collation_connection=utf8_unicode_ci', },
{% else %}
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'PORT': '5432',
{%- endif %}
'HOST': '{{ app.database.host }}',
'NAME': '{{ app.database.name }}',
'PASSWORD': '{{ app.database.password }}',
'USER': '{{ app.database.user }}'
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{{ app.cache.host }}:11211',
'TIMEOUT': 120,
'KEY_PREFIX': '{{ app.cache.prefix }}'
}
}
EMAIL_HOST = '{{ app.mail.host }}',
EMAIL_HOST_USER = '{{ app.mail.user }}',
EMAIL_HOST_PASSWORD = '{{ app.mail.password }}'
{%- if pillar.linux is defined %}
TIME_ZONE = '{{ pillar.linux.system.timezone }}'
{%- else %}
TIME_ZONE = '{{ pillar.system.timezone }}'
{%- endif %}
SECRET_KEY = '{{ app.secret_key }}'
{%- if app.logger_handler is defined %}
LOCAL_INSTALLED_APPS = (
'raven.contrib.django.raven_compat',
)
{%- else %}
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(asctime)s %(process)d %(levelname)s %(name)s '
'%(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/var/log/kedb/django.log',
'formatter': 'verbose',
},
},
}
{%- endif %}
RAVEN_CONFIG = {
{% if app.logger_handler is defined %}
'dsn': '{{ app.logger_handler.dsn }}',
{% endif %}
}
| 28.627907
| 134
| 0.561738
|
# -*- coding: utf-8 -*-
{%- set app = pillar.kedb.server %}
from os.path import join, dirname, abspath, normpath
DATABASES = {
'default': {
{%- if app.database.engine == 'mysql' %}
'ENGINE': 'django.db.backends.mysql',
'PORT': '3306',
'OPTIONS': {'init_command': 'SET storage_engine=INNODB,character_set_connection=utf8,collation_connection=utf8_unicode_ci', },
{% else %}
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'PORT': '5432',
{%- endif %}
'HOST': '{{ app.database.host }}',
'NAME': '{{ app.database.name }}',
'PASSWORD': '{{ app.database.password }}',
'USER': '{{ app.database.user }}'
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '{{ app.cache.host }}:11211',
'TIMEOUT': 120,
'KEY_PREFIX': '{{ app.cache.prefix }}'
}
}
EMAIL_HOST = '{{ app.mail.host }}',
EMAIL_HOST_USER = '{{ app.mail.user }}',
EMAIL_HOST_PASSWORD = '{{ app.mail.password }}'
{%- if pillar.linux is defined %}
TIME_ZONE = '{{ pillar.linux.system.timezone }}'
{%- else %}
TIME_ZONE = '{{ pillar.system.timezone }}'
{%- endif %}
SECRET_KEY = '{{ app.secret_key }}'
{%- if app.logger_handler is defined %}
LOCAL_INSTALLED_APPS = (
'raven.contrib.django.raven_compat',
)
{%- else %}
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(asctime)s %(process)d %(levelname)s %(name)s '
'%(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': '/var/log/kedb/django.log',
'formatter': 'verbose',
},
},
}
{%- endif %}
RAVEN_CONFIG = {
{% if app.logger_handler is defined %}
'dsn': '{{ app.logger_handler.dsn }}',
{% endif %}
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
276b2b75102958887a594dd0dc549f5b1e092d4b
| 6,243
|
py
|
Python
|
snesc_checker.py
|
wminner/SNES-Classic-Checker
|
9b9fc1b72ad4d64969d3456cd1b5d05cd03c7d46
|
[
"MIT"
] | 3
|
2017-08-23T01:32:31.000Z
|
2018-01-09T03:11:24.000Z
|
snesc_checker.py
|
wminner/SNES-Classic-Checker
|
9b9fc1b72ad4d64969d3456cd1b5d05cd03c7d46
|
[
"MIT"
] | null | null | null |
snesc_checker.py
|
wminner/SNES-Classic-Checker
|
9b9fc1b72ad4d64969d3456cd1b5d05cd03c7d46
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
# import browser_cookie3
# Create email with subject and body, then sends via gmail address provided
# Gets and caches your gmail address
# Gets your gmail password (does not cache)
# Strip off script name in arg list
if __name__ == "__main__":
main(sys.argv[1:])
| 31.852041
| 168
| 0.682364
|
#!/usr/bin/env python3
import re, sys
import getopt, pickle
import time, datetime
import urllib.request
import smtplib
from email.mime.text import MIMEText
import getpass
# import browser_cookie3
def main(argv):
default_send_email = "<your_sender_gmail>@gmail.com"
default_receive_email = "<your_receiver_gmail>@gmail.com"
sleep_time = 60
test_run = False
max_alerts = -1 # -1 means unlimited alerts
num_alerts = 0
websites = ['Bestbuy', 'Walmart', 'BHPhoto']
search_strings = {
'Amazon' : b"Currently unavailable.",
'Bestbuy' : b"data-add-to-cart-message=\"Coming Soon\"",
'Walmart' : b'<span class="copy-mini display-block-xs font-bold u-textBlack">Out of stock<link itemprop="availability" href="https://schema.org/OutOfStock"/></span>',
'BHPhoto' : b"data-selenium=\"notStock\">New Item - Coming Soon",
'Target' : b'class="sbc-add-to-cart btn btn-primary btn-lg btn-block sbc-selected" disabled=""> coming soon </button>'
}
urls = {
'Amazon' : "https://www.amazon.com/gp/product/B0721GGGS9",
'Bestbuy' : "http://www.bestbuy.com/site/nintendo-entertainment-system-snes-classic-edition/5919830.p?skuId=5919830",
'Walmart' : "https://www.walmart.com/ip/PO-HDW-PLACEHOLDER-652-WM50-Universal/55791858",
'BHPhoto' : "https://www.bhphotovideo.com/c/product/1347308-REG/nintendo_snes_super_nintendo_classic_edition.html",
'Target' : "https://www.target.com/p/snes-classic-edition/-/A-52826093"
}
def print_usage():
print("Usage: snesc_checker.py [option(s)]")
print(" [-n <max_num_of_alerts>] limits the max number of alerts")
print(" [-s <sleep_time_in_sec>] changes the sleep time")
print(" [-t] test email mode")
def search_website(website, url):
if website == 'Amazon' or website == 'Target': # Not working, removed from websites list
# cj = browser_cookie3.firefox()
# opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
else:
opener = urllib.request.build_opener()
try:
response = opener.open(url)
html = response.read()
except Exception as e:
print(e)
print("Exception occurred during {0} fetch!".format(website))
return 0
if search_strings[website] in html:
print("{0} Unavailble {1}...".format(website, datetime.datetime.now()))
# send_email(sender, sender_pass, receiver, "Not in stock", "test {0}".format(url))
return 0
else:
print("\n{0} IN STOCK {1}!!!!!!!!!!!!!!!!!!!!!!!!!\n".format(website.upper(), datetime.datetime.now()))
send_email(sender, sender_pass, receiver, "SNES CLASSIC IN STOCK AT {0}".format(website.upper()), url)
return 1
def progress_bar():
print("|0%", end="")
for k in range(int(sleep_time/2)-6):
print(" ", end="")
print("Sleep ", end="")
for k in range(int(sleep_time/2)-6):
print(" ", end="")
print("100%|\n|", end="")
sleep_cnt = 0
while sleep_cnt < sleep_time:
time.sleep(1)
print(".", end="", flush=True)
sleep_cnt += 1
print('|')
# Parse arguments
if len(sys.argv) != 0:
try:
            opts, args = getopt.getopt(argv, "htn:s:", ["num=", "sleep="])
except getopt.GetoptError:
print_usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_usage()
sys.exit()
elif opt == '-t':
test_run = True
            elif opt in ('-s', '--sleep'):
                sleep_time = int(arg)
            elif opt in ('-n', '--num'):
max_alerts = int(arg)
if max_alerts <= 0:
print("Invalid max number of alerts! Exiting...")
sys.exit(2)
# Get email and password from which to send and receive alerts
sender = get_gmail_address(default_send_email, "send")
sender_pass = get_password(sender)
receiver = get_gmail_address(default_receive_email, "receive")
print("")
if test_run:
print("Sent test email.")
send_email(sender, sender_pass, receiver, "SNES Classic Checker - Test Email", "It works!")
# Main loop
try:
while True:
for website in websites:
num_alerts += search_website(website, urls[website])
if max_alerts >= 0 and num_alerts >= max_alerts:
print("Reached max number of alerts! Exiting...")
send_email(sender, sender_pass, receiver, "SNES Classic Checker - Max Alerts Reached", "Please restart SNES Classic Checker to receive more email alerts!")
sys.exit()
# Wait for a while before checking again
# time.sleep(sleep_time)
progress_bar()
except KeyboardInterrupt:
print("\nExiting...")
# Create email with subject and body, then sends via gmail address provided
def send_email(sender, sender_pass, receiver, subject, body):
# Create email
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = receiver
# Send email out
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(sender, sender_pass)
server.send_message(msg)
server.quit()
# Gets and caches your gmail address
def get_gmail_address(default_addr, mode):
last_email_addr = "{0}_email.pkl".format(mode)
    # Try to get pickle of the last-used email address
try:
with open(last_email_addr, 'rb') as fp:
last_addr = pickle.load(fp)
except IOError:
last_addr = None
if mode == "send":
if last_addr:
addr = input("Enter your {0} gmail address (or return to use {1}): ".format(mode.upper(), last_addr)) or last_addr
else:
addr = input("Enter your {0} gmail address: ".format(mode.upper()))
else:
if last_addr:
addr = input("Enter your {0} gmail address (or return to use {1}): ".format(mode.upper(), last_addr)) or last_addr
else:
addr = input("Enter your {0} gmail address: ".format(mode.upper()))
# Validate address
    pattern = re.compile(r"[^@]+@gmail\.com")
match = re.match(pattern, addr)
if not match:
print("Gmail address \"{0}\" is not valid!".format(addr))
sys.exit(1)
    # Save the last-used email address to the pickle file
with open(last_email_addr, 'wb') as fp:
pickle.dump(addr, fp)
return addr
# Gets your gmail password (does not cache)
def get_password(email):
password = getpass.getpass("Enter password for {0}: ".format(email))
if not password:
print("Email password should not be empty!")
sys.exit(1)
return password
# Strip off script name in arg list
if __name__ == "__main__":
main(sys.argv[1:])
| 0
| 0
| 0
| 0
| 0
| 5,708
| 0
| 5
| 222
|
0ecd9b1334e318b4ccffc4b6eeae536b6e03414f
| 186
|
py
|
Python
|
python-flask-server-generated/swagger_server/controllers/__init__.py
|
procube-open/pdns_certbot_server
|
516207b2416781c94a8620bb3a1d0897c773142d
|
[
"MIT"
] | null | null | null |
python-flask-server-generated/swagger_server/controllers/__init__.py
|
procube-open/pdns_certbot_server
|
516207b2416781c94a8620bb3a1d0897c773142d
|
[
"MIT"
] | null | null | null |
python-flask-server-generated/swagger_server/controllers/__init__.py
|
procube-open/pdns_certbot_server
|
516207b2416781c94a8620bb3a1d0897c773142d
|
[
"MIT"
] | null | null | null |
import threading
import logging
LOCK = threading.Lock()
logger = logging.getLogger("CERTBOT")
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
| 20.666667
| 37
| 0.801075
|
import threading
import logging
LOCK = threading.Lock()
logger = logging.getLogger("CERTBOT")
handler = logging.StreamHandler()
logger.addHandler(handler)
logger.setLevel(logging.INFO)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a09b9af2b847bf39b063ead0a72aab28cd93427e
| 126
|
py
|
Python
|
wtpy/apps/__init__.py
|
Huijun-Cui/wtpy
|
9a8243a20b944fbb37aa33d81215b7b36ac7b1e2
|
[
"MIT"
] | null | null | null |
wtpy/apps/__init__.py
|
Huijun-Cui/wtpy
|
9a8243a20b944fbb37aa33d81215b7b36ac7b1e2
|
[
"MIT"
] | null | null | null |
wtpy/apps/__init__.py
|
Huijun-Cui/wtpy
|
9a8243a20b944fbb37aa33d81215b7b36ac7b1e2
|
[
"MIT"
] | null | null | null |
from .WtBtAnalyst import WtBtAnalyst
from .WtCtaOptimizer import WtCtaOptimizer
__all__ = ["WtBtAnalyst","WtCtaOptimizer"]
| 31.5
| 43
| 0.809524
|
from .WtBtAnalyst import WtBtAnalyst
from .WtCtaOptimizer import WtCtaOptimizer
__all__ = ["WtBtAnalyst","WtCtaOptimizer"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
86633a33a9926907327bdb9b93dde0ee954c35a3
| 729
|
py
|
Python
|
tests/debugger.py
|
fferri/pygolog
|
98a4c5d9de053ee2e43ae2f61fa9f098eb38598a
|
[
"BSD-3-Clause"
] | 7
|
2017-06-15T14:56:37.000Z
|
2021-11-12T22:27:47.000Z
|
tests/debugger.py
|
fferri/pygolog
|
98a4c5d9de053ee2e43ae2f61fa9f098eb38598a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/debugger.py
|
fferri/pygolog
|
98a4c5d9de053ee2e43ae2f61fa9f098eb38598a
|
[
"BSD-3-Clause"
] | 1
|
2021-05-14T11:13:01.000Z
|
2021-05-14T11:13:01.000Z
|
#!/usr/bin/env python3
from domains.math1 import S
s = S(0)
p = Sequence(
Choose(
Exec(S.incr()),
Exec(S.double())
),
Choose(
Exec(S.incr()),
Exec(S.double())
),
Test(lambda s: s.n == 1)
)
debug(p, s)
| 22.78125
| 71
| 0.489712
|
#!/usr/bin/env python3
from collections import defaultdict
from copy import copy
from strips import *
from golog_program import *
from domains.math1 import S
s = S(0)
p = Sequence(
Choose(
Exec(S.incr()),
Exec(S.double())
),
Choose(
Exec(S.incr()),
Exec(S.double())
),
Test(lambda s: s.n == 1)
)
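# Interactive tracer: prints the actions taken since the parent call, the
# current situation and the remaining program (marking final configurations),
# then, if the user answers 'c' (creep), recurses into every transition
# returned by p.trans(s).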
def debug(p, s, a=[], depth=0):
print(' '*depth+'%s' % a)
print(' '*depth+'s: %s' % s)
print(' '*depth+'p: %s%s' % (p, ' (final)' if p.final(s) else ''))
i = input('> [c=creep, other=skip] ')
if i != 'c': return
for pn, sn, an in p.trans(s):
debug(pn, sn, an[len(a):], depth+1)
debug(p, s)
| 0
| 0
| 0
| 0
| 0
| 291
| 0
| 19
| 112
|
c7b05272d678883c8d35b24c852e7ccef6f65d43
| 12,515
|
py
|
Python
|
tests/test_utf8inputtext.py
|
polm/SudachiPy
|
9eae063baa65d230cd89e382685fe9e410577125
|
[
"Apache-2.0"
] | 3
|
2017-09-21T14:56:30.000Z
|
2017-11-10T05:44:30.000Z
|
tests/test_utf8inputtext.py
|
polm/SudachiPy
|
9eae063baa65d230cd89e382685fe9e410577125
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utf8inputtext.py
|
polm/SudachiPy
|
9eae063baa65d230cd89e382685fe9e410577125
|
[
"Apache-2.0"
] | 1
|
2021-12-17T05:39:38.000Z
|
2021-12-17T05:39:38.000Z
|
# Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sudachipy.dictionarylib as dictionarylib
if __name__ == '__main__':
unittest.main()
| 48.886719
| 111
| 0.703076
|
# Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import sudachipy
import sudachipy.dictionarylib as dictionarylib
class TestUTF8InputText(unittest.TestCase):
def setUp(self):
self.TEXT = "âbC1あ234漢字𡈽アゴ"
self.bytes = [
b'0xC3', b'0xA2', b'0xEF', b'0xBD', b'0x82',
b'0x43', b'0x31', b'0xE3', b'0x81', b'0x82',
b'0x32', b'0x33', b'0x34', b'0xE6', b'0xBC',
b'0xA2', b'0xE5', b'0xAD', b'0x97', b'0xF0',
b'0xA1', b'0x88', b'0xBD', b'0xE3', b'0x82',
b'0xA2', b'0xEF', b'0xBD', b'0xBA', b'0xEF',
b'0xBE', b'0x9E'
]
self.input = None
grammar = self.MockGrammar()
char_category = dictionarylib.charactercategory.CharacterCategory()
this_dir = os.path.dirname(os.path.abspath(__file__))
char_category.read_character_definition(os.path.join(this_dir, 'resources/char.def'))
grammar.set_character_category(char_category)
self.builder = sudachipy.utf8inputtextbuilder.UTF8InputTextBuilder(self.TEXT, grammar)
def test_get_original_text(self):
self.assertEqual(self.builder.get_original_text(), self.TEXT)
self.assertEqual(self.builder.get_text(), self.TEXT)
self.input = self.builder.build()
self.assertEqual(self.input.get_original_text(), self.TEXT)
self.assertEqual(self.input.get_text(), self.TEXT)
def test_get_byte_text(self):
input_ = self.builder.build()
self.assertEqual(len(input_.get_byte_text()), 32)
self.assertEqual(self.TEXT.encode('utf-8'), input_.get_byte_text())
def test_get_original_index(self):
input_ = self.builder.build()
self.assertEqual(input_.get_original_index(0), 0)
self.assertEqual(input_.get_original_index(1), 0)
self.assertEqual(input_.get_original_index(2), 1)
self.assertEqual(input_.get_original_index(4), 1)
self.assertEqual(input_.get_original_index(6), 3)
self.assertEqual(input_.get_original_index(7), 4)
self.assertEqual(input_.get_original_index(10), 5)
self.assertEqual(input_.get_original_index(18), 9)
self.assertEqual(input_.get_original_index(19), 10)
self.assertEqual(input_.get_original_index(22), 10)
self.assertEqual(input_.get_original_index(23), 11)
self.assertEqual(input_.get_original_index(28), 12)
self.assertEqual(input_.get_original_index(31), 13)
def test_get_char_category_types(self):
input_ = self.builder.build()
self.assertTrue(dictionarylib.categorytype.CategoryType.ALPHA in input_.get_char_category_types(0))
self.assertTrue(dictionarylib.categorytype.CategoryType.ALPHA in input_.get_char_category_types(2))
self.assertTrue(dictionarylib.categorytype.CategoryType.ALPHA in input_.get_char_category_types(5))
self.assertTrue(dictionarylib.categorytype.CategoryType.NUMERIC in input_.get_char_category_types(6))
self.assertTrue(dictionarylib.categorytype.CategoryType.HIRAGANA in input_.get_char_category_types(7))
self.assertTrue(dictionarylib.categorytype.CategoryType.HIRAGANA in input_.get_char_category_types(9))
self.assertTrue(dictionarylib.categorytype.CategoryType.NUMERIC in input_.get_char_category_types(10))
self.assertTrue(dictionarylib.categorytype.CategoryType.KANJI in input_.get_char_category_types(13))
self.assertTrue(dictionarylib.categorytype.CategoryType.KANJI in input_.get_char_category_types(18))
self.assertTrue(dictionarylib.categorytype.CategoryType.DEFAULT in input_.get_char_category_types(19))
self.assertTrue(dictionarylib.categorytype.CategoryType.DEFAULT in input_.get_char_category_types(22))
self.assertTrue(dictionarylib.categorytype.CategoryType.KATAKANA in input_.get_char_category_types(23))
self.assertTrue(dictionarylib.categorytype.CategoryType.KATAKANA in input_.get_char_category_types(26))
self.assertTrue(dictionarylib.categorytype.CategoryType.KATAKANA in input_.get_char_category_types(31))
def test_get_char_category_continuous_length(self):
input_ = self.builder.build()
self.assertEqual(input_.get_char_category_continuous_length(0), 6)
self.assertEqual(input_.get_char_category_continuous_length(1), 5)
self.assertEqual(input_.get_char_category_continuous_length(2), 4)
self.assertEqual(input_.get_char_category_continuous_length(5), 1)
self.assertEqual(input_.get_char_category_continuous_length(6), 1)
self.assertEqual(input_.get_char_category_continuous_length(7), 3)
self.assertEqual(input_.get_char_category_continuous_length(10), 3)
self.assertEqual(input_.get_char_category_continuous_length(11), 2)
self.assertEqual(input_.get_char_category_continuous_length(12), 1)
self.assertEqual(input_.get_char_category_continuous_length(19), 4)
self.assertEqual(input_.get_char_category_continuous_length(22), 1)
self.assertEqual(input_.get_char_category_continuous_length(23), 9)
self.assertEqual(input_.get_char_category_continuous_length(26), 6)
self.assertEqual(input_.get_char_category_continuous_length(31), 1)
def test_replace_with_same_length(self):
self.builder.replace(8, 10, "ああ")
self.assertEqual(self.builder.get_original_text(), self.TEXT)
self.assertEqual(self.builder.get_text(), "âbC1あ234ああ𡈽アゴ")
input_ = self.builder.build()
self.assertEqual(input_.get_original_text(), self.TEXT)
self.assertEqual(input_.get_text(), "âbC1あ234ああ𡈽アゴ")
self.assertEqual(len(input_.get_byte_text()), 32)
self.assertEqual(input_.get_original_index(0), 0)
self.assertEqual(input_.get_original_index(12), 7)
self.assertEqual(input_.get_original_index(13), 8)
self.assertEqual(input_.get_original_index(15), 8)
self.assertEqual(input_.get_original_index(16), 10)
self.assertEqual(input_.get_original_index(18), 10)
self.assertEqual(input_.get_original_index(19), 10)
self.assertEqual(input_.get_original_index(22), 10)
self.assertEqual(input_.get_original_index(31), 13)
def test_replaceWithDeletion(self):
self.builder.replace(8, 10, "あ")
self.assertEqual(self.builder.get_original_text(), self.TEXT)
self.assertEqual(self.builder.get_text(), "âbC1あ234あ𡈽アゴ")
input_ = self.builder.build()
self.assertEqual(input_.get_original_text(), self.TEXT)
self.assertEqual(input_.get_text(), "âbC1あ234あ𡈽アゴ")
self.assertEqual(len(input_.get_byte_text()), 29)
self.assertEqual(input_.get_original_index(0), 0)
self.assertEqual(input_.get_original_index(12), 7)
self.assertEqual(input_.get_original_index(13), 8)
self.assertEqual(input_.get_original_index(15), 8)
self.assertEqual(input_.get_original_index(16), 10)
self.assertEqual(input_.get_original_index(19), 10)
self.assertEqual(input_.get_original_index(28), 13)
def test_replaceWithInsertion(self):
self.builder.replace(8, 10, "あああ")
self.assertEqual(self.builder.get_original_text(), self.TEXT)
self.assertEqual(self.builder.get_text(), "âbC1あ234あああ𡈽アゴ")
input_ = self.builder.build()
self.assertEqual(input_.get_original_text(), self.TEXT)
self.assertEqual(input_.get_text(), "âbC1あ234あああ𡈽アゴ")
self.assertEqual(len(input_.get_byte_text()), 35)
self.assertEqual(input_.get_original_index(0), 0) # â
self.assertEqual(input_.get_original_index(12), 7) # 4
self.assertEqual(input_.get_original_index(13), 8) # >あ< ああ
self.assertEqual(input_.get_original_index(21), 10) # ああ >あ<
self.assertEqual(input_.get_original_index(22), 10) # 𡈽
self.assertEqual(input_.get_original_index(25), 10) # 𡈽
self.assertEqual(input_.get_original_index(35), 14) # ゙
def test_replaceMultiTimes(self):
self.builder.replace(0, 1, "a")
self.builder.replace(1, 2, "b")
self.builder.replace(2, 3, "c")
self.builder.replace(10, 11, "土")
self.builder.replace(12, 14, "ゴ")
input_ = self.builder.build()
self.assertEqual(input_.get_original_text(), self.TEXT)
self.assertEqual(input_.get_text(), "abc1あ234漢字土アゴ")
self.assertEqual(len(input_.get_byte_text()), 25)
self.assertEqual(input_.get_original_index(0), 0)
self.assertEqual(input_.get_original_index(1), 1)
self.assertEqual(input_.get_original_index(2), 2)
self.assertEqual(input_.get_original_index(7), 5)
self.assertEqual(input_.get_original_index(8), 6)
self.assertEqual(input_.get_original_index(9), 7)
self.assertEqual(input_.get_original_index(15), 9)
self.assertEqual(input_.get_original_index(16), 10)
self.assertEqual(input_.get_original_index(18), 10)
self.assertEqual(input_.get_original_index(19), 11)
self.assertEqual(input_.get_original_index(21), 11)
self.assertEqual(input_.get_original_index(22), 12)
self.assertEqual(input_.get_original_index(24), 12)
def test_getByteLengthByCodePoints(self):
input_ = self.builder.build()
self.assertEqual(input_.get_code_points_offset_length(0, 1), 2)
self.assertEqual(input_.get_code_points_offset_length(0, 4), 7)
self.assertEqual(input_.get_code_points_offset_length(10, 1), 1)
self.assertEqual(input_.get_code_points_offset_length(11, 1), 1)
self.assertEqual(input_.get_code_points_offset_length(12, 1), 1)
self.assertEqual(input_.get_code_points_offset_length(13, 2), 6)
self.assertEqual(input_.get_code_points_offset_length(19, 1), 4)
self.assertEqual(input_.get_code_points_offset_length(23, 3), 9)
def test_codePointCount(self):
input_ = self.builder.build()
self.assertEqual(input_.code_point_count(0, 2), 1)
self.assertEqual(input_.code_point_count(0, 7), 4)
self.assertEqual(input_.code_point_count(13, 19), 2)
def test_canBow(self):
input_ = self.builder.build()
self.assertTrue(input_.can_bow(0)) # â
self.assertFalse(input_.can_bow(1))
self.assertFalse(input_.can_bow(2)) # b
self.assertFalse(input_.can_bow(3))
self.assertFalse(input_.can_bow(4))
self.assertFalse(input_.can_bow(5)) # C
self.assertTrue(input_.can_bow(6)) # 1
self.assertTrue(input_.can_bow(7)) # あ
self.assertTrue(input_.can_bow(19)) # 𡈽
self.assertFalse(input_.can_bow(20))
self.assertFalse(input_.can_bow(21))
self.assertFalse(input_.can_bow(22))
self.assertTrue(input_.can_bow(23)) # ア
def test_getWordCandidateLength(self):
input_ = self.builder.build()
self.assertEqual(input_.get_word_candidate_length(0), 6)
self.assertEqual(input_.get_word_candidate_length(6), 1)
self.assertEqual(input_.get_word_candidate_length(19), 4)
self.assertEqual(input_.get_word_candidate_length(29), 3)
class MockGrammar:
char_category = None
def get_part_of_speech_size(self):
return 0
def get_part_of_speech_string(self, pos_id):
return None
def get_part_of_speech_id(self, pos):
return 0
def get_connect_cost(self, left_id, right_id, cost=None):
if cost is None:
return 0
else:
return
def get_bos_parameter(self):
return None
def get_eos_parameter(self):
return None
def get_character_category(self):
return self.char_category
def set_character_category(self, char_category):
self.char_category = char_category
if __name__ == '__main__':
unittest.main()
| 277
| 0
| 0
| 11,661
| 0
| 0
| 0
| -17
| 69
|
9694a2f313620b91b57ba7cf9877cc25e3d3f26a
| 8,996
|
py
|
Python
|
mtil/reward_injection_wrappers.py
|
qxcv/mtil
|
62608046efb570b53f8107b8de9a7a1f28aee28a
|
[
"0BSD"
] | 1
|
2021-01-18T23:57:07.000Z
|
2021-01-18T23:57:07.000Z
|
mtil/reward_injection_wrappers.py
|
qxcv/mtil
|
62608046efb570b53f8107b8de9a7a1f28aee28a
|
[
"0BSD"
] | null | null | null |
mtil/reward_injection_wrappers.py
|
qxcv/mtil
|
62608046efb570b53f8107b8de9a7a1f28aee28a
|
[
"0BSD"
] | null | null | null |
"""Wrappers for rlpyt algorithms that inject reward from a custom reward model
at execution time.
TODO: need to figure out exactly how I'm going to do this for algorithms other
than PG. Some notes:
- For PG algorithms (PPO + A2C) it's easy to override the reward used at
training time by subclassing & overriding the process_returns(samples)
method. This won't work for algos with replay buffers!
- Not sure what to do for DQN. Prioritised DQN is a pain (and probably not
possible to do efficiently anyway, so I may as well skip it). Probably my
best bet is to override the loss() function to use actual reward. That will
also be annoying b/c by default the algorithm uses a "return_" thing
calculated by the replay buffer; to avoid that, I'll have to touch most parts
of the loss() method (so say goodbye to forward-compat with future versions
of rlpyt).
- Also not yet sure how to customise reward evaluation in samplers; perhaps I
shouldn't be doing that at all, and should instead write my own eval code?
I'll definitely need my own code if I want to display both eval_score and the
learnt reward."""
# ################# #
# For PG algorithms #
# ################# #
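# A minimal sketch (not the full implementation in this module, which also
# batches the reward model, normalises per task and logs extra stats) of the
# PG idea from the docstring above: swap the sampled environment reward for
# the output of a learned reward model before returns/advantages are computed.
# Assumes rlpyt's PPO with the process_returns(samples) hook used further
# below; `reward_model` is a hypothetical callable attached to the algorithm.
from rlpyt.algos.pg.ppo import PPO


class SketchRewardPPO(PPO):
    reward_model = None  # set externally before training

    def process_returns(self, samples):
        # score the sampled (observation, action) pairs with the learned model
        new_reward = self.reward_model(samples.env.observation,
                                       samples.agent.action)
        # substitute the synthetic reward into the samples namedtuple
        samples = samples._replace(env=samples.env._replace(reward=new_reward))
        # let the parent algorithm compute returns/advantages as usual
        return super().process_returns(samples)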
# ################ #
# For DQN variants #
# ################ #
# (TODO: going to try policy gradient first & then move to DQN if it seems more
# efficient)
| 39.113043
| 79
| 0.626723
|
"""Wrappers for rlpyt algorithms that inject reward from a custom reward model
at execution time.
TODO: need to figure out exactly how I'm going to do this for algorithms other
than PG. Some notes:
- For PG algorithms (PPO + A2C) it's easy to override the reward used at
training time by subclassing & overriding the process_returns(samples)
method. This won't work for algos with replay buffers!
- Not sure what to do for DQN. Prioritised DQN is a pain (and probably not
possible to do efficiently anyway, so I may as well skip it). Probably my
best bet is to override the loss() function to use actual reward. That will
also be annoying b/c by default the algorithm uses a "return_" thing
calculated by the replay buffer; to avoid that, I'll have to touch most parts
of the loss() method (so say goodbye to forward-compat with future versions
of rlpyt…).
- Also not yet sure how to customise reward evaluation in samplers; perhaps I
shouldn't be doing that at all, and should instead write my own eval code?
I'll definitely need my own code if I want to display both eval_score and the
learnt reward."""
from collections import namedtuple
import warnings
from rlpyt.algos.pg.a2c import A2C
from rlpyt.algos.pg.ppo import PPO
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
import torch
from mtil.utils.misc import RunningMeanVariance, tree_map
# ################# #
# For PG algorithms #
# ################# #
class RewardEvaluatorMT:
"""Batching, multi-task reward evaluator which can optionally standardise
reward values."""
def __init__(self,
task_ids_and_names,
reward_model,
obs_dims,
batch_size=256,
target_std=0.1,
normalise=False):
self.task_ids_and_names = task_ids_and_names
self.batch_size = batch_size
self.target_std = target_std
self.normalise = normalise
self.obs_dims = obs_dims
if normalise:
# TODO: replace self.rew_running_average with
# self.rew_running_averages (appropriately indexed)
num_tasks = 1 + max(
task_id for env_name, task_id in self.task_ids_and_names)
self.rew_running_averages = [
RunningMeanVariance(()) for _ in range(num_tasks)
]
self.dev = next(iter(reward_model.parameters())).device
self.reward_model = reward_model
def evaluate(self, obs_tuple, act_tensor, update_stats=True):
# put model into eval mode if necessary
old_training = self.reward_model.training
if old_training:
self.reward_model.eval()
with torch.no_grad():
# flatten observations & actions
obs_image = obs_tuple.observation
old_dev = obs_image.device
lead_dim, T, B, _ = infer_leading_dims(obs_image, self.obs_dims)
# use tree_map so we are able to handle the namedtuple directly
obs_flat = tree_map(
lambda t: t.view((T * B, ) + t.shape[lead_dim:]), obs_tuple)
act_flat = act_tensor.view((T * B, ) + act_tensor.shape[lead_dim:])
# now evaluate one batch at a time
reward_tensors = []
for b_start in range(0, T * B, self.batch_size):
obs_batch = obs_flat[b_start:b_start + self.batch_size]
act_batch = act_flat[b_start:b_start + self.batch_size]
dev_obs = tree_map(lambda t: t.to(self.dev), obs_batch)
dev_acts = act_batch.to(self.dev)
dev_reward = self.reward_model(dev_obs, dev_acts)
reward_tensors.append(dev_reward.to(old_dev))
# join together the batch results
new_reward_flat = torch.cat(reward_tensors, 0)
new_reward = restore_leading_dims(new_reward_flat, lead_dim, T, B)
task_ids = obs_tuple.task_id
assert new_reward.shape == task_ids.shape
# put back into training mode if necessary
if old_training:
self.reward_model.train(old_training)
# normalise if necessary
if self.normalise:
mus = []
stds = []
for task_id, averager in enumerate(self.rew_running_averages):
if update_stats:
id_sub = task_ids.view((-1, )) == task_id
if not torch.any(id_sub):
continue
rew_sub = new_reward.view((-1, ))[id_sub]
averager.update(rew_sub)
mus.append(averager.mean.item())
stds.append(averager.std.item())
mus = new_reward.new_tensor(mus)
stds = new_reward.new_tensor(stds)
denom = torch.max(stds.new_tensor(1e-3), stds / self.target_std)
denom_sub = denom[task_ids]
mu_sub = mus[task_ids]
# only bother applying result if we've actually seen an update
# before (otherwise reward will be insane)
new_reward = (new_reward - mu_sub) / denom_sub
return new_reward
class CustomRewardMixinPg:
# filled in by set_reward_model()
_reward_model = None
# filled in by set_reward_model()
_dev = None
# filled in by process_returns()
_last_rew_ret_adv = None
# filled in by optimize_agent()
_RRAInfo = None
    # _custom_logging_fields is used by GAILMinibatchRl (and also by
    # optimize_agent())
_custom_logging_fields = ('synthRew', 'synthRet', 'synthAdv')
def __init__(self, *args, true_reward_weight=None, **kwargs):
super().__init__(*args, **kwargs)
self._true_reward_weight = true_reward_weight
if self._true_reward_weight:
assert 0 < self._true_reward_weight <= 1.0, \
f"true_reward_weight must be in [0,1], but was " \
f"{self._true_reward_weight} (this code takes a convex " \
f"combination of true & GAIL rewards)"
warnings.warn(
"USING GROUND TRUTH REWARD (!) in CustomRewardMixinPg. This "
"is only for debugging, so remember to disable it later!")
def set_reward_evaluator(self, reward_evaluator):
self._reward_eval = reward_evaluator
def process_returns(self, samples):
assert self._reward_eval is not None, \
"must call .set_reward_eval() on algorithm before continuing"
# evaluate new rewards
new_reward = self._reward_eval.evaluate(samples.env.observation,
samples.agent.action)
# sanity-check reward shapes
assert new_reward.shape == samples.env.reward.shape, \
(new_reward.shape, samples.env.reward.shape)
if self._true_reward_weight:
            # debugging branch: use ground truth rewards
alpha = self._true_reward_weight
warnings.warn(f"USING GROUND TRUTH REWARD (!) at alpha={alpha}")
env_reward = samples.env.reward
new_reward = (1 - alpha) * new_reward + alpha * env_reward
# replace rewards
new_samples = samples._replace(env=samples.env._replace(
reward=new_reward))
# actually do return/advantage calculations
return_, advantage, valid = super().process_returns(new_samples)
# record old reward, return, and advantage so that we can log stats
# later
self._last_rew_ret_adv = (new_reward, return_, advantage)
return return_, advantage, valid
@staticmethod
def _to_cpu_list(t):
# rlpyt logging code only pays attention to lists of numbers, so if I
# want my synthetic reward etc. to be logged then I need to put it here
return list(t.detach().cpu().flatten().numpy())
def optimize_agent(self, itr, samples):
# slightly hacky, but whatever
opt_info = super().optimize_agent(itr, samples)
# log extra data
if self._RRAInfo is None:
old_fields = opt_info._fields
all_fields = [*old_fields, *self._custom_logging_fields]
self._RRAInfo = namedtuple('_RRAInfo', all_fields)
rew, ret, adv = self._last_rew_ret_adv
rra_info = self._RRAInfo(**opt_info._asdict(),
synthRew=self._to_cpu_list(rew),
synthRet=self._to_cpu_list(ret),
synthAdv=self._to_cpu_list(adv))
# being defensive: I want to see exception if we try to unpack the same
# thing twice
self._last_rew_ret_adv = None
return rra_info
class CustomRewardPPO(CustomRewardMixinPg, PPO):
pass
class CustomRewardA2C(CustomRewardMixinPg, A2C):
pass
# ################ #
# For DQN variants #
# ################ #
# (TODO: going to try policy gradient first & then move to DQN if it seems more
# efficient)
| 3
| 231
| 0
| 7,046
| 0
| 0
| 0
| 110
| 249
|
3594e17236136b711372a9945750692b0c864ddd
| 1,701
|
py
|
Python
|
src/apps/alarm_clock/tests/test_alarm_time.py
|
stefanhoelzl/alarm-clock
|
efba84e71fcade26bef020dc7eaa10181ea9f96c
|
[
"MIT"
] | 1
|
2019-07-31T12:39:53.000Z
|
2019-07-31T12:39:53.000Z
|
src/apps/alarm_clock/tests/test_alarm_time.py
|
stefanhoelzl/alarm-clock
|
efba84e71fcade26bef020dc7eaa10181ea9f96c
|
[
"MIT"
] | null | null | null |
src/apps/alarm_clock/tests/test_alarm_time.py
|
stefanhoelzl/alarm-clock
|
efba84e71fcade26bef020dc7eaa10181ea9f96c
|
[
"MIT"
] | 1
|
2019-10-04T04:32:20.000Z
|
2019-10-04T04:32:20.000Z
|
# time.localtime(126000) 35*60*60
# time.struct_time(tm_year=1970, tm_mon=1, tm_mday=2,
# tm_hour=12, tm_min=0, tm_sec=0,
# tm_wday=4, tm_yday=2, tm_isdst=0)
| 30.375
| 80
| 0.633157
|
import pytest
from unittest.mock import patch
from ..alarm_time import diff_between_weekdays, get_next_weekday, next_time
# time.localtime(126000) 35*60*60
# time.struct_time(tm_year=1970, tm_mon=1, tm_mday=2,
# tm_hour=12, tm_min=0, tm_sec=0,
# tm_wday=4, tm_yday=2, tm_isdst=0)
def time_(hours=0, minutes=0):
return 126000 + (hours * 60 + minutes) * 60
class TestDiffBetweenWeekdays:
@pytest.mark.parametrize("current, next, diff", (
(0, 1, 1),
(0, 0, 0),
(6, 0, 1),
(6, 6, 0),
))
def test_allDays(self, current, next, diff):
assert diff_between_weekdays(current, next) == diff
class TestGetNextWeekday:
def test_singleValueInList(self):
assert get_next_weekday(2, (1,)) == 1
def test_multipleOrderedDays(self):
assert get_next_weekday(1, (1, 4)) == 4
def test_multipleOrderedDaysWithOverflow(self):
assert get_next_weekday(4, (1, 4)) == 1
def test_multipleUnorderedDays(self):
assert get_next_weekday(3, (4, 1, 3)) == 4
def test_multipleUnorderedDaysWithOverflow(self):
assert get_next_weekday(4, (4, 1, 3)) == 1
class TestNextTime:
def test_sameDay_later(self):
assert next_time(13, 0, current_time=time_()) == time_(1, 0)
def test_sameDay_later_withDays(self):
nt = next_time(13, 0, days=(0, 1, 2, 3, 4, 5, 6), current_time=time_())
assert nt == time_(1, 0)
def test_sameDay_earlier_getsNextDay(self):
assert next_time(11, 0, current_time=time_()) == time_(23, 0)
def test_days(self):
assert next_time(12, 0, days=(6,), current_time=time_()) == time_(48, 0)
| 0
| 236
| 0
| 971
| 0
| 57
| 0
| 56
| 184
|
8dcff54d0b2c4427d452f34f93a80e755ff0e5b6
| 422
|
py
|
Python
|
Array/python/reverse_aryasoni98.py
|
CodeTrophs/450DSA
|
84ab1b13cd837b860c4b747d567e3507b29291b3
|
[
"MIT"
] | 5
|
2021-05-15T13:35:33.000Z
|
2021-07-21T09:35:51.000Z
|
Array/python/reverse_aryasoni98.py
|
CodeTrophs/450DSA
|
84ab1b13cd837b860c4b747d567e3507b29291b3
|
[
"MIT"
] | 1
|
2021-07-17T12:20:09.000Z
|
2021-07-20T08:25:02.000Z
|
Array/python/reverse_aryasoni98.py
|
CodeTrophs/450DSA
|
84ab1b13cd837b860c4b747d567e3507b29291b3
|
[
"MIT"
] | 8
|
2021-05-26T22:22:07.000Z
|
2021-12-16T04:35:32.000Z
|
# Approach 1
# reverseList([1, 2, 3, 4, 5, 6], 0, 5) = [6 5 4 3 2 1]
# Approach 2
# reverseList([1, 2, 3, 4, 5, 6], 0, 5) = [6 5 4 3 2 1]
| 20.095238
| 55
| 0.509479
|
# Approach 1
def reverseList(A, start, end):
while start < end:
A[start], A[end] = A[end], A[start]
start += 1
end -= 1
# reverseList([1, 2, 3, 4, 5, 6], 0, 5) = [6 5 4 3 2 1]
# Approach 2
def reverseList(A, start, end):
if start >= end:
return
A[start], A[end] = A[end], A[start]
reverseList(A, start+1, end-1)
# reverseList([1, 2, 3, 4, 5, 6], 0, 5) = [6 5 4 3 2 1]
| 0
| 0
| 0
| 0
| 0
| 234
| 0
| 0
| 46
|
9022b4a37c85bb12d4642e5a3cdab4efa2f30215
| 1,140
|
py
|
Python
|
apps/comment/models.py
|
aplot249/my_blog
|
7cfcd67991f0a6dc861847514e8d0fca2213fa8b
|
[
"MIT"
] | null | null | null |
apps/comment/models.py
|
aplot249/my_blog
|
7cfcd67991f0a6dc861847514e8d0fca2213fa8b
|
[
"MIT"
] | 5
|
2021-06-02T01:30:26.000Z
|
2022-03-12T00:24:27.000Z
|
apps/comment/models.py
|
qq1788lover/my_blog
|
7cfcd67991f0a6dc861847514e8d0fca2213fa8b
|
[
"MIT"
] | null | null | null |
# django-ckeditor
# from ckeditor.fields import RichTextField
# django-mptt
#
| 40.714286
| 117
| 0.740351
|
from django.db import models
from django.contrib.auth.models import User
from article.models import ArticlePost
# django-ckeditor
# from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
# django-mptt
from mptt.models import MPTTModel, TreeForeignKey
# Comments on blog articles
class Comment(MPTTModel):
    article = models.ForeignKey(ArticlePost,on_delete=models.CASCADE,related_name='comments') # which article is being commented on
    user = models.ForeignKey(User,on_delete=models.CASCADE,related_name='comments') # who wrote the comment
    # mptt tree structure
    parent = TreeForeignKey('self',on_delete=models.CASCADE,null=True,blank=True,related_name='children')
    # records which user a second-level reply is addressed to, str
    reply_to = models.ForeignKey(User,null=True,blank=True,on_delete=models.CASCADE,related_name='replyers') # user being replied to
    body = RichTextUploadingField() #RichTextField() # comment body
    created = models.DateTimeField(auto_now_add=True) # time the comment was created
class MPTTMeta:
order_insertion_by = ['created']
def __str__(self):
return self.body[:20]
class Meta:
verbose_name = "文章评论"
verbose_name_plural = verbose_name
| 147
| 0
| 0
| 766
| 0
| 0
| 0
| 112
| 132
|
70dc5ccb8c9fff90f926591a0f6bb044989fa3fa
| 1,490
|
py
|
Python
|
test_city.py
|
wang-xinyu/csp.pytorch
|
8d5187358c1eb0fbf7ba5f184b9afed6c2518ad3
|
[
"MIT"
] | 13
|
2020-03-29T13:35:55.000Z
|
2021-04-11T10:45:53.000Z
|
test_city.py
|
wang-xinyu/csp.pytorch
|
8d5187358c1eb0fbf7ba5f184b9afed6c2518ad3
|
[
"MIT"
] | 2
|
2020-09-08T09:53:08.000Z
|
2020-09-16T04:18:03.000Z
|
test_city.py
|
wang-xinyu/csp.pytorch
|
8d5187358c1eb0fbf7ba5f184b9afed6c2518ad3
|
[
"MIT"
] | 6
|
2020-05-24T21:49:17.000Z
|
2021-07-06T09:14:42.000Z
|
import torch
import cv2
import numpy as np
import os
from models.cspnet import CSPNet_p3p4p5
from utils.keras_weights_loader import load_keras_weights
if __name__ == '__main__':
device = 'cuda:0'
weights_path = 'net_e121_l0.hdf5'
out_path = 'output/valresults/city/h/off/121'
input_dim = [1024, 2048]
if not os.path.exists(out_path):
os.makedirs(out_path)
res_file = os.path.join(out_path, 'val_det.txt')
model = CSPNet_p3p4p5()
load_keras_weights(model, weights_path)
model.to(device).eval()
f = open('data/citypersons/val.txt', 'r')
files = f.readlines();
num_imgs = len(files)
res_all = []
for i in range(0, num_imgs):
l = files[i]
print(l)
img = cv2.imread('data/citypersons/leftImg8bit/val/' + l.strip())
x = format_img(img)
with torch.no_grad():
x = torch.from_numpy(x).to(device)
x = x.permute(0, 3, 1, 2)
x_cls, x_reg, x_off = model(x)
Y = [x_cls.detach().cpu().numpy(), x_reg.detach().cpu().numpy(), x_off.detach().cpu().numpy()]
boxes = parse_det_offset(Y, input_dim, score=0.1, down=4)
if len(boxes)>0:
f_res = np.repeat(i + 1, len(boxes), axis=0).reshape((-1, 1))
boxes[:, [2, 3]] -= boxes[:, [0, 1]]
res_all += np.concatenate((f_res, boxes), axis=-1).tolist()
np.savetxt(res_file, np.array(res_all), fmt='%6f')
f.close()
exit(0)
| 29.215686
| 102
| 0.599329
|
import torch
import cv2
import numpy as np
import os
from models.cspnet import CSPNet_p3p4p5
from utils.keras_weights_loader import load_keras_weights
from utils.utils import *
if __name__ == '__main__':
device = 'cuda:0'
weights_path = 'net_e121_l0.hdf5'
out_path = 'output/valresults/city/h/off/121'
input_dim = [1024, 2048]
if not os.path.exists(out_path):
os.makedirs(out_path)
res_file = os.path.join(out_path, 'val_det.txt')
model = CSPNet_p3p4p5()
load_keras_weights(model, weights_path)
model.to(device).eval()
f = open('data/citypersons/val.txt', 'r')
files = f.readlines();
num_imgs = len(files)
res_all = []
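    # Run the detector over every validation image: format the frame, forward
    # it through CSPNet, decode the centre/scale/offset maps into boxes, and
    # collect them (prefixed with the 1-based image index, with x2/y2 converted
    # to width/height) for the val_det.txt results file.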
for i in range(0, num_imgs):
l = files[i]
print(l)
img = cv2.imread('data/citypersons/leftImg8bit/val/' + l.strip())
x = format_img(img)
with torch.no_grad():
x = torch.from_numpy(x).to(device)
x = x.permute(0, 3, 1, 2)
x_cls, x_reg, x_off = model(x)
Y = [x_cls.detach().cpu().numpy(), x_reg.detach().cpu().numpy(), x_off.detach().cpu().numpy()]
boxes = parse_det_offset(Y, input_dim, score=0.1, down=4)
if len(boxes)>0:
f_res = np.repeat(i + 1, len(boxes), axis=0).reshape((-1, 1))
boxes[:, [2, 3]] -= boxes[:, [0, 1]]
res_all += np.concatenate((f_res, boxes), axis=-1).tolist()
np.savetxt(res_file, np.array(res_all), fmt='%6f')
f.close()
exit(0)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 22
|
ba2439d9eddd4585f46a35effe33046144c873b1
| 18
|
py
|
Python
|
blockhash/constants.py
|
dsoprea/blockhash-python
|
bc322658d55c62b96255edc63ae05dbac7cf5ab7
|
[
"MIT"
] | 3
|
2017-11-26T18:33:23.000Z
|
2020-08-03T16:11:02.000Z
|
blockhash/constants.py
|
dsoprea/blockhash-python
|
bc322658d55c62b96255edc63ae05dbac7cf5ab7
|
[
"MIT"
] | null | null | null |
blockhash/constants.py
|
dsoprea/blockhash-python
|
bc322658d55c62b96255edc63ae05dbac7cf5ab7
|
[
"MIT"
] | null | null | null |
DEFAULT_BITS = 16
| 9
| 17
| 0.777778
|
DEFAULT_BITS = 16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
818414fc073b5bab726dc3956d8e5e9d22029c67
| 580
|
py
|
Python
|
config/zuul/zuul_functions.py
|
javierpena/SF-DLRN-poc
|
8b71e1b4a07caee0e3f079ce446d5bb84302e6a1
|
[
"Apache-2.0"
] | null | null | null |
config/zuul/zuul_functions.py
|
javierpena/SF-DLRN-poc
|
8b71e1b4a07caee0e3f079ce446d5bb84302e6a1
|
[
"Apache-2.0"
] | null | null | null |
config/zuul/zuul_functions.py
|
javierpena/SF-DLRN-poc
|
8b71e1b4a07caee0e3f079ce446d5bb84302e6a1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
| 32.222222
| 63
| 0.624138
|
#!/usr/bin/python
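# Zuul parameter functions: they set OFFLINE_NODE_WHEN_COMPLETE to control
# whether the node that ran a job is taken offline afterwards; the config,
# mirror and pages jobs keep their node so it can be reused.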
def set_node_reuse(item, job, params):
print "zuul_functions: set_node_reuse(", item, job, "): "
params['OFFLINE_NODE_WHEN_COMPLETE'] = '0'
def set_node_options(item, job, params):
if job.name in ('config-check', 'config-update',
'sf-mirror-update',
'pages-render', 'pages-update'):
# Prevent putting master node offline
params['OFFLINE_NODE_WHEN_COMPLETE'] = '0'
return
print "zuul_functions: set_node_options(", item, job, "): "
params['OFFLINE_NODE_WHEN_COMPLETE'] = '1'
| 0
| 0
| 0
| 0
| 0
| 514
| 0
| 0
| 46
|
116cd4ba8597e8948b72678be0c30f5b943dca3f
| 611
|
py
|
Python
|
Desafio015.py
|
sidneyalex/Desafios-do-Curso
|
11605caf59fb8b456adaca78a41eae2f7469ab7b
|
[
"MIT"
] | null | null | null |
Desafio015.py
|
sidneyalex/Desafios-do-Curso
|
11605caf59fb8b456adaca78a41eae2f7469ab7b
|
[
"MIT"
] | null | null | null |
Desafio015.py
|
sidneyalex/Desafios-do-Curso
|
11605caf59fb8b456adaca78a41eae2f7469ab7b
|
[
"MIT"
] | null | null | null |
# Write a program that asks how many km a rented car was driven and for how many days it was rented. Compute the amount to pay, given that the car costs R$60 per day plus R$0.15 per km driven.
dias = int(input('Por quantos dias o carro foi alugado? '))
km = float(input('Quantos quilometros você andou com ele? '))
total = km * 0.15 + dias * 60
print('=' * 80)
print('A diaria do carro é de R$60 e temos um adicional de R$0.15 por km.')
print('=' * 80)
print('Você tem uma divida de R${:.2f}\nDiarias R${}\nKm R${:.2f}'.format(total, dias * 60, km * 0.15))
print('=' * 80)
| 61.1
| 225
| 0.682488
|
# Write a program that asks how many km a rented car was driven and for how many days it was rented. Compute the amount to pay, given that the car costs R$60 per day plus R$0.15 per km driven.
dias = int(input('Por quantos dias o carro foi alugado? '))
km = float(input('Quantos quilometros você andou com ele? '))
total = km * 0.15 + dias * 60
print('=' * 80)
print('A diaria do carro é de R$60 e temos um adicional de R$0.15 por km.')
print('=' * 80)
print('Você tem uma divida de R${:.2f}\nDiarias R${}\nKm R${:.2f}'.format(total, dias * 60, km * 0.15))
print('=' * 80)
| 8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d96aa39beae0b4f6ee0866534b868f4a600e92a9
| 63
|
py
|
Python
|
98_tools/training_ram/day2/ex14.py
|
tlananthu/python-learning
|
cfda5bfa6c613bcbe8bfe00567cd058ce5afc4a2
|
[
"Apache-2.0"
] | 1
|
2020-05-11T18:39:54.000Z
|
2020-05-11T18:39:54.000Z
|
98_tools/training_ram/day2/ex14.py
|
tlananthu/python-learning
|
cfda5bfa6c613bcbe8bfe00567cd058ce5afc4a2
|
[
"Apache-2.0"
] | null | null | null |
98_tools/training_ram/day2/ex14.py
|
tlananthu/python-learning
|
cfda5bfa6c613bcbe8bfe00567cd058ce5afc4a2
|
[
"Apache-2.0"
] | null | null | null |
s='Some text with spaces'
x=s.split()
{i:x.count(i) for i in x}
| 21
| 25
| 0.650794
|
s='Some text with spaces'
x=s.split()
{i:x.count(i) for i in x}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
09163ecf781ce0d1e17ae2ffc234afc93fa21d10
| 518
|
py
|
Python
|
examples/weather_data_extraction.py
|
spelap/sport-activities-features
|
c9d99b94509d8f0dd988b41be6ef45eec291ef7d
|
[
"MIT"
] | null | null | null |
examples/weather_data_extraction.py
|
spelap/sport-activities-features
|
c9d99b94509d8f0dd988b41be6ef45eec291ef7d
|
[
"MIT"
] | null | null | null |
examples/weather_data_extraction.py
|
spelap/sport-activities-features
|
c9d99b94509d8f0dd988b41be6ef45eec291ef7d
|
[
"MIT"
] | null | null | null |
from sport_activities_features.weather_identification import WeatherIdentification
from sport_activities_features.tcx_manipulation import TCXFile
#read TCX file
tcx_file = TCXFile()
tcx_data = tcx_file.read_one_file("path_to_the_file")
#configure visual crossing api key
visual_crossing_api_key = "API_KEY" # https://www.visualcrossing.com/weather-api
#return weather objects
weather = WeatherIdentification(tcx_data['positions'], tcx_data['timestamps'], visual_crossing_api_key)
weatherlist = weather.get_weather()
| 39.846154
| 103
| 0.841699
|
from sport_activities_features.weather_identification import WeatherIdentification
from sport_activities_features.tcx_manipulation import TCXFile
#read TCX file
tcx_file = TCXFile()
tcx_data = tcx_file.read_one_file("path_to_the_file")
#configure visual crossing api key
visual_crossing_api_key = "API_KEY" # https://www.visualcrossing.com/weather-api
#return weather objects
weather = WeatherIdentification(tcx_data['positions'], tcx_data['timestamps'], visual_crossing_api_key)
weatherlist = weather.get_weather()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c768407e58ef331f29e4cf8bf8152406b9d9f5b4
| 27,330
|
py
|
Python
|
teslakit/numerical_models/swan/io.py
|
teslakit/teslak
|
3f3dda08c5c5998cb2a7debbf22f2be675a4ff8b
|
[
"MIT"
] | 12
|
2019-11-14T22:19:12.000Z
|
2022-03-04T01:25:33.000Z
|
teslakit/numerical_models/swan/io.py
|
anderdyl/teslaCoSMoS
|
1495bfa2364ddbacb802d145b456a35213abfb7c
|
[
"MIT"
] | 5
|
2020-03-24T18:21:41.000Z
|
2021-08-23T20:39:43.000Z
|
teslakit/numerical_models/swan/io.py
|
anderdyl/teslaCoSMoS
|
1495bfa2364ddbacb802d145b456a35213abfb7c
|
[
"MIT"
] | 2
|
2021-03-06T07:54:41.000Z
|
2021-06-30T14:33:22.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from .geo import gc_distance
#AUX. FUNCTIONs
def geo_distance_azimuth(lat_matrix, lon_matrix, lat_point, lon_point):
'''
Returns geodesic distance and azimuth between lat,lon matrix and lat,lon
point in degrees
'''
arcl = np.zeros(lat_matrix.shape) * np.nan
azi = np.zeros(lat_matrix.shape) * np.nan
sh1, sh2 = lat_matrix.shape
for i in range(sh1):
for j in range(sh2):
arcl[i,j], azi[i,j] = gc_distance(
lat_point, lon_point, lat_matrix[i][j], lon_matrix[i][j]
)
return arcl, azi
# SWAN INPUT/OUTPUT STAT LIBRARY
| 33.248175
| 149
| 0.530809
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import os.path as op
import shutil as su
from datetime import datetime
import numpy as np
import pandas as pd
import xarray as xr
from scipy.io import loadmat
from .geo import gc_distance
# AUX. FUNCTIONs
def geo_distance_azimuth(lat_matrix, lon_matrix, lat_point, lon_point):
'''
Returns geodesic distance and azimuth between lat,lon matrix and lat,lon
point in degrees
'''
arcl = np.zeros(lat_matrix.shape) * np.nan
azi = np.zeros(lat_matrix.shape) * np.nan
sh1, sh2 = lat_matrix.shape
for i in range(sh1):
for j in range(sh2):
arcl[i,j], azi[i,j] = gc_distance(
lat_point, lon_point, lat_matrix[i][j], lon_matrix[i][j]
)
return arcl, azi
# SWAN INPUT/OUTPUT STAT LIBRARY
class SwanIO(object):
'SWAN numerical model input/output'
def __init__(self, swan_proj):
# needs SwanProject
self.proj = swan_proj
def make_project(self):
'makes swan project folder and subfolders'
if not op.isdir(self.proj.p_main): os.makedirs(self.proj.p_main)
if not op.isdir(self.proj.p_cases): os.makedirs(self.proj.p_cases)
class SwanIO_STAT(SwanIO):
'SWAN numerical model input/output - STATIONARY cases'
def make_input(self, p_file, id_run, ws, bnd):
'''
Writes input.swn file from waves sea state for stationary execution
p_file - input.swn file path
ws - wave sea state (hs, per, dr, spr)
bnd - wave sea state active boundaries
more info: http://swanmodel.sourceforge.net/online_doc/swanuse/node23.html
'''
# TODO: check readinp idla
# .swn file parameters
sea_level = self.proj.params['sea_level']
jonswap_gamma = self.proj.params['jonswap_gamma']
coords_spherical = self.proj.params['coords_spherical']
waves_period = self.proj.params['waves_period']
# main mesh
mm = self.proj.mesh_main
# .swn text file
t = "PROJ '{0}' '{1}'\n$\n".format(self.proj.name, id_run)
t += 'MODE STAT\n'
# spherical coordinates (mercator) switch
if coords_spherical != None:
t += 'COORDINATES SPHER {0}\n'.format(coords_spherical)
# sea level
t += 'SET level={0} NAUTICAL\n$\n'.format(sea_level)
# computational grid
t += 'CGRID REGULAR {0} {1} {2} {3} {4} {5} {6} CIRCLE 72 0.0345 1.00 34\n$\n'.format(
mm.cg['xpc'], mm.cg['ypc'], mm.cg['alpc'], mm.cg['xlenc'],
mm.cg['ylenc'], mm.cg['mxc']-1, mm.cg['myc']-1)
# bathymetry
t += 'INPGRID BOTTOM REGULAR {0} {1} {2} {3} {4} {5} {6}\n'.format(
mm.dg['xpc'], mm.dg['ypc'], mm.dg['alpc'], mm.dg['mxc'],
mm.dg['myc'], mm.dg['dxinp'], mm.dg['dyinp'])
t += "READINP BOTTOM 1 '{0}' {1} 0 FREE\n$\n".format(
mm.depth_fn, mm.dg_idla)
# waves boundary conditions
t += 'BOUND SHAPespec JONswap {0} {1} DSPR DEGR\n'.format(
jonswap_gamma, waves_period)
for ic in bnd:
t += "BOUN SIDE {0} CONstant PAR {1:.3f} {2:.3f} {3:.3f} {4:.3f}\n".format(
ic, ws.hs, ws.per, ws.dir, ws.spr)
t += "$\n"
# numerics
t += 'OFF QUAD\n'
# t += 'PROP BSBT\n'
# t += 'WCAP\n'
t += 'BREA\n'
t += 'FRICTION JONSWAP\n$\n'
# optional nested mesh
r_ns = [self.proj.run_nest1, self.proj.run_nest2, self.proj.run_nest3]
m_ns = [self.proj.mesh_nest1, self.proj.mesh_nest2, self.proj.mesh_nest3]
nout_0 = ['nest1', 'nest2', 'nest3']
nout_1 = ['bounds_nest1.dat', 'bounds_nest2.dat', 'bounds_nest3.dat']
for r_n, m_n, n0, n1 in zip(r_ns, m_ns, nout_0, nout_1):
if r_n:
t += "NGRID '{0}' {1} {2} {3} {4} {5} {6} {7}\n".format(
n0, m_n.cg['xpc'], m_n.cg['ypc'], m_n.cg['alpc'],
m_n.cg['xlenc'], m_n.cg['ylenc'],
np.int32(m_n.cg['xlenc']/mm.cg['dxinp']),
np.int32(m_n.cg['ylenc']/mm.cg['dyinp'])
)
t += "NESTOUT '{0}' '{1}'\n".format(n0, n1)
# output
t += "BLOCK 'COMPGRID' NOHEAD '{0}' LAY 3 HSIGN TM02 DIR TPS DSPR\n$\n".format(
mm.output_fn,
)
# compute
t += 'TEST 1,0\n'
t += 'COMPUTE \n'
t += 'STOP\n$\n'
# write file:
with open(p_file, 'w') as f:
f.write(t)
# log
fmt2 = ' 7.2f'
print(
'SWAN CASE: {1} ---> hs {2:{0}}, per {3:{0}}, dir {4:{0}}, spr {5:{0}}'.format(
fmt2, id_run, ws.hs, ws.per, ws.dir, ws.spr
)
)
def make_input_nested(self, p_file, id_run):
'''
Writes input_nested.swn file from waves sea state for stationary execution
p_file - input_nestedN.swn file path
'''
# TODO check myc-1, mxc -1
# .swn file parameters
sea_level = self.proj.params['sea_level']
coords_spherical = self.proj.params['coords_spherical']
nested_bounds = self.proj.params['nested_bounds']
# SWAN nested Computacional grid
mn1 = self.proj.mesh_nest1
# .swn text file
t = "PROJ '{0}' '{1}'\n$\n".format(self.proj.name, id_run)
t += 'MODE STAT\n'
# spherical coordinates (mercator) switch
if coords_spherical != None:
t += 'COORDINATES SPHER {0}\n'.format(coords_spherical)
t += 'SET level={0} NAUTICAL\n$\n'.format(sea_level)
# computational grid
t += 'CGRID REGULAR {0} {1} {2} {3} {4} {5} {6} CIRCLE 72 0.03558410 1.00 35\n$\n'.format(
mn1.cg['xpc'], mn1.cg['ypc'], mn1.cg['alpc'], mn1.cg['xlenc'],
mn1.cg['ylenc'], mn1.cg['mxc']-1, mn1.cg['myc']-1)
# bathymetry
t += 'INPGRID BOTTOM REGULAR {0} {1} {2} {3} {4} {5} {6}\n'.format(
mn1.dg['xpc'], mn1.dg['ypc'], mn1.dg['alpc'], mn1.dg['mxc']-1,
mn1.dg['myc']-1, mn1.dg['dxinp'], mn1.dg['dyinp'])
t += "READINP BOTTOM 1 '{0}' {1} 0 FREE\n$\n".format(
mn1.depth_fn, mn1.dg_idla)
# Boundary Conditions
t += "BOUN NEST '{0}' {1}\n".format('bounds_nest1.dat', nested_bounds)
# wind file
t += "$\n"
# numerics
t += 'OFF QUAD\n'
# t += 'GEN1\n'
# t += 'PROP BSBT\n'
# t += 'WCAP\n'
t += 'BREA\n'
t += 'FRICTION JONSWAP\n$\n'
# output
t += "BLOCK 'COMPGRID' NOHEAD '{0}' LAY 3 HSIGN TM02 DIR TPS DSPR\n$\n".format(
mn1.output_fn,
)
# compute
t += 'TEST 1,0\n'
t += 'COMPUTE \n'
t += 'STOP\n$\n'
# write file:
with open(p_file, 'w') as f:
f.write(t)
def build_case(self, case_id, waves_ss, bnd=['N', 'E', 'W', 'S']):
'''
Build SWAN STAT case input files for given wave sea state (hs, per, dir, spr)
ix_case - SWAN case index (int)
waves_ss - wave sea state (hs, per, dir, spr)
bnd - wave sea state active boundaries
'''
# SWAN case path
p_case = op.join(self.proj.p_cases, case_id)
# make execution dir
if not op.isdir(p_case): os.makedirs(p_case)
# make depth file for main mesh
self.proj.mesh_main.export_dat(p_case)
# make input.swn file
self.make_input(op.join(p_case, 'input.swn'), case_id, waves_ss, bnd)
# optional nested mesh depth and input files
r_ns = [self.proj.run_nest1, self.proj.run_nest2, self.proj.run_nest3]
m_ns = [self.proj.mesh_nest1, self.proj.mesh_nest2, self.proj.mesh_nest3]
i_ns = ['input_nest1.swn', 'input_nest2.swn', 'input_nest3.swn']
for r_n, m_n, i_n in zip(r_ns, m_ns, i_ns):
if r_n:
m_n.export_dat(p_case)
self.make_input_nested(op.join(p_case, i_n), case_id)
def outmat2xr(self, p_mat):
# matlab dictionary
dmat = loadmat(p_mat)
# return dataset
xds_out = xr.Dataset(
{
'Hsig': (('X','Y',), dmat['Hsig'].T, {'units':'m'}),
'Tm02': (('X','Y',), dmat['Tm02'].T, {'units':'s'}),
'Dir': (('X','Y',), dmat['Dir'].T, {'units':'º'}),
'Dspr': (('X','Y',), dmat['Dspr'].T, {'units':'º'}),
'TPsmoo': (('X','Y',), dmat['TPsmoo'].T, {'units':'s'}),
}
)
return xds_out
def output_case(self, p_case, mesh):
'read .mat output file from stationary and returns xarray.Dataset'
# extract output from selected mesh
p_mat = op.join(p_case, mesh.output_fn)
xds_out = self.outmat2xr(p_mat)
# set X and Y values
X, Y = mesh.get_XY()
xds_out = xds_out.assign_coords(X=X)
xds_out = xds_out.assign_coords(Y=Y)
# rename to longitude latitude in spherical coords cases
coords_spherical = self.proj.params['coords_spherical']
if coords_spherical != None:
xds_out = xds_out.rename({'X':'lon', 'Y':'lat'})
return xds_out
class SwanIO_NONSTAT(SwanIO):
'SWAN numerical model input/output - NON STATIONARY cases'
def make_out_points(self, p_file):
'Generates desired output-points coordinates file'
# define and save output points
x_out = self.proj.x_out
y_out = self.proj.y_out
if not x_out or not y_out:
return
else:
points = np.vstack((x_out,y_out)).T
np.savetxt(p_file, points, fmt='%.2f')
def make_wave_files(self, p_case, waves_event, time, bnd):
'Generate event wave files (swan compatible)'
# wave variables
hs = waves_event.hs.values[:]
per = waves_event.per.values[:]
direc = waves_event.dir.values[:]
spr = waves_event.spr.values[:]
# csv file
num_data = len(time)
data = np.zeros((num_data, 5))
data[:, 0] = time
data[:, 1] = hs
data[:, 2] = per
data[:, 3] = direc
data[:, 4] = spr
# Copy file for all boundaries
save = op.join(p_case, 'series_waves.dat')
np.savetxt(save, data, header='TPAR', comments='', fmt='%8.4f %2.3f %2.3f %3.2f %3.1f')
for i in bnd:
su.copyfile(save, op.join(p_case, 'series_waves_{0}.dat'.format(i)))
def make_wind_files(self, p_case, waves_event):
'''
Generate event wind mesh files (swan compatible)
uses wave_event U10 and V10 values at the entire SWAN comp. grid
'''
# wind variables
u10 = waves_event.U10.values[:]
v10 = waves_event.V10.values[:]
# main mesh
mm = self.proj.mesh_main
# each time needs 2D (mesh) wind files (U,V)
mxc = mm.cg['mxc'] # number mesh x
myc = mm.cg['myc'] # number mesh y
txt = ''
for c, (u, v) in enumerate(zip(u10,v10)):
# single point wind -> entire SWAN comp.grid wind
aux = np.ones((mxc, myc))
# TODO: wind has to be rotated if alpc != 0
# csv file
u_2d = aux * u
v_2d = aux * v
u_v_stack = np.vstack((u_2d, v_2d))
save = op.join(p_case, 'wind_{0:06}.dat'.format(c))
np.savetxt(save, u_v_stack, fmt='%.2f')
# wind list file
txt += 'wind_{0:06}.dat\n'.format(c)
# winds file path
save = op.join(p_case, 'series_wind.dat')
with open(save, 'w') as f:
f.write(txt)
def make_vortex_files(self, p_case, storm_track):
'''
Generate event wind mesh files (swan compatible)
uses wave_event storm path data over SWAN computational grid
needs SPHERICAL COORDINATES
'''
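        # Overview of the steps below: build a parametric pressure field around
        # each storm-track position, derive wind speed and direction over the
        # SWAN computational grid, write one U/V wind file per time step (plus
        # the series_wind.dat list), and save the fields to vortex_wind.nc.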
# parameters
RE = 6378.135 # Earth radius
# wind variables
storm_move = storm_track.move.values[:]
storm_vf = storm_track.vf.values[:]
storm_lon = storm_track.lon.values[:]
storm_lat = storm_track.lat.values[:]
storm_pn = storm_track.pn.values[:]
storm_p0 = storm_track.p0.values[:]
times = storm_track.index[:]
# main mesh
mm = self.proj.mesh_main
# comp. grid for generating vortex wind files
mxc = mm.cg['mxc'] # number mesh x
myc = mm.cg['myc'] # number mesh y
# comp. grid lat, lon limits
lon0 = mm.cg['xpc']
lat0 = mm.cg['ypc']
lon1 = mm.cg['xpc'] + mm.cg['xlenc']
lat1 = mm.cg['ypc'] + mm.cg['ylenc']
cg_lon = np.linspace(lon0, lon1, mxc)
cg_lat = np.linspace(lat0, lat1, myc)
mg_lon, mg_lat = np.meshgrid(cg_lon, cg_lat)
# wind output holder
hld_W = np.zeros((len(cg_lat), len(cg_lon), len(storm_move)))
hld_D = np.zeros((len(cg_lat), len(cg_lon), len(storm_move)))
# each time needs 2D (mesh) wind files (U,V)
txt = ''
for c, (lo, la, p0, pn, move, vf) in enumerate(zip(
storm_lon, storm_lat, storm_p0, storm_pn, storm_move, storm_vf)):
# get distance and angle between points
arcl, beta = geo_distance_azimuth(mg_lat, mg_lon, la, lo)
r = arcl * np.pi / 180.0 * RE
if p0 < 900: p0 = 900 # fix p0
# Silva et al. 2010
RC = 0.4785 * p0 - 413.01
            # TODO: use a different cyclostrophic radius?
# Hydromet Rankin-Vortex model (eq. 76)
pr = p0 + (pn - p0) * np.exp(-2*RC/r)
py, px = np.gradient(pr)
ang = np.arctan2(py, px) + np.sign(la) * np.pi/2.0
# Wind model
            w = 0.2618  # Earth's angular velocity (rad/h)
f = 2 * w * np.sin(la*np.pi/180) # coriolis
ur = 21.8 * np.sqrt(pn-p0) - 0.5 * f * RC # wind max grad (km/h)
fv = np.zeros(mg_lon.shape)
s1 = r/RC < 1 # eq. (9) Rodo (2009)
fv[s1] = 1 - 0.971 * np.exp(-6.826 * np.power(r[s1]/RC, 4.798))
s2 = r/RC >=1 # eq. (10) Rodo (2009)
nc = (f*RC)/ur
A = -0.99 * (1.066-np.exp(-1.936*nc))
B = -0.357 * (1.4456-np.exp(-5.2388*nc))
fv[s2] = np.exp(A*np.power(np.log(r[s2]/RC),3) * \
np.exp(B*np.log(r[s2]/RC)))
abnaut = move + beta
ab = np.remainder(-abnaut+270, 360) *np.pi/180 # nautical to cartesian
W = 0.986 * (fv*ur + 0.5*vf * np.cos(ab-np.pi/2))
W[W<0] = 0
# TODO: wind has to be rotated if alpc != 0
# csv file
u_2d = W * np.cos(ang) / 3.6 # km/h --> m/s
v_2d = W * np.sin(ang) / 3.6 # km/h --> m/s
u_v_stack = np.vstack((u_2d, v_2d))
save = op.join(p_case, 'wind_{0:06}.dat'.format(c))
np.savetxt(save, u_v_stack, fmt='%.2f')
# wind list file
txt += 'wind_{0:06}.dat\n'.format(c)
# hold wind data (m/s)
hld_W[:,:,c] = W / 3.6 # km/h --> m/s
hld_D[:,:,c] = 270 - np.rad2deg(ang) # direction (º clock. rel. north)
# winds file path
save = op.join(p_case, 'series_wind.dat')
with open(save, 'w') as f:
f.write(txt)
# aux. save vortex wind fields
p_vortex = op.join(p_case, 'vortex_wind.nc')
xds_vortex = xr.Dataset(
{
'W': (('lat','lon','time'), hld_W, {'units':'m/s'}),
'Dir': (('lat','lon','time'), hld_D, {'units':'º'})
},
coords={
'Y' : cg_lat,
'X' : cg_lon,
'time' : times,
}
)
xds_vortex.attrs['xlabel'] = 'Longitude (º)'
xds_vortex.attrs['ylabel'] = 'Latitude (º)'
xds_vortex.to_netcdf(p_vortex)
def make_level_files(self, p_case, wave_event):
'Generate event level mesh files (swan compatible)'
# parse pandas time index to swan iso format
swan_iso_fmt = '%Y%m%d.%H%M'
time = pd.to_datetime(wave_event.index).strftime(swan_iso_fmt).values[:]
# level variables
zeta = wave_event.level.values[:]
tide = wave_event.tide.values[:]
# main mesh
mm = self.proj.mesh_main
# each time needs 2D (mesh) level
mxc = mm.cg['mxc'] # number mesh x
myc = mm.cg['myc'] # number mesh y
txt = ''
for c, (z, t) in enumerate(zip(zeta, tide)):
# single point level -> entire SWAN comp.grid level
aux = np.ones((mxc, myc)).T
# csv file
l = z + t # total level
l_2d = aux * l
save = op.join(p_case, 'level_{0:06}.dat'.format(c))
np.savetxt(save, l_2d, fmt='%.2f')
# level list file
txt += 'level_{0:06}.dat\n'.format(c)
# waves file path
save = op.join(p_case, 'series_level.dat')
with open(save, 'w') as f:
f.write(txt)
def make_input(self, p_file, id_run, time, make_waves=True,
make_winds=True, wvs_bnd=['N', 'E', 'W', 'S']):
'''
Writes input.swn file from waves event for non-stationary execution
p_file - input.swn file path
time - event time at swan iso format
make_waves - activates waves input files generation (at waves_bnd)
make_winds - activates wind input files generation
more info: http://swanmodel.sourceforge.net/online_doc/swanuse/node23.html
'''
# event time (swan iso format)
t0_iso = time[0]
t1_iso = time[-1]
# .swn file parameters
sea_level = self.proj.params['sea_level']
jonswap_gamma = self.proj.params['jonswap_gamma']
cdcap = self.proj.params['cdcap']
maxerr = self.proj.params['maxerr']
coords_spherical = self.proj.params['coords_spherical']
waves_period = self.proj.params['waves_period']
# main mesh
mm = self.proj.mesh_main
# output points
x_out = self.proj.x_out
y_out = self.proj.y_out
# computational data
dt_comp = 5 # time step (minutes)
# .swn text file
t = "PROJ '{0}' '{1}'\n$\n".format(self.proj.name, id_run)
t += 'MODE NONSTAT\n'
        # spherical coordinates (mercator) switch
if coords_spherical:
t += 'COORDINATES SPHER CCM\n'
# cdcap
cdcap_str = ''
if cdcap: cdcap_str = 'cdcap={0}'.format(cdcap)
# max error (caution)
maxerr_str = ''
if maxerr: maxerr_str = 'maxerr={0}'.format(maxerr)
# set level and cdcap (if available)
t += 'SET level={0} {1} {2} NAUTICAL\n$\n'.format(
sea_level, cdcap_str, maxerr_str
)
# computational grid
t += 'CGRID REGULAR {0} {1} {2} {3} {4} {5} {6} CIRCLE 72 0.0345 1.00 34\n$\n'.format(
mm.cg['xpc'], mm.cg['ypc'], mm.cg['alpc'], mm.cg['xlenc'],
mm.cg['ylenc'], mm.cg['mxc']-1, mm.cg['myc']-1)
# bathymetry
t += 'INPGRID BOTTOM REGULAR {0} {1} {2} {3} {4} {5} {6}\n'.format(
mm.dg['xpc'], mm.dg['ypc'], mm.dg['alpc'], mm.dg['mxc'],
mm.dg['myc'], mm.dg['dxinp'], mm.dg['dyinp'])
t += "READINP BOTTOM 1 '{0}' {1} 0 FREE\n$\n".format(
mm.depth_fn, mm.dg_idla)
# wind
t += 'INPGRID WIND REGULAR {0} {1} {2} {3} {4} {5} {6} NONSTAT {7} 1 HR {8}\n'.format(
mm.cg['xpc'], mm.cg['ypc'], mm.cg['alpc'], mm.cg['mxc']-1,
mm.cg['myc']-1, mm.cg['dxinp'], mm.cg['dyinp'], t0_iso, t1_iso)
t += "READINP WIND 1. SERIES '{0}' 3 0 FREE\n$\n".format('series_wind.dat')
# level
t += 'INPGRID WLEV REGULAR {0} {1} {2} {3} {4} {5} {6} NONSTAT {7} 1 HR {8}\n'.format(
mm.cg['xpc'], mm.cg['ypc'], mm.cg['alpc'], mm.cg['mxc']-1,
mm.cg['myc']-1, mm.cg['dxinp'], mm.cg['dyinp'], t0_iso, t1_iso)
t += "READINP WLEV 1. SERIES '{0}' 3 0 FREE\n$\n".format('series_level.dat')
# waves boundary conditions
if make_waves:
t += 'BOUND SHAPespec JONswap {0} {1} DSPR DEGR\n'.format(
jonswap_gamma, waves_period)
for ic in wvs_bnd:
t += "BOUN SIDE {0} CONstant FILE 'series_waves_{0}.dat'\n".format(ic)
# numerics & physics
t += 'WIND DRAG WU\n'
t += 'GEN3 ST6 5.7E-7 8.0E-6 4.0 4.0 UP HWANG VECTAU TRUE10\n'
t += 'SSWELL\n'
t += 'QUAD iquad=8\n'
t += 'WCAP\n'
t += 'PROP BSBT\n'
if not coords_spherical:
t += 'SETUP\n' # not compatible with spherical
t += 'BREA\n'
t += 'FRICTION JONSWAP\n$\n'
t += 'TRIADS\n'
t += 'DIFFRAC\n'
# output
t += "BLOCK 'COMPGRID' NOHEAD '{0}' LAY 3 HSIGN TM02 DIR TPS DSPR OUT {1} 1.0 HR\n$\n".format(
mm.output_fn, t0_iso)
# output points
if not x_out or not y_out:
pass
else:
t += "POINTS 'outpts' FILE 'points_out.dat'\n"
t += "TABLE 'outpts' NOHEAD 'table_outpts.dat' DEP HS HSWELL DIR RTP TM02 DSPR WIND WATLEV OUT {0} {1} MIN\n$\n".format(t0_iso, dt_comp)
# compute
t += 'TEST 1,0\n'
t += 'COMPUTE NONSTAT {0} {1} MIN {2}\n'.format(t0_iso, dt_comp, t1_iso)
t += 'STOP\n$\n'
# write file:
with open(p_file, 'w') as f:
f.write(t)
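    # Illustrative sketch (comments only, not part of the original file): with made-up grid and
    # time values, make_input() above produces an input.swn roughly shaped like
    #   PROJ '<name>' '<id>'
    #   MODE NONSTAT
    #   SET level=... NAUTICAL
    #   CGRID REGULAR ... CIRCLE 72 0.0345 1.00 34
    #   INPGRID BOTTOM REGULAR ...            READINP BOTTOM 1 '<depth file>' ... FREE
    #   INPGRID WIND ... NONSTAT t0 1 HR t1   READINP WIND 1. SERIES 'series_wind.dat' ...
    #   INPGRID WLEV ... NONSTAT t0 1 HR t1   READINP WLEV 1. SERIES 'series_level.dat' ...
    #   BOUN SIDE N CONstant FILE 'series_waves_N.dat'   (one line per requested boundary)
    #   physics commands (WIND DRAG WU, GEN3, WCAP, BREA, FRICTION JONSWAP, TRIADS, DIFFRAC, ...)
    #   BLOCK 'COMPGRID' ... and TABLE 'outpts' ... output requests
    #   COMPUTE NONSTAT t0 5 MIN t1
    #   STOP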
def build_case(self, case_id, waves_event, storm_track=None,
make_waves=True, make_winds=True, waves_bnd=['N', 'E', 'W', 'S']):
'''
Build SWAN NONSTAT case input files for given wave dataset
case_id - SWAN case index (int)
waves_event - waves event time series (pandas.Dataframe)
also contains level, tide and wind (not storm track) variables
[n x 8] (hs, per, dir, spr, U10, V10, level, tide)
storm_track - None / storm track time series (pandas.Dataframe)
storm_track generated winds have priority over waves_event winds
[n x 6] (move, vf, lon, lat, pn, p0)
'''
# SWAN case path
p_case = op.join(self.proj.p_cases, case_id)
# make execution dir
if not op.isdir(p_case): os.makedirs(p_case)
# make depth file for main mesh
self.proj.mesh_main.export_dat(p_case)
# make output points file
self.make_out_points(op.join(p_case, 'points_out.dat'))
# parse pandas time index to swan iso format
swan_iso_fmt = '%Y%m%d.%H%M'
time_swan = pd.to_datetime(waves_event.index).strftime(swan_iso_fmt).values[:]
# make wave files
if make_waves:
self.make_wave_files(p_case, waves_event, time_swan, waves_bnd)
# make wind files
# TODO: vortex model, if active, will override wind files
if make_winds:
self.make_wind_files(p_case, waves_event)
# vortex model for storm tracks
if isinstance(storm_track, pd.DataFrame):
self.make_vortex_files(p_case, storm_track)
# make water level files
self.make_level_files(p_case, waves_event)
# make input.swn file
self.make_input(
op.join(p_case, 'input.swn'), case_id, time_swan,
make_waves = make_waves, make_winds = make_winds,
)
# TODO: add optional nested mesh depth and input files
def outmat2xr(self, p_mat):
# matlab dictionary
dmat = loadmat(p_mat)
# get dates from one key
hsfs = sorted([x for x in dmat.keys() if 'Hsig' in x])
dates_str = ['_'.join(x.split('_')[1:]) for x in hsfs]
dates = [datetime.strptime(s,'%Y%m%d_%H%M%S') for s in dates_str]
# read times
l_times = []
for ds in dates_str:
xds_t = xr.Dataset(
{
'Hsig': (('X','Y',), dmat['Hsig_{0}'.format(ds)].T, {'units':'m'}),
'Tm02': (('X','Y',), dmat['Tm02_{0}'.format(ds)].T, {'units':'s'}),
'Dir': (('X','Y',), dmat['Dir_{0}'.format(ds)].T, {'units':'º'}),
'Dspr': (('X','Y',), dmat['Dspr_{0}'.format(ds)].T, {'units':'º'}),
'TPsmoo': (('X','Y',), dmat['TPsmoo_{0}'.format(ds)].T, {'units':'s'}),
}
)
l_times.append(xds_t)
# join at times dim
xds_out = xr.concat(l_times, dim='time')
xds_out = xds_out.assign_coords(time=dates)
return xds_out
def output_case(self, p_case, mesh):
        'read .mat output file from a non-stationary run and return an xarray.Dataset'
# extract output from selected mesh
p_mat = op.join(p_case, mesh.output_fn)
xds_out = self.outmat2xr(p_mat)
# set X and Y values
X, Y = mesh.get_XY()
xds_out = xds_out.assign_coords(X=X)
xds_out = xds_out.assign_coords(Y=Y)
# rename to longitude latitude in spherical coords cases
coords_spherical = self.proj.params['coords_spherical']
if coords_spherical != None:
xds_out = xds_out.rename({'X':'lon', 'Y':'lat'})
return xds_out
def get_t0_dt(self, p_input):
'gets output points time_ini and delta_time (min) from SWAN input.swn file'
# read input.swn and file data
with open(p_input, 'r') as fR:
ls = fR.readlines()
lx = [x for x in ls if x.startswith('TABLE')][0].split(' ')
t0_str = lx[-3] # start date
dt_min = lx[-2] # dt (minutes)
swan_iso_fmt = '%Y%m%d.%H%M'
t0 = datetime.strptime(t0_str, swan_iso_fmt)
return t0, dt_min
def output_points(self, p_case):
'read table_outpts.dat output file and returns xarray.Dataset'
p_dat = op.join(p_case, 'table_outpts.dat')
# variable names
names = ['DEP', 'HS', 'HSWELL', 'DIR', 'RTP', 'TM02', 'DSPR', 'WIND',
'WATLEV', 'OUT' ]
x_out = self.proj.x_out
y_out = self.proj.y_out
# points are mixed at output file
np_pts = np.genfromtxt(p_dat)
n_rows = np_pts.shape[0]
# number of points
n_pts = len(x_out)
l_xds_pts = []
for i in range(n_pts):
ix_p = np.arange(i, n_rows, n_pts)
np_pti = np_pts[ix_p, :]
            xds_pti = xr.Dataset({})
for c, n in enumerate(names):
xds_pti[n] = (('time'), np_pti[:,c])
l_xds_pts.append(xds_pti)
xds_out = xr.concat(l_xds_pts, dim='point')
# add point x and y
xds_out['x_point'] = (('point'), x_out)
xds_out['y_point'] = (('point'), y_out)
# add times dim values
t0, dt_min = self.get_t0_dt(op.join(p_case, 'input.swn'))
time_out = pd.date_range(t0, periods=len(xds_out.time), freq='{0}min'.format(dt_min))
xds_out = xds_out.assign_coords(time=time_out)
return xds_out
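# Illustrative sketch (not part of the original file): the vortex wind formulas used in
# make_vortex_files() above, condensed for a single grid point. The storm parameters in the
# example call are invented; the RC regression, gradient wind and shape-factor expressions
# mirror the code above (Silva et al. 2010 / Hydromet Rankin-Vortex).
import numpy as np

def vortex_wind_at_point(r_km, lat_deg, p0, pn, vf, ab_rad):
    'approximate surface wind speed (km/h) at distance r_km from the storm centre'
    p0 = max(p0, 900.0)                           # same lower clamp as above
    RC = 0.4785 * p0 - 413.01                     # cyclostrophic radius (km)
    w = 0.2618                                    # Earth angular velocity (rad/h)
    f = 2 * w * np.sin(np.deg2rad(lat_deg))       # Coriolis parameter
    ur = 21.8 * np.sqrt(pn - p0) - 0.5 * f * RC   # maximum gradient wind (km/h)
    x = r_km / RC
    if x < 1:
        fv = 1 - 0.971 * np.exp(-6.826 * x ** 4.798)             # eq. (9)
    else:
        nc = (f * RC) / ur
        A = -0.99 * (1.066 - np.exp(-1.936 * nc))
        B = -0.357 * (1.4456 - np.exp(-5.2388 * nc))
        fv = np.exp(A * np.log(x) ** 3 * np.exp(B * np.log(x)))  # eq. (10)
    W = 0.986 * (fv * ur + 0.5 * vf * np.cos(ab_rad - np.pi / 2))
    return max(W, 0.0)                            # negative speeds clamped to zero, as above

# example with invented storm parameters (pressures in hPa, vf in km/h, angle in radians)
print(vortex_wind_at_point(r_km=50.0, lat_deg=15.0, p0=950.0, pn=1013.0, vf=20.0, ab_rad=0.0))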
| 36
| 0
| 0
| 26,416
| 0
| 0
| 0
| -4
| 224
|
ed273aa6cd016beba627f472a2c9db37c8bc9752
| 5,588
|
py
|
Python
|
RL_testing_ground.py
|
sonic597/rl-testing-ground
|
c875ae640adb0de0dbe06aa152432aa0d49faed2
|
[
"MIT"
] | null | null | null |
RL_testing_ground.py
|
sonic597/rl-testing-ground
|
c875ae640adb0de0dbe06aa152432aa0d49faed2
|
[
"MIT"
] | null | null | null |
RL_testing_ground.py
|
sonic597/rl-testing-ground
|
c875ae640adb0de0dbe06aa152432aa0d49faed2
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
k = 100
iters = 1000
episodes = 50
eps_01_rewards = np.zeros(iters)
eps_1_rewards = np.zeros(iters)
eps_decay_rewards = np.zeros(iters)
ucb_rewards = np.zeros(iters)
for i in range(episodes):
random_test_list = [x for x in np.random.normal(0, 1, k)]
eps_01 = greedy_bandit(k, 0.01, iters, random_test_list)
eps_1 = greedy_bandit(k, 0.1, iters, random_test_list)
eps_decay = e_decay_bandit(k, 0.3, 0.1, iters, random_test_list)
ucb = ucb_bandit(k, 2, iters, random_test_list)
eps_01.iterate()
eps_1.iterate()
eps_decay.decay_iterate()
ucb.ucb_iterate()
eps_01_rewards += (eps_01.reward - eps_01_rewards) / (i + 1)
eps_1_rewards += (eps_1.reward - eps_1_rewards) / (i + 1)
eps_decay_rewards += (eps_decay.reward - eps_decay_rewards) / (i + 1)
ucb_rewards += (ucb.reward - ucb_rewards) / (i + 1)
plt.figure(figsize=(12, 8))
plt.plot(eps_01_rewards, label="epsilon=0.01")
plt.plot(eps_1_rewards, label="epsilon=0.1")
plt.plot(eps_decay_rewards, label="e_decay")
plt.plot(ucb_rewards, label="upper confidence bound")
plt.legend()
plt.xlabel("Iterations")
plt.ylabel("Average Reward")
plt.title("Avg Rewards after " + str(episodes) + " Episodes")
plt.show()
| 38.013605
| 115
| 0.65247
|
import numpy as np
import matplotlib.pyplot as plt
k = 100
iters = 1000
episodes = 50
class greedy_bandit:
def __init__(self, arms, elipson, iterations, avg_arm_rewards):
self.arms = arms
self.elipson = elipson
self.iterations = iterations
self.steps = 0
self.step_per_arm = np.zeros(arms)
self.mean_reward = 0
self.reward = np.zeros(iterations)
self.mean_reward_per_arm = np.zeros(arms) # predicted value, self.arm_rewards is true value
if type(avg_arm_rewards) == list:
self.arm_rewards = avg_arm_rewards
elif avg_arm_rewards == "random":
# ** samples taken from normal dist. ** np.random.normal (mean, standard dev., no. samples)
self.arm_rewards = np.random.normal(0, 1, arms)
elif avg_arm_rewards == "linspace":
            # ** evenly spaced rewards, used for testing: higher arm number -> higher reward **
self.arm_rewards = np.linspace(0, arms - 1, arms)
def pull(self):
        # draw a random number to compare against epsilon
compare_probablity = np.random.rand()
if self.elipson == 0 and self.steps == 0: # random action to start off
action = np.random.choice(self.arms)
elif compare_probablity < self.elipson: # explore
action = np.random.choice(self.arms)
else: # exploit
action = np.argmax(self.mean_reward_per_arm)
reward = np.random.normal(self.arm_rewards[action], 1)
self.steps += 1
self.step_per_arm[action] += 1
# based on analytical formula, saves indexing of all prev. values
self.mean_reward = self.mean_reward + (reward - self.mean_reward) / self.steps
# same thing below except updating the individual arm reward
self.mean_reward_per_arm[action] += (reward - self.mean_reward_per_arm[action]) / self.step_per_arm[action]
def iterate(self):
for iteration in range(self.iterations):
self.pull()
self.reward[iteration] = self.mean_reward
class e_decay_bandit(greedy_bandit):
def __init__(self, arms, decay_rate, elipson, iterations, avg_arm_rewards):
greedy_bandit.__init__(self, arms, elipson, iterations, avg_arm_rewards)
self.decay_rate = decay_rate
def decay_pull(self):
compare_probablity = np.random.rand()
if self.elipson == 0 and self.steps == 0: # random action to start off
action = np.random.choice(self.arms)
elif compare_probablity < self.elipson: # explore
action = np.random.choice(self.arms)
else: # exploit
action = np.argmax(self.mean_reward_per_arm)
reward = np.random.normal(self.arm_rewards[action], 1)
self.steps += 1
self.step_per_arm[action] += 1
# based on analytical formula, saves indexing of all prev. values
self.mean_reward += (reward - self.mean_reward) / self.steps
# same thing below except updating the individual arm reward
self.mean_reward_per_arm[action] += self.decay_rate * (reward - self.mean_reward_per_arm[action])
def decay_iterate(self):
for iteration in range(self.iterations):
self.decay_pull()
self.reward[iteration] = self.mean_reward
class ucb_bandit(greedy_bandit):
def __init__(self, arms, c, iteras, avg_arm_rewards):
greedy_bandit.__init__(self, arms, None, iteras, avg_arm_rewards)
self.confidence_level = c
self.t = np.zeros(arms)
def ucb_pull(self):
if self.steps == 0: # random action to start off
action = np.random.choice(self.arms)
else:
action = np.argmax(
self.mean_reward_per_arm + (self.confidence_level * (np.sqrt(np.log(self.t) / self.step_per_arm))))
reward = np.random.normal(self.arm_rewards[action], 1)
self.steps += 1
self.step_per_arm[action] += 1
for index, arm in enumerate(self.t):
if arm != action:
self.t[index] += 1
self.mean_reward += (reward - self.mean_reward) / self.steps
self.mean_reward_per_arm[action] += (reward - self.mean_reward_per_arm[action]) / self.step_per_arm[action]
def ucb_iterate(self):
for iteration in range(self.iterations):
self.ucb_pull()
self.reward[iteration] = self.mean_reward
eps_01_rewards = np.zeros(iters)
eps_1_rewards = np.zeros(iters)
eps_decay_rewards = np.zeros(iters)
ucb_rewards = np.zeros(iters)
for i in range(episodes):
random_test_list = [x for x in np.random.normal(0, 1, k)]
eps_01 = greedy_bandit(k, 0.01, iters, random_test_list)
eps_1 = greedy_bandit(k, 0.1, iters, random_test_list)
eps_decay = e_decay_bandit(k, 0.3, 0.1, iters, random_test_list)
ucb = ucb_bandit(k, 2, iters, random_test_list)
eps_01.iterate()
eps_1.iterate()
eps_decay.decay_iterate()
ucb.ucb_iterate()
eps_01_rewards += (eps_01.reward - eps_01_rewards) / (i + 1)
eps_1_rewards += (eps_1.reward - eps_1_rewards) / (i + 1)
eps_decay_rewards += (eps_decay.reward - eps_decay_rewards) / (i + 1)
ucb_rewards += (ucb.reward - ucb_rewards) / (i + 1)
plt.figure(figsize=(12, 8))
plt.plot(eps_01_rewards, label="epsilon=0.01")
plt.plot(eps_1_rewards, label="epsilon=0.1")
plt.plot(eps_decay_rewards, label="e_decay")
plt.plot(ucb_rewards, label="upper confidence bound")
plt.legend()
plt.xlabel("Iterations")
plt.ylabel("Average Reward")
plt.title("Avg Rewards after " + str(episodes) + " Episodes")
plt.show()
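# Illustrative sketch (not part of the original file): both the per-arm value estimates above and
# the across-episode averaging (e.g. eps_01_rewards += (eps_01.reward - eps_01_rewards) / (i + 1))
# use the incremental mean update Q_n = Q_{n-1} + (r_n - Q_{n-1}) / n, which equals the plain
# arithmetic mean while needing O(1) memory. Small numerical check:
import numpy as np

rewards = np.random.normal(0, 1, 1000)
q = 0.0
for n, r in enumerate(rewards, start=1):
    q += (r - q) / n                      # incremental update, no history stored
print(np.isclose(q, rewards.mean()))      # True: matches the batch mean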
| 0
| 0
| 0
| 4,255
| 0
| 0
| 0
| 0
| 69
|
7c27aeb1bb208e81c2c11b9d2d0a8d3764e5179a
| 68,168
|
py
|
Python
|
nemo/collections/nlp/models/machine_translation/mt_enc_dec_model.py
|
jubick1337/NeMo
|
9d50733ba0e698b98d0019e9e697686e0a24b90e
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/models/machine_translation/mt_enc_dec_model.py
|
jubick1337/NeMo
|
9d50733ba0e698b98d0019e9e697686e0a24b90e
|
[
"Apache-2.0"
] | 1
|
2022-03-06T14:09:02.000Z
|
2022-03-06T14:09:02.000Z
|
nemo/collections/nlp/models/machine_translation/mt_enc_dec_model.py
|
jubick1337/NeMo
|
9d50733ba0e698b98d0019e9e697686e0a24b90e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['MTEncDecModel']
| 49.147801
| 254
| 0.640594
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import random
from pathlib import Path
from typing import Dict, List, Optional, Union
import numpy as np
import torch
import torch.distributed as dist
import torch.utils.data as pt_data
import webdataset as wd
from omegaconf import DictConfig, ListConfig, OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import rank_zero_only
from sacrebleu import corpus_bleu
from nemo.collections.common.data import ConcatDataset
from nemo.collections.common.losses import NLLLoss, SmoothedCrossEntropyLoss
from nemo.collections.common.metrics import GlobalAverageLossMetric
from nemo.collections.common.parts import transformer_weights_init
from nemo.collections.common.tokenizers.bytelevel_tokenizers import ByteLevelProcessor
from nemo.collections.common.tokenizers.chinese_tokenizers import ChineseProcessor
from nemo.collections.common.tokenizers.en_ja_tokenizers import EnJaProcessor, JaMecabProcessor
from nemo.collections.common.tokenizers.indic_tokenizers import IndicProcessor
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.nlp.data import TarredTranslationDataset, TranslationDataset
from nemo.collections.nlp.models.enc_dec_nlp_model import EncDecNLPModel
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.modules.common import TokenClassifier
from nemo.collections.nlp.modules.common.lm_utils import get_transformer
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.collections.nlp.modules.common.transformer import BeamSearchSequenceGenerator, TopKSequenceGenerator
from nemo.core.classes import Exportable
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.utils import logging, model_utils, timers
__all__ = ['MTEncDecModel']
class MTEncDecModel(EncDecNLPModel, Exportable):
"""
Encoder-decoder machine translation model.
"""
def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None):
cfg = model_utils.convert_model_config_to_dict_config(cfg)
# Get global rank and total number of GPU workers for IterableDataset partitioning, if applicable
# Global_rank and local_rank is set by LightningModule in Lightning 1.2.0
self.world_size = 1
if trainer is not None:
self.world_size = trainer.num_nodes * trainer.num_devices
cfg = model_utils.maybe_update_config_version(cfg)
self.src_language = cfg.get("src_language", None)
self.tgt_language = cfg.get("tgt_language", None)
self.multilingual = cfg.get("multilingual", False)
self.multilingual_ids = []
self.special_tokens = {}
self.encoder_tokenizer_library = cfg.encoder_tokenizer.get('library', 'yttm')
self.decoder_tokenizer_library = cfg.decoder_tokenizer.get('library', 'yttm')
self.validate_input_ids = cfg.get("validate_input_ids", True)
if self.multilingual:
if isinstance(self.src_language, ListConfig) and isinstance(self.tgt_language, ListConfig):
raise ValueError(
"cfg.src_language and cfg.tgt_language cannot both be lists. We only support many-to-one or one-to-many multilingual models."
)
elif isinstance(self.src_language, ListConfig):
pass
elif isinstance(self.tgt_language, ListConfig):
for lng in self.tgt_language:
self.special_tokens["<" + lng + ">"] = "<" + lng + ">"
else:
raise ValueError(
"Expect either cfg.src_language or cfg.tgt_language to be a list when multilingual=True."
)
self.shared_embeddings = cfg.get("shared_embeddings", False)
# Instantiates tokenizers and register to be saved with NeMo Model archive
        # After this call, there will be self.encoder_tokenizer and self.decoder_tokenizer
# Which can convert between tokens and token_ids for SRC and TGT languages correspondingly.
encoder_tokenizer_model, decoder_tokenizer_model, encoder_vocab_file = None, None, None
if cfg.encoder_tokenizer.get('tokenizer_model') is not None:
encoder_tokenizer_model = self.register_artifact(
"encoder_tokenizer.tokenizer_model", cfg.encoder_tokenizer.get('tokenizer_model')
)
if cfg.decoder_tokenizer.get('tokenizer_model') is not None:
decoder_tokenizer_model = self.register_artifact(
"decoder_tokenizer.tokenizer_model", cfg.decoder_tokenizer.get('tokenizer_model')
)
if cfg.encoder_tokenizer.get('vocab_file') is not None:
encoder_vocab_file = (
self.register_artifact("encoder_tokenizer.vocab_file", cfg.encoder_tokenizer.get('vocab_file')),
)
encoder_tokenizer, decoder_tokenizer = MTEncDecModel.setup_enc_dec_tokenizers(
encoder_tokenizer_library=self.encoder_tokenizer_library,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0)
if cfg.encoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
encoder_model_name=cfg.encoder.get('model_name') if hasattr(cfg.encoder, 'model_name') else None,
encoder_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_library=self.decoder_tokenizer_library,
encoder_tokenizer_vocab_file=encoder_vocab_file,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0)
if cfg.decoder_tokenizer.get('bpe_dropout', 0.0) is not None
else 0.0,
decoder_model_name=cfg.decoder.get('model_name') if hasattr(cfg.decoder, 'model_name') else None,
decoder_r2l=cfg.decoder_tokenizer.get('r2l', False),
special_tokens=self.special_tokens,
encoder_sentencepiece_legacy=cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
decoder_sentencepiece_legacy=cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
)
self.encoder_tokenizer, self.decoder_tokenizer = encoder_tokenizer, decoder_tokenizer
if self.multilingual:
(
self.source_processor_list,
self.target_processor_list,
self.multilingual_ids,
) = MTEncDecModel.setup_multilingual_ids_and_processors(
self.src_language,
self.tgt_language,
self.encoder_tokenizer,
self.encoder_tokenizer_library,
self.decoder_tokenizer_library,
)
else:
# After this call, the model will have self.source_processor and self.target_processor objects
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
self.src_language, self.tgt_language, self.encoder_tokenizer_library, self.decoder_tokenizer_library
)
self.multilingual_ids = [None]
# TODO: Why is this base constructor call so late in the game?
super().__init__(cfg=cfg, trainer=trainer)
# encoder from NeMo, Megatron-LM, or HuggingFace
encoder_cfg_dict = OmegaConf.to_container(cfg.get('encoder'))
encoder_cfg_dict['vocab_size'] = self.encoder_vocab_size
library = encoder_cfg_dict.pop('library', 'nemo')
model_name = encoder_cfg_dict.pop('model_name', None)
pretrained = encoder_cfg_dict.pop('pretrained', False)
checkpoint_file = encoder_cfg_dict.pop('checkpoint_file', None)
self.encoder = get_transformer(
library=library,
model_name=model_name,
pretrained=pretrained,
config_dict=encoder_cfg_dict,
encoder=True,
pre_ln_final_layer_norm=encoder_cfg_dict.get('pre_ln_final_layer_norm', False),
checkpoint_file=checkpoint_file,
)
# decoder from NeMo, Megatron-LM, or HuggingFace
decoder_cfg_dict = OmegaConf.to_container(cfg.get('decoder'))
decoder_cfg_dict['vocab_size'] = self.decoder_vocab_size
library = decoder_cfg_dict.pop('library', 'nemo')
model_name = decoder_cfg_dict.pop('model_name', None)
pretrained = decoder_cfg_dict.pop('pretrained', False)
self.decoder = get_transformer(
library=library,
model_name=model_name,
pretrained=pretrained,
config_dict=decoder_cfg_dict,
encoder=False,
pre_ln_final_layer_norm=decoder_cfg_dict.get('pre_ln_final_layer_norm', False),
)
# validate hidden_size of encoder and decoder
self._validate_encoder_decoder_hidden_size()
self.log_softmax = TokenClassifier(
hidden_size=self.decoder.hidden_size,
num_classes=self.decoder_vocab_size,
activation=cfg.head.activation,
log_softmax=cfg.head.log_softmax,
dropout=cfg.head.dropout,
use_transformer_init=cfg.head.use_transformer_init,
)
self.beam_search = BeamSearchSequenceGenerator(
embedding=self.decoder.embedding,
decoder=self.decoder.decoder,
log_softmax=self.log_softmax,
max_sequence_length=self.decoder.max_sequence_length,
beam_size=cfg.beam_size,
bos=self.decoder_tokenizer.bos_id,
pad=self.decoder_tokenizer.pad_id,
eos=self.decoder_tokenizer.eos_id,
len_pen=cfg.len_pen,
max_delta_length=cfg.max_generation_delta,
)
# tie embedding weights
if self.shared_embeddings:
if not cfg.get("shared_tokenizer", True):
raise ValueError("shared_tokenizer cannot be False when shared_embeddings is True")
# validate vocabulary size and embedding dimension
if (
self.encoder.embedding.token_embedding.weight.shape
!= self.decoder.embedding.token_embedding.weight.shape
):
raise ValueError(
f"Cannot tie encoder and decoder embeddings due to mismatch in embedding sizes "
f"(num_embeddings, embedding_dim): {self.encoder.embedding.token_embedding.weight.shape} (encoder) "
f"{self.decoder.embedding.token_embedding.weight.shape} (decoder)"
)
self.encoder.embedding.token_embedding.weight = self.decoder.embedding.token_embedding.weight
# tie weights of embedding and softmax matrices
self.log_softmax.mlp.layer0.weight = self.decoder.embedding.token_embedding.weight
# TODO: encoder and decoder with different hidden size?
std_init_range = 1 / self.encoder.hidden_size ** 0.5
# initialize weights if not using pretrained encoder/decoder
if not self._cfg.encoder.get('pretrained', False):
self.encoder.apply(lambda module: transformer_weights_init(module, std_init_range))
if not self._cfg.decoder.get('pretrained', False):
self.decoder.apply(lambda module: transformer_weights_init(module, std_init_range))
self.log_softmax.apply(lambda module: transformer_weights_init(module, std_init_range))
self.loss_fn = SmoothedCrossEntropyLoss(
pad_id=self.decoder_tokenizer.pad_id, label_smoothing=cfg.label_smoothing
)
self.eval_loss_fn = NLLLoss(ignore_index=self.decoder_tokenizer.pad_id)
@classmethod
def setup_multilingual_ids_and_processors(
cls, src_language, tgt_language, encoder_tokenizer, encoder_tokenizer_library, decoder_tokenizer_library
):
multilingual_ids = []
if isinstance(src_language, ListConfig):
for lng in src_language:
multilingual_ids.append(None)
else:
for lng in tgt_language:
if f"<{lng}>" not in encoder_tokenizer.vocab:
encoder_tokenizer.add_special_tokens({f"<{lng}>": f"<{lng}>"})
multilingual_ids.append(encoder_tokenizer.token_to_id(f"<{lng}>"))
if isinstance(src_language, ListConfig):
tgt_language = [tgt_language] * len(src_language)
else:
src_language = [src_language] * len(tgt_language)
source_processor_list = []
target_processor_list = []
for src_lng, tgt_lng in zip(src_language, tgt_language):
src_prcsr, tgt_prscr = MTEncDecModel.setup_pre_and_post_processing_utils(
src_lng, tgt_lng, encoder_tokenizer_library, decoder_tokenizer_library
)
source_processor_list.append(src_prcsr)
target_processor_list.append(tgt_prscr)
return source_processor_list, target_processor_list, multilingual_ids
def _validate_encoder_decoder_hidden_size(self):
"""
        Validate that the encoder and decoder hidden sizes match, and enforce equality.
        Can be overridden by child classes to support different encoder and decoder
        hidden sizes.
"""
if self.encoder.hidden_size != self.decoder.hidden_size:
raise ValueError(
f"Class does not support encoder.hidden_size ({self.encoder.hidden_size}) != decoder.hidden_size ({self.decoder.hidden_size}). Please use bottleneck architecture instead (i.e., model.encoder.arch = 'seq2seq' in conf/aayn_bottleneck.yaml)"
)
@classmethod
def filter_predicted_ids(cls, ids, decoder_tokenizer):
ids[ids >= decoder_tokenizer.vocab_size] = decoder_tokenizer.unk_id
return ids
def test_encoder_ids(self, ids, raise_error=False):
invalid_ids = torch.logical_or((ids >= self.encoder_tokenizer.vocab_size).any(), (ids < 0).any(),)
if raise_error and invalid_ids:
raise ValueError("Encoder ids are out of range (tip: check encoder tokenizer)")
return not invalid_ids
def test_decoder_ids(self, ids, raise_error=False):
invalid_ids = torch.logical_or((ids >= self.decoder_tokenizer.vocab_size).any(), (ids < 0).any(),)
if raise_error and invalid_ids:
raise ValueError("Decoder ids are out of range (tip: check decoder tokenizer)")
return not invalid_ids
@typecheck()
def forward(self, src, src_mask, tgt, tgt_mask):
if self.validate_input_ids:
            # test src/tgt ids are within range (i.e., helps catch a wrong tokenizer)
self.test_encoder_ids(src, raise_error=True)
self.test_decoder_ids(tgt, raise_error=True)
src_hiddens = self.encoder(input_ids=src, encoder_mask=src_mask)
tgt_hiddens = self.decoder(
input_ids=tgt, decoder_mask=tgt_mask, encoder_embeddings=src_hiddens, encoder_mask=src_mask
)
log_probs = self.log_softmax(hidden_states=tgt_hiddens)
return log_probs
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop with the data from the training dataloader
passed in as `batch`.
"""
# forward pass
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)
train_loss = self.loss_fn(log_probs=log_probs, labels=labels)
tensorboard_logs = {
'train_loss': train_loss,
'lr': self._optimizer.param_groups[0]['lr'],
}
return {'loss': train_loss, 'log': tensorboard_logs}
def eval_step(self, batch, batch_idx, mode, dataloader_idx=0):
for i in range(len(batch)):
if batch[i].ndim == 3:
# Dataset returns already batched data and the first dimension of size 1 added by DataLoader
# is excess.
batch[i] = batch[i].squeeze(dim=0)
if self.multilingual:
self.source_processor = self.source_processor_list[dataloader_idx]
self.target_processor = self.target_processor_list[dataloader_idx]
src_ids, src_mask, tgt_ids, tgt_mask, labels = batch
log_probs = self(src_ids, src_mask, tgt_ids, tgt_mask)
eval_loss = self.eval_loss_fn(log_probs=log_probs, labels=labels)
# this will run encoder twice -- TODO: potentially fix
inputs, translations = self.batch_translate(src=src_ids, src_mask=src_mask)
if dataloader_idx == 0:
getattr(self, f'{mode}_loss')(loss=eval_loss, num_measurements=log_probs.shape[0] * log_probs.shape[1])
else:
getattr(self, f'{mode}_loss_{dataloader_idx}')(
loss=eval_loss, num_measurements=log_probs.shape[0] * log_probs.shape[1]
)
np_tgt = tgt_ids.detach().cpu().numpy()
ground_truths = [self.decoder_tokenizer.ids_to_text(tgt) for tgt in np_tgt]
ground_truths = [self.target_processor.detokenize(tgt.split(' ')) for tgt in ground_truths]
num_non_pad_tokens = np.not_equal(np_tgt, self.decoder_tokenizer.pad_id).sum().item()
return {
'inputs': inputs,
'translations': translations,
'ground_truths': ground_truths,
'num_non_pad_tokens': num_non_pad_tokens,
}
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self.eval_step(batch, batch_idx, 'test', dataloader_idx)
@rank_zero_only
def log_param_stats(self):
for name, p in self.named_parameters():
if p.requires_grad:
self.trainer.logger.experiment.add_histogram(name + '_hist', p, global_step=self.global_step)
self.trainer.logger.experiment.add_scalars(
name,
{'mean': p.mean(), 'stddev': p.std(), 'max': p.max(), 'min': p.min()},
global_step=self.global_step,
)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
"""
Lightning calls this inside the validation loop with the data from the validation dataloader
passed in as `batch`.
"""
return self.eval_step(batch, batch_idx, 'val', dataloader_idx)
def eval_epoch_end(self, outputs, mode, global_rank):
        # if the user specifies a single validation dataloader, PTL gives a list of dictionaries instead of a list of lists of dictionaries
if not outputs:
return
if isinstance(outputs[0], dict):
outputs = [outputs]
loss_list = []
sb_score_list = []
for dataloader_idx, output in enumerate(outputs):
if dataloader_idx == 0:
eval_loss = getattr(self, f'{mode}_loss').compute()
else:
eval_loss = getattr(self, f'{mode}_loss_{dataloader_idx}').compute()
inputs = list(itertools.chain(*[x['inputs'] for x in output]))
translations = list(itertools.chain(*[x['translations'] for x in output]))
ground_truths = list(itertools.chain(*[x['ground_truths'] for x in output]))
assert len(translations) == len(inputs)
assert len(translations) == len(ground_truths)
# Gather translations and ground truths from all workers
tr_and_gt = [None for _ in range(self.world_size)]
# we also need to drop pairs where ground truth is an empty string
dist.all_gather_object(
tr_and_gt, [(t, g) for (t, g) in zip(translations, ground_truths) if g.strip() != '']
)
if global_rank == 0:
_translations = []
_ground_truths = []
for rank in range(0, self.world_size):
_translations += [t for (t, g) in tr_and_gt[rank]]
_ground_truths += [g for (t, g) in tr_and_gt[rank]]
if self.multilingual and isinstance(self.tgt_language, ListConfig):
tgt_language = self.tgt_language[dataloader_idx]
else:
tgt_language = self.tgt_language
if tgt_language in ['ja', 'ja-mecab']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="ja-mecab")
elif tgt_language in ['zh']:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="zh")
else:
sacre_bleu = corpus_bleu(_translations, [_ground_truths], tokenize="13a")
                # multiplied by world_size because the later reduction op is an average over world_size
sb_score = sacre_bleu.score * self.world_size
dataset_name = "Validation" if mode == 'val' else "Test"
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Set size: {len(translations)}"
)
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Val Loss = {eval_loss}"
)
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Sacre BLEU = {sb_score / self.world_size}"
)
logging.info(
f"Dataset name: {dataset_name}, Dataloader index: {dataloader_idx}, Translation Examples:"
)
for i in range(0, 3):
ind = random.randint(0, len(translations) - 1)
logging.info(" " + '\u0332'.join(f"Example {i}:"))
logging.info(f" Input: {inputs[ind]}")
logging.info(f" Prediction: {translations[ind]}")
logging.info(f" Ground Truth: {ground_truths[ind]}")
else:
sb_score = 0.0
loss_list.append(eval_loss.cpu().numpy())
sb_score_list.append(sb_score)
if dataloader_idx == 0:
self.log(f"{mode}_loss", eval_loss, sync_dist=True)
self.log(f"{mode}_sacreBLEU", sb_score, sync_dist=True)
getattr(self, f'{mode}_loss').reset()
else:
self.log(f"{mode}_loss_dl_index_{dataloader_idx}", eval_loss, sync_dist=True)
self.log(f"{mode}_sacreBLEU_dl_index_{dataloader_idx}", sb_score, sync_dist=True)
getattr(self, f'{mode}_loss_{dataloader_idx}').reset()
if len(loss_list) > 1:
self.log(f"{mode}_loss_avg", np.mean(loss_list), sync_dist=True)
self.log(f"{mode}_sacreBLEU_avg", np.mean(sb_score_list), sync_dist=True)
def validation_epoch_end(self, outputs):
"""
Called at the end of validation to aggregate outputs.
:param outputs: list of individual outputs of each validation step.
"""
self.eval_epoch_end(outputs, 'val', self.global_rank)
def test_epoch_end(self, outputs):
self.eval_epoch_end(outputs, 'test', self.global_rank)
@classmethod
def setup_enc_dec_tokenizers(
cls,
encoder_tokenizer_library=None,
encoder_tokenizer_model=None,
encoder_bpe_dropout=0.0,
encoder_model_name=None,
encoder_r2l=False,
encoder_tokenizer_vocab_file=None,
decoder_tokenizer_library=None,
decoder_tokenizer_model=None,
decoder_bpe_dropout=0.0,
decoder_model_name=None,
decoder_r2l=False,
encoder_sentencepiece_legacy=False,
decoder_sentencepiece_legacy=False,
special_tokens={},
):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece', 'megatron', 'byte-level']
if (
encoder_tokenizer_library not in supported_tokenizers
or decoder_tokenizer_library not in supported_tokenizers
):
raise NotImplementedError(f"Currently we only support tokenizers in {supported_tokenizers}.")
encoder_tokenizer = get_nmt_tokenizer(
library=encoder_tokenizer_library,
tokenizer_model=encoder_tokenizer_model,
bpe_dropout=encoder_bpe_dropout,
model_name=encoder_model_name,
vocab_file=encoder_tokenizer_vocab_file,
special_tokens=special_tokens,
use_fast=False,
r2l=encoder_r2l,
legacy=encoder_sentencepiece_legacy,
)
decoder_tokenizer = get_nmt_tokenizer(
library=decoder_tokenizer_library,
tokenizer_model=decoder_tokenizer_model,
bpe_dropout=decoder_bpe_dropout,
model_name=decoder_model_name,
vocab_file=None,
special_tokens=special_tokens,
use_fast=False,
r2l=decoder_r2l,
legacy=decoder_sentencepiece_legacy,
)
# validate no token is negative for sentencepiece tokenizers
for tok_name, tok_library, tok_model, legacy in [
("encoder_tokenizer", encoder_tokenizer_library, encoder_tokenizer, encoder_sentencepiece_legacy),
("decoder_tokenizer", decoder_tokenizer_library, decoder_tokenizer, decoder_sentencepiece_legacy),
]:
if tok_library == 'sentencepiece':
negative_tokens = []
for n in ["eos_id", "bos_id", "unk_id", "pad_id"]:
v = getattr(tok_model.tokenizer, n)()
if v < 0:
negative_tokens.append(f"{n}={v}")
if negative_tokens and not legacy:
raise ValueError(
f"{tok_name}=sentencepiece has invalid negative special tokens = {negative_tokens}"
)
# If using the legacy sentencepiece tokenizer, we can add the missing tokens as "special" tokens.
else:
# If using sentencepiece legacy, eos, bos and pad need to be set/added differently.
if legacy:
# bos, eos, pad and unk may be present in the provided spm .model file, if they are, use it.
if not hasattr(tok_model, 'pad_token'):
if hasattr(tok_model.tokenizer, 'pad_id') and tok_model.tokenizer.pad_id() > 0:
tok_model.pad_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.pad_id())
else:
tok_model.add_special_tokens({'pad_token': '<pad>'})
else:
tok_model.add_special_tokens({'pad_token': '<pad>'})
if not hasattr(tok_model, 'bos_token'):
if hasattr(tok_model.tokenizer, 'bos_id') and tok_model.tokenizer.bos_id() > 0:
tok_model.bos_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.bos_id())
else:
tok_model.add_special_tokens({'bos_token': '<bos>'})
else:
tok_model.add_special_tokens({'bos_token': '<s>'})
if not hasattr(tok_model, 'eos_token'):
if hasattr(tok_model.tokenizer, 'eos_id') and tok_model.tokenizer.eos_id() > 0:
tok_model.eos_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.eos_id())
else:
tok_model.add_special_tokens({'eos_token': '<eos>'})
else:
tok_model.add_special_tokens({'eos_token': '</s>'})
return encoder_tokenizer, decoder_tokenizer
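    # Illustrative sketch (comments only, not part of the original file): the classmethod above can
    # also be called on its own; the tokenizer model paths below are hypothetical.
    #   enc_tok, dec_tok = MTEncDecModel.setup_enc_dec_tokenizers(
    #       encoder_tokenizer_library='yttm', encoder_tokenizer_model='enc_tokenizer.model',
    #       decoder_tokenizer_library='yttm', decoder_tokenizer_model='dec_tokenizer.model',
    #   )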
def setup_training_data(self, train_data_config: Optional[DictConfig]):
self._train_ds = MTEncDecModel._setup_dataset_from_config(
cfg=train_data_config,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
global_rank=self.global_rank,
world_size=self.world_size,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
)
self._train_dl = MTEncDecModel._setup_dataloader_from_config(cfg=train_data_config, dataset=self._train_ds,)
def setup_multiple_validation_data(self, val_data_config: Union[DictConfig, Dict]):
self.setup_validation_data(val_data_config)
def setup_multiple_test_data(self, test_data_config: Union[DictConfig, Dict]):
self.setup_test_data(test_data_config)
def setup_validation_data(self, val_data_config: Optional[DictConfig]):
self._validation_ds = MTEncDecModel._setup_eval_dataset_from_config(
cfg=val_data_config,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
self._validation_dl = MTEncDecModel._setup_eval_dataloader_from_config(
cfg=val_data_config, datasets=self._validation_ds
)
# instantiate Torchmetric for each val dataloader
if self._validation_dl is not None:
for dataloader_idx in range(len(self._validation_dl)):
if dataloader_idx == 0:
setattr(
self, f'val_loss', GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
else:
setattr(
self,
f'val_loss_{dataloader_idx}',
GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
def setup_test_data(self, test_data_config: Optional[DictConfig]):
self._test_ds = MTEncDecModel._setup_eval_dataset_from_config(
cfg=test_data_config,
multilingual=self.multilingual,
multilingual_ids=self.multilingual_ids,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
)
self._test_dl = MTEncDecModel._setup_eval_dataloader_from_config(cfg=test_data_config, datasets=self._test_ds)
# instantiate Torchmetric for each test dataloader
if self._test_dl is not None:
for dataloader_idx in range(len(self._test_dl)):
if dataloader_idx == 0:
setattr(
self, f'test_loss', GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
else:
setattr(
self,
f'test_loss_{dataloader_idx}',
GlobalAverageLossMetric(dist_sync_on_step=False, take_avg_loss=True),
)
@classmethod
def _setup_dataset_from_config(
cls,
cfg: DictConfig,
encoder_tokenizer,
decoder_tokenizer,
global_rank,
world_size,
multilingual,
multilingual_ids,
):
if cfg.get("use_tarred_dataset", False) or cfg.get("dataset_type", "") == "tarred":
if cfg.get("metadata_file") is None:
raise FileNotFoundError("Trying to use tarred data set but could not find metadata path in config.")
metadata_file_list = cfg.get('metadata_file')
tar_files_list = cfg.get('tar_files', None)
if isinstance(metadata_file_list, str):
metadata_file_list = [metadata_file_list]
if tar_files_list is not None and isinstance(tar_files_list, str):
tar_files_list = [tar_files_list]
if tar_files_list is not None and len(tar_files_list) != len(metadata_file_list):
raise ValueError('The config must have the same number of tarfile paths and metadata file paths.')
datasets = []
for idx, metadata_file in enumerate(metadata_file_list):
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if tar_files_list is None:
tar_files = metadata.get('tar_files')
if tar_files is not None:
# update absolute path of tar files based on metadata_file path
valid_tar_files = []
metadata_basedir = os.path.abspath(os.path.dirname(metadata_file))
updated_fn = 0
for fn in tar_files:
# if a file does not exist, look in metadata file directory
if os.path.exists(fn):
valid_fn = fn
else:
updated_fn += 1
valid_fn = os.path.join(metadata_basedir, os.path.basename(fn))
if not os.path.exists(valid_fn):
raise RuntimeError(
f"File in tarred dataset is missing from absolute and relative paths {fn}"
)
valid_tar_files.append(valid_fn)
tar_files = valid_tar_files
logging.info(f'Updated the path of {updated_fn} tarred files')
logging.info(f'Loading from tarred dataset {tar_files}')
else:
tar_files = tar_files_list[idx]
if metadata.get('tar_files') is not None:
logging.info(
f'Tar file paths found in both cfg and metadata using one in cfg by default - {tar_files}'
)
dataset = TarredTranslationDataset(
text_tar_filepaths=tar_files,
metadata_path=metadata_file,
encoder_tokenizer=encoder_tokenizer,
decoder_tokenizer=decoder_tokenizer,
shuffle_n=cfg.get("tar_shuffle_n", 100),
shard_strategy=cfg.get("shard_strategy", "scatter"),
global_rank=global_rank,
world_size=world_size,
reverse_lang_direction=cfg.get("reverse_lang_direction", False),
prepend_id=multilingual_ids[idx] if multilingual else None,
)
datasets.append(dataset)
if len(datasets) > 1:
dataset = ConcatDataset(
datasets=datasets,
sampling_technique=cfg.get('concat_sampling_technique'),
sampling_temperature=cfg.get('concat_sampling_temperature'),
sampling_probabilities=cfg.get('concat_sampling_probabilities'),
global_rank=global_rank,
world_size=world_size,
)
else:
dataset = datasets[0]
else:
src_file_list = cfg.src_file_name
tgt_file_list = cfg.tgt_file_name
if isinstance(src_file_list, str):
src_file_list = [src_file_list]
if isinstance(tgt_file_list, str):
tgt_file_list = [tgt_file_list]
if len(src_file_list) != len(tgt_file_list):
raise ValueError('The same number of filepaths must be passed in for source and target.')
datasets = []
for idx, src_file in enumerate(src_file_list):
dataset = TranslationDataset(
dataset_src=str(Path(src_file).expanduser()),
dataset_tgt=str(Path(tgt_file_list[idx]).expanduser()),
tokens_in_batch=cfg.tokens_in_batch,
clean=cfg.get("clean", False),
max_seq_length=cfg.get("max_seq_length", 512),
min_seq_length=cfg.get("min_seq_length", 1),
max_seq_length_diff=cfg.get("max_seq_length_diff", 512),
max_seq_length_ratio=cfg.get("max_seq_length_ratio", 512),
cache_ids=cfg.get("cache_ids", False),
cache_data_per_node=cfg.get("cache_data_per_node", False),
use_cache=cfg.get("use_cache", False),
reverse_lang_direction=cfg.get("reverse_lang_direction", False),
prepend_id=multilingual_ids[idx] if multilingual else None,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
datasets.append(dataset)
if len(datasets) > 1:
dataset = ConcatDataset(
datasets=datasets,
shuffle=cfg.get('shuffle'),
sampling_technique=cfg.get('concat_sampling_technique'),
sampling_temperature=cfg.get('concat_sampling_temperature'),
sampling_probabilities=cfg.get('concat_sampling_probabilities'),
global_rank=global_rank,
world_size=world_size,
)
else:
dataset = datasets[0]
return dataset
@classmethod
def _setup_dataloader_from_config(cls, cfg, dataset):
if cfg.shuffle:
sampler = pt_data.RandomSampler(dataset)
else:
sampler = pt_data.SequentialSampler(dataset)
return torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
sampler=None
if (
cfg.get("use_tarred_dataset", False)
or cfg.get("dataset_type", "") == "tarred"
or isinstance(dataset, ConcatDataset)
)
else sampler,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
def replace_beam_with_sampling(self, topk=500):
self.beam_search = TopKSequenceGenerator(
embedding=self.decoder.embedding,
decoder=self.decoder.decoder,
log_softmax=self.log_softmax,
max_sequence_length=self.beam_search.max_seq_length,
beam_size=topk,
bos=self.decoder_tokenizer.bos_id,
pad=self.decoder_tokenizer.pad_id,
eos=self.decoder_tokenizer.eos_id,
)
@classmethod
def _setup_eval_dataset_from_config(
cls, cfg: DictConfig, multilingual: bool, multilingual_ids, encoder_tokenizer, decoder_tokenizer
):
src_file_name = cfg.get('src_file_name')
tgt_file_name = cfg.get('tgt_file_name')
if src_file_name is None or tgt_file_name is None:
raise ValueError(
'Validation dataloader needs both cfg.src_file_name and cfg.tgt_file_name to not be None.'
)
else:
# convert src_file_name and tgt_file_name to list of strings
if isinstance(src_file_name, str):
src_file_list = [src_file_name]
elif isinstance(src_file_name, ListConfig):
src_file_list = src_file_name
else:
raise ValueError("cfg.src_file_name must be string or list of strings")
if isinstance(tgt_file_name, str):
tgt_file_list = [tgt_file_name]
elif isinstance(tgt_file_name, ListConfig):
tgt_file_list = tgt_file_name
else:
raise ValueError("cfg.tgt_file_name must be string or list of strings")
if len(src_file_list) != len(tgt_file_list):
raise ValueError('The same number of filepaths must be passed in for source and target validation.')
datasets = []
prepend_idx = 0
for idx, src_file in enumerate(src_file_list):
if multilingual:
prepend_idx = idx
dataset = TranslationDataset(
dataset_src=str(Path(src_file).expanduser()),
dataset_tgt=str(Path(tgt_file_list[idx]).expanduser()),
tokens_in_batch=cfg.tokens_in_batch,
clean=cfg.get("clean", False),
max_seq_length=cfg.get("max_seq_length", 512),
min_seq_length=cfg.get("min_seq_length", 1),
max_seq_length_diff=cfg.get("max_seq_length_diff", 512),
max_seq_length_ratio=cfg.get("max_seq_length_ratio", 512),
cache_ids=cfg.get("cache_ids", False),
cache_data_per_node=cfg.get("cache_data_per_node", False),
use_cache=cfg.get("use_cache", False),
reverse_lang_direction=cfg.get("reverse_lang_direction", False),
prepend_id=multilingual_ids[prepend_idx] if multilingual else None,
)
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
datasets.append(dataset)
return datasets
@classmethod
def _setup_eval_dataloader_from_config(cls, cfg, datasets):
if cfg.shuffle:
sampler = pt_data.RandomSampler(datasets[0])
else:
sampler = pt_data.SequentialSampler(datasets[0])
dataloaders = []
for dataset in datasets:
dataloaders.append(
torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
sampler=None
if (cfg.get("use_tarred_dataset", False) or isinstance(datasets[0], ConcatDataset))
else sampler,
num_workers=cfg.get("num_workers", 2),
pin_memory=cfg.get("pin_memory", False),
drop_last=cfg.get("drop_last", False),
)
)
return dataloaders
@classmethod
def setup_pre_and_post_processing_utils(
cls, source_lang, target_lang, encoder_tokenizer_library, decoder_tokenizer_library
):
"""
Creates source and target processor objects for input and output pre/post-processing.
"""
source_processor, target_processor = None, None
if encoder_tokenizer_library == 'byte-level':
source_processor = ByteLevelProcessor()
elif (source_lang == 'en' and target_lang == 'ja') or (source_lang == 'ja' and target_lang == 'en'):
source_processor = EnJaProcessor(source_lang)
elif source_lang == 'ja-mecab':
source_processor = JaMecabProcessor()
elif source_lang == 'zh':
source_processor = ChineseProcessor()
elif source_lang == 'hi':
source_processor = IndicProcessor(source_lang)
elif source_lang == 'ignore':
source_processor = None
elif source_lang is not None and source_lang not in ['ja', 'zh', 'hi']:
source_processor = MosesProcessor(source_lang)
if decoder_tokenizer_library == 'byte-level':
target_processor = ByteLevelProcessor()
elif (source_lang == 'en' and target_lang == 'ja') or (source_lang == 'ja' and target_lang == 'en'):
target_processor = EnJaProcessor(target_lang)
elif target_lang == 'ja-mecab':
target_processor = JaMecabProcessor()
elif target_lang == 'zh':
target_processor = ChineseProcessor()
elif target_lang == 'hi':
target_processor = IndicProcessor(target_lang)
elif target_lang == 'ignore':
target_processor = None
elif target_lang is not None and target_lang not in ['ja', 'zh', 'hi']:
target_processor = MosesProcessor(target_lang)
return source_processor, target_processor
@classmethod
def ids_to_postprocessed_text(cls, beam_ids, tokenizer, processor, filter_beam_ids=True):
if filter_beam_ids:
beam_ids = MTEncDecModel.filter_predicted_ids(beam_ids, decoder_tokenizer=tokenizer)
translations = [tokenizer.ids_to_text(tr) for tr in beam_ids.cpu().numpy()]
if processor is not None:
translations = [processor.detokenize(translation.split(' ')) for translation in translations]
return translations
@torch.no_grad()
def batch_translate(
self, src: torch.LongTensor, src_mask: torch.LongTensor, return_beam_scores: bool = False, cache={}
):
"""
Translates a minibatch of inputs from source language to target language.
Args:
src: minibatch of inputs in the src language (batch x seq_len)
src_mask: mask tensor indicating elements to be ignored (batch x seq_len)
Returns:
            translations: a list of strings containing detokenized translations
            inputs: a list of strings containing detokenized inputs
"""
mode = self.training
timer = cache.get("timer", None)
try:
self.eval()
if timer is not None:
timer.start("encoder")
src_hiddens = self.encoder(input_ids=src, encoder_mask=src_mask)
if timer is not None:
timer.stop("encoder")
timer.start("sampler")
best_translations = self.beam_search(
encoder_hidden_states=src_hiddens, encoder_input_mask=src_mask, return_beam_scores=return_beam_scores
)
if timer is not None:
timer.stop("sampler")
if return_beam_scores:
all_translations, scores, best_translations = best_translations
scores = scores.view(-1)
all_translations = MTEncDecModel.ids_to_postprocessed_text(
all_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
best_translations = MTEncDecModel.ids_to_postprocessed_text(
best_translations, self.decoder_tokenizer, self.target_processor, filter_beam_ids=True
)
inputs = MTEncDecModel.ids_to_postprocessed_text(
src, self.encoder_tokenizer, self.source_processor, filter_beam_ids=False
)
finally:
self.train(mode=mode)
if return_beam_scores:
return inputs, all_translations, scores.data.cpu().numpy().tolist(), best_translations
return inputs, best_translations
@classmethod
def prepare_inference_batch(
cls,
text,
prepend_ids=[],
target=False,
source_processor=None,
target_processor=None,
encoder_tokenizer=None,
decoder_tokenizer=None,
device=None,
):
inputs = []
processor = source_processor if not target else target_processor
tokenizer = encoder_tokenizer if not target else decoder_tokenizer
for txt in text:
if processor is not None:
txt = processor.normalize(txt)
txt = processor.tokenize(txt)
ids = tokenizer.text_to_ids(txt)
ids = prepend_ids + [tokenizer.bos_id] + ids + [tokenizer.eos_id]
inputs.append(ids)
max_len = max(len(txt) for txt in inputs)
src_ids_ = np.ones((len(inputs), max_len)) * tokenizer.pad_id
for i, txt in enumerate(inputs):
src_ids_[i][: len(txt)] = txt
src_mask = torch.FloatTensor((src_ids_ != tokenizer.pad_id)).to(device)
src = torch.LongTensor(src_ids_).to(device)
return src, src_mask
@torch.no_grad()
def translate(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
) -> List[str]:
"""
Translates list of sentences from source language to target language.
        The input should be regular text; this method performs its own tokenization/de-tokenization
Args:
text: list of strings to translate
source_lang: if not "ignore", corresponding MosesTokenizer and MosesPunctNormalizer will be run
            target_lang: if not "ignore", corresponding MosesDetokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
log_timing: if True, prints timing information.
Returns:
list of translated strings
"""
# __TODO__: This will reset both source and target processors even if you want to reset just one.
if source_lang is not None or target_lang is not None:
self.source_processor, self.target_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
source_lang, target_lang, self.encoder_tokenizer_library, self.decoder_tokenizer_library
)
mode = self.training
prepend_ids = []
if self.multilingual:
if source_lang is None or target_lang is None:
raise ValueError("Expect source_lang and target_lang to infer for multilingual model.")
src_symbol = self.encoder_tokenizer.token_to_id('<' + source_lang + '>')
tgt_symbol = self.encoder_tokenizer.token_to_id('<' + target_lang + '>')
if src_symbol in self.multilingual_ids:
prepend_ids = [src_symbol]
elif tgt_symbol in self.multilingual_ids:
prepend_ids = [tgt_symbol]
if log_timing:
timer = timers.NamedTimer()
else:
timer = None
cache = {
"timer": timer,
}
try:
self.eval()
src, src_mask = MTEncDecModel.prepare_inference_batch(
text=text,
prepend_ids=prepend_ids,
target=False,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
if return_beam_scores:
_, all_translations, scores, best_translations = self.batch_translate(
src, src_mask, return_beam_scores=True, cache=cache,
)
return_val = all_translations, scores, best_translations
else:
_, best_translations = self.batch_translate(src, src_mask, return_beam_scores=False, cache=cache)
return_val = best_translations
finally:
self.train(mode=mode)
if log_timing:
timing = timer.export()
timing["mean_src_length"] = src_mask.sum().cpu().item() / src_mask.shape[0]
tgt, tgt_mask = self.prepare_inference_batch(
text=best_translations,
prepend_ids=prepend_ids,
target=True,
source_processor=self.source_processor,
target_processor=self.target_processor,
encoder_tokenizer=self.encoder_tokenizer,
decoder_tokenizer=self.decoder_tokenizer,
device=self.device,
)
timing["mean_tgt_length"] = tgt_mask.sum().cpu().item() / tgt_mask.shape[0]
if type(return_val) is tuple:
return_val = return_val + (timing,)
else:
return_val = (return_val, timing)
return return_val
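    # Illustrative sketch (comments only, not part of the original file): translate() works on raw
    # strings; assuming the standard NeMo from_pretrained entry point and a model name listed in
    # list_available_models() below, usage looks roughly like
    #   model = MTEncDecModel.from_pretrained("nmt_en_de_transformer12x2")
    #   print(model.translate(["NeMo is a toolkit for conversational AI."],
    #                         source_lang="en", target_lang="de"))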
def itn_translate_tn(
self,
text: List[str],
source_lang: str = None,
target_lang: str = None,
return_beam_scores: bool = False,
log_timing: bool = False,
inverse_normalizer=None,
normalizer=None,
) -> List[str]:
"""
        Calls the translate() method with the option of running ITN (inverse text-normalization) on the input and TN (text-normalization) on the output.
        Pipeline: ITN -> translate -> TN
NOTE: ITN and TN objects must be initialized with the right languages.
Args:
text: list of strings to translate
source_lang: if not "ignore", corresponding MosesTokenizer and MosesPunctNormalizer will be run
            target_lang: if not "ignore", corresponding MosesDetokenizer will be run
return_beam_scores: if True, returns a list of translations and their corresponding beam scores.
            log_timing: if True, returns timing information in addition to the translations.
inverse_normalizer: instance of nemo_text_processing.inverse_text_normalization.inverse_normalize.InverseNormalizer
normalizer: instance of nemo_text_processing.text_normalization.normalize.Normalizer
Returns:
list of translated strings
"""
if inverse_normalizer is not None:
text = [inverse_normalizer.normalize(example) for example in text]
translations = self.translate(text, source_lang, target_lang, return_beam_scores, log_timing)
if normalizer is not None:
translations = [normalizer.normalize(example) for example in translations]
return translations
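    # Illustrative usage sketch (comment only, not part of the original file). The checkpoint
    # name is taken from list_available_models below; the (Inverse)Normalizer constructor
    # arguments and the example sentence are assumptions for illustration. The pipeline
    # follows the docstring above (ITN -> translate -> TN).
    #
    #   from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
    #   from nemo_text_processing.text_normalization.normalize import Normalizer
    #
    #   model = MTEncDecModel.from_pretrained("nmt_en_de_transformer24x6")
    #   itn = InverseNormalizer(lang="en")            # assumed constructor args
    #   tn = Normalizer(input_case="cased", lang="en")  # assumed constructor args
    #   print(model.itn_translate_tn(["it costs twenty five dollars"],
    #                                source_lang="en", target_lang="de",
    #                                inverse_normalizer=itn, normalizer=tn))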
# EncDecRNNTModel is exported in 2 parts
def list_export_subnets(self):
return ['encoder', 'decoder']
@classmethod
def list_available_models(cls) -> Optional[Dict[str, str]]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_de_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_de_transformer12x2/versions/1.0.0rc1/files/nmt_en_de_transformer12x2.nemo",
description="En->De translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_de_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_de_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_de_en_transformer12x2/versions/1.0.0rc1/files/nmt_de_en_transformer12x2.nemo",
description="De->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_de_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_es_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_es_transformer12x2/versions/1.0.0rc1/files/nmt_en_es_transformer12x2.nemo",
description="En->Es translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_es_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_es_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_es_en_transformer12x2/versions/1.0.0rc1/files/nmt_es_en_transformer12x2.nemo",
description="Es->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_es_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_fr_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_fr_transformer12x2/versions/1.0.0rc1/files/nmt_en_fr_transformer12x2.nemo",
description="En->Fr translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_fr_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_fr_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_fr_en_transformer12x2/versions/1.0.0rc1/files/nmt_fr_en_transformer12x2.nemo",
description="Fr->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_fr_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_ru_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_ru_transformer6x6/versions/1.0.0rc1/files/nmt_en_ru_transformer6x6.nemo",
description="En->Ru translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_ru_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_ru_en_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_ru_en_transformer6x6/versions/1.0.0rc1/files/nmt_ru_en_transformer6x6.nemo",
description="Ru->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_ru_en_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_zh_en_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_zh_en_transformer6x6/versions/1.0.0rc1/files/nmt_zh_en_transformer6x6.nemo",
description="Zh->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_zh_en_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_zh_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_zh_transformer6x6/versions/1.0.0rc1/files/nmt_en_zh_transformer6x6.nemo",
description="En->Zh translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_zh_transformer6x6",
)
result.append(model)
# English <-> Hindi models
model = PretrainedModelInfo(
pretrained_model_name="nmt_hi_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_hi_en_transformer12x2/versions/v1.0.0/files/nmt_hi_en_transformer12x2.nemo",
description="Hi->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_hi_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_hi_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_hi_transformer12x2/versions/v1.0.0/files/nmt_en_hi_transformer12x2.nemo",
description="En->Hi translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_hi_transformer12x2",
)
result.append(model)
# De/Fr/Es -> English models
model = PretrainedModelInfo(
pretrained_model_name="mnmt_deesfr_en_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_deesfr_en_transformer12x2/versions/1.2.0/files/mnmt_deesfr_en_transformer12x2.nemo",
description="De/Es/Fr->En multilingual many-one translation model. The model has 12 encoder and 2 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_deesfr_en_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_deesfr_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_deesfr_en_transformer24x6/versions/1.2.0/files/mnmt_deesfr_en_transformer24x6.nemo",
description="De/Es/Fr->En multilingual many-one translation model. The model has 24 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_deesfr_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_deesfr_en_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_deesfr_en_transformer6x6/versions/1.2.0/files/mnmt_deesfr_en_transformer6x6.nemo",
description="De/Es/Fr->En multilingual many-one translation model. The model has 6 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_deesfr_en_transformer6x6",
)
result.append(model)
# English -> De/Fr/Es models
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformer12x2",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformer12x2/versions/1.2.0/files/mnmt_en_deesfr_transformer12x2.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 12 encoder and 2 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformer12x2",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformer24x6/versions/1.2.0/files/mnmt_en_deesfr_transformer24x6.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 24 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformer6x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformer6x6/versions/1.2.0/files/mnmt_en_deesfr_transformer6x6.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 6 encoder and 6 decoder layers with hidden dim 1,024. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformer6x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="mnmt_en_deesfr_transformerbase",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/mnmt_en_deesfr_transformerbase/versions/1.2.0/files/mnmt_en_deesfr_transformerbase.nemo",
description="En->De/Es/Fr multilingual one-many translation model. The model has 6 encoder and 6 decoder layers with hidden dim 512. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:mnmt_en_deesfr_transformerbase",
)
result.append(model)
# 24x6 models
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_de_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_de_transformer24x6/versions/1.5/files/en_de_24x6.nemo",
description="En->De translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_de_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_de_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_de_en_transformer24x6/versions/1.5/files/de_en_24x6.nemo",
description="De->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_de_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_es_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_es_transformer24x6/versions/1.5/files/en_es_24x6.nemo",
description="En->Es translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_es_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_es_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_es_en_transformer24x6/versions/1.5/files/es_en_24x6.nemo",
description="Es->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_es_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_fr_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_fr_transformer24x6/versions/1.5/files/en_fr_24x6.nemo",
description="En->Fr translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_fr_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_fr_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_fr_en_transformer24x6/versions/1.5/files/fr_en_24x6.nemo",
description="Fr->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_fr_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_ru_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_ru_transformer24x6/versions/1.5/files/en_ru_24x6.nemo",
description="En->Ru translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_ru_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_ru_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_ru_en_transformer24x6/versions/1.5/files/ru_en_24x6.nemo",
description="Ru->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_ru_en_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_en_zh_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_en_zh_transformer24x6/versions/1.5/files/en_zh_24x6.nemo",
description="En->Zh translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_en_zh_transformer24x6",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="nmt_zh_en_transformer24x6",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/nmt_zh_en_transformer24x6/versions/1.5/files/zh_en_24x6.nemo",
description="Zh->En translation model. See details here: https://ngc.nvidia.com/catalog/models/nvidia:nemo:nmt_zh_en_transformer24x6",
)
result.append(model)
return result
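# --- Minimal usage sketch (appended for illustration; not part of the original module) ---
# Downloads one of the checkpoints listed in list_available_models above from NGC and
# translates an arbitrary sentence. Requires network access to fetch the .nemo file.
if __name__ == "__main__":
    mt_model = MTEncDecModel.from_pretrained(model_name="nmt_en_de_transformer24x6")
    print(mt_model.translate(["Machine translation is fun."], source_lang="en", target_lang="de"))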
| 0
| 40,864
| 0
| 24,770
| 0
| 0
| 0
| 1,120
| 774
|
ef3cd10a2068b30a9a75bed4bcdf212132ef1fe0
| 2,429
|
py
|
Python
|
tests/xoto3/utils/oncall_default_test.py
|
xoeye/xoto3
|
ef91cde3cce81e1ded311389358271d5c8eba02b
|
[
"MIT"
] | 16
|
2020-05-23T15:23:38.000Z
|
2022-03-18T19:28:37.000Z
|
tests/xoto3/utils/oncall_default_test.py
|
xoeye/xoto3
|
ef91cde3cce81e1ded311389358271d5c8eba02b
|
[
"MIT"
] | 9
|
2020-08-19T23:08:36.000Z
|
2021-10-06T17:16:35.000Z
|
tests/xoto3/utils/oncall_default_test.py
|
xoeye/xoto3
|
ef91cde3cce81e1ded311389358271d5c8eba02b
|
[
"MIT"
] | 2
|
2020-12-12T08:23:53.000Z
|
2021-09-03T20:25:54.000Z
|
# pylint: disable=unused-argument,unused-variable
from datetime import datetime
import pytest
from xoto3.utils.oncall_default import NotSafeToDefaultError, OnCallDefault
utcnow = OnCallDefault(datetime.utcnow)
def test_disallow_positional_without_default():
"""A positional-possible argument without a default could have a
positional argument provided after it and then we'd be unable to tell
for sure whether it had been provided intentionally.
"""
with pytest.raises(NotSafeToDefaultError):
GeorgeKwargs = OnCallDefault(lambda: dict(b=2, c=3))
| 26.692308
| 75
| 0.650473
|
# pylint: disable=unused-argument,unused-variable
from datetime import datetime
import pytest
from xoto3.utils.oncall_default import NotSafeToDefaultError, OnCallDefault
utcnow = OnCallDefault(datetime.utcnow)
def test_oncall_default_works_with_pos_or_kw():
@utcnow.apply_to("when")
def final(a: str, when: datetime = utcnow(), f: float = 1.2):
return when
assert final("a") <= utcnow()
val = datetime(1888, 8, 8, 8, 8, 8)
assert val == final("a", when=val)
assert val == final("c", f=4.2, when=val)
def test_oncall_default_works_with_kw_only():
@utcnow.apply_to("when")
def f(a: str, *, when: datetime = utcnow()):
return when
val = datetime(1900, 1, 1, 11, 11, 11)
assert val == f("3", when=val)
def test_deco_works_with_var_kwargs():
@utcnow.apply_to("when")
def f(**kwargs):
return kwargs["when"]
assert datetime.utcnow() <= f()
assert f() <= datetime.utcnow()
direct = datetime(2012, 12, 12, 12, 12, 12)
assert direct == f(when=direct)
def test_disallow_positional_without_default():
"""A positional-possible argument without a default could have a
positional argument provided after it and then we'd be unable to tell
for sure whether it had been provided intentionally.
"""
with pytest.raises(NotSafeToDefaultError):
@utcnow.apply_to("when")
def nope(when: datetime, a: int):
pass
def test_disallow_not_found_without_var_kwargs():
with pytest.raises(NotSafeToDefaultError):
@utcnow.apply_to("notthere")
def steve(a: str, *args, b=1, c=2):
pass
def test_disallow_var_args_name_matches():
with pytest.raises(NotSafeToDefaultError):
# *args itself has the default value 'new empty tuple', and if
# you want to provide a positional default you should give it
# a real name.
@utcnow.apply_to("args")
def felicity(a: str, *args):
pass
GeorgeKwargs = OnCallDefault(lambda: dict(b=2, c=3))
def test_allow_var_kwargs_merge():
# kwargs itself is a dict,
# and we will perform top-level merging
# for you if that's what you want
@GeorgeKwargs.apply_to("kwargs")
def george(a: str, **kwargs):
return kwargs
assert george("1") == dict(b=2, c=3)
assert george("2", b=3) == dict(b=3, c=3)
assert george("3", c=5, d=78) == dict(b=2, c=5, d=78)
| 0
| 469
| 0
| 0
| 0
| 1,211
| 0
| 0
| 169
|
9359e5c4e3e3bd39554fe97f058252575fbac126
| 4,385
|
py
|
Python
|
pointcloud/sample_points.py
|
ziyedy/category-priornet
|
5aa080eeff936ce3939f0d5458a2936677c15726
|
[
"MIT"
] | 72
|
2020-06-11T13:06:56.000Z
|
2021-12-07T02:57:51.000Z
|
pointcloud/sample_points.py
|
ziyedy/category-priornet
|
5aa080eeff936ce3939f0d5458a2936677c15726
|
[
"MIT"
] | 8
|
2020-07-24T09:13:14.000Z
|
2021-07-02T06:55:25.000Z
|
pointcloud/sample_points.py
|
ziyedy/category-priornet
|
5aa080eeff936ce3939f0d5458a2936677c15726
|
[
"MIT"
] | 11
|
2020-03-13T12:44:44.000Z
|
2021-05-12T05:13:26.000Z
|
if __name__ == '__main__':
main()
| 40.981308
| 122
| 0.618472
|
import os
import sys
import argparse
import numpy as np
import pandas as pd
from pyntcloud.io import write_ply
def get_skip_vertice_face_num(path):
with open(path) as file:
for i, line in enumerate(file):
if i == 0 and len(line) > 4: # not just "OFF\n"
digits = line[3:].split(" ")
vertices_num = int(digits[0])
faces_num = int(digits[1])
return 1, vertices_num, faces_num
if i == 1:
digits = line.split(' ')
vertices_num = int(digits[0])
faces_num = int(digits[1])
return 2, vertices_num, faces_num
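# Comment-only illustration (not part of the original script) of the two OFF header layouts
# handled above. Some ModelNet files omit the newline after "OFF" and put the counts on the
# same line:
#
#   OFF                    OFF8 6 12
#   8 6 12                 <vertex lines...>
#   <vertex lines...>
#
# The left layout returns (skip=2, vertices_num=8, faces_num=6); the right layout returns
# (skip=1, vertices_num=8, faces_num=6).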
def get_vertices_faces_from_off_file(path):
skip, vertices_num, faces_num = get_skip_vertice_face_num(path)
vertices = np.genfromtxt(path, delimiter= ' ', skip_header= skip, skip_footer= faces_num)
faces_data = np.genfromtxt(path, dtype= int, delimiter= ' ', skip_header= skip + vertices_num)
faces = faces_data[:, 1:]
return vertices, faces
def get_faces_area(v0, v1, v2):
# v0, v1, v2 are list of vectors [x, y, z] => shape: [length, 3]
return (0.5) * np.linalg.norm(np.cross((v1 - v0), (v2 - v0)), axis= 1)
def mesh2pointcloud(vertices, faces, points_num= 2048, normalize= False):
v0 = vertices[faces[:, 0]]
v1 = vertices[faces[:, 1]]
v2 = vertices[faces[:, 2]]
faces_area = get_faces_area(v0, v1, v2)
faces_prob = faces_area / faces_area.sum()
face_num = faces.shape[0]
faces_sample_id = np.random.choice(face_num, size= points_num, p= faces_prob) # (points_num, )
faces_sample = faces[faces_sample_id] # (points_num, 3)
# set barycentric coordinates u, v, w => shape: (points_num, )
u = np.random.rand(points_num, 1)
v = np.random.rand(points_num, 1)
exceed_one = (u + v) > 1
u[exceed_one] = 1 - u[exceed_one]
v[exceed_one] = 1 - v[exceed_one]
w = 1 - (u + v)
# sampling
v0_sample = vertices[faces_sample[:, 0]]
v1_sample = vertices[faces_sample[:, 1]]
v2_sample = vertices[faces_sample[:, 2]]
pointcloud = (v0_sample * u) + (v1_sample * v) + (v2_sample * w)
if normalize:
center = pointcloud.mean(axis= 0)
pointcloud -= center
distance = np.linalg.norm(pointcloud, axis= 1)
pointcloud /= distance.max()
pointcloud = pointcloud.astype(np.float32)
return pointcloud
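def _demo_sample_unit_triangle(points_num=16):
    """
    Small illustrative helper (not part of the original script): samples points from a
    single unit right triangle to show the area-weighted barycentric scheme used in
    mesh2pointcloud above.
    """
    vertices = np.array([[0.0, 0.0, 0.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]])
    faces = np.array([[0, 1, 2]])
    # every sampled point lies inside the triangle, i.e. x, y >= 0 and x + y <= 1
    return mesh2pointcloud(vertices, faces, points_num=points_num)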
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('-source', help= "path to ModelNet dataset (e.g. ModelNet40/)", default= None)
    parser.add_argument('-target', help= "path to folder of output points (e.g. ModelNet40_1024_points/)", default= None)
parser.add_argument('-point_num', type= int, default= 1024, help= "How many points are sampled from each mesh object")
parser.add_argument('-normal', dest= 'normal', action= 'store_true', help= "Normalize point clouds while sampling")
parser.set_defaults(normal= False)
args = parser.parse_args()
source_dir = args.source
categories_all = [name for name in os.listdir(source_dir) if name not in ['.DS_Store', 'README.txt']]
target_dir = args.target
os.mkdir(target_dir)
for category in categories_all:
os.mkdir(os.path.join(target_dir, category))
for mode in ['train', 'test']:
source_folder = os.path.join(source_dir, category, mode)
target_folder = os.path.join(target_dir, category, mode)
os.mkdir(target_folder)
mesh_names = [os.path.join(source_folder, name) for name in os.listdir(source_folder) if name != '.DS_Store']
for name in mesh_names:
vertices, faces = get_vertices_faces_from_off_file(name)
pointcloud = mesh2pointcloud(vertices, faces, args.point_num, normalize= args.normal)
# save model
model = pd.DataFrame()
model['x'] = pointcloud[:, 0]
model['y'] = pointcloud[:, 1]
model['z'] = pointcloud[:, 2]
name = name.split('/')[-1]
target_name = os.path.join(target_folder, name[:-4] + '.ply')
write_ply(target_name, points= model)
print('finished category: {}'.format(category))
print('Finish generating dataset: {}'.format(target_dir))
if __name__ == '__main__':
main()
| 0
| 0
| 0
| 0
| 0
| 4,120
| 0
| -21
| 247
|
c762f4be6db752ec413c9782ce94a363b079178f
| 3,915
|
py
|
Python
|
xquotient/test/test_signup.py
|
twisted/quotient
|
95f2515219da99a77905852bc01deeb27e93466e
|
[
"MIT"
] | 6
|
2016-02-15T07:33:39.000Z
|
2018-12-03T17:20:58.000Z
|
xquotient/test/test_signup.py
|
DalavanCloud/quotient
|
95f2515219da99a77905852bc01deeb27e93466e
|
[
"MIT"
] | 1
|
2021-02-18T20:01:02.000Z
|
2021-02-18T20:01:02.000Z
|
xquotient/test/test_signup.py
|
DalavanCloud/quotient
|
95f2515219da99a77905852bc01deeb27e93466e
|
[
"MIT"
] | 4
|
2015-11-15T17:28:20.000Z
|
2018-12-03T17:20:48.000Z
|
"""
Test installation of the Quotient offering, as well as testing
signup with different combinations of selected benefactor factories
"""
| 32.625
| 92
| 0.662835
|
"""
Test installation of the Quotient offering, as well as testing
signup with different combinations of selected benefactor factories
"""
from time import time
from twisted.trial.unittest import TestCase
from twisted.python.reflect import qual
from axiom.scripts import axiomatic
from axiom.store import Store
from axiom import userbase
from axiom.test.util import getPristineStore
from xmantissa import offering, signup
from xmantissa.plugins.free_signup import freeTicket
from xmantissa.product import Product
from xquotient import exmess
from xquotient.compose import Composer
from xquotient.inbox import Inbox
def createStore(testCase):
dbpath = testCase.mktemp()
axiomatic.main(['-d', dbpath, 'mantissa', '--admin-password', 'password'])
store = Store(dbpath)
_userbase = store.findUnique(userbase.LoginSystem)
adminAccount = _userbase.accountByAddress(u'admin', u'localhost')
adminStore = adminAccount.avatars.open()
conf = adminStore.findUnique(offering.OfferingConfiguration)
conf.installOffering(getQuotientOffering(), None)
return store
def getQuotientOffering():
for off in offering.getOfferings():
if off.name == 'Quotient':
return off
def getFactories(*names):
factories = []
for factory in getQuotientOffering().benefactorFactories:
name = factory.benefactorClass.__name__.lower()
if name.endswith('benefactor'):
name = name[:-len('benefactor')]
if name in names:
factories.append(factory)
return factories
class InstallationTestCase(TestCase):
"""
Tests to ensure we can at least get as far as installing the
application and signing up. We don't really care whether the
right stuff was installed.
"""
def setUp(self):
self.store = getPristineStore(self, createStore)
self.loginSystem = self.store.findUnique(userbase.LoginSystem)
adminAvatar = self.loginSystem.accountByAddress(u'admin', u'localhost')
adminStore = adminAvatar.avatars.open()
self.signupConfig = adminStore.findUnique(signup.SignupConfiguration)
def createSignupAndSignup(self, powerups):
"""
Signup via a newly-created signup, using a unique email address.
@return: substore, which will be endowed with C{product}
"""
product = Product(store=self.store, types=[qual(p) for (name, desc, p) in powerups])
qsignup = self.signupConfig.createSignup(
u'admin@localhost',
freeTicket.itemClass,
{'prefixURL': u'signup'},
product,
u'', u'')
booth = qsignup.booth
localpart = unicode(str(time()), 'ascii')
ticket = booth.createTicket(
booth, localpart + '@localhost', product)
ticket.claim()
return self.loginSystem.accountByAddress(
localpart, u'localhost').avatars.open()
def testBasic(self):
"""
Test signup with the top-most Quotient powerup
"""
self.createSignupAndSignup([(None, None, Inbox)])
def testCompose(self):
"""
Test signup with the compose benefactor (which
depends on the top-most Quotient benefactor)
"""
self.createSignupAndSignup([(None, None, Composer)])
def testAll(self):
"""
Test signup with all benefactors
"""
self.createSignupAndSignup(
getQuotientOffering().installablePowerups)
def testDefaultMessageDisplayPrefs(self):
"""
On signup, users' preferred message format should be HTML.
"""
ss = self.createSignupAndSignup(
getQuotientOffering().installablePowerups)
self.assertEqual(ss.findUnique(
exmess.MessageDisplayPreferenceCollection).preferredFormat, u"text/html")
| 0
| 0
| 0
| 2,339
| 0
| 864
| 0
| 190
| 383
|
b26bad7359609958551c3b742dcd0172c900dc22
| 138,774
|
py
|
Python
|
blackfynn/models.py
|
Blackfynn/blackfynn-python
|
ab982e63f2cfe68d41ae269a59da629fec90bf68
|
[
"Apache-2.0"
] | 6
|
2018-01-05T16:38:11.000Z
|
2020-06-03T00:28:04.000Z
|
blackfynn/models.py
|
Blackfynn/blackfynn-python
|
ab982e63f2cfe68d41ae269a59da629fec90bf68
|
[
"Apache-2.0"
] | 119
|
2018-03-07T18:32:58.000Z
|
2021-02-03T16:10:55.000Z
|
blackfynn/models.py
|
Blackfynn/blackfynn-python
|
ab982e63f2cfe68d41ae269a59da629fec90bf68
|
[
"Apache-2.0"
] | 6
|
2018-01-19T17:09:31.000Z
|
2021-03-10T21:46:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from future.utils import PY2, string_types
import datetime
try: # Python 3
from inspect import getfullargspec
except ImportError: # Python 2
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Helpers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_package_class(data):
"""
Determines package type and returns appropriate class.
"""
content = data.get("content", data)
if "packageType" not in content:
p = Dataset
else:
ptype = content["packageType"].lower()
if ptype == "collection":
p = Collection
elif ptype == "timeseries":
p = TimeSeries
elif ptype == "dataset":
p = Dataset
else:
p = DataPackage
return p
def _flatten_file_args(files):
"""
Flatten file arguments so that upload methods can be called either as
dataset.upload(file1, file2)
or as
dataset.upload([file1, file2])
"""
if len(files) == 1 and not isinstance(files[0], string_types):
# single argument - is it iterable and not a string?
try:
files = list(files[0])
except Exception:
pass
return files
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Basics
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Files
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time series
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Organizations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Datasets
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collections
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PublishInfo
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# UserStubDTO
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# DatasetStatusStub
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# StatusLogEntry
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# StatusLogResponse
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collaborators
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Models & Relationships
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Helpers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python 2
if PY2:
python_to_blackfynn_type_map = {
str: "string",
unicode: "string",
int: "long",
long: "long",
float: "double",
bool: "boolean",
datetime.date: "date",
datetime.datetime: "date",
}
blackfynn_to_python_type_map = {
"string": unicode,
"long": int,
"double": float,
"boolean": bool,
"date": datetime.datetime,
}
# Python 3
else:
python_to_blackfynn_type_map = {
str: "string",
int: "long",
float: "double",
bool: "boolean",
datetime.date: "date",
datetime.datetime: "date",
}
blackfynn_to_python_type_map = {
"string": str,
"long": int,
"double": float,
"boolean": bool,
"date": datetime.datetime,
}
valid_python_types = tuple(python_to_blackfynn_type_map.keys())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Models
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Query Result
#
# Returned per "row" result of a query
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Relationships
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Proxies
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model/Relation Instance Sets
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| 30.14205
| 133
| 0.557893
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from builtins import object, zip
from future.utils import PY2, as_native_str, string_types
import datetime
import io
import os
import re
import sys
from uuid import uuid4
import dateutil
import pytz
import requests
from dateutil.parser import parse
from blackfynn import log
from blackfynn.extensions import numpy as np
from blackfynn.extensions import pandas as pd
from blackfynn.extensions import require_extension
from blackfynn.utils import get_data_type, infer_epoch, usecs_to_datetime, value_as_type
try: # Python 3
from inspect import getfullargspec
except ImportError: # Python 2
from inspect import getargspec as getfullargspec
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Helpers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def get_package_class(data):
"""
Determines package type and returns appropriate class.
"""
content = data.get("content", data)
if "packageType" not in content:
p = Dataset
else:
ptype = content["packageType"].lower()
if ptype == "collection":
p = Collection
elif ptype == "timeseries":
p = TimeSeries
elif ptype == "dataset":
p = Dataset
else:
p = DataPackage
return p
def _update_self(self, updated):
if self.id != updated.id:
raise Exception("cannot update {} with {}".format(self, updated))
self.__dict__.update(updated.__dict__)
return self
def _flatten_file_args(files):
"""
Flatten file arguments so that upload methods can be called either as
dataset.upload(file1, file2)
or as
dataset.upload([file1, file2])
"""
if len(files) == 1 and not isinstance(files[0], string_types):
# single argument - is it iterable and not a string?
try:
files = list(files[0])
except Exception:
pass
return files
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Basics
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Property(object):
"""
Property of a blackfynn object.
Args:
key (str): the key of the property
value (str,number): the value of the property
fixed (bool): if true, the value cannot be changed after the property is created
hidden (bool): if true, the value is hidden on the platform
category (str): the category of the property, default: "Blackfynn"
        data_type (str): one of 'string', 'integer', 'double', 'date', 'user', 'boolean'
"""
_data_types = ["string", "integer", "double", "date", "user", "boolean"]
def __init__(
self,
key,
value,
fixed=False,
hidden=False,
category="Blackfynn",
data_type=None,
):
self.key = key
self.fixed = fixed
self.hidden = hidden
self.category = category
if data_type is None or (data_type.lower() not in self._data_types):
dt, v = get_data_type(value)
self.data_type = dt
self.value = v
else:
self.data_type = data_type
self.value = value_as_type(value, data_type.lower())
def as_dict(self):
"""
Representation of instance as dictionary, used when calling API.
"""
return {
"key": self.key,
"value": str(self.value), # value needs to be string :-(
"dataType": self.data_type,
"fixed": self.fixed,
"hidden": self.hidden,
"category": self.category,
}
@classmethod
def from_dict(cls, data, category="Blackfynn"):
"""
Create an instance from dictionary, used when handling API response.
"""
return cls(
key=data["key"],
value=data["value"],
category=category,
fixed=data["fixed"],
hidden=data["hidden"],
data_type=data["dataType"],
)
def __str__(self):
return self.__repr__()
@as_native_str()
def __repr__(self):
return u"<Property key='{}' value='{}' type='{}' category='{}'>".format(
self.key, self.value, self.data_type, self.category
)
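# Illustrative sketch (comment only, not part of the original module): building a Property
# and round-tripping it through the dict form used by the API. The key and value are
# arbitrary examples.
#
#   prop = Property(key="quality", value=85.0)   # data_type is inferred from the value
#   payload = prop.as_dict()                     # dict with key/value/dataType/fixed/hidden/category
#   same = Property.from_dict(payload)           # rebuilds an equivalent Property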
def _get_all_class_args(cls):
# possible class arguments
if cls == object:
return set()
class_args = set()
for base in cls.__bases__:
# get all base class argument variables
class_args.update(_get_all_class_args(base))
# get args from this class
spec = getfullargspec(cls.__init__)
class_args.update(spec[0]) # arguments
if spec[1] is not None:
class_args.add(spec[1]) # variable arguments
if spec[2] is not None:
class_args.add(spec[2]) # variable keyword arguments
return class_args
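# Quick illustration (comment only, not part of the original module) of what
# _get_all_class_args collects: every __init__ argument name declared anywhere in the
# class hierarchy, including the names of *args/**kwargs parameters.
#
#   class A(object):
#       def __init__(self, id=None): ...
#   class B(A):
#       def __init__(self, name, **extra): ...
#
#   _get_all_class_args(B)  # -> {"self", "id", "name", "extra"}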
class BaseNode(object):
"""
Base class to serve all objects
"""
_api = None
_object_key = "content"
def __init__(self, id=None, int_id=None, *args, **kargs):
self.id = id
self.int_id = int_id
@classmethod
def from_dict(cls, data, api=None, object_key=None):
# which object_key are we going to use?
if object_key is not None:
obj_key = object_key
else:
obj_key = cls._object_key
# validate obj_key
if obj_key == "" or obj_key is None:
content = data
else:
content = data[obj_key]
class_args = _get_all_class_args(cls)
# find overlapping keys
kwargs = {}
thing_id = content.pop("id", None)
thing_int_id = content.pop("intId", None)
for k, v in content.items():
# check lower case var names
k_lower = k.lower()
# check camelCase --> camel_case
k_camel = re.sub(r"[A-Z]", lambda x: "_" + x.group(0).lower(), k)
# check s3case --> s3_case
k_camel_num = re.sub(r"[0-9]", lambda x: x.group(0) + "_", k)
# match with existing args
if k_lower in class_args:
key = k_lower
elif k_camel in class_args:
key = k_camel
elif k_camel_num in class_args:
key = k_camel_num
else:
key = k
# assign
kwargs[key] = v
# init class with args
item = cls.__new__(cls)
cls.__init__(item, **kwargs)
if thing_id is not None:
item.id = thing_id
if thing_int_id is not None:
item.int_id = thing_int_id
if api is not None:
item._api = api
item._api.core.set_local(item)
return item
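    # Comment-only illustration (not in the original source) of the key matching above:
    # an API payload key such as "createdAt" is also tried as "createdat" (lower case)
    # and "created_at" (camelCase -> snake_case), and "s3key" is tried as "s3_key"
    # (digit split), so payload keys can line up with the __init__ argument names
    # discovered by _get_all_class_args.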
def __eq__(self, item):
if not isinstance(item, BaseNode):
return False
elif self.exists and item.exists:
return self.id == item.id
else:
return self is item
@property
def exists(self):
"""
Whether or not the instance of this object exists on the platform.
"""
return self.id is not None
def _check_exists(self):
if not self.exists:
raise Exception(
"Object must be created on the platform before method is called."
)
def __str__(self):
return self.__repr__()
class BaseDataNode(BaseNode):
"""
Base class to serve all "data" node-types on platform, e.g. Packages and Collections.
"""
_type_name = "packageType"
def __init__(
self,
name,
type,
parent=None,
owner_id=None,
dataset_id=None,
id=None,
provenance_id=None,
**kwargs
):
super(BaseDataNode, self).__init__(id=id)
self.name = name
self._properties = {}
if isinstance(parent, string_types) or parent is None:
self.parent = parent
elif isinstance(parent, Collection):
self.parent = parent.id
else:
raise Exception("Invalid parent {}".format(parent))
self.type = type
self.dataset = dataset_id
self.owner_id = owner_id
self.provenance_id = provenance_id
self.state = kwargs.pop("state", None)
self.created_at = kwargs.pop("createdAt", None)
self.updated_at = kwargs.pop("updatedAt", None)
def update_properties(self):
self._api.data.update_properties(self)
def _set_properties(self, *entries):
# Note: Property is stored as dict of key:properties-entry to enable
# over-write of properties values based on key
for entry in entries:
assert type(entry) is Property, "Properties wrong type"
if entry.category not in self._properties:
self._properties[entry.category] = {}
self._properties[entry.category].update({entry.key: entry})
def add_properties(self, *entries):
"""
Add properties to object.
Args:
entries (list): list of Property objects to add to this object
"""
self._set_properties(*entries)
# update on platform
if self.exists:
self.update_properties()
def insert_property(
self,
key,
value,
fixed=False,
hidden=False,
category="Blackfynn",
data_type=None,
):
"""
Add property to object using simplified interface.
Args:
key (str): the key of the property
value (str,number): the value of the property
fixed (bool): if true, the value cannot be changed after the property is created
hidden (bool): if true, the value is hidden on the platform
category (str): the category of the property, default: "Blackfynn"
            data_type (str): one of 'string', 'integer', 'double', 'date', 'user', 'boolean'
Note:
            This method is being deprecated in favor of the ``set_property()`` method (see below).
"""
return self.set_property(
key=key,
value=value,
fixed=fixed,
hidden=hidden,
category=category,
data_type=data_type,
)
def set_property(
self,
key,
value,
fixed=False,
hidden=False,
category="Blackfynn",
data_type=None,
):
"""
Add property to object using simplified interface.
Args:
key (str): the key of the property
value (str,number): the value of the property
fixed (bool): if true, the value cannot be changed after the property is created
hidden (bool): if true, the value is hidden on the platform
category (str): the category of the property, default: "Blackfynn"
            data_type (str): one of 'string', 'integer', 'double', 'date', 'user', 'boolean'
"""
self._set_properties(
Property(
key=key,
value=value,
fixed=fixed,
hidden=hidden,
category=category,
data_type=data_type,
)
)
# update on platform, if possible
if self.exists:
self.update_properties()
@property
def properties(self):
"""
Returns a list of properties attached to object.
"""
props = []
for category in self._properties.values():
props.extend(category.values())
return props
def get_property(self, key, category="Blackfynn"):
"""
Returns a single property for the provided key, if available
Args:
key (str): key of the desired property
category (str, optional): category of property
Returns:
object of type ``Property``
Example::
pkg.set_property('quality', 85.0)
pkg.get_property('quality')
"""
return self._properties[category].get(key, None)
def remove_property(self, key, category="Blackfynn"):
"""
Removes property of key ``key`` and category ``category`` from the object.
Args:
key (str): key of property to remove
category (str, optional): category of property to remove
"""
if key in self._properties[category]:
# remove by setting blank
self._properties[category][key].value = ""
# update remotely
self.update_properties()
# get rid of it locally
self._properties[category].pop(key)
def update(self, **kwargs):
"""
Updates object on the platform (with any local changes) and syncs
local instance with API response object.
        Example::
pkg = bf.get('N:package:1234-1234-1234-1234')
pkg.name = "New name"
pkg.update()
"""
self._check_exists()
r = self._api.core.update(self, **kwargs)
_update_self(self, r)
def delete(self):
"""
Delete object from platform.
"""
self._check_exists()
r = self._api.core.delete(self)
self.id = None
def as_dict(self):
d = {
"name": self.name,
self._type_name: self.type,
"properties": [m.as_dict() for m in self.properties],
}
for k in ["parent", "state", "dataset"]:
kval = self.__dict__.get(k, None)
if hasattr(self, k) and kval is not None:
d[k] = kval
if self.provenance_id is not None:
d["provenanceId"] = self.provenance_id
return d
@classmethod
def from_dict(cls, data, *args, **kwargs):
item = super(BaseDataNode, cls).from_dict(data, *args, **kwargs)
try:
item.state = data["content"]["state"]
except:
pass
item.owner_id = (
data.get("owner")
or data.get(
"ownerId",
)
or data.get("content", {}).get("ownerId") # For packages
)
# parse, store parent (ID only)
parent = data.get("parent", None)
if parent is not None:
if isinstance(parent, string_types):
item.parent = parent
else:
pkg_cls = get_package_class(parent)
p = pkg_cls.from_dict(parent, *args, **kwargs)
item.parent = p.id
def cls_add_property(prop):
cat = prop.category
if cat not in item._properties:
item._properties[cat] = {}
item._properties[cat].update({prop.key: prop})
# parse properties
if "properties" in data:
for entry in data["properties"]:
if "properties" not in entry:
# flat list of properties: [entry]
prop = Property.from_dict(entry, category=entry["category"])
cls_add_property(prop)
else:
# nested properties list [ {category,entry} ]
category = entry["category"]
for prop_entry in entry["properties"]:
prop = Property.from_dict(prop_entry, category=category)
cls_add_property(prop)
return item
class BaseCollection(BaseDataNode):
"""
Base class used for both ``Dataset`` and ``Collection``.
"""
def __init__(self, name, package_type, **kwargs):
self.storage = kwargs.pop("storage", None)
super(BaseCollection, self).__init__(name, package_type, **kwargs)
# items is None until an API response provides the item objects
# to be parsed, which then updates this instance.
self._items = None
def add(self, *items):
"""
Add items to the Collection/Dataset.
"""
self._check_exists()
for item in items:
# initialize if need be
if self._items is None:
self._items = []
if isinstance(self, Dataset):
item.parent = None
item.dataset = self.id
elif hasattr(self, "dataset"):
item.parent = self.id
item.dataset = self.dataset
# create, if not already created
new_item = self._api.core.create(item)
item.__dict__.update(new_item.__dict__)
# add item
self._items.append(item)
def remove(self, *items):
"""
Removes items, where items can be an object or the object's ID (string).
"""
self._check_exists()
for item in items:
if item not in self._items:
raise Exception("Cannot remove item, not in collection:{}".format(item))
self._api.data.delete(*items)
# remove locally
for item in items:
self._items.remove(item)
@property
def items(self):
"""
Get all items inside Dataset/Collection (i.e. non-nested items).
Note:
            You can also iterate over items inside a Dataset/Collection without using ``.items``::
for item in my_dataset:
print("item name = ", item.name)
"""
self._check_exists()
if self._items is None:
new_self = self._get_method(self)
new_items = new_self._items
self._items = new_items if new_items is not None else []
return self._items
@property
def _get_method(self):
pass
def print_tree(self, indent=0):
"""
Prints a tree of **all** items inside object.
"""
self._check_exists()
print(u"{}{}".format(" " * indent, self))
for item in self.items:
if isinstance(item, BaseCollection):
item.print_tree(indent=indent + 2)
else:
print(u"{}{}".format(" " * (indent + 2), item))
def get_items_by_name(self, name):
"""
Get an item inside of object by name (if match is found).
Args:
name (str): the name of the item
Returns:
list of matches
Note:
This only works for **first-level** items, meaning it must exist directly inside the current object;
nested items will not be returned.
"""
self._check_exists()
# note: non-hierarchical
return [x for x in self.items if x.name == name]
def get_items_names(self):
self._check_exists()
return [x.name for x in self.items]
def upload(self, *files, **kwargs):
"""
Upload files into current object.
Args:
files: list of local files to upload. If the Blackfynn CLI Agent is
installed you can also upload a directory. See :ref:`agent` for
more information.
Keyword Args:
display_progress (boolean): If ``True``, a progress bar will be
shown to track upload progress. Defaults to ``False``.
use_agent (boolean): If ``True``, and a compatible version of the
Agent is installed, uploads will be performed by the
Blackfynn CLI Agent. This allows large file upload in excess
of 1 hour. Defaults to ``True``.
recursive (boolean): If ``True``, the nested folder structure of
the uploaded directory will be preversed. This can only be used
with the Blackfynn CLI Agent. Defaults to ``False``.
Example::
my_collection.upload('/path/to/file1.nii.gz', '/path/to/file2.pdf')
"""
self._check_exists()
files = _flatten_file_args(files)
return self._api.io.upload_files(self, files, append=False, **kwargs)
def create_collection(self, name):
"""
Create a new collection within the current object. Collections can be created within
datasets and within other collections.
Args:
name (str): The name of the to-be-created collection
Returns:
The created ``Collection`` object.
Example::
            from blackfynn import Blackfynn
bf = Blackfynn()
ds = bf.get_dataset('my_dataset')
# create collection in dataset
col1 = ds.create_collection('my_collection')
# create collection in collection
col2 = col1.create_collection('another_collection')
"""
c = Collection(name)
self.add(c)
return c
# sequence-like method
def __getitem__(self, i):
self._check_exists()
return self.items[i]
# sequence-like method
def __len__(self):
self._check_exists()
return len(self.items)
# sequence-like method
def __delitem__(self, key):
self._check_exists()
self.remove(key)
def __iter__(self):
self._check_exists()
for item in self.items:
yield item
# sequence-like method
def __contains__(self, item):
"""
Tests if item is in the collection, where item can be either
an object's ID (string) or an object's instance.
"""
self._check_exists()
if isinstance(item, string_types):
some_id = self._api.data._get_id(item)
item_ids = [x.id for x in self.items]
contains = some_id in item_ids
elif self._items is None:
return False
else:
return item in self._items
return contains
def as_dict(self):
d = super(BaseCollection, self).as_dict()
if self.owner_id is not None:
d["owner"] = self.owner_id
return d
@classmethod
def from_dict(cls, data, *args, **kwargs):
item = super(BaseCollection, cls).from_dict(data, *args, **kwargs)
item.storage = data.get("storage", None)
children = []
if "children" in data:
for child in data["children"]:
pkg_cls = get_package_class(child)
kwargs["api"] = item._api
pkg = pkg_cls.from_dict(child, *args, **kwargs)
children.append(pkg)
item.add(*children)
return item
@as_native_str()
def __repr__(self):
return u"<BaseCollection name='{}' id='{}'>".format(self.name, self.id)
class DataPackage(BaseDataNode):
"""
DataPackage is the core data object representation on the platform.
Args:
name (str): The name of the data package
package_type (str): The package type, e.g. 'TimeSeries', 'MRI', etc.
Note:
``package_type`` must be a supported package type. See our data type
registry for supported values.
"""
def __init__(self, name, package_type, **kwargs):
self.storage = kwargs.pop("storage", None)
super(DataPackage, self).__init__(name=name, type=package_type, **kwargs)
# local-only attribute
self.session = None
@property
def sources(self):
"""
Returns the sources of a DataPackage. Sources are the raw, unmodified
        files (if they exist) that contain the package's data.
"""
self._check_exists()
return self._api.packages.get_sources(self)
@property
def files(self):
"""
Returns the files of a DataPackage. Files are the possibly modified
source files (e.g. converted to a different format), but they could also
be the source files themselves.
"""
self._check_exists()
return self._api.packages.get_files(self)
@property
def view(self):
"""
Returns the object(s) used to view the package. This is typically a set of
        file objects, which may be the DataPackage's sources or files, but could also be
        a unique object specific to the viewer.
"""
self._check_exists()
return self._api.packages.get_view(self)
def process(self):
"""
        Process a data package that has successfully uploaded its source
files but has not yet been processed by the Blackfynn ETL.
"""
self._check_exists()
return self._api.packages.process(self)
def relate_to(self, *records):
"""
Relate current ``DataPackage`` to one or more ``Record`` objects.
Args:
records (list of Records): Records to relate to data package
Returns:
``Relationship`` that defines the link
Example:
Relate package to a single record::
eeg.relate_to(participant_123)
Relate package to multiple records::
# relate to explicit list of records
eeg.relate_to(
participant_001
participant_002,
participant_003,
)
# relate to all participants
eeg.relate_to(participants.get_all())
Note:
The created relationship will be of the form ``DataPackage`` --(``belongs_to``)--> ``Record``.
"""
self._check_exists()
if isinstance(records, Record):
records = [records]
assert all(
[isinstance(r, Record) for r in records]
), "all records must be object of type Record"
# auto-create relationship type
relationships = self._api.concepts.relationships.get_all(self.dataset)
if "belongs_to" not in relationships:
r = RelationshipType(
dataset_id=self.dataset, name="belongs_to", description="belongs_to"
)
self._api.concepts.relationships.create(self.dataset, r)
return [
self._api.concepts.proxies.create(
self.dataset, self.id, "belongs_to", r, {}
)
for r in records
]
def as_dict(self):
d = super(DataPackage, self).as_dict()
if self.owner_id is not None:
d["owner"] = self.owner_id
return d
@classmethod
def from_dict(cls, data, *args, **kwargs):
data["content"]["id"] = data["content"]["nodeId"]
item = super(DataPackage, cls).from_dict(data, *args, **kwargs)
# parse objects
objects = data.get("objects", None)
if objects is not None:
for otype in ["sources", "files", "view"]:
if otype not in data["objects"]:
continue
odata = data["objects"][otype]
item.__dict__[otype] = [File.from_dict(x) for x in odata]
return item
@classmethod
def from_id(cls, id):
        return cls._api.packages.get(id)
@as_native_str()
def __repr__(self):
return u"<DataPackage name='{}' id='{}'>".format(self.name, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Files
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class File(BaseDataNode):
"""
File node on the Blackfynn platform. Points to some S3 location.
Args:
name (str): Name of the file (without extension)
s3_key (str): S3 key of file
s3_bucket (str): S3 bucket of file
file_type (str): Type of file, e.g. 'MPEG', 'PDF'
size (long): Size of file
Note:
``file_type`` must be a supported file type. See our file type registry
for a list of supported file types.
"""
_type_name = "fileType"
def __init__(self, name, s3_key, s3_bucket, file_type, size, pkg_id=None, **kwargs):
super(File, self).__init__(name, type=file_type, **kwargs)
# data
self.s3_key = s3_key
self.s3_bucket = s3_bucket
self.size = size
self.pkg_id = pkg_id
self.local_path = None
def as_dict(self):
d = super(File, self).as_dict()
d.update({"s3bucket": self.s3_bucket, "s3key": self.s3_key, "size": self.size})
d.pop("parent", None)
props = d.pop("properties")
return {"objectType": "file", "content": d, "properties": props}
@property
def url(self):
"""
The presigned-URL of the file.
"""
self._check_exists()
return self._api.packages.get_presigned_url_for_file(self.pkg_id, self.id)
def download(self, destination):
"""
Download the file.
Args:
destination (str): path for downloading; can be absolute file path,
prefix or destination directory.
"""
if self.type == "DirectoryViewerData":
raise NotImplementedError(
"Downloading S3 directories is currently not supported"
)
if os.path.isdir(destination):
# destination dir
f_local = os.path.join(destination, os.path.basename(self.s3_key))
if "." not in os.path.basename(destination):
# destination dir + prefix
f_local = destination + "_" + os.path.basename(self.s3_key)
else:
# exact location
f_local = destination
r = requests.get(self.url, stream=True)
with io.open(f_local, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
# set local path
self.local_path = f_local
return f_local
@as_native_str()
def __repr__(self):
return (
u"<File name='{}' type='{}' key='{}' bucket='{}' size='{}' id='{}'>".format(
self.name, self.type, self.s3_key, self.s3_bucket, self.size, self.id
)
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time series
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class TimeSeries(DataPackage):
"""
Represents a timeseries package on the platform. TimeSeries packages
contain channels, which contain time-dependent data sampled at some
frequency.
Args:
name: The name of the timeseries package
"""
def __init__(self, name, **kwargs):
kwargs.pop("package_type", None)
super(TimeSeries, self).__init__(name=name, package_type="TimeSeries", **kwargs)
@property
def start(self):
"""
The start time of time series data (over all channels)
"""
self._check_exists()
return sorted([x.start for x in self.channels])[0]
@property
def end(self):
"""
The end time (in usecs) of time series data (over all channels)
"""
self._check_exists()
return sorted([x.end for x in self.channels])[-1]
def limits(self):
"""
Returns time limit tuple (start, end) of package.
"""
channels = self.channels
start = sorted([x.start for x in channels])[0]
end = sorted([x.end for x in channels])[-1]
return start, end
def segments(self, start=None, stop=None, gap_factor=2):
"""
Returns list of contiguous data segments available for package. Segments are
        assessed for all channels, and the union of segments is returned.
Args:
start (int, datetime, optional):
Return segments starting after this time
(default earliest start of any channel)
stop (int, datetime, optional):
Return segments starting before this time
(default latest end time of any channel)
gap_factor (int, optional):
Gaps are computed by ``sampling_rate * gap_factor`` (default 2)
Returns:
List of tuples, where each tuple represents the (start, stop) of contiguous data.
"""
# flattenened list of segments across all channels
channel_segments = [
segment
for channel in self.channels
for segment in channel.segments(
start=start, stop=stop, gap_factor=gap_factor
)
]
# union all segments
union_segments = []
for begin, end in sorted(channel_segments):
if union_segments and union_segments[-1][1] >= begin - 1:
new_segment = (union_segments[-1][0], max(union_segments[-1][1], end))
union_segments.pop()
union_segments.append(new_segment)
else:
union_segments.append((begin, end))
return union_segments
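        # Worked example (comment only, not in the original source) of the union step above:
        # per-channel segments [(0, 10), (8, 20)] and [(15, 30), (40, 50)] sort to
        # [(0, 10), (8, 20), (15, 30), (40, 50)] and merge into [(0, 30), (40, 50)],
        # because each segment's start is compared against the previous merged end
        # (with a 1-usec tolerance).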
@property
def channels(self):
"""
Returns list of Channel objects associated with package.
Note:
This is a dynamically generated property, so every call will make an API request.
Suggested usage::
channels = ts.channels
for ch in channels:
print(ch)
            This will be much slower, as the API request is being made each time::
for ch in ts.channels:
print(ch)
"""
self._check_exists()
# always dynamically return channel list
return self._api.timeseries.get_channels(self)
def get_channel(self, channel):
"""
Get channel by ID.
Args:
channel (str): ID of channel
"""
self._check_exists()
return self._api.timeseries.get_channel(self, channel)
def add_channels(self, *channels):
"""
Add channels to TimeSeries package.
Args:
channels: list of Channel objects.
"""
self._check_exists()
for channel in channels:
ch = self._api.timeseries.create_channel(self, channel)
channel.__dict__.update(ch.__dict__)
def remove_channels(self, *channels):
"""
Remove channels from TimeSeries package.
Args:
channels: list of Channel objects or IDs
"""
self._check_exists()
for channel in channels:
if isinstance(channel, TimeSeriesChannel):
self._api.timeseries.delete_channel(channel)
channel.id = None
channel._pkg = None
else:
self._api.timeseries.delete_channel_by_id(self.id, channel)
# ~~~~~~~~~~~~~~~~~~
# Data
# ~~~~~~~~~~~~~~~~~~
def get_data(
self, start=None, end=None, length=None, channels=None, use_cache=True
):
"""
Get timeseries data between ``start`` and ``end`` or ``start`` and ``start + length``
on specified channels (default all channels).
Args:
start (optional): start time of data (usecs or datetime object)
end (optional): end time of data (usecs or datetime object)
length (optional): length of data to retrieve, e.g. '1s', '5s', '10m', '1h'
channels (optional): list of channel objects or IDs, default all channels.
Note:
Data requests will be automatically chunked and combined into a single Pandas
DataFrame. However, you must be sure you request only a span of data that
will properly fit in memory.
See ``get_data_iter`` for an iterator approach to timeseries data retrieval.
Example:
Get 5 seconds of data from start over all channels::
data = ts.get_data(length='5s')
            Get data between 12345 and 56789 (representing usecs since Epoch)::
data = ts.get_data(start=12345, end=56789)
Get first 10 seconds for the first two channels::
data = ts.get_data(length='10s', channels=ts.channels[:2])
"""
self._check_exists()
return self._api.timeseries.get_ts_data(
self,
start=start,
end=end,
length=length,
channels=channels,
use_cache=use_cache,
)
def get_data_iter(
self,
channels=None,
start=None,
end=None,
length=None,
chunk_size=None,
use_cache=True,
):
"""
Returns iterator over the data. Must specify **either ``end`` OR ``length``**, not both.
Args:
channels (optional): channels to retrieve data for (default: all)
start: start time of data (default: earliest time available).
            end: end time of data (default: latest time available).
length: some time length, e.g. '1s', '5m', '1h' or number of usecs
            chunk_size: some time length, e.g. '1s', '5m', '1h' or number of usecs
Returns:
iterator of Pandas Series, each the size of ``chunk_size``.
"""
self._check_exists()
return self._api.timeseries.get_ts_data_iter(
self,
channels=channels,
start=start,
end=end,
length=length,
chunk_size=chunk_size,
use_cache=use_cache,
)
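    # Hedged usage sketch (illustrative only; `ts` is an existing TimeSeries
    # package and `process` is a hypothetical user function):
    #
    #     for chunk in ts.get_data_iter(length='10m', chunk_size='30s'):
    #         process(chunk)
    #
    # Iterating keeps memory bounded, unlike get_data(), which loads the whole
    # requested span into a single DataFrame.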
def write_annotation_file(self, file, layer_names=None):
"""
Writes all layers to a csv .bfannot file
Args:
file : path to .bfannot output file. Appends extension if necessary
layer_names (optional): List of layer names to write
"""
return self._api.timeseries.write_annotation_file(self, file, layer_names)
def append_annotation_file(self, file):
"""
Processes .bfannot file and adds to timeseries package.
Args:
file : path to .bfannot file
"""
self._check_exists()
return self._api.timeseries.process_annotation_file(self, file)
def append_files(self, *files, **kwargs):
"""
Append files to this timeseries package.
Args:
files: list of local files to upload.
Keyword Args:
display_progress (boolean): If ``True``, a progress bar will be
shown to track upload progress. Defaults to ``False``.
use_agent (boolean): If ``True``, and a compatible version of the
Agent is installed, uploads will be performed by the
Blackfynn CLI Agent. This allows large file upload in excess
of 1 hour. Defaults to ``True``.
"""
self._check_exists()
files = _flatten_file_args(files)
return self._api.io.upload_files(self, files, append=True, **kwargs)
def stream_data(self, data):
self._check_exists()
return self._api.timeseries.stream_data(self, data)
# ~~~~~~~~~~~~~~~~~~
# Annotations
# ~~~~~~~~~~~~~~~~~~
@property
def layers(self):
"""
List of annotation layers attached to TimeSeries package.
"""
self._check_exists()
# always dynamically return annotation layers
return self._api.timeseries.get_annotation_layers(self)
def get_layer(self, id_or_name):
"""
Get annotation layer by ID or name.
Args:
id_or_name: layer ID or name
"""
self._check_exists()
layers = self.layers
matches = [x for x in layers if x.id == id_or_name]
if len(matches) == 0:
matches = [x for x in layers if x.name == id_or_name]
if len(matches) == 0:
raise Exception("No layers match criteria.")
if len(matches) > 1:
raise Exception("More than one layer matched criteria")
return matches[0]
def add_layer(self, layer, description=None):
"""
Args:
layer: TimeSeriesAnnotationLayer object or name of annotation layer
description (str, optional): description of layer
"""
self._check_exists()
return self._api.timeseries.create_annotation_layer(
self, layer=layer, description=description
)
def add_annotations(self, layer, annotations):
"""
Args:
layer: either TimeSeriesAnnotationLayer object or name of annotation layer.
                Note that non-existing layers will be created.
annotations: TimeSeriesAnnotation object(s)
Returns:
list of TimeSeriesAnnotation objects
"""
self._check_exists()
cur_layer = self._api.timeseries.create_annotation_layer(
self, layer=layer, description=None
)
return self._api.timeseries.create_annotations(
layer=cur_layer, annotations=annotations
)
def insert_annotation(
self,
layer,
annotation,
start=None,
end=None,
channel_ids=None,
annotation_description=None,
):
"""
Insert annotations using a more direct interface, without the need for layer/annotation objects.
Args:
layer: str of new/existing layer or annotation layer object
annotation: str of annotation event
start (optional): start of annotation
end (optional): end of annotation
            channel_ids (optional): list of channel IDs to which the annotation applies
annotation_description (optional): description of annotation
Example:
To add annotation on layer "my-events" across all channels::
ts.insert_annotation('my-events', 'my annotation event')
To add annotation to first channel::
ts.insert_annotation('my-events', 'first channel event', channel_ids=ts.channels[0])
"""
self._check_exists()
cur_layer = self._api.timeseries.create_annotation_layer(
self, layer=layer, description=None
)
return self._api.timeseries.create_annotation(
layer=cur_layer,
annotation=annotation,
start=start,
end=end,
channel_ids=channel_ids,
description=annotation_description,
)
def delete_layer(self, layer):
"""
Delete annotation layer.
Args:
layer: annotation layer object
"""
self._check_exists()
return self._api.timeseries.delete_annotation_layer(layer)
def annotation_counts(self, start, end, layers, period, channels=None):
"""
Get annotation counts between ``start`` and ``end``.
Args:
start (datetime or microseconds) : The starting time of the range to query
            end (datetime or microseconds) : The ending time of the range to query
layers ([TimeSeriesLayer]) : List of layers for which to count annotations
period (string) : The length of time to group the counts.
Formatted as a string - e.g. '1s', '5m', '3h'
            channels ([TimeSeriesChannel]) : List of channels (if omitted, all channels will be used)
"""
self._check_exists()
return self._api.timeseries.query_annotation_counts(
ts=self,
layers=layers,
channels=channels,
start=start,
end=end,
period=period,
)
@as_native_str()
def __repr__(self):
return u"<TimeSeries name='{}' id='{}'>".format(self.name, self.id)
class TimeSeriesChannel(BaseDataNode):
"""
TimeSeriesChannel represents a single source of time series data. (e.g. electrode)
Args:
name (str): Name of channel
rate (float): Rate of the channel (Hz)
start (optional): Absolute start time of all data (datetime obj)
end (optional): Absolute end time of all data (datetime obj)
unit (str, optional): Unit of measurement
channel_type (str, optional): One of 'continuous' or 'event'
source_type (str, optional): The source of data, e.g. "EEG"
group (str, optional): The channel group, default: "default"
"""
def __init__(
self,
name,
rate,
start=0,
end=0,
unit="V",
channel_type="continuous",
source_type="unspecified",
group="default",
last_annot=0,
spike_duration=None,
**kwargs
):
self.channel_type = channel_type.upper()
super(TimeSeriesChannel, self).__init__(
name=name, type=self.channel_type, **kwargs
)
self.rate = rate
self.unit = unit
self.last_annot = last_annot
self.group = group
self.start = start
self.end = end
self.spike_duration = spike_duration
self.set_property(
"Source Type",
source_type.upper(),
fixed=True,
hidden=True,
category="Blackfynn",
)
### local-only
# parent package
self._pkg = None
# sample period (in usecs)
self._sample_period = 1.0e6 / self.rate
@property
def start(self):
"""
The start time of channel data (microseconds since Epoch)
"""
return self._start
@start.setter
def start(self, start):
self._start = infer_epoch(start)
@property
def start_datetime(self):
return usecs_to_datetime(self._start)
@property
def end(self):
"""
The end time (in usecs) of channel data (microseconds since Epoch)
"""
return self._end
@end.setter
def end(self, end):
self._end = infer_epoch(end)
@property
def end_datetime(self):
return usecs_to_datetime(self._end)
def _page_delta(self, page_size):
return int((1.0e6 / self.rate) * page_size)
def update(self):
self._check_exists()
r = self._api.timeseries.update_channel(self)
self.__dict__.update(r.__dict__)
def segments(self, start=None, stop=None, gap_factor=2):
"""
Return list of contiguous segments of valid data for channel.
Args:
start (long, datetime, optional):
Return segments starting after this time (default start of channel)
stop (long, datetime, optional):
Return segments starting before this time (default end of channel)
gap_factor (int, optional):
Gaps are computed by ``sampling_period * gap_factor`` (default 2)
Returns:
List of tuples, where each tuple represents the (start, stop) of contiguous data.
"""
start = self.start if start is None else start
stop = self.end if stop is None else stop
return self._api.timeseries.get_segments(
self._pkg, self, start=start, stop=stop, gap_factor=gap_factor
)
@property
def gaps(self):
# TODO: infer gaps from segments
raise NotImplementedError
def update_properties(self):
self._api.timeseries.update_channel_properties(self)
def get_data(self, start=None, end=None, length=None, use_cache=True):
"""
Get channel data between ``start`` and ``end`` or ``start`` and ``start + length``
Args:
start (optional): start time of data (usecs or datetime object)
end (optional): end time of data (usecs or datetime object)
length (optional): length of data to retrieve, e.g. '1s', '5s', '10m', '1h'
use_cache (optional): whether to use locally cached data
Returns:
Pandas Series containing requested data for channel.
Note:
Data requests will be automatically chunked and combined into a single Pandas
Series. However, you must be sure you request only a span of data that
will properly fit in memory.
See ``get_data_iter`` for an iterator approach to timeseries data retrieval.
Example:
Get 5 seconds of data from start over all channels::
data = channel.get_data(length='5s')
            Get data between 12345 and 56789 (representing usecs since Epoch)::
data = channel.get_data(start=12345, end=56789)
"""
return self._api.timeseries.get_ts_data(
ts=self._pkg,
start=start,
end=end,
length=length,
channels=[self],
use_cache=use_cache,
)
def get_data_iter(
self, start=None, end=None, length=None, chunk_size=None, use_cache=True
):
"""
Returns iterator over the data. Must specify **either ``end`` OR ``length``**, not both.
Args:
start (optional): start time of data (default: earliest time available).
            end (optional): end time of data (default: latest time available).
length (optional): some time length, e.g. '1s', '5m', '1h' or number of usecs
chunk_size (optional): some time length, e.g. '1s', '5m', '1h' or number of usecs
use_cache (optional): whether to use locally cached data
Returns:
Iterator of Pandas Series, each the size of ``chunk_size``.
"""
return self._api.timeseries.get_ts_data_iter(
ts=self._pkg,
start=start,
end=end,
length=length,
channels=[self],
chunk_size=chunk_size,
use_cache=use_cache,
)
def as_dict(self):
return {
"name": self.name,
"start": self.start,
"end": self.end,
"unit": self.unit,
"rate": self.rate,
"channelType": self.channel_type,
"lastAnnotation": self.last_annot,
"group": self.group,
"spikeDuration": self.spike_duration,
"properties": [x.as_dict() for x in self.properties],
}
@as_native_str()
def __repr__(self):
return u"<TimeSeriesChannel name='{}' id='{}'>".format(self.name, self.id)
class TimeSeriesAnnotationLayer(BaseNode):
"""
Annotation layer containing one or more annotations. Layers are used
to separate annotations into logically distinct groups when applied
to the same data package.
Args:
name: Name of the layer
time_series_id: The TimeSeries ID which the layer applies
description: Description of the layer
"""
_object_key = None
def __init__(self, name, time_series_id, description=None, **kwargs):
super(TimeSeriesAnnotationLayer, self).__init__(**kwargs)
self.name = name
self.time_series_id = time_series_id
self.description = description
def iter_annotations(self, window_size=10, channels=None):
"""
Iterate over annotations according to some window size (seconds).
Args:
window_size (float): Number of seconds in window
channels: List of channel objects or IDs
Yields:
List of annotations found in current window.
"""
self._check_exists()
ts = self._api.core.get(self.time_series_id)
return self._api.timeseries.iter_annotations(
ts=ts, layer=self, channels=channels, window_size=window_size
)
def add_annotations(self, annotations):
"""
Add annotations to layer.
Args:
annotations (str): List of annotation objects to add.
"""
self._check_exists()
return self._api.timeseries.create_annotations(
layer=self, annotations=annotations
)
def insert_annotation(
self, annotation, start=None, end=None, channel_ids=None, description=None
):
"""
Add annotations; proxy for ``add_annotations``.
Args:
annotation (str): Annotation string
start: Start time (usecs or datetime)
end: End time (usecs or datetime)
channel_ids: list of channel IDs
Returns:
The created annotation object.
"""
self._check_exists()
return self._api.timeseries.create_annotation(
layer=self,
annotation=annotation,
start=start,
end=end,
channel_ids=channel_ids,
description=description,
)
def annotations(self, start=None, end=None, channels=None):
"""
Get annotations between ``start`` and ``end`` over ``channels`` (all channels by default).
Args:
start: Start time
end: End time
channels: List of channel objects or IDs
"""
self._check_exists()
ts = self._api.core.get(self.time_series_id)
return self._api.timeseries.get_annotations(
ts=ts, layer=self, channels=channels, start=start, end=end
)
def annotation_counts(self, start, end, period, channels=None):
"""
The number of annotations between ``start`` and ``end`` over selected
channels (all by default).
Args:
start (datetime or microseconds) : The starting time of the range to query
            end (datetime or microseconds) : The ending time of the range to query
period (string) : The length of time to group the counts.
Formatted as a string - e.g. '1s', '5m', '3h'
            channels ([TimeSeriesChannel]) : List of channels (if omitted, all channels will be used)
"""
self._check_exists()
ts = self._api.core.get(self.time_series_id)
return self._api.timeseries.query_annotation_counts(
ts=ts, layers=[self], channels=channels, start=start, end=end, period=period
)
def delete(self):
"""
Delete annotation layer.
"""
self._check_exists()
return self._api.timeseries.delete_annotation_layer(self)
def as_dict(self):
return {"name": self.name, "description": self.description}
@as_native_str()
def __repr__(self):
return u"<TimeSeriesAnnotationLayer name='{}' id='{}'>".format(
self.name, self.id
)
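# Hedged usage sketch (illustrative only; `ts` is an existing TimeSeries and the
# layer name, annotation label and timestamps below are made up):
#
#     layer = ts.add_layer('seizures', description='clinically reviewed events')
#     layer.insert_annotation('spike', start=1520000000000000, end=1520000001000000)
#     for window in layer.iter_annotations(window_size=30):
#         print(window)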
class TimeSeriesAnnotation(BaseNode):
"""
Annotation is an event on one or more channels in a dataset
Args:
label (str): The label for the annotation
channel_ids: List of channel IDs that annotation applies
start: Start time
end: End time
name: Name of annotation
        layer_id: Layer ID for annotation (all annotations exist on a layer)
time_series_id: TimeSeries package ID
description: Description of annotation
"""
_object_key = None
def __init__(
self,
label,
channel_ids,
start,
end,
name="",
layer_id=None,
time_series_id=None,
description=None,
**kwargs
):
self.user_id = kwargs.pop("userId", None)
super(TimeSeriesAnnotation, self).__init__(**kwargs)
self.name = ""
self.label = label
self.channel_ids = channel_ids
self.start = start
self.end = end
self.description = description
self.layer_id = layer_id
self.time_series_id = time_series_id
def delete(self):
self._check_exists()
return self._api.timeseries.delete_annotation(annot=self)
def as_dict(self):
channel_ids = self.channel_ids
if not isinstance(channel_ids, list):
channel_ids = [channel_ids]
return {
"name": self.name,
"label": self.label,
"channelIds": channel_ids,
"start": self.start,
"end": self.end,
"description": self.description,
"layer_id": self.layer_id,
"time_series_id": self.time_series_id,
}
@as_native_str()
def __repr__(self):
date = datetime.datetime.fromtimestamp(self.start / 1e6)
return u"<TimeSeriesAnnotation label='{}' layer='{}' start='{}'>".format(
self.label, self.layer_id, date.isoformat()
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# User
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class User(BaseNode):
_object_key = ""
def __init__(
self,
email,
first_name,
last_name,
credential="",
photo_url="",
url="",
authy_id=0,
accepted_terms="",
color=None,
is_super_admin=False,
*args,
**kwargs
):
kwargs.pop("preferredOrganization", None)
self.storage = kwargs.pop("storage", None)
super(User, self).__init__(*args, **kwargs)
self.email = email
self.first_name = first_name
self.last_name = last_name
self.credential = credential
self.photo_url = photo_url
self.color = color
self.url = url
self.authy_id = authy_id
self.accepted_terms = ""
self.is_super_admin = is_super_admin
@as_native_str()
def __repr__(self):
return u"<User email='{}' id='{}'>".format(self.email, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Organizations
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Organization(BaseNode):
_object_key = "organization"
def __init__(
self,
name,
encryption_key_id="",
slug=None,
terms=None,
features=None,
subscription_state=None,
*args,
**kwargs
):
self.storage = kwargs.pop("storage", None)
super(Organization, self).__init__(*args, **kwargs)
self.name = name
self.terms = terms
self.features = features or []
self.subscription_state = subscription_state
self.encryption_key_id = encryption_key_id
self.slug = name.lower().replace(" ", "-") if slug is None else slug
@property
def datasets(self):
"""
Return all datasets for user for an organization (current context).
"""
self._check_exists()
return self._api.datasets.get_all()
@property
def members(self):
return self._api.organizations.get_members(self)
@as_native_str()
def __repr__(self):
return u"<Organization name='{}' id='{}'>".format(self.name, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Datasets
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Dataset(BaseCollection):
def __init__(
self,
name,
description=None,
status=None,
tags=None,
automatically_process_packages=False,
**kwargs
):
kwargs.pop("package_type", None)
kwargs.pop("type", None)
super(Dataset, self).__init__(name, "DataSet", **kwargs)
self.description = description or ""
self._status = status
self._tags = tags or []
self.automatically_process_packages = automatically_process_packages
# remove things that do not apply (a bit hacky)
for k in (
"parent",
"type",
"set_ready",
"set_unavailable",
"set_error",
"state",
"dataset",
):
self.__dict__.pop(k, None)
@as_native_str()
def __repr__(self):
return u"<Dataset name='{}' id='{}'>".format(self.name, self.id)
@property
def status(self):
"""Get the current status."""
return self._status
@property
def tags(self):
"""Get the current tags."""
return self._tags
@status.setter
def status(self, value):
raise AttributeError("Dataset.status is read-only.")
@tags.setter
def tags(self, value):
if isinstance(value, list) and all(
isinstance(elem, string_types) for elem in value
):
self._tags = value
else:
raise AttributeError("Dataset.tags should be a list of strings.")
def get_topology(self):
"""Returns the set of Models and Relationships defined for the dataset
Returns:
dict: Keys are either ``models`` or ``relationships``. Values are
the list of objects of that type
"""
return self._api.concepts.get_topology(self)
def get_graph_summary(self):
""" Returns summary metrics about the knowledge graph """
return self._api.concepts.get_summary(self)
def published(self):
return self._api.datasets.published(self.id)
def status_log(self, limit=25, offset=0):
return self._api.datasets.status_log(self.id, limit, offset)
def package_count(self):
return self._api.datasets.package_count(self.id)
def team_collaborators(self):
return self._api.datasets.team_collaborators(self.id)
def user_collaborators(self):
return self._api.datasets.user_collaborators(self.id)
def get_packages_by_filename(self, filename):
return self._api.datasets.get_packages_by_filename(self.id, filename)
def owner(self):
return self._api.datasets.owner(self.id)
def models(self):
"""
Returns:
List of models defined in Dataset
"""
return self._api.concepts.get_all(self.id)
def relationships(self):
"""
Returns:
List of relationships defined in Dataset
"""
return self._api.concepts.relationships.get_all(self.id)
def get_model(self, name_or_id):
"""
Retrieve a ``Model`` by name or id
Args:
name_or_id (str or int): name or id of the model
Returns:
The requested ``Model`` in Dataset
Example::
mouse = ds.get_model('mouse')
"""
return self._api.concepts.get(self.id, name_or_id)
def get_relationship(self, name_or_id):
"""
Retrieve a ``RelationshipType`` by name or id
Args:
name_or_id (str or int): name or id of the relationship
Returns:
The requested ``RelationshipType``
Example::
belongsTo = ds.get_relationship('belongs-to')
"""
return self._api.concepts.relationships.get(self.id, name_or_id)
def get_connected_models(self, name_or_id):
"""Retrieve all models connected to the given model
Connected is defined as model that can be reached by following
outgoing relationships starting at the current model
Args:
name_or_id: Name or id of the model
Return:
List of ``Model`` objects
Example::
connected_models = ds.get_related_models('patient')
"""
return self._api.concepts.get_connected(self.id, name_or_id)
def create_model(
self, name, display_name=None, description=None, schema=None, **kwargs
):
"""
Defines a ``Model`` on the platform.
Args:
name (str): Name of the model
description (str, optional): Description of the model
schema (list, optional): Definition of the model's schema as list of ModelProperty objects.
Returns:
The newly created ``Model``
Note:
It is required that a model includes at least _one_ property that serves as the "title".
Example:
Create a participant model, including schema::
from blackfynn import ModelProperty
ds.create_model('participant',
description = 'a human participant in a research study',
schema = [
ModelProperty('name', data_type=str, title=True),
ModelProperty('age', data_type=int)
]
)
Or define schema using dictionary::
ds.create_model('participant',
schema = [
{
'name': 'full_name',
'data_type': str,
'title': True
},
{
'name': 'age',
'data_type': int,
}
])
You can also create a model and define schema later::
# create model
pt = ds.create_model('participant')
# define schema
pt.add_property('name', str, title=True)
pt.add_property('age', int)
"""
c = Model(
dataset_id=self.id,
name=name,
display_name=display_name if display_name else name,
description=description,
schema=schema,
**kwargs
)
return self._api.concepts.create(self.id, c)
def create_relationship_type(
self, name, description, schema=None, source=None, destination=None, **kwargs
):
"""
Defines a ``RelationshipType`` on the platform.
Args:
name (str): name of the relationship
description (str): description of the relationship
            schema (dict, optional): definition of the relationship's schema
Returns:
The newly created ``RelationshipType``
Example::
ds.create_relationship_type('belongs-to', 'this belongs to that')
"""
r = RelationshipType(
dataset_id=self.id,
name=name,
description=description,
source=source,
destination=destination,
schema=schema,
**kwargs
)
return self._api.concepts.relationships.create(self.id, r)
def import_model(self, template):
"""
Imports a model based on the given template into the dataset
Args:
template (ModelTemplate): the ModelTemplate to import
Returns:
A list of ModelProperty objects that have been imported into the dataset
"""
return self._api.templates.apply(self, template)
@property
def _get_method(self):
return self._api.datasets.get
def as_dict(self):
return dict(
name=self.name,
description=self.description,
automaticallyProcessPackages=self.automatically_process_packages,
properties=[p.as_dict() for p in self.properties],
tags=self.tags,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collections
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Collection(BaseCollection):
def __init__(self, name, **kwargs):
kwargs.pop("package_type", None)
super(Collection, self).__init__(name, package_type="Collection", **kwargs)
@property
def _get_method(self):
return self._api.packages.get
@as_native_str()
def __repr__(self):
return u"<Collection name='{}' id='{}'>".format(self.name, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PublishInfo
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PublishInfo(BaseNode):
def __init__(self, status, doi, dataset_id, version_count, last_published):
self.status = status
self.doi = doi
self.dataset_id = dataset_id
self.version_count = version_count
self.last_published = last_published
@classmethod
def from_dict(cls, data):
return cls(
status=data.get("status"),
doi=data.get("latest_doi"),
dataset_id=data.get("publishedDatasetId"),
version_count=data.get("publishedVersionCount"),
last_published=data.get("lastPublishedDate"),
)
@as_native_str()
def __repr__(self):
return u"<PublishInfo status='{}' dataset_id='{}' version_count='{}' last_published='{}' doi='{}'>".format(
self.status,
self.dataset_id,
self.version_count,
self.last_published,
self.doi,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# UserStubDTO
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class UserStubDTO(BaseNode):
def __init__(self, node_id, first_name, last_name):
self.node_id = node_id
self.first_name = first_name
self.last_name = last_name
@classmethod
def from_dict(cls, data):
return cls(
node_id=data.get("nodeId"),
first_name=data.get("firstName"),
last_name=data.get("lastName"),
)
@as_native_str()
def __repr__(self):
return u"<User node_id='{}' first_name='{}' last_name='{}' >".format(
self.node_id, self.first_name, self.last_name
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# DatasetStatusStub
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class DatasetStatusStub(BaseNode):
def __init__(self, id, name, display_name):
self.id = id
self.name = name
self.display_name = display_name
@classmethod
def from_dict(cls, data):
return cls(
id=data.get("id"),
name=data.get("name"),
display_name=data.get("displayName"),
)
@as_native_str()
def __repr__(self):
return u"<DatasetStatus id='{}' name='{}' display_name='{}'>".format(
self.id, self.name, self.display_name
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# StatusLogEntry
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class StatusLogEntry(BaseNode):
def __init__(self, user, status, updated_at):
self.user = user
self.status = status
self.updated_at = updated_at
@classmethod
def from_dict(cls, data):
return cls(
user=UserStubDTO.from_dict(data.get("user")),
status=DatasetStatusStub.from_dict(data.get("status")),
updated_at=parse(data.get("updatedAt")),
)
@as_native_str()
def __repr__(self):
return u"<StatusLogEntry user='{}' status='{}' updated_at='{}' >".format(
self.user, self.status, self.updated_at
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# StatusLogResponse
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class StatusLogResponse(BaseNode):
def __init__(self, limit, offset, total_count, entries):
self.limit = limit
self.offset = offset
self.total_count = total_count
self.entries = entries
@classmethod
def from_dict(cls, data):
return cls(
limit=data.get("limit"),
offset=data.get("offset"),
total_count=data.get("totalCount"),
entries=[StatusLogEntry.from_dict(e) for e in data.get("entries")],
)
@as_native_str()
def __repr__(self):
return u"<StatusLogResponse limit='{}' offset='{}' total_count='{}' entries='{}' >".format(
self.limit, self.offset, self.total_count, self.entries
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Collaborators
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class UserCollaborator(BaseNode):
def __init__(self, id, first_name, last_name, email, role):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.email = email
self.role = role
@classmethod
def from_dict(cls, data):
return cls(
id=data["id"],
first_name=data["firstName"],
last_name=data["lastName"],
email=data["email"],
role=data["role"],
)
@property
def name(self):
return "{} {}".format(self.first_name, self.last_name)
@as_native_str()
def __repr__(self):
return u"<UserCollaborator name='{}' email='{}' role='{}' id='{}'>".format(
self.name, self.email, self.role, self.id
)
class TeamCollaborator(BaseNode):
def __init__(self, id, name, role):
self.id = id
self.name = name
self.role = role
@classmethod
def from_dict(cls, data):
return cls(id=data["id"], name=data["name"], role=data["role"])
@as_native_str()
def __repr__(self):
return u"<TeamCollaborator name='{}' role='{}' id='{}'>".format(
self.name, self.role, self.id
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Models & Relationships
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Helpers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Python 2
if PY2:
python_to_blackfynn_type_map = {
str: "string",
unicode: "string",
int: "long",
long: "long",
float: "double",
bool: "boolean",
datetime.date: "date",
datetime.datetime: "date",
}
blackfynn_to_python_type_map = {
"string": unicode,
"long": int,
"double": float,
"boolean": bool,
"date": datetime.datetime,
}
# Python 3
else:
python_to_blackfynn_type_map = {
str: "string",
int: "long",
float: "double",
bool: "boolean",
datetime.date: "date",
datetime.datetime: "date",
}
blackfynn_to_python_type_map = {
"string": str,
"long": int,
"double": float,
"boolean": bool,
"date": datetime.datetime,
}
valid_python_types = tuple(python_to_blackfynn_type_map.keys())
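# Illustrative note: the two maps are used in opposite directions, e.g.
# python_to_blackfynn_type_map[float] == 'double' and
# blackfynn_to_python_type_map['double'] is float; anything outside
# valid_python_types is rejected by ModelPropertyType below.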
def target_type_string(target):
if isinstance(target, Model):
return target.type
elif isinstance(target, str):
return target
else:
raise Exception("target must be a string or model")
class ModelPropertyType(object):
"""
Representation of model property types in the platform.
"""
def __init__(self, data_type, format=None, unit=None):
# Is this a supported literal Python type?
if isinstance(data_type, type) and data_type in python_to_blackfynn_type_map:
self.data_type = data_type
# Otherwise this must be a string representation of a Blackfynn type
elif (
isinstance(data_type, string_types)
and data_type.lower() in blackfynn_to_python_type_map
):
self.data_type = blackfynn_to_python_type_map[data_type.lower()]
else:
raise Exception(
"Cannot create ModelPropertyType with data_type={}".format(data_type)
)
self.format = format
self.unit = unit
@property
def _blackfynn_type(self):
return python_to_blackfynn_type_map[self.data_type]
@staticmethod
def _build_from(data):
"""
Construct a ``ModelPropertyType`` from any data source. This is responsible
for dispatching construction to subclasses for special cases such as
enumerated and array types.
"""
if isinstance(data, ModelPropertyType):
return data
elif isinstance(data, dict) and (
data["type"].lower() == "array" or "items" in data
):
return ModelPropertyEnumType.from_dict(data)
return ModelPropertyType.from_dict(data)
@classmethod
def from_dict(cls, data):
if isinstance(data, dict):
return cls(
data_type=data["type"], format=data.get("format"), unit=data.get("unit")
)
# Single string
return cls(data_type=data)
def as_dict(self):
if (self.format is None) and (self.unit is None):
return self._blackfynn_type
return dict(type=self._blackfynn_type, format=self.format, unit=self.unit)
def _decode_value(self, value):
"""
Decode a model value received from the Blackfynn API into the Python
representation mandated by this `ModelPropertyType`.
"""
if value is None:
return None
elif self.data_type == bool:
if isinstance(value, bool):
return value
            elif isinstance(value, string_types):
if value.lower() == "false":
return False
elif value.lower() == "true":
return True
else:
return bool(value)
else:
return bool(value)
elif self.data_type in (datetime.date, datetime.datetime):
if isinstance(value, (datetime.date, datetime.datetime)):
return value
else:
return dateutil.parser.parse(value)
return self.data_type(value)
def _encode_value(self, value):
"""
Encode a Python value into something that can be sent to the Blackfynn API.
"""
if value is None:
return None
elif isinstance(value, (datetime.date, datetime.datetime)):
if value.tzinfo is None or value.tzinfo.utcoffset(value) is None:
value = pytz.utc.localize(value)
v = value.isoformat()
# isoformat() does not include microseconds if microseconds is
# 0, but we always need microseconds in the formatted string
if not value.microsecond:
v = "{}.000000{}".format(v[:-6], v[-6:])
return v
return self.data_type(value)
@as_native_str()
def __repr__(self):
return u"<ModelPropertyType data_type='{}' format='{}' unit='{}'".format(
self.data_type, self.format, self.unit
)
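# Hedged sketch (illustrative only) of the encode/decode round trip above:
#
#     t = ModelPropertyType(datetime.datetime)
#     t._encode_value(datetime.datetime(2020, 1, 1))
#     # -> '2020-01-01T00:00:00.000000+00:00' (naive datetimes are assumed UTC)
#     t._decode_value('2020-01-01T00:00:00+00:00')
#     # -> datetime.datetime(2020, 1, 1, tzinfo=tzutc())
#     ModelPropertyType(bool)._decode_value('false')
#     # -> False (string booleans are handled explicitly)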
class ModelPropertyEnumType(ModelPropertyType):
"""
A special case of a ``ModelPropertyType`` that contains enumerated values
and arrays of values.
This can take one of several forms:
* If ``enum`` is a list of objects, then the values of this property may
only be one of the given values.
* If ``multi_select`` is ``True``, then values of this property may be lists
of objects.
* If ``enum`` is a list of objects *and* ``multi_select`` is ``True``, then
values of this property must be lists of items in ``enum``.
"""
def __init__(
self, data_type, format=None, unit=None, enum=None, multi_select=False
):
super(ModelPropertyEnumType, self).__init__(data_type, format, unit)
if enum is not None:
enum = list(enum)
self.enum = enum
self.multi_select = multi_select
self.selection_type = "array" if self.multi_select else "enum"
def _decode_value(self, value):
"""
Decode a model value received from the Blackfynn API into the Python
representation mandated by this ``ModelPropertyType``.
"""
if value is None:
return None
self._assert_value_in_enum(value)
if self.multi_select:
return [super(ModelPropertyEnumType, self)._decode_value(v) for v in value]
return super(ModelPropertyEnumType, self)._decode_value(value)
def _encode_value(self, value):
"""
Encode a Python value into something that can be sent to the Blackfynn API.
"""
if value is None:
return None
self._assert_value_in_enum(value)
if self.multi_select:
return [super(ModelPropertyEnumType, self)._encode_value(v) for v in value]
return super(ModelPropertyEnumType, self)._encode_value(value)
def _assert_value_in_enum(self, value):
"""Check that values are in the enumerated type."""
if self.enum and self.multi_select:
for v in value:
if v not in self.enum:
raise Exception(
"Value '{}' is not a member of {}".format(v, self.enum)
)
elif self.enum and value not in self.enum:
raise Exception("Value '{}' is not a member of {}".format(value, self.enum))
@classmethod
def from_dict(cls, data):
selection_type = data["type"].lower()
multi_select = selection_type == "array"
data_type = data["items"].get("type")
format = data["items"].get("format")
unit = data["items"].get("unit")
enum = data["items"].get("enum")
return cls(
data_type=data_type,
format=format,
unit=unit,
enum=enum,
multi_select=multi_select,
)
def as_dict(self):
return dict(
type=self.selection_type,
items=dict(
type=self._blackfynn_type,
format=self.format,
unit=self.unit,
enum=self.enum,
),
)
@as_native_str()
def __repr__(self):
return u"<ModelPropertyEnumType data_type='{}' format='{}' unit='{}' enum='{}'".format(
self.data_type, self.format, self.unit, self.enum
)
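# Hedged sketch (illustrative only): an enumerated, multi-select property type;
# the color values are made up.
#
#     t = ModelPropertyEnumType(str, enum=['red', 'green', 'blue'], multi_select=True)
#     t._encode_value(['red', 'blue'])   # -> ['red', 'blue']
#     t._encode_value(['purple'])        # -> raises Exception (not in enum)
#     t.as_dict()['type']                # -> 'array' (because multi_select=True)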
class BaseModelProperty(object):
"""
Fallback values for property fields are resolved as follows:
(1) A property is `required` if it is the title of the model
(2) A property is `default` if it is `required`. `default` is deprecated and
`required` is now the source of truth.
(3) If the `display_name` for a property is not provided use `name` instead
    The default values of both `required` and `default` can be overridden by
    passing in explicit values.
"""
def __init__(
self,
name,
display_name=None,
data_type=str,
id=None,
locked=False,
default=None,
title=False,
description="",
required=None,
):
assert (
" " not in name
), "name cannot contain spaces, alternative names include {} and {}".format(
name.replace(" ", "_"), name.replace(" ", "-")
)
if required is None:
required = title
if default is None:
default = required
if display_name is None:
display_name = name
self.id = id
self.name = name
self.display_name = display_name
self.type = data_type # passed through @type.setter
self.locked = locked
self.default = default
self.title = title
self.description = description
self.required = required
@classmethod
def from_tuple(cls, data):
name = data[0]
data_type = data[1]
try:
display_name = data[2]
except:
display_name = name
try:
title = data[3]
except:
title = False
try:
required = data[4]
except:
required = False
return cls(
name=name,
display_name=display_name,
data_type=data_type,
title=title,
required=required,
)
@classmethod
def from_dict(cls, data):
display_name = data.get("displayName", data.get("display_name"))
data_type = data.get("data_type", data.get("dataType"))
locked = data.get("locked", False)
default = data.get("default")
title = data.get("title", data.get("conceptTitle", False))
id = data.get("id", None)
required = data.get("required")
description = data.get("description", "")
return cls(
name=data["name"],
display_name=display_name,
data_type=data_type,
id=id,
locked=locked,
default=default,
title=title,
required=required,
description=description,
)
def as_dict(self):
return dict(
id=self.id,
name=self.name,
displayName=self.display_name,
dataType=self._type.as_dict(),
locked=self.locked,
default=self.default,
conceptTitle=self.title,
description=self.description,
required=self.required,
)
def as_tuple(self):
return (self.name, self.type, self.display_name, self.title, self.required)
@property
def type(self):
return self._type.data_type
@type.setter
def type(self, type):
self._type = ModelPropertyType._build_from(type)
@property
def unit(self):
return self._type.unit
@property
def format(self):
return self._type.format
@property
def enum(self):
return self._type.enum
@property
def multi_select(self):
return self._type.multi_select
@as_native_str()
def __repr__(self):
return u"<BaseModelProperty name='{}' {}>".format(self.name, self.type)
class LinkedModelProperty(BaseNode):
def __init__(self, name, target, display_name=None, id=None, position=None):
assert (
" " not in name
), "name cannot contain spaces, alternative names include {} and {}".format(
name.replace(" ", "_"), name.replace(" ", "-")
)
self._object_key = ""
self.name = name
self.id = id
self.position = position
if display_name is None:
self.display_name = name
else:
self.display_name = display_name
if isinstance(target, Model):
self.target = target.id
elif isinstance(target, string_types):
self.target = target
else:
raise Exception("'target' must be an id or a Model object")
def as_dict(self):
dct = {"name": self.name, "displayName": self.display_name, "to": self.target}
if self.position is not None:
dct["position"] = self.position
return dct
@classmethod
def from_dict(cls, data):
if "link" in data:
# data came from a GET request
link = data["link"]
else:
# data came from a POST or PUT
link = data
name = link["name"]
display_name = link.get("displayName", link.get("display_name", name))
target = link["to"]
id = link.get("id")
position = link.get("position")
return cls(
name=name,
target=target,
display_name=display_name,
id=id,
position=position,
)
@as_native_str()
def __repr__(self):
return "<LinkedModelProperty name='{}' id='{}'>".format(self.name, self.id)
class BaseModelValue(object):
def __init__(self, name, value, data_type=None):
assert (
" " not in name
), "name cannot contain spaces, alternative names include {} and {}".format(
name.replace(" ", "_"), name.replace(" ", "-")
)
self.name = name
self.data_type = ModelPropertyType._build_from(data_type)
self.value = value # Decoded in @value.setter
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = self.data_type._decode_value(value)
@property
def type(self):
return self.data_type.data_type
@classmethod
def from_tuple(cls, data):
return cls(name=data[0], value=value[1])
@classmethod
def from_dict(cls, data):
return cls(
name=data["name"],
value=data["value"],
data_type=data.get("data_type", data.get("dataType")),
)
def as_dict(self):
return dict(
name=self.name,
value=self.data_type._encode_value(self.value),
dataType=self.data_type._blackfynn_type,
)
def as_tuple(self):
return (self.name, self.value)
@as_native_str()
def __repr__(self):
return u"<BaseModelValue name='{}' value='{}' {}>".format(
self.name, self.value, self.type
)
class LinkedModelValue(BaseNode):
def __init__(
self,
source_model,
target_model,
source_record,
target_record,
link_type,
id=None,
):
self.source_model = source_model
self.target_model = target_model
self.source_record_id = source_record
self.target_record_id = target_record
self.type = link_type
self.id = id
@classmethod
def from_dict(cls, data, source_model, target_model, link_type):
return cls(
source_model=source_model,
target_model=target_model,
source_record=data["from"],
target_record=data["to"],
link_type=link_type,
id=data["id"],
)
def as_dict(self):
return dict(schemaLinkedPropertyId=self.type.id, to=self.target_record_id)
@property
def source_record(self):
return self.source_model.get(self.source_record_id)
@property
def target_record(self):
        return self.target_model.get(self.target_record_id)
@as_native_str()
def __repr__(self):
return "<LinkedModelValue type={} id={}>".format(self.type, self.id)
class BaseModelNode(BaseNode):
_object_key = ""
_property_cls = BaseModelProperty
def __init__(
self,
dataset_id,
name,
display_name=None,
description=None,
locked=False,
default=True,
*args,
**kwargs
):
assert (
" " not in name
), "type cannot contain spaces, alternative types include {} and {}".format(
name.replace(" ", "_"), name.replace(" ", "-")
)
self.type = name
self.dataset_id = dataset_id
self.display_name = display_name or name
self.description = description or ""
self.locked = locked
self.created_at = kwargs.pop("createdAt", None)
self.updated_at = kwargs.pop("updatedAt", None)
schema = kwargs.pop("schema", None)
self.linked = kwargs.pop("linked", {})
super(BaseModelNode, self).__init__(*args, **kwargs)
self.schema = dict()
if schema is None:
return
self._add_properties(schema)
def _add_property(
self, name, display_name=None, data_type=str, title=False, description=""
):
prop = self._property_cls(
name=name,
display_name=display_name,
data_type=data_type,
title=title,
description=description,
)
self.schema[prop.name] = prop
return prop
def _add_properties(self, properties):
if isinstance(properties, list):
for p in properties:
if isinstance(p, dict):
prop = self._property_cls.from_dict(p)
elif isinstance(p, tuple):
prop = self._property_cls.from_tuple(p)
elif isinstance(p, string_types):
prop = self._property_cls(name=p)
elif isinstance(p, self._property_cls):
prop = p
else:
raise Exception("unsupported property value: {}".format(type(p)))
self.schema[prop.name] = prop
elif isinstance(properties, dict):
for k, v in properties.items():
self._add_property(name=k, data_type=v)
else:
raise Exception(
"invalid type {}; properties must either be a dict or list".format(
type(properties)
)
)
def _validate_values_against_schema(self, values):
data_keys = set(values.keys())
schema_keys = set(self.schema.keys())
assert (
data_keys <= schema_keys
), "Invalid properties: {}.\n\nAn instance of {} should only include values for properties defined in its schema: {}".format(
data_keys - schema_keys, self.type, schema_keys
)
# should be overridden by sub-class
def update(self):
pass
def add_property(
self, name, data_type=str, display_name=None, title=False, description=""
):
"""
Appends a property to the object's schema and updates the object on the platform.
Args:
name (str): Name of the property
            data_type (type, optional): Python type of the property. Defaults to ``str``.
display_name (str, optional): Display name for the property.
title (bool, optional): If True, the property will be used in the title on the platform
description (str, optional): Description of the property
Example:
Adding a new property with the default data_type::
mouse.add_property('name')
Adding a new property with the ``float`` data_type::
mouse.add_property('weight', float)
"""
prop = self._add_property(
name,
data_type=data_type,
display_name=display_name,
title=title,
description=description,
)
self.update()
return prop
def add_properties(self, properties):
"""
Appends multiple properties to the object's schema and updates the object
on the platform.
Args:
properties (list): List of properties to add
Note:
At least one property on a model needs to serve as the model's title.
See ``title`` argument in example(s) below.
Example:
Add properties using ``ModelProperty`` objects::
model.add_properties([
ModelProperty('name', data_type=str, title=True),
ModelProperty('age', data_type=int)
])
Add properties defined as list of dictionaries::
model.add_properties([
{
'name': 'full_name',
                        'data_type': str,
'title': True
},
{
'name': 'age',
                        'data_type': int,
}
])
"""
self._add_properties(properties)
self.update()
def add_linked_property(self, name, target_model, display_name=None):
"""
Add a linked property to the model.
Args:
name (str): Name of the property
target_model (Model): Model that the property will link to
display_name (str, optional): Display name of the property
"""
payload = LinkedModelProperty(
name, target=target_model, display_name=display_name
)
prop = self._api.concepts.create_linked_property(self.dataset_id, self, payload)
self.linked[prop.name] = prop
return prop
def add_linked_properties(self, properties):
"""
Add multiple linked properties to the model.
Args:
properties (list): List of LinkedModelProperty objects
"""
props = self._api.concepts.create_linked_properties(
self.dataset_id, self, properties
)
for prop in props:
self.linked[prop.name] = prop
return props
def remove_property(self, property):
"""
Remove property from model schema.
Args:
property (string, ModelProperty): Property to remove. Can be property name, id, or object.
"""
# verify property in schema
prop_name = None
if isinstance(property, string_types):
# assume property name first, then assume ID
if property in self.schema:
# property is name
prop_name = property
else:
# property may be id
ids = [x.id for x in self.schema.values()]
if property in ids:
                    prop_name = list(self.schema.values())[ids.index(property)].name
elif isinstance(property, ModelProperty):
prop_name = property.name
else:
raise Exception(
"Expected 'property' argument of type string or ModelProperty, found type {}".format(
type(property)
)
)
if prop_name is None:
raise Exception(
"Property '{}' not found in model's schema.".format(property)
)
prop_id = self.schema.get(prop_name).id
self._api.concepts.delete_property(self.dataset_id, self, prop_id)
self.schema.pop(prop_name)
def remove_linked_property(self, prop):
"""
Delete the linked property with the given name or id.
"""
# verify linked property is in schema
if isinstance(prop, string_types):
# assume property name or ID
for p in self.linked.values():
if prop == p.id or prop == p.name:
prop_id = p.id
prop_name = p.name
break
else:
raise Exception(
"Property '{}' not found in model's schema.".format(property)
)
        elif isinstance(prop, LinkedModelProperty):
prop_name = prop.name
prop_id = prop.id
else:
raise Exception(
"Expected a LinkedModelProperty, found type {}".format(type(property))
)
self._api.concepts.delete_linked_property(self.dataset_id, self, prop_id)
self.linked.pop(prop_name)
def get_property(self, name):
"""
Gets the property object by name.
Example:
            >>> mouse.get_property('weight').type
float
"""
return self.schema.get(name, None)
def get_linked_properties(self):
"""
Get all linked properties attached to this Model.
"""
return self._api.concepts.get_linked_properties(self.dataset_id, self)
def get_linked_property(self, name):
"""
Get a linked property by name or id.
"""
for k, v in self.get_linked_properties().items():
if k == name or v.id == name:
return v
raise Exception("No linked property found with name or id '{}'".format(name))
def as_dict(self):
return dict(
name=self.type,
displayName=self.display_name,
description=self.description,
locked=self.locked,
schema=[p.as_dict() for p in self.schema.values()],
)
class BaseRecord(BaseNode):
_object_key = ""
_value_cls = BaseModelValue
def __init__(self, dataset_id, type, *args, **kwargs):
self.type = type
self.dataset_id = dataset_id
self.created_at = kwargs.pop("createdAt", None)
self.created_by = kwargs.pop("createdBy", None)
self.updated_at = kwargs.pop("updatedAt", None)
self.updated_by = kwargs.pop("updatedBy", None)
values = kwargs.pop("values", None)
super(BaseRecord, self).__init__(*args, **kwargs)
self._values = dict()
if values is None:
return
self._set_values(values)
def _set_value(self, name, value):
if name in self._values:
v = self._values[name]
v.value = value
else:
v = self._value_cls(name=name, value=value)
self._values[v.name] = v
def _set_values(self, values):
if isinstance(values, list):
for v in values:
if isinstance(v, dict):
value = self._value_cls.from_dict(v)
elif isinstance(v, tuple):
value = self._value_cls.from_tuple(v)
elif isinstance(v, self._value_cls):
value = v
else:
raise Exception("unsupported value: {}".format(type(v)))
self._values[value.name] = value
elif isinstance(values, dict):
for k, v in values.items():
self._set_value(name=k, value=v)
else:
raise Exception(
"invalid type {}; values must either be a dict or list".format(
type(values)
)
)
@property
def values(self):
return {v.name: v.value for v in self._values.values()}
# should be overridden by sub-class
def update(self):
pass
def get(self, name):
"""
Returns:
The value of the property if it exists. None otherwise.
"""
value = self._values.get(name, None)
return value.value if value is not None else None
def set(self, name, value):
"""
Updates the value of an existing property or creates a new property
if one with the given name does not exist.
Note:
Updates the object on the platform.
"""
self._set_value(name, value)
try:
self.update()
except:
raise Exception("local object updated, but failed to update remotely")
def as_dict(self):
return {"values": [v.as_dict() for v in self._values.values()]}
@as_native_str()
def __repr__(self):
return u"<BaseRecord type='{}' id='{}'>".format(self.type, self.id)
class ModelTemplate(BaseNode):
_object_key = None
def __init__(
self,
name,
properties,
category=None,
id=None,
display_name=None,
schema=None,
description=None,
required=None,
*args,
**kwargs
):
assert name is not None, "ModelTemplate name must be defined"
assert properties is not None, "ModelTemplate properties must be defined"
self.id = id
self.schema = schema or "http://schema.blackfynn.io/model/draft-01/schema"
self.name = name
self.display_name = display_name
self.description = description or name
self.category = category
self.required = required or []
if isinstance(properties, list) and isinstance(properties[0], tuple):
self.properties = ModelTemplate.properties_from_tuples(properties)
else:
self.properties = properties
super(ModelTemplate, self).__init__(*args, **kwargs)
@classmethod
def properties_from_tuples(cls, tuples):
        d = dict()
        for tup in tuples:
            name = "{}".format(tup[0])
            data_type = tup[1]
            # build a fresh dict per property so entries do not share one object
            d[name] = {"type": data_type, "description": name}
        return d
def as_dict(self):
return {
"$schema": self.schema,
"name": self.name,
"description": self.description,
"category": self.category,
"properties": self.properties,
"required": self.required,
}
@classmethod
def from_dict(cls, data, *args, **kwargs):
template = cls(
schema=data["$schema"],
name=data["name"],
description=data["description"],
category=data["category"],
required=data["required"],
properties=data["properties"],
display_name=data.get("displayName", None),
)
template.id = data.get("$id", None)
return template
@as_native_str()
def __repr__(self):
return u"<ModelTemplate name='{}' id='{}'>".format(self.name, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Models
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ModelProperty(BaseModelProperty):
@as_native_str()
def __repr__(self):
return u"<ModelProperty name='{}' {}>".format(self.name, self.type)
class ModelValue(BaseModelValue):
@as_native_str()
def __repr__(self):
return u"<ModelValue name='{}' value='{}' {}>".format(
self.name, self.value, self.type
)
class ModelSelect(object):
def __init__(self, *join_keys):
self.join_keys = [target_type_string(k) for k in join_keys]
def as_dict(self):
return {"Concepts": {"joinKeys": self.join_keys}}
@as_native_str()
def __repr__(self):
join_keys = [target_type_string(k) for k in self.join_keys]
return u"<ModelSelect join_keys='{}'>".format(",".join(join_keys))
class ModelFilter(object):
def __init__(self, key, operator, value):
self.key = key
self.operator = operator
self.value = value
def as_dict(self):
return {
"key": self.key,
"predicate": {"operation": self.operator, "value": self.value},
}
@as_native_str()
def __repr__(self):
return u"<ModelFilter key='{}' operator='{}' value='{}'>".format(
self.key, self.operator, self.value
)
class ModelJoin(object):
def __init__(self, target, *filters):
self.target = target
self.filters = [
ModelFilter(*f) if not isinstance(f, ModelFilter) else f for f in filters
]
def as_dict(self):
key = target_type_string(self.target)
return {
"targetType": {"concept": {"type": key}},
"filters": [f.as_dict() for f in self.filters],
"key": key,
}
@as_native_str()
def __repr__(self):
return u"<ModelJoin targetType='{}' filter='{}', key='{}'>".format(
target_type_string(self.target), self.filters, self.key
)
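# Hedged sketch (illustrative only) of the query building blocks above; the
# operator string 'gte' and the model/property names are assumptions, and the
# query-runner API that consumes these payloads is not shown here:
#
#     f = ModelFilter('weight', 'gte', 2.0)
#     f.as_dict()
#     # -> {'key': 'weight', 'predicate': {'operation': 'gte', 'value': 2.0}}
#     j = ModelJoin('disease', f)
#     j.as_dict()['targetType']
#     # -> {'concept': {'type': 'disease'}}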
class Model(BaseModelNode):
"""
Representation of a Model in the knowledge graph.
"""
_object_key = ""
_property_cls = ModelProperty
def __init__(
self,
dataset_id,
name,
display_name=None,
description=None,
locked=False,
*args,
**kwargs
):
self.count = kwargs.pop("count", None)
self.state = kwargs.pop("state", None)
self._logger = log.get_logger("blackfynn.models.Model")
super(Model, self).__init__(
dataset_id, name, display_name, description, locked, *args, **kwargs
)
def update(self):
"""
Updates the details of the ``Model`` on the platform.
Example::
mouse.update()
Note:
Currently, you can only append new properties to a ``Model``.
"""
self._check_exists()
_update_self(self, self._api.concepts.update(self.dataset_id, self))
def delete(self):
"""
Deletes a model from the platform. Must not have any instances.
"""
return self._api.concepts.delete(self.dataset_id, self)
def get_all(self, limit=100, offset=0):
"""
Retrieves all records of the model from the platform.
Returns:
List of ``Record``
Example::
mice = mouse.get_all()
"""
return self._api.concepts.instances.get_all(
self.dataset_id, self, limit=limit, offset=offset
)
def get(self, id):
"""
Retrieves a record of the model by id from the platform.
Args:
id: the Blackfynn id of the model
Returns:
A single ``Record``
Example::
mouse_001 = mouse.get(123456789)
"""
return self._api.concepts.instances.get(self.dataset_id, id, self)
def query(self):
"""
Run a query with this model as the join target.
"""
return self._api.concepts.query.new(self, self.dataset_id)
def get_connected(self):
"""Retrieves all connected models
Connected is defined as model that can be reached by following
outgoing relationships starting at the current model
Args:
id: The Blackfynn id of the "root" model
Returns:
A list of models connected to the given model
Example::
connected_models = mouse.get_connected()
"""
return self._api.concepts.get_connected(self.dataset_id, self.id)
def create_record(self, values=dict()):
"""
Creates a record of the model on the platform.
Args:
values (dict, optional): values for properties defined in the `Model` schema
Returns:
The newly created ``Record``
Example::
mouse_002 = mouse.create_record({"id": 2, "weight": 2.2})
"""
self._check_exists()
data_keys = set(values.keys())
schema_keys = set(self.schema.keys())
assert (
len(data_keys & schema_keys) > 0
), "An instance of {} must include values for at least one of its properties: {}".format(
self.type, schema_keys
)
self._validate_values_against_schema(values)
values = [
dict(name=k, value=v, dataType=self.schema.get(k)._type)
for k, v in values.items()
]
ci = Record(dataset_id=self.dataset_id, type=self.type, values=values)
ci = self._api.concepts.instances.create(self.dataset_id, ci)
return ci
def create_records(self, values_list):
"""
Creates multiple records of the model on the platform.
Args:
values_list (list): array of dictionaries corresponding to record values.
Returns:
List of newly created ``Record`` objects.
Example::
mouse.create_records([
{ 'id': 311, 'weight': 1.9 },
{ 'id': 312, 'weight': 2.1 },
{ 'id': 313, 'weight': 1.8 },
{ 'id': 314, 'weight': 2.3 },
{ 'id': 315, 'weight': 2.0 }
])
"""
self._check_exists()
schema_keys = set(self.schema.keys())
for values in values_list:
data_keys = set(values.keys())
assert (
len(data_keys & schema_keys) > 0
), "An instance of {} must include values for at least one of its propertes: {}".format(
self.type, schema_keys
)
self._validate_values_against_schema(values)
ci_list = [
Record(
dataset_id=self.dataset_id,
type=self.type,
values=[
dict(name=k, value=v, dataType=self.schema.get(k)._type)
for k, v in values.items()
],
)
for values in values_list
]
return self._api.concepts.instances.create_many(self.dataset_id, self, *ci_list)
    def from_dataframe(self, df):
        """
        Creates records of the model from the rows of a pandas DataFrame.
        """
        return self.create_records(df.to_dict(orient="records"))
def delete_records(self, *records):
"""
Deletes one or more records of a concept from the platform.
Args:
*records: instances and/or ids of records to delete
Returns:
``None``
Logs the list of records that failed to delete.
Example::
            mouse.delete_records(mouse_002, 123456789, mouse_003.id)
"""
result = self._api.concepts.delete_instances(self.dataset_id, self, *records)
for error in result["errors"]:
self._logger.error(
"Failed to delete instance {} with error: {}".format(error[0], error[1])
)
def get_related(self):
"""
Returns a list of related model types and counts of those
relationships.
"Related" indicates that the model could be connected to the current
model via some relationship, i.e. ``B`` is "related to" ``A`` if there
exist ``A -[relationship]-> B``. Note that the directionality
matters. If ``B`` is the queried model, ``A`` would not appear in the
list of "related" models.
Returns:
List of ``Model`` objects related via a defined relationship
Example::
related_models = mouse.get_related()
"""
return self._api.concepts.get_related(self.dataset_id, self)
def __iter__(self):
for record in self.get_all():
yield record
@as_native_str()
def __repr__(self):
return u"<Model type='{}' id='{}'>".format(self.type, self.id)
class Record(BaseRecord):
"""
Represents a record of a ``Model``.
Includes its neighbors, relationships, and links.
"""
_object_key = ""
_value_cls = ModelValue
def _get_relationship_type(self, relationship):
return (
relationship.type
if isinstance(relationship, RelationshipType)
else relationship
)
def _get_links(self, model):
return self._api.concepts.instances.relations(self.dataset_id, self, model)
def get_related(self, model=None, group=False):
"""
Returns all related records.
Args:
model (str, Model, optional): Return only related records of this type
group (bool, optional): If true, group results by model type (dict)
Returns:
List of ``Record`` objects. If ``group`` is ``True``, then the result
is a dictionary of ``RecordSet`` objects keyed by model names.
Example:
            Get all connected records of type ``disease``::
                mouse_001.get_related('disease')
Get all connected records::
mouse_001.get_related()
"""
if model is None:
# return all connected records
related_by_model = self._api.concepts.instances.get_all_related(
self.dataset_id, self
)
if group:
return related_by_model
else:
if len(related_by_model) == 1:
# try to retain RecordSet type
return list(related_by_model.values())[0]
# mixed return types, cannot keep RecordSets
related = []
for model_name, model_related in related_by_model.items():
related.extend(model_related)
return related
else:
return self._api.concepts.instances.get_all_related_of_type(
self.dataset_id, self, model
)
def get_files(self):
"""
All files related to the current record.
Returns:
List of data objects i.e. ``DataPackage``
Example::
mouse_001.get_files()
"""
return self._api.concepts.files(self.dataset_id, self.type, self)
def relate_to(
self, destinations, relationship_type="related_to", values=None, direction="to"
):
"""
Relate record to one or more ``Record`` or ``DataPackage`` objects.
Args:
destinations (list of Record or DataPackage):
A list containing the ``Record`` or ``DataPackage`` objects to relate to current record
relationship_type (RelationshipType, str, optional):
Type of relationship to create
values (list of dictionaries, optional):
A list of dictionaries corresponding to relationship values
direction (str, optional):
Relationship direction. Valid values are ``'to'`` and ``'from'``
Returns:
List of created ``Relationship`` objects.
.. note::
Destinations must all be of type ``DataPackage`` or ``Record``; you cannot mix destination types.
Example:
Relate to a single ``Record``, define relationship type::
mouse_001.relate_to(lab_009, 'located_at')
Relate to multiple ``DataPackage`` objects::
mouse_001.relate_to([eeg, mri1, mri2])
"""
self._check_exists()
# accept object or list
if isinstance(destinations, (Record, DataPackage)):
destinations = [destinations]
if isinstance(destinations, Collection):
destinations = destinations.items
if not destinations:
return None
# default values
        if values is None:
            values = [dict() for _ in destinations]
else:
values = [dict(name=k, value=v) for val in values for k, v in val.items()]
assert len(destinations) == len(
values
), "Length of values must match length of destinations"
# check type
if not (
all([isinstance(d, DataPackage) for d in destinations])
or all([isinstance(d, Record) for d in destinations])
):
raise Exception(
"All destinations must be of object type Record or DataPackage"
)
# auto-create relationship type
if isinstance(relationship_type, string_types):
relationships_types = self._api.concepts.relationships.get_all(
self.dataset_id
)
if relationship_type not in relationships_types:
r = RelationshipType(
dataset_id=self.dataset_id,
name=relationship_type,
description=relationship_type,
source=self.model.id,
destination=destinations[0].model.id,
)
relationship_type = self._api.concepts.relationships.create(
self.dataset_id, r
)
else:
relationship_type = relationships_types[relationship_type]
# relationships (to packages)
if isinstance(destinations[0], DataPackage):
# if linking packages, link one at a time
result = [
self._api.concepts.relationships.instances.link(
self.dataset_id, relationship_type, self, d, values=v
)
for d, v in zip(destinations, values)
]
return RelationshipSet(relationship_type, result)
# relationships (to records)
if direction == "to":
relationships = [
Relationship(
type=relationship_type.type,
dataset_id=self.dataset_id,
source=self,
destination=d.id,
values=v,
)
for d, v in zip(destinations, values)
]
elif direction == "from":
relationships = [
Relationship(
type=relationship_type.type,
dataset_id=self.dataset_id,
source=d.id,
destination=self,
values=v,
)
for d, v in zip(destinations, values)
]
else:
raise Exception('Direction must be value "to" or "from"')
# use batch endpoint to create relationships
return self._api.concepts.relationships.instances.create_many(
self.dataset_id, relationship_type, *relationships
)
def get_linked_values(self):
"""
Get all link values attached to this Record.
"""
return self._api.concepts.instances.get_linked_values(
self.dataset_id, self.model, self
)
def get_linked_value(self, link):
"""
Get a link value by name or id.
"""
all_links = self.get_linked_values()
# First assume link is a link value id:
for l in all_links:
if link == l.id:
return l
# Then assume link is a linked property name:
try:
prop_id = self.model.get_linked_property(link).id
except:
raise Exception(
"No link found with a name or ID matching '{}'".format(link)
)
else:
for l in all_links:
if prop_id == l.type.id:
return l
raise Exception("No link found with a name or ID matching '{}'".format(link))
def add_linked_value(self, target, link):
"""
Attach a linked property value to the Record.
target: the id or Record object of the target record
link: the id or LinkedModelProperty object of the link type
"""
model = self.model
if isinstance(target, Record):
target = target.id
if isinstance(link, LinkedModelProperty):
link_id = link.id
elif isinstance(link, string_types):
link_id = model.get_linked_property(link).id
payload = dict(
name=model.type,
displayName=model.display_name,
schemaLinkedPropertyId=link_id,
to=target,
)
return self._api.concepts.instances.create_link(
self.dataset_id, self.model, self, payload
)
def delete_linked_value(self, link_name):
"""
Delete a link by name or id.
"""
link = self.get_linked_value(link_name)
self._api.concepts.instances.remove_link(
self.dataset_id, self.model, self, link
)
@property
def model(self):
"""
The ``Model`` of the current record.
Returns:
A single ``Model``.
"""
return self._api.concepts.get(self.dataset_id, self.type)
def update(self):
"""
Updates the values of the record on the platform (after modification).
Example::
mouse_001.set('name', 'Mickey')
mouse_001.update()
"""
self._check_exists()
_update_self(self, self._api.concepts.instances.update(self.dataset_id, self))
def delete(self):
"""
Deletes the instance from the platform.
Example::
mouse_001.delete()
"""
return self._api.concepts.instances.delete(self.dataset_id, self)
@as_native_str()
def __repr__(self):
return u"<Record type='{}' id='{}'>".format(self.type, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Query Result
#
# Returned per "row" result of a query
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class QueryResult(object):
def __init__(self, dataset_id, target, joined):
self.dataset_id = dataset_id
self._target = target
self._joined = joined
@property
def target(self):
"""
Get the target of the query.
Returns:
the target of the query.
Example::
For the following query,
Review.query() \
.filter("is_complete", "eq", False) \
.join("reviewer", ("id", "eq", "12345")) \
.select(reviewer")
.run()
a record whose type is `review` would be the target.
"""
return self._target
def get(self, model):
"""
Get the result for a specific join key appearing in a `select()`.
Args:
model: (string|Model) The type of the record to retrieve from this
query result.
Returns:
the record whose type matches the given model, or None if no such
record exists.
Example::
For the following query,
result = Review.query() \
.filter("is_complete", "eq", False) \
.join("reviewer", ("id", "eq", "12345")) \
.select(reviewer")
.run()
reviewer_record = result.get("reviewer") # Also equivalent to `result.get(Reviewer)`
"""
return self._joined.get(target_type_string(model), None)
def items(self):
"""
Gets all (model:string, record:Record) instances contained in this
query result.
Returns:
A list of (model:string, record:Record) pairs contained in this
query result.
"""
return self._joined.items()
def __getitem__(self, model):
return self.get(model)
def __contains__(self, model):
return target_type_string(model) in self._joined
def as_dict(self):
d = {t: record.as_dict() for (t, record) in self._joined.items()}
d["targetValue"] = self.target.as_dict()
return d
@as_native_str()
def __repr__(self):
return u"<QueryResult dataset='{}' target='{}'>".format(
self.dataset_id, self._target.id
)
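# --- Illustrative usage sketch (not part of the original module) -------------
# Ties together ``Model.query`` and ``QueryResult`` following the docstring
# examples above. ``review_model`` is assumed to be a ``Model`` whose records
# have an ``is_complete`` property and a ``reviewer`` join; the exact return
# shape of ``run()`` is not defined in this module, so this mirrors the
# docstrings rather than asserting the real API.
def _example_query(review_model):
    result = review_model.query() \
        .filter("is_complete", "eq", False) \
        .join("reviewer", ("id", "eq", "12345")) \
        .select("reviewer") \
        .run()
    # A result row exposes the query target plus any joined records selected.
    return result.target, result.get("reviewer")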
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Relationships
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class RelationshipProperty(BaseModelProperty):
@as_native_str()
def __repr__(self):
return u"<RelationshipProperty name='{}' {}>".format(self.name, self.type)
class RelationshipValue(BaseModelValue):
@as_native_str()
def __repr__(self):
return u"<RelationshipValue name='{}' value='{}' {}>".format(
self.name, self.value, self.type
)
class RelationshipType(BaseModelNode):
"""
    Model for defining a relationship type.
"""
_object_key = ""
_property_cls = RelationshipProperty
def __init__(
self,
dataset_id,
name,
display_name=None,
description=None,
locked=False,
source=None,
destination=None,
*args,
**kwargs
):
kwargs.pop("type", None)
self.destination = destination
self.source = source
super(RelationshipType, self).__init__(
dataset_id, name, display_name, description, locked, *args, **kwargs
)
def update(self):
raise Exception("Updating Relationships is not available at this time.")
# TODO: _update_self(self, self._api.concepts.relationships.update(self.dataset_id, self))
# TODO: delete when update is supported, handled in super-class
def add_property(self, name, display_name=None, data_type=str):
raise Exception("Updating Relationships is not available at this time.")
# TODO: delete when update is supported, handled in super-class
def add_properties(self, properties):
raise Exception("Updating Relationships is not available at this time.")
def delete(self):
raise Exception("Deleting Relationships is not available at this time.")
# TODO: self._api.concepts.relationships.delete(self.dataset_id, self)
def get_all(self):
"""
Retrieves all relationships of this type from the platform.
Returns:
List of ``Relationship``
Example::
belongs_to_relationships = belongs_to.get_all()
"""
return self._api.concepts.relationships.instances.get_all(self.dataset_id, self)
def get(self, id):
"""
Retrieves a relationship by id from the platform.
Args:
id (int): the id of the instance
Returns:
A single ``Relationship``
Example::
            belongs_to_001 = belongs_to.get(123456789)
"""
return self._api.concepts.relationships.instances.get(self.dataset_id, id, self)
def relate(self, source, destination, values=dict()):
"""
Relates a ``Record`` to another ``Record`` or ``DataPackage`` using current relationship.
Args:
            source (Record, DataPackage): record or data package the relationship originates from
destination (Record, DataPackage): record or data package the relationship points to
values (dict, optional): values for properties defined in the relationship's schema
Returns:
The newly created ``Relationship``
Example:
Create a relationship between a ``Record`` and a ``DataPackage``::
from_relationship.relate(mouse_001, eeg)
Create a relationship (with values) between a ``Record`` and a ``DataPackage``::
                from_relationship.relate(mouse_001, eeg, {"date": datetime.datetime(1991, 2, 26, 7, 0)})
"""
self._check_exists()
self._validate_values_against_schema(values)
return self._api.concepts.relationships.instances.link(
self.dataset_id, self, source, destination, values
)
def create(self, items):
"""
Create multiple relationships between records using current relationship type.
Args:
items (list): List of relationships to be created.
Each relationship should be either a dictionary or tuple.
If relationships are dictionaries, they are required to have
``from``/``to`` or ``source``/``destination`` keys.
There is an optional ``values`` key which can be used
to attach metadata to the relationship;
``values`` should be a dictionary with key/value pairs.
If relationships are tuples, they must be in the form
``(source, dest)``.
Returns:
Array of newly created ``Relationships`` objects
Example:
Create multiple relationships (dictionary format)::
diagnosed_with.create([
{ 'from': participant_001, 'to': parkinsons},
{ 'from': participant_321, 'to': als}
])
Create multiple relationships (tuple format)::
diagnosed_with.create([
(participant_001, parkinsons),
(participant_321, als)
])
"""
self._check_exists()
# handle non-array
if isinstance(items, (dict, tuple)):
items = [items]
relations = []
for value in items:
# get source, destination, and values
if isinstance(value, tuple):
src, dest = value
vals = {}
elif isinstance(value, dict):
src = value.get("from", value.get("source"))
dest = value.get("to", value.get("destination"))
vals = value.get("values", {})
else:
raise Exception(
"Expected relationship as tuple or dictionary, found {}".format(
type(value)
)
)
# Check sources and destinations
if not isinstance(src, (Record, DataPackage, string_types)):
raise Exception(
"source must be object of type Record, DataPackage, or UUID"
)
if not isinstance(dest, (Record, DataPackage, string_types)):
raise Exception(
"destination must be object of type Record, DataPackage, or UUID"
)
# create local relationship object
relations.append(
Relationship(
dataset_id=self.dataset_id,
type=self.type,
source=src,
destination=dest,
values=[
dict(name=k, value=v, dataType=self.schema.get(k).type)
for k, v in vals.items()
],
)
)
return self._api.concepts.relationships.instances.create_many(
self.dataset_id, self, *relations
)
def as_dict(self):
d = super(RelationshipType, self).as_dict()
d["type"] = "relationship"
if self.source is not None:
d["from"] = self.source
if self.destination is not None:
d["to"] = self.destination
return d
@as_native_str()
def __repr__(self):
return u"<RelationshipType type='{}' id='{}'>".format(self.type, self.id)
class Relationship(BaseRecord):
"""
A single instance of a ``RelationshipType``.
"""
_object_key = ""
def __init__(self, dataset_id, type, source, destination, *args, **kwargs):
        assert isinstance(
            source, (Record, string_types, DataPackage)
        ), "source must be Record, UUID, or DataPackage"
        assert isinstance(
            destination, (Record, string_types, DataPackage)
        ), "destination must be Record, UUID, or DataPackage"
if isinstance(source, (Record, DataPackage)):
source = source.id
if isinstance(destination, (Record, DataPackage)):
destination = destination.id
self.source = source
self.destination = destination
kwargs.pop("schemaRelationshipId", None)
super(Relationship, self).__init__(dataset_id, type, *args, **kwargs)
def relationship(self):
"""
Retrieves the relationship definition of this instance from the platform
Returns:
A single ``RelationshipType``.
"""
return self._api.concepts.relationships.get(self.dataset_id, self.type)
# TODO: delete when update is supported, handled in super-class
def set(self, name, value):
raise Exception("Updating a Relationship is not available at this time.")
def update(self):
raise Exception("Updating a Relationship is not available at this time.")
# TODO: _update_self(self, self._api.concepts.relationships.instances.update(self.dataset_id, self))
def delete(self):
"""
Deletes the instance from the platform.
Example::
mouse_001_eeg_link.delete()
"""
return self._api.concepts.relationships.instances.delete(self.dataset_id, self)
@classmethod
def from_dict(cls, data, *args, **kwargs):
d = dict(
source=data.pop("from", None), destination=data.pop("to", None), **data
)
item = super(Relationship, cls).from_dict(d, *args, **kwargs)
return item
def as_dict(self):
d = super(Relationship, self).as_dict()
d["to"] = self.destination
d["from"] = self.source
return d
@as_native_str()
def __repr__(self):
return u"<Relationship type='{}' id='{}' source='{}' destination='{}'>".format(
self.type, self.id, self.source, self.destination
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Proxies
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class ProxyInstance(BaseRecord):
_object_key = ""
def __init__(self, dataset_id, type, *args, **kwargs):
super(ProxyInstance, self).__init__(dataset_id, type, *args, **kwargs)
def item(self):
if self.type == "proxy:package":
package_id = self.get("id")
return self._api.packages.get(package_id)
else:
raise Exception("unsupported proxy type: {}".format(self.type))
def update(self):
raise Exception("Updating a ProxyInstance is not available at this time.")
def set(self, name, value):
raise Exception("Updating a ProxyInstance is not available at this time.")
@as_native_str()
def __repr__(self):
return u"<ProxyInstance type='{}' id='{}'>".format(self.type, self.id)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model/Relation Instance Sets
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class BaseInstanceList(list):
_accept_type = None
def __init__(self, type, *args, **kwargs):
super(BaseInstanceList, self).__init__(*args, **kwargs)
assert isinstance(type, self._accept_type), "type must be type {}".format(
self._accept_type
)
self.type = type
def as_dataframe(self):
pass
class RecordSet(BaseInstanceList):
_accept_type = Model
@require_extension
def as_dataframe(self, record_id_column_name=None):
"""
Convert the list of ``Record`` objects to a pandas DataFrame
Args:
record_id_column_name (string): If set, a column with the desired
name will be prepended to this dataframe that contains record ids.
Returns:
pd.DataFrame
"""
cols = list(self.type.schema.keys())
if record_id_column_name:
if record_id_column_name in cols:
raise ValueError(
"There is already a column called '{}' in this data set.".format(
record_id_column_name
)
)
cols.insert(0, record_id_column_name)
data = []
for instance in self:
values = dict(instance.values)
if record_id_column_name:
values[record_id_column_name] = instance.id
data.append(values)
df = pd.DataFrame(data=data, columns=cols)
return df
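# --- Illustrative usage sketch (not part of the original module) -------------
# Shows the intent of ``RecordSet.as_dataframe``. ``mouse`` is assumed to be a
# ``Model`` whose ``get_all()`` yields a ``RecordSet``; pandas must be
# installed for the extension decorator above to allow the conversion.
def _example_records_to_dataframe(mouse):
    records = mouse.get_all(limit=100)
    # One column per schema property, plus an id column prepended up front.
    return records.as_dataframe(record_id_column_name="record_id")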
class RelationshipSet(BaseInstanceList):
_accept_type = RelationshipType
@require_extension
def as_dataframe(self):
"""
Converts the list of ``Relationship`` objects to a pandas DataFrame
Returns:
pd.DataFrame
.. note::
In addition to the values in each relationship instance, the DataFrame
contains three columns that describe each instance:
- ``__source__``: ID of the instance's source
- ``__destination__``: ID of the instance's destination
- ``__type__``: Type of relationship that the instance is
"""
cols = ["__source__", "__destination__", "__type__"]
cols.extend(self.type.schema.keys())
data = []
for instance in self:
d = {}
d["_type"] = self.type.type
d["_source"] = instance.source
d["_destination"] = instance.destination
for name, value in instance.values.items():
d[name] = value
data.append(d)
df = pd.DataFrame(data=data, columns=cols)
return df
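# --- Illustrative usage sketch (not part of the original module) -------------
# Same idea for relationships: ``belongs_to`` is assumed to be an existing
# ``RelationshipType`` whose ``get_all()`` yields a ``RelationshipSet``, so the
# instances can be flattened into a DataFrame with the ``__source__``,
# ``__destination__`` and ``__type__`` columns documented above.
def _example_relationships_to_dataframe(belongs_to):
    relationships = belongs_to.get_all()
    return relationships.as_dataframe()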
| 0
| 24,648
| 0
| 106,871
| 0
| 922
| 0
| 144
| 1,588
|
ff45a14b78903909a6ffd1b4566901c7dab86a97
| 3,024
|
py
|
Python
|
api/tests/cards/test_card_read.py
|
onecrayon/api.ashes.live
|
72709fb4e53220aa9b48749a51f5b834ebb2ca42
|
[
"0BSD"
] | 11
|
2020-09-13T16:49:21.000Z
|
2021-07-29T06:17:58.000Z
|
api/tests/cards/test_card_read.py
|
onecrayon/api.ashes.live
|
72709fb4e53220aa9b48749a51f5b834ebb2ca42
|
[
"0BSD"
] | 49
|
2020-09-11T05:23:02.000Z
|
2022-03-02T18:31:00.000Z
|
api/tests/cards/test_card_read.py
|
onecrayon/api.ashes.live
|
72709fb4e53220aa9b48749a51f5b834ebb2ca42
|
[
"0BSD"
] | 1
|
2022-03-27T22:11:29.000Z
|
2022-03-27T22:11:29.000Z
|
from fastapi import status
from fastapi.testclient import TestClient
from api import db
from api.models import Card
def test_get_legacy_card(client: TestClient, session: db.Session):
"""Must be able to read JSON for a legacy card"""
# This is handled by a migration normally (legacy cards can't normally be created by this API)
card = (
session.query(Card)
.filter(Card.stub == "example-phoenixborn", Card.is_legacy == True)
.first()
)
card.json["release"]["is_legacy"] = True
card.json["is_legacy"] = True
db.flag_modified(card, "json")
session.commit()
response = client.get("/v2/cards/example-phoenixborn", params={"show_legacy": True})
assert response.status_code == status.HTTP_200_OK
assert response.json()["is_legacy"] == True, response.json()
def test_get_card(client: TestClient, session: db.Session):
"""Must be able to read non-Legacy JSON"""
response = client.get("/v2/cards/example-phoenixborn")
assert response.status_code == status.HTTP_200_OK
assert "is_legacy" not in response.json(), response.json()
def test_get_nonexistent_card(client: TestClient, session: db.Session):
"""Must throw an error when the card doesn't exist"""
response = client.get("/v2/cards/no-luck")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.json()
def test_get_details_no_card(client: TestClient, session: db.Session):
"""Must fail properly when requesting non-existent card"""
response = client.get("/v2/cards/no-luck/details")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.json()
def test_get_details_root_phoenixborn(client: TestClient, session: db.Session):
"""Must properly find root Phoenixborn cards"""
response = client.get("/v2/cards/example-conjured-alteration/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_root_non_phoenixborn(client: TestClient, session: db.Session):
"""Must properly find root non-Phoenixborn cards"""
response = client.get("/v2/cards/summon-example-conjuration/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_root_phoenixborn_conjurations(
client: TestClient, session: db.Session
):
"""Must properly find conjurations when looking up Phoenixborn unique"""
response = client.get("/v2/cards/example-ally/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_non_phoenixborn_conjuration(
client: TestClient, session: db.Session
):
"""Must properly find root summons for non-Phoenixborn conjurations"""
response = client.get("/v2/cards/example-conjuration/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_phoenixborn(client: TestClient, session: db.Session):
"""Must properly find connected cards when looking up Phoenixborn"""
response = client.get("/v2/cards/example-phoenixborn/details")
assert response.status_code == status.HTTP_200_OK
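# --- Illustrative fixture sketch (not part of the original test module) ------
# The tests above depend on ``client`` and ``session`` fixtures supplied by the
# project's conftest, which is not shown here. A rough stand-in could look like
# the commented sketch below; the application module and session factory names
# are assumptions, not the project's real API.
#
#     import pytest
#     from fastapi.testclient import TestClient
#     from api import db
#     from api.main import app              # hypothetical app module
#
#     @pytest.fixture
#     def session():
#         with db.get_session() as session:  # hypothetical session factory
#             yield session
#
#     @pytest.fixture
#     def client(session):
#         return TestClient(app)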
| 39.789474
| 98
| 0.737765
|
from fastapi import status
from fastapi.testclient import TestClient
from api import db
from api.models import Card
def test_get_legacy_card(client: TestClient, session: db.Session):
"""Must be able to read JSON for a legacy card"""
# This is handled by a migration normally (legacy cards can't normally be created by this API)
card = (
session.query(Card)
.filter(Card.stub == "example-phoenixborn", Card.is_legacy == True)
.first()
)
card.json["release"]["is_legacy"] = True
card.json["is_legacy"] = True
db.flag_modified(card, "json")
session.commit()
response = client.get("/v2/cards/example-phoenixborn", params={"show_legacy": True})
assert response.status_code == status.HTTP_200_OK
assert response.json()["is_legacy"] == True, response.json()
def test_get_card(client: TestClient, session: db.Session):
"""Must be able to read non-Legacy JSON"""
response = client.get("/v2/cards/example-phoenixborn")
assert response.status_code == status.HTTP_200_OK
assert "is_legacy" not in response.json(), response.json()
def test_get_nonexistent_card(client: TestClient, session: db.Session):
"""Must throw an error when the card doesn't exist"""
response = client.get("/v2/cards/no-luck")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.json()
def test_get_details_no_card(client: TestClient, session: db.Session):
"""Must fail properly when requesting non-existent card"""
response = client.get("/v2/cards/no-luck/details")
assert response.status_code == status.HTTP_404_NOT_FOUND, response.json()
def test_get_details_root_phoenixborn(client: TestClient, session: db.Session):
"""Must properly find root Phoenixborn cards"""
response = client.get("/v2/cards/example-conjured-alteration/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_root_non_phoenixborn(client: TestClient, session: db.Session):
"""Must properly find root non-Phoenixborn cards"""
response = client.get("/v2/cards/summon-example-conjuration/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_root_phoenixborn_conjurations(
client: TestClient, session: db.Session
):
"""Must properly find conjurations when looking up Phoenixborn unique"""
response = client.get("/v2/cards/example-ally/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_non_phoenixborn_conjuration(
client: TestClient, session: db.Session
):
"""Must properly find root summons for non-Phoenixborn conjurations"""
response = client.get("/v2/cards/example-conjuration/details")
assert response.status_code == status.HTTP_200_OK
def test_get_details_phoenixborn(client: TestClient, session: db.Session):
"""Must properly find connected cards when looking up Phoenixborn"""
response = client.get("/v2/cards/example-phoenixborn/details")
assert response.status_code == status.HTTP_200_OK
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
793d046d209b11ed4917626afa99782cc07d3bdf
| 13,796
|
py
|
Python
|
cbm/foi/foi_v1.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 17
|
2021-01-18T07:27:01.000Z
|
2022-03-10T12:26:21.000Z
|
cbm/foi/foi_v1.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 4
|
2021-04-29T11:20:44.000Z
|
2021-12-06T10:19:17.000Z
|
cbm/foi/foi_v1.py
|
CsabaWirnhardt/cbm
|
1822addd72881057af34ac6a7c2a1f02ea511225
|
[
"BSD-3-Clause"
] | 47
|
2021-01-21T08:25:22.000Z
|
2022-03-21T14:28:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Gilbert Voican, Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import psycopg2
# from psycopg2 import Error
import subprocess
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from rasterstats import zonal_stats
from fiona import open as fopen
import fiona
from yaml import load, FullLoader
from os.path import dirname, abspath, join, normpath
from pathlib import Path
from cbm.utils import config
from cbm.datas import db
import geopandas as gpd
path_foi_func = normpath(join(dirname(abspath(__file__)), 'foi_db_func'))
def main(reference_data, raster_classif_file, yaml_file, pre_min_het,
pre_max_het, area_threshold, db_str='main'):
"""FOI assessment is based on spatial analysis of a thematic raster
produced in advance.
The thematic raster can be the result of a any image/raster processing
method yielding a class label for each pixel - crop classification, behavior
analysis of land phenomenon, gridded data on soil, slope, humidity, etc.
The starting point was the idea that inside of an homgeneous parcel we
should have only one type of pixel.
For example if the thematic raster is the result of a crop classification,
inside a parcel we should have only one type of pixels that represent the
respective crop
If the thematic raster is the result of a behaviour analysis, all the pixels
inside a parcel should behave in the same way during a period of time.
The FOI assessment is based on the analysis made on the presence and
distribution of different types of pixels inside the FOI.
Args:
reference_data (str): Spatial data to be tested -
parcels that will be checked for heterogeneity and cardinality
            The parcel polygons in .shp file format or a
            database table name without the .shp ending.
raster_classif_file (str): Thematic raster - classification raster, or
raster from other source that will be used for testing
heterogeneity and cardinality
        yaml_file: YAML file that holds the classes of the thematic raster file.
            Can also be a simple list of values in the notebook giving the
            correspondence between pixel values and names for the classes.
pre_min_het: Minimum thresholds for heterogeneity checks.
pre_max_het: Maximum thresholds for heterogeneity checks.
area_threshold: Minimum area for clusters selection.
Returns:
bool: True if successful, False otherwise.
"""
    # database connection string
    if type(db_str) is str:
        db_connection = f"PG:{db.conn_str(db_str)}"
    elif type(db_str) is list:
        db_connection = "PG:host={} port={} dbname={} user={} password={}".format(
            *db_str)

    # helper to open a psycopg2 connection from the same connection settings
    def db_conn():
        if type(db_str) is str:
            return db.conn(db_str)
        elif type(db_str) is list:
            return psycopg2.connect(
                "host={} port={} dbname={} user={} password={}".format(*db_str))
# ogr2ogr options
geom_field_name = "GEOMETRY_NAME=wkb_geometry"
overwrite_option = "-OVERWRITE"
geom_type = "MULTIPOLYGON"
output_format = "PostgreSQL"
# Path for storing the processed data - final spatial data that will be
# exported after database processing
processed_data = normpath(join('foi', 'processed_data'))
os.makedirs(processed_data, exist_ok=True)
# Path for storing the final output data
output_data = normpath(join('foi', 'output_data'))
os.makedirs(output_data, exist_ok=True)
reference_data_name = os.path.splitext(os.path.basename(reference_data))[0]
try:
with open(f"{config.get_value(['paths','temp'])}tb_prefix", 'r') as f:
reference_data_table = f.read()
except Exception:
reference_data_table = reference_data_name
# Vector file resulted from the raster stats pixel count
# pixelcount_output = f'{output_data}pixel_count_{reference_data_table}.shp'
pixelcount_output = f'{processed_data}/{reference_data_name}_pixelcount.shp'
# Vector file resulted from raster to vector process (polygonize)
polygonize_output = f'{processed_data}/{reference_data_name}_polygonize.shp'
# Name of the table to be created in the database - import of the pixel
# count into the database
pixelcount_table = f"{reference_data_name}_pixelcount"
# Name of the table to be created in the database - import of the
# polygonize result into the database
polygonize_table = f"{reference_data_name}_polygonize"
# Name and path of the files resulted from the analysis
heterogeneity_output = f'{output_data}/{reference_data_name}_foih_v1.shp'
cardinality_output = f'{output_data}/{reference_data_name}_foic_v1.shp'
cardinality_output_clusters = f'{output_data}/{reference_data_name}_foic_clusters_v1.shp'
sql = "SELECT * FROM " + reference_data_table + ";"
try:
ps_connection = db_conn()
ps_connection.autocommit = True
cursor = ps_connection.cursor()
gpd_data = gpd.read_postgis(
sql=sql, con=ps_connection, geom_col='wkb_geometry')
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
finally:
# closing database connection.
if(ps_connection):
cursor.close()
ps_connection.close()
# print("PostgreSQL connection is closed")
temp_reference_data = f'foi/{reference_data_name}_temp.shp'
gpd_data.to_file(temp_reference_data)
shape = fiona.open(temp_reference_data)
spatialRef = shape.crs["init"]
# print("Vector EPSG: ", spatialRef)
# Import reference data shapefile to database.
# Overwrite option is needed, otherwise the import will append new
# values to the ones existing in the table
subprocess.call(["ogr2ogr", overwrite_option, "-nlt", geom_type, "-lco",
geom_field_name, "-a_srs", spatialRef, "-nln",
reference_data_table, "-f", "PostgreSQL", db_connection,
reference_data])
# Reading the values from yaml file
conf = load(open(yaml_file, 'r').read(), Loader=FullLoader)
category_map = conf['category_map']
rst_fields = list(category_map.values())
# Counting the number of pixels for each parcel. The fields with names of
# the classes from yaml file will be added,
# and updated with the number of pixels from each category
with fopen(temp_reference_data, 'r') as input:
spatialRef = input.crs["init"]
schema = input.schema
for i in rst_fields:
schema['properties'][i] = 'int:5'
rst_attribs = dict.fromkeys(rst_fields, 0)
with fopen(pixelcount_output, 'w', 'ESRI Shapefile', schema) as output:
for i, vct_feat in enumerate(input):
vct_val_dict = dict(vct_feat['properties'])
rst_val_dict = zonal_stats(
vct_feat, raster_classif_file,
categorical=True, copy_properties=True,
category_map=category_map, nodata=-999)[0]
vct_val_dict.update(rst_attribs)
for lu in rst_val_dict:
vct_val_dict[lu] = rst_val_dict.get(lu)
for atrib in vct_val_dict:
vct_feat['properties'][atrib] = vct_val_dict.get(atrib)
output.write(vct_feat)
print("Finished pixel calculation!")
# Import resulted shapefile, with the number of pixels for each class to
# database. Overwrite option is needed, otherwise the
# import will append new values to the ones existing in the table
subprocess.call(["ogr2ogr", overwrite_option, "-nlt", geom_type, "-a_srs",
spatialRef, "-nln", pixelcount_table, "-f", "PostgreSQL",
db_connection, pixelcount_output])
# Number of classes from the thematic raster
num_classes = len(category_map)
    # Minimum and maximum thresholds for heterogeneity checks. In this example,
    # any parcel with a percentage of pixels for one class between 30 and 70
    # percent of the total will be considered heterogeneous.
# min_heterogeneity_threshold = 30
# max_heterogeneity_threshold = 70
min_heterogeneity_threshold = pre_min_het
max_heterogeneity_threshold = pre_max_het
    # Calling the PostgreSQL function which checks the heterogeneity.
# The function calculates the percentages and sets an attribute
# "foi_h" to 1 when the percentage of pixels is between thresholds
try:
ps_connection = db_conn()
ps_connection.autocommit = True
cursor = ps_connection.cursor()
# call stored procedure
cursor.callproc('public.check_heterogeneity', (
pixelcount_table, num_classes, min_heterogeneity_threshold,
max_heterogeneity_threshold))
print("Running function to check heterogeneity")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
finally:
# closing database connection.
if(ps_connection):
cursor.close()
ps_connection.close()
print("PostgreSQL connection is closed")
print("Heterogeneity assessment function finished")
# Export processed data - heterogeneity, to shapefile
subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
heterogeneity_output, db_connection, pixelcount_table])
print("Heterogeneity analysis output downloaded")
# Polygonize the thematic raster. The process takes into account only
# one band (in this case - first band). Can be used with 8 connected
# pixels or with 4 connected pixels.
connectedness = '-8'
sourceRaster = gdal.Open(raster_classif_file)
band = sourceRaster.GetRasterBand(1)
srs = osr.SpatialReference(wkt=sourceRaster.GetProjection())
dst_layername = polygonize_output
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource(dst_layername)
dst_layer = dst_ds.CreateLayer(dst_layername, srs=srs)
fd = ogr.FieldDefn("DN", ogr.OFTInteger)
dst_layer.CreateField(fd)
dst_field = dst_layer.GetLayerDefn().GetFieldIndex("DN")
gdal.Polygonize(band, None, dst_layer, dst_field,
[connectedness], callback=None)
dst_ds.Destroy()
# Import polygonize result to database
subprocess.call(["ogr2ogr", overwrite_option, "-nlt", geom_type, "-lco",
geom_field_name, "-nln", polygonize_table, "-f",
output_format, db_connection, polygonize_output])
# Names of the tables to be created in the database during the processing
processed_clusters = polygonize_table + "_clusters"
processed_cardinality = polygonize_table + "_cardin"
# Spatial data to be tested - parcels that will be checked for cardinality
# (I think we should use the same data as for heterogeneity)
# reference_table = 'reference_data'
    # Minimum area for clusters selection - only clusters bigger than the
    # threshold will be counted
# area_threshold = 2000
    # Calling the PostgreSQL function which checks the cardinality. The function
    # fixes the geometry for the spatial data resulting from polygonize, clips
    # the polygonize result with the parcels that need to be checked,
    # calculates the area of the clusters inside each parcel, and selects the
    # clusters that are of more than one type, each of them bigger than the
    # threshold, in each parcel.
    # The function creates two new tables: one with the clusters that match
    # the conditions, the other with the data to be tested plus a new column
    # "foi_c" which is 1 if the parcel has more than two types of clusters
    # with an area bigger than the threshold
# TO DO: put the unique identifier as function param
try:
ps_connection = db_conn()
ps_connection.autocommit = True
cursor = ps_connection.cursor()
# call stored procedure
# cursor.callproc('public.check_cardinality', (
# polygonize_table, reference_data_table, area_threshold))
cursor.execute(
"CALL public.check_cardinality_procedure( %s, %s, %s, %s); ",
(polygonize_table, reference_data_table, area_threshold, 10000))
print("Running function to check cardinality")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
finally:
# closing database connection.
if(ps_connection):
cursor.close()
ps_connection.close()
print("PostgreSQL connection is closed")
# Export processed data - clusters, to shapefile
subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
cardinality_output_clusters, db_connection,
processed_clusters])
print("Cardinality assessment function finished")
# Export processed data - data to be tested with "foi_c" flag, to shapefile
subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
cardinality_output, db_connection, processed_cardinality])
print("Cardinality analysis output downloaded")
filelist_temp = [f for f in os.listdir(
'foi') if f.startswith(Path(temp_reference_data).stem)]
for f in filelist_temp:
os.remove(os.path.join('foi', f))
if __name__ == "__main__":
import sys
main(sys.argv)
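# --- Illustrative call sketch (not part of the original module) --------------
# The docstring of ``main`` above describes the expected inputs; a direct call
# might look like the commented example below. All paths and the class-map file
# are placeholders, and the thresholds simply echo the values mentioned in the
# comments above.
#
#     main(
#         reference_data="parcels_2020.shp",       # parcels to test
#         raster_classif_file="crop_classif.tif",  # thematic raster
#         yaml_file="raster_classes.yml",          # pixel value -> class names
#         pre_min_het=30,                          # heterogeneity thresholds (%)
#         pre_max_het=70,
#         area_threshold=2000,                     # minimum cluster area
#         db_str="main",                           # or [host, port, dbname, user, password]
#     )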
| 40.696165
| 100
| 0.683531
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Gilbert Voican, Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import os
import psycopg2
# from psycopg2 import Error
import subprocess
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
from rasterstats import zonal_stats
from fiona import open as fopen
import fiona
from yaml import load, FullLoader
from os.path import dirname, abspath, join, normpath
from pathlib import Path
from cbm.utils import config
from cbm.datas import db
import geopandas as gpd
path_foi_func = normpath(join(dirname(abspath(__file__)), 'foi_db_func'))
def main(reference_data, raster_classif_file, yaml_file, pre_min_het,
pre_max_het, area_threshold, db_str='main'):
"""FOI assessment is based on spatial analysis of a “thematic” raster
produced in advance.
The thematic raster can be the result of a any image/raster processing
method yielding a class label for each pixel - crop classification, behavior
analysis of land phenomenon, gridded data on soil, slope, humidity, etc.
The starting point was the idea that inside of an homgeneous parcel we
should have only one type of pixel.
For example if the thematic raster is the result of a crop classification,
inside a parcel we should have only one type of pixels that represent the
respective crop
If the thematic raster is the result of a behaviour analysis, all the pixels
inside a parcel should behave in the same way during a period of time.
The FOI assessment is based on the analysis made on the presence and
distribution of different types of pixels inside the FOI.
Args:
reference_data (str): Spatial data to be tested -
parcels that will be checked for heterogeneity and cardinality
The parcels poligons in .shp file format or
database table name without .shp ending.
raster_classif_file (str): Thematic raster - classification raster, or
raster from other source that will be used for testing
heterogeneity and cardinality
yaml_file: YAML file that holds the classes of thematic raster file.
can be also a simple list of values in the notebook corespondence
between pixel values and names for the classes
pre_min_het: Minimum thresholds for heterogeneity checks.
pre_max_het: Maximum thresholds for heterogeneity checks.
area_threshold: Minimum area for clusters selection.
Returns:
bool: True if successful, False otherwise.
"""
# database connection string
if type(db_str) is str:
db_connection = f"PG:{db.conn_str(db_str)}"
elif type(db_str) is list:
db_connection = "PG:host={} port={} dbname={} user={} password={}".format(
*db_str)
def db_conn():
if type(db_str) is str:
return db.conn(db_str)
elif type(db_str) is list:
return psycopg2.connect("host={} port={} dbname={} user={} password={}".format(*db_str))
# ogr2ogr options
geom_field_name = "GEOMETRY_NAME=wkb_geometry"
overwrite_option = "-OVERWRITE"
geom_type = "MULTIPOLYGON"
output_format = "PostgreSQL"
# Path for storing the processed data - final spatial data that will be
# exported after database processing
processed_data = normpath(join('foi', 'processed_data'))
os.makedirs(processed_data, exist_ok=True)
# Path for storing the final output data
output_data = normpath(join('foi', 'output_data'))
os.makedirs(output_data, exist_ok=True)
reference_data_name = os.path.splitext(os.path.basename(reference_data))[0]
try:
with open(f"{config.get_value(['paths','temp'])}tb_prefix", 'r') as f:
reference_data_table = f.read()
except Exception:
reference_data_table = reference_data_name
# Vector file resulted from the raster stats pixel count
# pixelcount_output = f'{output_data}pixel_count_{reference_data_table}.shp'
pixelcount_output = f'{processed_data}/{reference_data_name}_pixelcount.shp'
# Vector file resulted from raster to vector process (polygonize)
polygonize_output = f'{processed_data}/{reference_data_name}_polygonize.shp'
# Name of the table to be created in the database - import of the pixel
# count into the database
pixelcount_table = f"{reference_data_name}_pixelcount"
# Name of the table to be created in the database - import of the
# polygonize result into the database
polygonize_table = f"{reference_data_name}_polygonize"
# Name and path of the files resulted from the analysis
heterogeneity_output = f'{output_data}/{reference_data_name}_foih_v1.shp'
cardinality_output = f'{output_data}/{reference_data_name}_foic_v1.shp'
cardinality_output_clusters = f'{output_data}/{reference_data_name}_foic_clusters_v1.shp'
sql = "SELECT * FROM " + reference_data_table + ";"
try:
ps_connection = db_conn()
ps_connection.autocommit = True
cursor = ps_connection.cursor()
gpd_data = gpd.read_postgis(
sql=sql, con=ps_connection, geom_col='wkb_geometry')
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
finally:
# closing database connection.
if(ps_connection):
cursor.close()
ps_connection.close()
# print("PostgreSQL connection is closed")
temp_reference_data = f'foi/{reference_data_name}_temp.shp'
gpd_data.to_file(temp_reference_data)
shape = fiona.open(temp_reference_data)
spatialRef = shape.crs["init"]
# print("Vector EPSG: ", spatialRef)
# Import reference data shapefile to database.
# Overwrite option is needed, otherwise the import will append new
# values to the ones existing in the table
subprocess.call(["ogr2ogr", overwrite_option, "-nlt", geom_type, "-lco",
geom_field_name, "-a_srs", spatialRef, "-nln",
reference_data_table, "-f", "PostgreSQL", db_connection,
reference_data])
# Reading the values from yaml file
conf = load(open(yaml_file, 'r').read(), Loader=FullLoader)
category_map = conf['category_map']
rst_fields = list(category_map.values())
# Counting the number of pixels for each parcel. The fields with names of
# the classes from yaml file will be added,
# and updated with the number of pixels from each category
with fopen(temp_reference_data, 'r') as input:
spatialRef = input.crs["init"]
schema = input.schema
for i in rst_fields:
schema['properties'][i] = 'int:5'
rst_attribs = dict.fromkeys(rst_fields, 0)
with fopen(pixelcount_output, 'w', 'ESRI Shapefile', schema) as output:
for i, vct_feat in enumerate(input):
vct_val_dict = dict(vct_feat['properties'])
rst_val_dict = zonal_stats(
vct_feat, raster_classif_file,
categorical=True, copy_properties=True,
category_map=category_map, nodata=-999)[0]
vct_val_dict.update(rst_attribs)
for lu in rst_val_dict:
vct_val_dict[lu] = rst_val_dict.get(lu)
for atrib in vct_val_dict:
vct_feat['properties'][atrib] = vct_val_dict.get(atrib)
output.write(vct_feat)
print("Finished pixel calculation!")
# Import resulted shapefile, with the number of pixels for each class to
# database. Overwrite option is needed, otherwise the
# import will append new values to the ones existing in the table
subprocess.call(["ogr2ogr", overwrite_option, "-nlt", geom_type, "-a_srs",
spatialRef, "-nln", pixelcount_table, "-f", "PostgreSQL",
db_connection, pixelcount_output])
# Number of classes from the thematic raster
num_classes = len(category_map)
# Minimum and maximum thresholds for heterogeneity checks. In this example,
# any parcel
# with percentage of pixels for one class between 30 and 70 from the total,
# will be considered heterogenous.
# min_heterogeneity_threshold = 30
# max_heterogeneity_threshold = 70
min_heterogeneity_threshold = pre_min_het
max_heterogeneity_threshold = pre_max_het
# Calling the PostgreSQL function wich checks the heterogeneity.
# The function calculates the percentages and sets an attribute
# "foi_h" to 1 when the percentage of pixels is between thresholds
try:
ps_connection = db_conn()
ps_connection.autocommit = True
cursor = ps_connection.cursor()
# call stored procedure
cursor.callproc('public.check_heterogeneity', (
pixelcount_table, num_classes, min_heterogeneity_threshold,
max_heterogeneity_threshold))
print("Running function to check heterogeneity")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
finally:
# closing database connection.
if(ps_connection):
cursor.close()
ps_connection.close()
print("PostgreSQL connection is closed")
print("Heterogeneity assessment function finished")
# Export processed data - heterogeneity, to shapefile
subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
heterogeneity_output, db_connection, pixelcount_table])
print("Heterogeneity analysis output downloaded")
# Polygonize the thematic raster. The process takes into account only
# one band (in this case - first band). Can be used with 8 connected
# pixels or with 4 connected pixels.
connectedness = '-8'
sourceRaster = gdal.Open(raster_classif_file)
band = sourceRaster.GetRasterBand(1)
srs = osr.SpatialReference(wkt=sourceRaster.GetProjection())
dst_layername = polygonize_output
drv = ogr.GetDriverByName("ESRI Shapefile")
dst_ds = drv.CreateDataSource(dst_layername)
dst_layer = dst_ds.CreateLayer(dst_layername, srs=srs)
fd = ogr.FieldDefn("DN", ogr.OFTInteger)
dst_layer.CreateField(fd)
dst_field = dst_layer.GetLayerDefn().GetFieldIndex("DN")
gdal.Polygonize(band, None, dst_layer, dst_field,
[connectedness], callback=None)
dst_ds.Destroy()
# Import polygonize result to database
subprocess.call(["ogr2ogr", overwrite_option, "-nlt", geom_type, "-lco",
geom_field_name, "-nln", polygonize_table, "-f",
output_format, db_connection, polygonize_output])
# Names of the tables to be created in the database during the processing
processed_clusters = polygonize_table + "_clusters"
processed_cardinality = polygonize_table + "_cardin"
# Spatial data to be tested - parcels that will be checked for cardinality
# (I think we should use the same data as for heterogeneity)
# reference_table = 'reference_data'
# Minimum area for clusters selection - only clusters bigger that the
# threshold will be counted
# area_threshold = 2000
# Calling the PostgreSQL function wich checks the cardinality. The function
# fixes the geometry for the spatial data resulted from polygnize, clips
# the polygonize result with the parcels that needs to be checked,
# calculates the area of the clusters inside each parcel, selects the
# clusters that are more than one type, each of them bigger that the
# threshold, in each parcel.
# The function creates two new tables: one with the clusters that matches
# the conditions, the other with data to be tested and a new column
# "foi_c" wich is 1 if the parcel has more that two types of clusters
# with the area bigger than the thershold
# TO DO: put the unique identifier as function param
try:
ps_connection = db_conn()
ps_connection.autocommit = True
cursor = ps_connection.cursor()
# call stored procedure
# cursor.callproc('public.check_cardinality', (
# polygonize_table, reference_data_table, area_threshold))
cursor.execute(
"CALL public.check_cardinality_procedure( %s, %s, %s, %s); ",
(polygonize_table, reference_data_table, area_threshold, 10000))
print("Running function to check cardinality")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
finally:
# closing database connection.
if(ps_connection):
cursor.close()
ps_connection.close()
print("PostgreSQL connection is closed")
# Export processed data - clusters, to shapefile
subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
cardinality_output_clusters, db_connection,
processed_clusters])
print("Cardinality assessment function finished")
# Export processed data - data to be tested with "foi_c" flag, to shapefile
subprocess.call(["ogr2ogr", "-f", "ESRI Shapefile",
cardinality_output, db_connection, processed_cardinality])
print("Cardinality analysis output downloaded")
filelist_temp = [f for f in os.listdir(
'foi') if f.startswith(Path(temp_reference_data).stem)]
for f in filelist_temp:
os.remove(os.path.join('foi', f))
if __name__ == "__main__":
import sys
main(sys.argv)
| 6
| 0
| 0
| 0
| 0
| 196
| 0
| 0
| 27
|
e549886f38a359ea6d5cf82b0fcbe44d7f043c7e
| 3,657
|
py
|
Python
|
laylm/trainer/task.py
|
nunenuh/layoutlm.pytorch
|
30853b0a37247b5463836156d9f345d84da050f0
|
[
"MIT"
] | 1
|
2021-01-25T16:31:54.000Z
|
2021-01-25T16:31:54.000Z
|
laylm/trainer/task.py
|
nunenuh/layoutlm.pytorch
|
30853b0a37247b5463836156d9f345d84da050f0
|
[
"MIT"
] | null | null | null |
laylm/trainer/task.py
|
nunenuh/layoutlm.pytorch
|
30853b0a37247b5463836156d9f345d84da050f0
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch.optim as optim
| 33.550459
| 104
| 0.570686
|
import torch
import torch.nn as nn
import torch.optim as optim
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
)
from .metrics import FullMetrics
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
class TaskLayoutLM(pl.LightningModule):
def __init__(self, model, tokenizer, grad_clip=1.0, hparams={}):
super().__init__()
self.model = model
self.tokenizer = tokenizer
self.grad_clip = grad_clip
self.hparams = hparams
self.metrics = FullMetrics(tokenizer)
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None
):
return self.model(
input_ids,
bbox,
attention_mask,
token_type_ids,
position_ids,
head_mask,
inputs_embeds,
labels
)
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)
def shared_step(self, batch, batch_idx):
inputs = {
"input_ids": batch[0].to(self.device),
"attention_mask": batch[1].to(self.device),
"token_type_ids": batch[2].to(self.device),
"labels": batch[3].to(self.device),
"bbox": batch[4].to(self.device)
}
outputs = self.model(**inputs)
loss, logits = outputs[0], outputs[1]
metrics = self.metrics(inputs, outputs)
return loss, logits, metrics
def training_step(self, batch, batch_idx):
loss, logits, metrics = self.shared_step(batch, batch_idx)
self.log('trn_loss', loss, prog_bar=True, logger=True)
self.log('trn_acc', metrics['accuracy'], prog_bar=True, logger=True)
self.log('trn_f1', metrics['f1'], prog_bar=True, logger=True)
self.log('trn_precision', metrics['precision'], prog_bar=True, logger=True)
self.log('trn_recall', metrics['recall'], prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
loss, logits, metrics = self.shared_step(batch, batch_idx)
self.log('val_loss', loss, prog_bar=True, logger=True)
self.log('val_acc', metrics['accuracy'], prog_bar=True, logger=True)
self.log('val_f1', metrics['f1'], prog_bar=True, logger=True)
self.log('val_precision', metrics['precision'], prog_bar=True, logger=True)
self.log('val_recall', metrics['recall'], prog_bar=True, logger=True)
return loss
def configure_optimizers(self):
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in self.model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
{
"params": [
p
for n, p in self.model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW( optimizer_grouped_parameters, lr=5e-5, eps=1e-8 )
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=1)
return [optimizer], [scheduler]
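# --- Illustrative training sketch (not part of the original module) ----------
# Shows how TaskLayoutLM above might be driven. The LayoutLM classes exist in
# ``transformers``, but the checkpoint name, label count and dataloaders are
# assumptions; batches are expected in the (input_ids, attention_mask,
# token_type_ids, labels, bbox) order used by ``shared_step``.
#
#     from transformers import LayoutLMTokenizer, LayoutLMForTokenClassification
#     import pytorch_lightning as pl
#
#     tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
#     model = LayoutLMForTokenClassification.from_pretrained(
#         "microsoft/layoutlm-base-uncased", num_labels=13)  # label count is an assumption
#     task = TaskLayoutLM(model, tokenizer, grad_clip=1.0)
#     trainer = pl.Trainer(max_epochs=5, gpus=1)
#     trainer.fit(task, train_dataloader, val_dataloader)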
| 0
| 0
| 0
| 3,371
| 0
| 0
| 0
| 96
| 135
|
85c4f894595399211d734bb79e045b6413317d18
| 557
|
py
|
Python
|
2020/Day_10/part1.py
|
Adilius/adventofcode
|
d0d3ad1a0430c3732d108ad8ef2b4d218a37944b
|
[
"MIT"
] | 2
|
2020-12-01T14:50:51.000Z
|
2020-12-03T17:08:43.000Z
|
2020/Day_10/part1.py
|
Adilius/adventofcode
|
d0d3ad1a0430c3732d108ad8ef2b4d218a37944b
|
[
"MIT"
] | null | null | null |
2020/Day_10/part1.py
|
Adilius/adventofcode
|
d0d3ad1a0430c3732d108ad8ef2b4d218a37944b
|
[
"MIT"
] | null | null | null |
input_file = open("input.txt", "r")
lines = input_file.read().splitlines() #Read whole file, split by newlines
lines = [int(i) for i in lines]
lines.sort()
lines.append(lines[-1]+3)
oneDifference = 0
threeDifference = 0
previousJoltage = 0
for outlet in lines:
if outlet - 3 == previousJoltage:
threeDifference += 1
else:
oneDifference += 1
previousJoltage = outlet
print(lines)
print("oneDifference:", oneDifference)
print("threeDifference:", threeDifference)
answer = oneDifference * threeDifference
print("Answer:", answer)
| 26.52381
| 75
| 0.709156
|
input_file = open("input.txt", "r")
lines = input_file.read().splitlines() #Read whole file, split by newlines
lines = [int(i) for i in lines]
lines.sort()
lines.append(lines[-1]+3)
oneDifference = 0
threeDifference = 0
previousJoltage = 0
for outlet in lines:
if outlet - 3 == previousJoltage:
threeDifference += 1
else:
oneDifference += 1
previousJoltage = outlet
print(lines)
print("oneDifference:", oneDifference)
print("threeDifference:", threeDifference)
answer = oneDifference * threeDifference
print("Answer:", answer)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e82f452914c75b9c2ebe56457a4ddeef9877b340
| 1,231
|
py
|
Python
|
yeti.py
|
chaitanyakrishna/yeti
|
796257311a84cd84873fd2423d5ccce231459560
|
[
"Apache-2.0"
] | null | null | null |
yeti.py
|
chaitanyakrishna/yeti
|
796257311a84cd84873fd2423d5ccce231459560
|
[
"Apache-2.0"
] | null | null | null |
yeti.py
|
chaitanyakrishna/yeti
|
796257311a84cd84873fd2423d5ccce231459560
|
[
"Apache-2.0"
] | 1
|
2021-11-16T13:21:56.000Z
|
2021-11-16T13:21:56.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(format='%(levelname)s:%(module)s:%(message)s', level=logging.ERROR)
COMMANDS = {
'webserver': webserver,
'syncdb': syncdb,
}
if __name__ == '__main__':
main()
| 25.122449
| 112
| 0.682372
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import argparse
from core.web import webapp
logging.basicConfig(format='%(levelname)s:%(module)s:%(message)s', level=logging.ERROR)
def webserver(args):
# Enable debug and autoreload in dev
webapp.debug = args.debug
if webapp.debug:
webapp.jinja_env.auto_reload = True
webapp.config['TEMPLATES_AUTO_RELOAD'] = True
syncdb()
print "[+] Yeti started. Point browser to http://localhost:5000/{}".format(" (debug)" if args.debug else "")
webapp.run(host="0.0.0.0")
def syncdb(args=None):
from core.internals import Internals
Internals.syncdb()
COMMANDS = {
'webserver': webserver,
'syncdb': syncdb,
}
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='sub-command help', dest="command")
webserver = subparsers.add_parser('webserver', help='Launch yeti webserver')
webserver.add_argument('--debug', action='store_true', help="Run Flask in debug mode")
subparsers.add_parser('syncdb', help='Sync database with Yeti version')
args = parser.parse_args()
command = args.command
COMMANDS[command](args)
if __name__ == '__main__':
main()
| 0
| 0
| 0
| 0
| 0
| 858
| 0
| 0
| 114
|
d4dc1d4acf798618eb4e02139f69296e021d2d15
| 1,261
|
py
|
Python
|
main.py
|
MunityVR/Game
|
d22f3dcf8a394eca26cf90d4be38b6c328c8e707
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
MunityVR/Game
|
d22f3dcf8a394eca26cf90d4be38b6c328c8e707
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
MunityVR/Game
|
d22f3dcf8a394eca26cf90d4be38b6c328c8e707
|
[
"CC0-1.0"
] | null | null | null |
# Spaces
minsp = 0
maxsp = 500
# Admin's configuration
print("Hey Admin!")
sw = str(input("Make up a word : "))
guesses = float(input("How many guesses should the user have : "))
h = str(input("Do you want to give the user a hint? y/n : "))
guess = ''
gc = 0
# Admin's hint
if h == 'y':
hint = str(input("What hint do you wanna give to the user, when the user type down \'Hint\' : "))
elif h == 'n':
hint = str("I didn\'t give you a hint.")
# Spaces - So user can't see what the admin did.
while minsp != maxsp:
print("")
minsp = minsp + 1
print("Hey User!")
# The game starts here.
while guess != sw or gc != guesses:
guess = str(input("Enter a guess : "))
gc = gc + 1
# Events during/end the game.
if guess == sw:
print("You win!")
exit()
elif guess == 'Np =>':
print("You know something that nobody else knows!")
gc = gc - 1
elif guess == 'Amazing Grace':
print("Alan Jackson!")
gc = gc - 1
elif guess == 'Hint' or guess == 'hint':
print("Admin > " + str(hint))
elif guess == 'GCount' or guess == 'gcount':
print(str(gc) + "/" + str(guesses))
gc = gc - 1
elif gc == guesses:
print("You lose!")
exit()
| 24.25
| 101
| 0.547978
|
# Spaces
minsp = 0
maxsp = 500
# Admin's configuration
print("Hey Admin!")
sw = str(input("Make up a word : "))
guesses = float(input("How many guesses should the user have : "))
h = str(input("Do you want to give the user a hint? y/n : "))
guess = ''
gc = 0
# Admin's hint
if h == 'y':
hint = str(input("What hint do you wanna give to the user, when the user type down \'Hint\' : "))
elif h == 'n':
hint = str("I didn\'t give you a hint.")
# Spaces - So user can't see what the admin did.
while minsp != maxsp:
print("")
minsp = minsp + 1
print("Hey User!")
# The game starts here.
while guess != sw or gc != guesses:
guess = str(input("Enter a guess : "))
gc = gc + 1
# Events during/end the game.
if guess == sw:
print("You win!")
exit()
elif guess == 'Np =>':
print("You know something that nobody else knows!")
gc = gc - 1
elif guess == 'Amazing Grace':
print("Alan Jackson!")
gc = gc - 1
elif guess == 'Hint' or guess == 'hint':
print("Admin > " + str(hint))
elif guess == 'GCount' or guess == 'gcount':
print(str(gc) + "/" + str(guesses))
gc = gc - 1
elif gc == guesses:
print("You lose!")
exit()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
185a70f8f9c5531ed16ad5da0552a6bd1da2bbad
| 4,294
|
py
|
Python
|
pycspr/client/events.py
|
MrKrautee/casper-python-sdk
|
13e9da83de8892583da7ada91e4231e6f85e5e13
|
[
"Apache-2.0"
] | 2
|
2021-10-05T07:50:20.000Z
|
2021-10-06T09:58:19.000Z
|
pycspr/client/events.py
|
MrKrautee/casper-python-sdk
|
13e9da83de8892583da7ada91e4231e6f85e5e13
|
[
"Apache-2.0"
] | null | null | null |
pycspr/client/events.py
|
MrKrautee/casper-python-sdk
|
13e9da83de8892583da7ada91e4231e6f85e5e13
|
[
"Apache-2.0"
] | null | null | null |
# Map: channel type <-> event type.
_CHANNEL_TO_TYPE = {
NodeSseChannelType.deploys: {
NodeSseEventType.ApiVersion,
NodeSseEventType.DeployAccepted
},
NodeSseChannelType.main: {
NodeSseEventType.ApiVersion,
NodeSseEventType.BlockAdded,
NodeSseEventType.DeployProcessed,
NodeSseEventType.Fault,
NodeSseEventType.Step
},
NodeSseChannelType.sigs: {
NodeSseEventType.ApiVersion,
NodeSseEventType.FinalitySignature
}
}
| 34.629032
| 78
| 0.62599
|
import enum
import requests
import sseclient
from json import loads
from typing import Callable
from typing import Generator
from typing import Tuple
from pycspr.api import NodeConnectionInfo
class NodeSseChannelType(enum.Enum):
""" Enumeration over set of exposed node SEE event types. """
deploys = enum.auto()
main = enum.auto()
sigs = enum.auto()
class NodeSseEventType(enum.Enum):
""" Enumeration over set of exposed node SEE event types. """
ApiVersion = enum.auto()
BlockAdded = enum.auto()
DeployAccepted = enum.auto()
DeployProcessed = enum.auto()
Fault = enum.auto()
FinalitySignature = enum.auto()
Step = enum.auto()
# Map: channel type <-> event type.
_CHANNEL_TO_TYPE = {
NodeSseChannelType.deploys: {
NodeSseEventType.ApiVersion,
NodeSseEventType.DeployAccepted
},
NodeSseChannelType.main: {
NodeSseEventType.ApiVersion,
NodeSseEventType.BlockAdded,
NodeSseEventType.DeployProcessed,
NodeSseEventType.Fault,
NodeSseEventType.Step
},
NodeSseChannelType.sigs: {
NodeSseEventType.ApiVersion,
NodeSseEventType.FinalitySignature
}
}
class EventsClient:
""" Bind to a node's event stream. """
def __init__(self, node: NodeConnectionInfo):
"""
        EventsClient constructor.
:param node: Information required to connect to a node.
"""
self._node = node
def get_events(self, callback: Callable, channel_type: NodeSseChannelType,
event_type: NodeSseEventType, event_id: int = 0) -> None:
"""
Binds to a node's event stream - events are passed to callback for
processing.
:param callback: Callback to invoke whenever an event of relevant type
is received.
:param channel_type: Type of event channel to which to bind.
        :param event_type: Type of event to listen for
            (all if unspecified).
        :param event_id: Identifier of event from which to start stream
            listening.
"""
# validate that the channel supports the event type.
if channel_type not in _CHANNEL_TO_TYPE:
raise ValueError(f"Unsupported SSE channel: {channel_type.name}.")
if event_type not in _CHANNEL_TO_TYPE[channel_type]:
raise ValueError(f"Unsupported SSE channel/event permutation: "
f"{channel_type.name}:{event_type.name}.")
        # get and set SSE client
params = f"?start_from={event_id}" if event_id else ""
url = f"{self._node.address_sse}/{channel_type.name.lower()}{params}"
stream = requests.get(url, stream=True)
self._sse_client = sseclient.SSEClient(stream)
        for event_type, event_id, payload in self._yield_events():
            callback(channel_type, event_type, event_id, payload)
def _yield_events(self) -> Generator:
""" Yields events streaming from node. """
try:
for event in self._sse_client.events():
parsed = self._parse_event(event.id, loads(event.data))
if parsed:
yield parsed
except Exception as err:
try:
self._sse_client.close()
finally:
raise err
def _parse_event(self, event_id: int, payload: dict
) -> Tuple[NodeSseEventType, int, dict]:
""" Parses raw event data for upstream processing. """
events = {
"ApiVersion": NodeSseEventType.ApiVersion,
"BlockAdded": NodeSseEventType.BlockAdded,
"DeployProcessed": NodeSseEventType.DeployProcessed,
"Fault": NodeSseEventType.Fault,
"Step": NodeSseEventType.Step,
"DeployAccepted": NodeSseEventType.DeployAccepted,
"FinalitySignature": NodeSseEventType.FinalitySignature
}
for event_name in events:
if event_name in payload:
return events.get(event_name), event_id, payload
        # @TODO: process unknown event_type
        print(f"Unknown event occurred. event_id: {event_id}\n"
f"payload: {payload}")
return None
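# A minimal usage sketch for EventsClient: bind to the "main" channel and print
# every BlockAdded event. The NodeConnectionInfo instance is taken as an argument
# here because its constructor parameters depend on pycspr.api and are not shown
# in this module; the callback arguments mirror the get_events call above.
def _example_listen_for_blocks(node_info: NodeConnectionInfo) -> None:
    def _on_event(channel_type, event_type, event_id, payload):
        # Print a short summary of each received event.
        print(channel_type.name, event_type.name, event_id)
    client = EventsClient(node_info)
    client.get_events(
        callback=_on_event,
        channel_type=NodeSseChannelType.main,
        event_type=NodeSseEventType.BlockAdded,
        event_id=0,
    )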
| 0
| 0
| 0
| 3,517
| 0
| 0
| 0
| 16
| 246
|
562a96982f8632cd52e3eb1b793d70b9c95e52cb
| 2,400
|
py
|
Python
|
truck_microservice/truck/models.py
|
getnosleep/VirtualUnjam
|
bae08eec9756c963dab409c6e4e7397ef019cc8a
|
[
"MIT"
] | null | null | null |
truck_microservice/truck/models.py
|
getnosleep/VirtualUnjam
|
bae08eec9756c963dab409c6e4e7397ef019cc8a
|
[
"MIT"
] | null | null | null |
truck_microservice/truck/models.py
|
getnosleep/VirtualUnjam
|
bae08eec9756c963dab409c6e4e7397ef019cc8a
|
[
"MIT"
] | null | null | null |
# library imports
# property imports
# import socket # Could be the right solution...
| 37.5
| 135
| 0.737083
|
# library imports
from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
# property imports
from .properties import *
class TruckEntity(models.Model):
# Params
id = models.PositiveIntegerField(default=ID, primary_key=True)
address = models.TextField(default=ADDRESS_SELF, max_length=50)
length = models.FloatField(default=LENGTH)
distance = models.FloatField(default=DISTANCE)
# Movement
currentDistance = models.FloatField(default=.0)
currentRouteSection = models.FloatField(default=.0, validators=[MinValueValidator(.0)])
currentSpeed = models.FloatField(default=.0, validators=[MinValueValidator(MIN_SPEED), MaxValueValidator(MAX_SPEED)])
acceleration = models.FloatField(default=.0, validators=[MinValueValidator(MIN_ACCELERATION), MaxValueValidator(MAX_ACCELERATION)])
targetRouteSection = models.FloatField(default=20000.0, validators=[MinValueValidator(.0)])
targetSpeed = models.FloatField(default=.0, validators=[MinValueValidator(MIN_SPEED), MaxValueValidator(MAX_SPEED)])
# Convoy
leadingTruckAddress = models.TextField(default=None, blank=True, null=True, max_length=50)
frontTruckAddress = models.TextField(default=None, blank=True, null=True, max_length=50)
backTruckAddress = models.TextField(default=None, blank=True, null=True, max_length=50)
position = models.PositiveIntegerField(default=None, blank=True, null=True)
polling = models.BooleanField(default=False)
broken = models.BooleanField(default=False)
# Computed
def closing(self):
return self.distance > self.currentDistance and self.position and not self.leadingTruckAddress == ADDRESS_SELF
def accelerating(self):
return self.acceleration > .0
def decelerating(self):
return self.acceleration < .0
def movementStats(self):
return [self.currentRouteSection, self.currentSpeed, self.acceleration]
def targetStats(self):
return [self.targetRouteSection, self.targetSpeed]
@property
def minSpeed(self):
return MIN_SPEED
@property
def maxSpeed(self):
return MAX_SPEED
@property
def minAcceleration(self):
return MIN_ACCELERATION
@property
def maxAcceleration(self):
return MAX_ACCELERATION
# import socket # Could be the right solution...
| 0
| 176
| 0
| 1,980
| 0
| 0
| 0
| 61
| 89
|
96a9acf8dbfedc8c99c78a3eb9a85b6b5ebf731b
| 1,106
|
py
|
Python
|
x1/append_nop.py
|
pepepper/lab
|
63b15a9455681353560b176256b59715a44aa6e0
|
[
"CC0-1.0"
] | null | null | null |
x1/append_nop.py
|
pepepper/lab
|
63b15a9455681353560b176256b59715a44aa6e0
|
[
"CC0-1.0"
] | 1
|
2021-06-28T06:30:13.000Z
|
2021-06-28T06:30:13.000Z
|
x1/append_nop.py
|
pepepper/lab
|
63b15a9455681353560b176256b59715a44aa6e0
|
[
"CC0-1.0"
] | 1
|
2021-06-27T20:36:07.000Z
|
2021-06-27T20:36:07.000Z
|
#!/usr/bin/env python3
main()
| 30.722222
| 139
| 0.512658
|
#!/usr/bin/env python3
import sys
def main():
    if len(sys.argv) < 7:
print(f'Usage: {sys.argv[0]} top.bin bottom_loop.bin bottom_reset.bin out.bin reset_fill_from reset_fill_to', file=sys.stderr)
sys.exit(1)
with open(sys.argv[1], 'rb') as tf, open(sys.argv[2], 'rb') as btmlf, open(sys.argv[3], 'rb') as btmrf, open(sys.argv[4], 'wb') as out:
top, btml, btmr = tf.read(), btmlf.read(), btmrf.read()
fill_from, fill_to = int(sys.argv[5]), int(sys.argv[6])
out.write(top)
if fill_from == 0:
blen, b = len(btmr), btmr
else:
blen, b = len(btml), btml
for i in range((1024 * 64 - len(top) - blen) // 4):
out.write(b'\x00\x00\xa0\xe1')
out.write(b)
for i in range(1024 * 1024 * 15 // (1024 * 64) - 1):
if fill_from <= i+1 < fill_to:
blen, b = len(btmr), btmr
else:
blen, b = len(btml), btml
for j in range((1024 * 64 - blen) // 4):
out.write(b'\x00\x00\xa0\xe1')
out.write(b)
main()
| 0
| 0
| 0
| 0
| 0
| 1,038
| 0
| -11
| 46
|
818ccd30f153dce67753185ecd7905c078fbf3f3
| 4,310
|
py
|
Python
|
tests/test_task.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
tests/test_task.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
tests/test_task.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
import unittest
TASKS = ['hello world',
'I hate VMware',
'me too',
'This is a very long task name, a very very long name',
'school sucks',
'one more',
'last one']
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testHosts']
unittest.main()
| 35.04065
| 91
| 0.655452
|
import unittest, types
from pyvisdk import Vim
from tests.common import get_options
from pyvisdk.facade.task import TaskManager
def nothing():
pass
def random_string(n):
import random
import string
return ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(n))
TASKS = ['hello world',
'I hate VMware',
'me too',
'This is a very long task name, a very very long name',
'school sucks',
'one more',
'last one']
class Test_Task(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.options = get_options()
cls.vim = Vim(cls.options.server)
cls.vim.login(cls.options.username, cls.options.password)
cls.manager = TaskManager(cls.vim)
cls.obj = cls.vim.getHostSystems()[0]
cls.cleanUpStaleTasks()
@classmethod
def cleanUpStaleTasks(cls):
for task in cls.manager._managed_object.recentTask:
if task.info.state in ['running', 'queued']:
task.SetTaskState('error', None, None)
@classmethod
def tearDownClass(cls):
cls.vim.logout()
def test_task(self):
with self.manager.task(self.obj, TASKS[0]):
pass
def test_task__error(self):
with self.assertRaises(Exception):
with self.manager.task(self.obj, TASKS[1]):
raise Exception()
def test_wrap(self):
task = self.manager.task(self.obj, TASKS[2])
func = task.wraps(nothing)
func()
def test_step(self):
task = self.manager.task(self.obj, TASKS[3])
task.step([nothing, nothing, nothing])
def test_step_manually(self):
with self.manager.task(self.obj, TASKS[4]) as task:
task.update_progress(10)
task.update_progress(20)
task.update_progress(90)
class Test_TaskWait(Test_Task):
def test_waitForTask_ref(self):
dummy_task = self.manager.task(self.obj, TASKS[5])
task = dummy_task._managed_object
task.SetTaskState("success")
self.assertEquals(task.core.waitForTask(task.ref), 'success')
def test_waitForTask_regular(self):
dummy_task = self.manager.task(self.obj, TASKS[5])
task = dummy_task._managed_object
task.SetTaskState("success")
self.assertEquals(task.core.waitForTask(task), 'success')
def test_waitForTask_fail(self):
from pyvisdk.exceptions import VisdkTaskError
dummy_task = self.manager.task(self.obj, TASKS[5])
task = dummy_task._managed_object
task.SetTaskState("error", None, None)
with self.assertRaises(VisdkTaskError):
task.core.waitForTask(task)
def _change_task(self, task, new_state):
task.SetTaskState(new_state, None, None)
def _waitForTask_timeout(self, is_running, wait_timeout, how_long=5):
from pyvisdk.exceptions import VisdkTaskError
import threading
dummy_task = self.manager.task(self.obj, TASKS[5])
task = dummy_task._managed_object
if is_running:
task.SetTaskState("running", None, None)
timer = threading.Timer(how_long, self._change_task, args=[task, "success"])
timer.start()
if wait_timeout is not None and how_long > wait_timeout:
with self.assertRaises(VisdkTaskError):
task.core.waitForTask(task, wait_timeout)
else:
self.assertEquals(task.core.waitForTask(task, wait_timeout), 'success')
timer.join()
def test_waitForTask_timeout_running_infinite_wait(self):
self._waitForTask_timeout(True, None)
def test_waitForTask_timeout_queued_infinite_wait(self):
self._waitForTask_timeout(False, None)
def test_waitForTask_timeout_running_long_wait(self):
self._waitForTask_timeout(True, 10)
def test_waitForTask_timeout_queued_long_wait(self):
self._waitForTask_timeout(False, 10)
def test_waitForTask_timeout_running_short_wait(self):
self._waitForTask_timeout(True, 1)
def test_waitForTask_timeout_queued_short_wait(self):
self._waitForTask_timeout(False, 1)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testHosts']
unittest.main()
| 0
| 520
| 0
| 3,127
| 0
| 130
| 0
| 46
| 170
|
30eaa2105d07e43d9a8ec898704e63e5994f330e
| 12,680
|
py
|
Python
|
MyUtils/ImageProcessing.py
|
mairob/Semantic-segmentation-and-Depth-estimation
|
d9624cdbde000a0c41e1025f89aa6edfdf947045
|
[
"MIT"
] | 6
|
2018-06-15T21:18:58.000Z
|
2021-07-05T08:41:21.000Z
|
MyUtils/ImageProcessing.py
|
mairob/Semantic-segmentation-and-Depth-estimation
|
d9624cdbde000a0c41e1025f89aa6edfdf947045
|
[
"MIT"
] | null | null | null |
MyUtils/ImageProcessing.py
|
mairob/Semantic-segmentation-and-Depth-estimation
|
d9624cdbde000a0c41e1025f89aa6edfdf947045
|
[
"MIT"
] | 4
|
2018-06-15T21:19:08.000Z
|
2021-07-05T08:41:23.000Z
|
#! /usr/bin/python3
#############################################################
### Helper File for TFRecords and Image manipulation ########
#############################################################
import tensorflow as tf
import numpy as np
## Label mapping for Cityscapes (34 classes)
Cityscapes34_ID_2_RGB = [(0,0,0), (0,0,0), (0,0,0), (0,0,0), (0,0,0)
# 0=unlabeled, ego vehicle, rectification border, out of roi, static
,(111,74,0),(81,0,81),(128,64,128),(244,35,232),(250,170,160)
# 5=dynamic, 6=ground, 7=road, 8=sidewalk, 9=parking
,(230,150,140), (70,70,70), (102,102,156),(190,153,153),(180,165,180)
# 10=rail track, 11=building, 12=wall, 13=fence, 14=guard rail
,(150,100,100),(150,120, 90),(153,153,153),(153,153,153),(250,170, 30)
# 15= bridge, 16=tunnel, 17=pole, 18=polegroup, 19=traffic light
,(220,220,0),(107,142,35),(152,251,152),(70,130,180),(220,20,60)
# 20=traffic sign 21=vegetation, 22=terrain, 23=sky, 24=person
,(255,0,0),(0,0,142),(0,0,70),(0,60,100),(0,0,90), (0,0,110), (0,80,100), (0,0,230), (119, 11, 32)]
# 25=rider, 26=car, 27=truck, 28=bus, 29=caravan, 30=trailer, 31=train, 32=motorcycle, 33=bicycle
## Label mapping for Cityscapes (19 classes + '255'=wildcard)
Cityscapes20_ID_2_RGB = [(128,64,128),(244,35,232), (70,70,70), (102,102,156),(190,153,153)
#0=road, 1=sidewalk, 2=building, 3=wall, 4=fence
,(153,153,153), (250,170, 30), (220,220,0),(107,142,35),(152,251,152),(70,130,180),(220,20,60)
# 5= pole, 6=traffic light, 7= traffic sign, 8= vegetation,9= terrain, 10=sky, 11=person
,(255,0,0),(0,0,142),(0,0,70),(0,60,100), (0,80,100), (0,0,230), (119, 11, 32), (255,255,255)]
# 12=rider, 13=car, 14=truck, 15=bus, 16=train, 17=motorcycle, 18=bicycle, #255 --cast via tf.minimum
Pred_2_ID = [7, 8, 11, 12, 13
#0=road, 1=sidewalk, 2=building, 3=wall, 4=fence
,17 , 19, 20, 21, 22, 23, 24
# 5= pole, 6=traffic light, 7= traffic sign, 8= vegetation,9= terrain, 10=sky, 11=person
,25 , 26, 27, 28, 31, 32, 33, -1]
# 12=rider, 13=car, 14=truck, 15=bus, 16=train, 17=motorcycle, 18=bicycle, #255 --cast via tf.minimum
##################################################################################
################## Functions for Image Preprocessing #############################
##################################################################################
def read_and_decode(filename_queue, hasDisparity=False, constHeight=1024, constWidth=1024):
"""Decode images from TF-Records Bytestream. TF-Record must be compiled with the "make_tf_record.py"-script!
Args:
        filename_queue: String representation of TF-Records (returned from tf.train.string_input_producer([TFRECORD_FILENAME]))
        hasDisparity: Boolean, needed for processing disparity maps
constHeight, constWidth: Expected shapes of Images to decode
Returns:
Decoded image and mask
"""
with tf.name_scope("Input_Decoder"):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if not hasDisparity:
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'mask_raw': tf.FixedLenFeature([], tf.string)
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
annotation = tf.decode_raw(features['mask_raw'], tf.uint8)
image_shape = tf.stack([constHeight, constWidth, 3])
annotation_shape = tf.stack([constHeight, constWidth, 1])
image = tf.reshape(image, image_shape)
annotation = tf.reshape(annotation, annotation_shape)
return image, annotation
else:
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'mask_raw': tf.FixedLenFeature([], tf.string),
'disp_raw': tf.FixedLenFeature([], tf.string)
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
annotation = tf.decode_raw(features['mask_raw'], tf.uint8)
disparity = tf.decode_raw(features['disp_raw'], tf.int16) #uint6
image_shape = tf.stack([constHeight, constWidth, 3])
masks_shape = tf.stack([constHeight, constWidth, 1])
image = tf.reshape(image, image_shape)
annotation = tf.reshape(annotation, masks_shape)
disparity = tf.reshape(disparity, masks_shape)
return image, annotation, disparity
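# A minimal TF1-style input-pipeline sketch built on read_and_decode. The file name
# "train.tfrecords" is only a placeholder for a record written by the
# "make_tf_record.py" script mentioned above; batch and capacity values are
# illustrative choices, not values prescribed by this module.
def _example_input_pipeline(tfrecord_path="train.tfrecords", batch_size=4):
    filename_queue = tf.train.string_input_producer([tfrecord_path])
    image, annotation = read_and_decode(filename_queue)
    # Shuffle and batch the decoded tensors for training.
    return tf.train.shuffle_batch([image, annotation],
                                  batch_size=batch_size,
                                  capacity=128,
                                  num_threads=2,
                                  min_after_dequeue=16)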
def decode_labels(mask, num_images=1, num_classes=20, label=Cityscapes20_ID_2_RGB):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
label: List, which value to assign for different classes
Returns:
A batch with num_images RGB images of the same size as the input.
"""
from PIL import Image
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_,j_] = label[k]
outputs[i] = np.array(img)
return outputs
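# A small sketch of how decode_labels is typically called: reduce the network
# output with argmax, keep an explicit channel axis, then map class ids to RGB.
# The random logits below are placeholders for real predictions.
def _example_decode_labels():
    fake_logits = np.random.randn(1, 8, 8, 20)
    fake_pred = np.argmax(fake_logits, axis=-1)[..., np.newaxis]
    rgb = decode_labels(fake_pred, num_images=1, num_classes=20)
    return rgb  # shape (1, 8, 8, 3), dtype uint8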
from tensorflow.python.ops import control_flow_ops
def flip_randomly_left_right_image_with_annotation(image_tensor, annotation_tensor):
"""Flips an image randomly and applies the same to an annotation tensor.
Args:
image_tensor, annotation_tensor: 3-D-Tensors
Returns:
Flipped image and gt.
"""
random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
fn1=lambda: tf.image.flip_left_right(image_tensor),
fn2=lambda: image_tensor)
randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
fn1=lambda: tf.image.flip_left_right(annotation_tensor),
fn2=lambda: annotation_tensor)
return randomly_flipped_img, randomly_flipped_annotation
def random_crop_and_pad_image_and_labels(image, sem_labels, dep_labels, size):
"""Randomly crops `image` together with `labels`.
Args:
image: A Tensor with shape [D_1, ..., D_K, N]
labels: A Tensor with shape [D_1, ..., D_K, M]
size: A Tensor with shape [K] indicating the crop size.
Returns:
A tuple of (cropped_image, cropped_label).
"""
combined = tf.concat([image, sem_labels, dep_labels], axis=2)
print("combined : ", str(combined.get_shape()[:]))
combined_crop = tf.random_crop(combined, [size[0], size[1],5])
print("combined_crop : ", str(combined_crop.get_shape()[:]))
channels = tf.unstack(combined_crop, axis=-1)
image = tf.stack([channels[0],channels[1],channels[2]], axis=-1)
sem_label = tf.expand_dims(channels[3], axis=2)
dep_label = tf.expand_dims(channels[4], axis=2)
return image, sem_label, dep_label
##################################################################################
################## Functions for Image Postprocessing #############################
##################################################################################
def generate_prediction_Img(mask, num_images=1, num_classes= 20, label=Pred_2_ID):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
label: List, which value to assign for different classes
Returns:
A batch with num_images RGB images of the same size as the input.
"""
from PIL import Image
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w), dtype=np.uint8)
for i in range(num_images):
img = Image.new('L', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_,j_] = label[k]
outputs[i] = np.array(img)
return outputs
def plot_depthmap(mask):
"""Network output as [w, h, 1]-Tensor is transformed to a heatmap for easier visual interpretation
Args:
mask: result of inference (depth = 1)
Returns:
        An RGB image (representation of the depth prediction as a heatmap).
"""
import matplotlib.pyplot as plt
cmap = plt.get_cmap('hot')
gray = mask[0,:,:,0].astype(np.uint16)
divisor = np.max(gray) - np.min(gray)
if divisor != 0:
normed = (gray - np.min(gray)) / divisor
else:
normed = (gray - np.min(gray))
rgba_img = cmap(normed)
rgb_img = np.delete(rgba_img, 3,2)
return (65535 * rgb_img).astype(np.float32)
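# A small sketch of how plot_depthmap is typically called: it expects a
# [batch, height, width, 1] depth/disparity prediction and returns an RGB heatmap.
# The random values below stand in for a real network output.
def _example_plot_depthmap():
    fake_depth = np.random.randint(0, 1000, size=(1, 64, 64, 1))
    heatmap = plot_depthmap(fake_depth)
    return heatmap  # shape (64, 64, 3), dtype float32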
| 44.181185
| 133
| 0.593612
|
#! /usr/bin/python3
#############################################################
### Helper File for TFRecords and Image manipulation ########
#############################################################
import tensorflow as tf
import numpy as np
## Label mapping for Cityscapes (34 classes)
Cityscapes34_ID_2_RGB = [(0,0,0), (0,0,0), (0,0,0), (0,0,0), (0,0,0)
# 0=unlabeled, ego vehicle, rectification border, out of roi, static
,(111,74,0),(81,0,81),(128,64,128),(244,35,232),(250,170,160)
# 5=dynamic, 6=ground, 7=road, 8=sidewalk, 9=parking
,(230,150,140), (70,70,70), (102,102,156),(190,153,153),(180,165,180)
# 10=rail track, 11=building, 12=wall, 13=fence, 14=guard rail
,(150,100,100),(150,120, 90),(153,153,153),(153,153,153),(250,170, 30)
# 15= bridge, 16=tunnel, 17=pole, 18=polegroup, 19=traffic light
,(220,220,0),(107,142,35),(152,251,152),(70,130,180),(220,20,60)
# 20=traffic sign 21=vegetation, 22=terrain, 23=sky, 24=person
,(255,0,0),(0,0,142),(0,0,70),(0,60,100),(0,0,90), (0,0,110), (0,80,100), (0,0,230), (119, 11, 32)]
# 25=rider, 26=car, 27=truck, 28=bus, 29=caravan, 30=trailer, 31=train, 32=motorcycle, 33=bicycle
## Label mapping for Cityscapes (19 classes + '255'=wildcard)
Cityscapes20_ID_2_RGB = [(128,64,128),(244,35,232), (70,70,70), (102,102,156),(190,153,153)
#0=road, 1=sidewalk, 2=building, 3=wall, 4=fence
,(153,153,153), (250,170, 30), (220,220,0),(107,142,35),(152,251,152),(70,130,180),(220,20,60)
# 5= pole, 6=traffic light, 7= traffic sign, 8= vegetation,9= terrain, 10=sky, 11=person
,(255,0,0),(0,0,142),(0,0,70),(0,60,100), (0,80,100), (0,0,230), (119, 11, 32), (255,255,255)]
# 12=rider, 13=car, 14=truck, 15=bus, 16=train, 17=motorcycle, 18=bicycle, #255 --cast via tf.minimum
Pred_2_ID = [7, 8, 11, 12, 13
#0=road, 1=sidewalk, 2=building, 3=wall, 4=fence
,17 , 19, 20, 21, 22, 23, 24
# 5= pole, 6=traffic light, 7= traffic sign, 8= vegetation,9= terrain, 10=sky, 11=person
,25 , 26, 27, 28, 31, 32, 33, -1]
# 12=rider, 13=car, 14=truck, 15=bus, 16=train, 17=motorcycle, 18=bicycle, #255 --cast via tf.minimum
##################################################################################
################## Functions for Image Preprocessing #############################
##################################################################################
def read_and_decode(filename_queue, hasDisparity=False, constHeight=1024, constWidth=1024):
"""Decode images from TF-Records Bytestream. TF-Record must be compiled with the "make_tf_record.py"-script!
Args:
        filename_queue: String representation of TF-Records (returned from tf.train.string_input_producer([TFRECORD_FILENAME]))
        hasDisparity: Boolean, needed for processing disparity maps
constHeight, constWidth: Expected shapes of Images to decode
Returns:
Decoded image and mask
"""
with tf.name_scope("Input_Decoder"):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
if not hasDisparity:
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'mask_raw': tf.FixedLenFeature([], tf.string)
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
annotation = tf.decode_raw(features['mask_raw'], tf.uint8)
image_shape = tf.stack([constHeight, constWidth, 3])
annotation_shape = tf.stack([constHeight, constWidth, 1])
image = tf.reshape(image, image_shape)
annotation = tf.reshape(annotation, annotation_shape)
return image, annotation
else:
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'mask_raw': tf.FixedLenFeature([], tf.string),
'disp_raw': tf.FixedLenFeature([], tf.string)
})
image = tf.decode_raw(features['image_raw'], tf.uint8)
annotation = tf.decode_raw(features['mask_raw'], tf.uint8)
disparity = tf.decode_raw(features['disp_raw'], tf.int16) #uint6
image_shape = tf.stack([constHeight, constWidth, 3])
masks_shape = tf.stack([constHeight, constWidth, 1])
image = tf.reshape(image, image_shape)
annotation = tf.reshape(annotation, masks_shape)
disparity = tf.reshape(disparity, masks_shape)
return image, annotation, disparity
def decode_labels(mask, num_images=1, num_classes=20, label=Cityscapes20_ID_2_RGB):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
label: List, which value to assign for different classes
Returns:
A batch with num_images RGB images of the same size as the input.
"""
from PIL import Image
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_,j_] = label[k]
outputs[i] = np.array(img)
return outputs
def apply_with_random_selector(x, func, num_cases):
from tensorflow.python.ops import control_flow_ops
with tf.name_scope("Random_Selector"):
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
with tf.name_scope("Color_distortion"):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
from tensorflow.python.ops import control_flow_ops
def flip_randomly_left_right_image_with_annotation(image_tensor, annotation_tensor):
"""Flips an image randomly and applies the same to an annotation tensor.
Args:
image_tensor, annotation_tensor: 3-D-Tensors
Returns:
        Flipped image and annotation.
"""
random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])
randomly_flipped_img = control_flow_ops.cond(pred=tf.equal(random_var, 0),
fn1=lambda: tf.image.flip_left_right(image_tensor),
fn2=lambda: image_tensor)
randomly_flipped_annotation = control_flow_ops.cond(pred=tf.equal(random_var, 0),
fn1=lambda: tf.image.flip_left_right(annotation_tensor),
fn2=lambda: annotation_tensor)
return randomly_flipped_img, randomly_flipped_annotation
def random_crop_and_pad_image_and_labels(image, sem_labels, dep_labels, size):
"""Randomly crops `image` together with `labels`.
Args:
image: A Tensor with shape [D_1, ..., D_K, N]
labels: A Tensor with shape [D_1, ..., D_K, M]
size: A Tensor with shape [K] indicating the crop size.
Returns:
A tuple of (cropped_image, cropped_label).
"""
combined = tf.concat([image, sem_labels, dep_labels], axis=2)
print("combined : ", str(combined.get_shape()[:]))
combined_crop = tf.random_crop(combined, [size[0], size[1],5])
print("combined_crop : ", str(combined_crop.get_shape()[:]))
channels = tf.unstack(combined_crop, axis=-1)
image = tf.stack([channels[0],channels[1],channels[2]], axis=-1)
sem_label = tf.expand_dims(channels[3], axis=2)
dep_label = tf.expand_dims(channels[4], axis=2)
return image, sem_label, dep_label
def preprocessImage(image, central_crop_fraction= 0.875):
with tf.name_scope("Preprocessing"):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
distorted_image = apply_with_random_selector( image, lambda x, ordering: distort_color(x, ordering, fast_mode=True),num_cases=4)
image = tf.subtract(distorted_image, 0.5)
image = tf.multiply(image, 2.0)
return image
##################################################################################
################## Functions for Image Postprocessing #############################
##################################################################################
def generate_prediction_Img(mask, num_images=1, num_classes= 20, label=Pred_2_ID):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
label: List, which value to assign for different classes
Returns:
A batch with num_images RGB images of the same size as the input.
"""
from PIL import Image
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w), dtype=np.uint8)
for i in range(num_images):
img = Image.new('L', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_,j_] = label[k]
outputs[i] = np.array(img)
return outputs
def plot_depthmap(mask):
"""Network output as [w, h, 1]-Tensor is transformed to a heatmap for easier visual interpretation
Args:
mask: result of inference (depth = 1)
Returns:
        An RGB image (representation of the depth prediction as a heatmap).
"""
import matplotlib.pyplot as plt
cmap = plt.get_cmap('hot')
gray = mask[0,:,:,0].astype(np.uint16)
divisor = np.max(gray) - np.min(gray)
if divisor != 0:
normed = (gray - np.min(gray)) / divisor
else:
normed = (gray - np.min(gray))
rgba_img = cmap(normed)
rgb_img = np.delete(rgba_img, 3,2)
return (65535 * rgb_img).astype(np.float32)
| 0
| 0
| 0
| 0
| 0
| 2,685
| 0
| 0
| 75
|
6ea46e439fa32f14f73fb96fd66ffd82937d8f5e
| 3,149
|
py
|
Python
|
analyser/analyser.py
|
yimig/pyfilm-spliter
|
baf586e976a7a99373a13e1923f250eaf70f77b8
|
[
"Apache-2.0"
] | null | null | null |
analyser/analyser.py
|
yimig/pyfilm-spliter
|
baf586e976a7a99373a13e1923f250eaf70f77b8
|
[
"Apache-2.0"
] | null | null | null |
analyser/analyser.py
|
yimig/pyfilm-spliter
|
baf586e976a7a99373a13e1923f250eaf70f77b8
|
[
"Apache-2.0"
] | null | null | null |
import data_connector.model_sentence
import data_connector.model_word
import sys
sys.path.append("../")
| 36.616279
| 119
| 0.58463
|
import analyser.caption_factory
import data_connector.model_sentence
import data_connector.model_word
import data_connector.data_manager
from xml.etree.ElementTree import *
import re
import sys
sys.path.append("../")
from log_writer import LogWriter
class Analyser:
@property
def sentence_list(self):
return self.__sentence_list
@sentence_list.setter
def sentence_list(self, value):
self.__sentence_list = value
@property
def word_list(self):
return self.__word_list
@word_list.setter
def word_list(self, value):
self.__word_list = value
def __init__(self, start_id: int = 0):
try:
self.__db_setting = self.__decode_xml()
            print('Parsing caption files and splitting the video....')
self.__sentence_list = analyser.caption_factory.CaptionFactory.load_dir(self.__caption_path, start_id,
self.__audio_path)
self.__word_list = []
try:
dm = data_connector.data_manager.DataManager(self.__db_setting)
                print('Analysing example sentences and uploading....')
for sentence in self.__sentence_list:
self.__split_word(sentence)
dm.execute_sql(sentence.to_sql())
                print('Uploading analysis results....')
for word in self.__word_list:
dm.execute_sql(word.to_sql())
dm.close_connection()
except Exception as e:
                LogWriter.write_warning(e, "Failed to connect to the database")
raise e
except Exception as e:
            LogWriter.write_warning(e, "Failed to read the configuration file")
raise e
        print('Job finished.')
    # Split words out of each example sentence and save them
def __split_word(self, sentence: data_connector.model_sentence.ModelSentence):
words = sentence.s_en.split(' ')
dm = data_connector.data_manager.DataManager(self.__db_setting)
for word in words:
clean_word = re.sub('[,.!?:]', '', word.lower())
trans = dm.get_translation(clean_word)
if trans:
is_include = False
for word_model in self.__word_list:
if clean_word == word_model.word:
word_model.sentences += '|' + str(sentence.s_id)
is_include = True
if not is_include:
self.__word_list.append(data_connector.model_word.ModelWord(clean_word, str(sentence.s_id), trans))
dm.close_connection()
    # Parse the xml settings file
def __decode_xml(self):
db_setting = {}
setting = parse('.\\setting.xml')
for item in setting.iterfind('path'):
self.__caption_path = item.findtext('caption_path')
self.__audio_path = item.findtext('audio_path')
for item in setting.iterfind('database'):
db_setting.update({'server': item.findtext('server')})
db_setting.update({'user': item.findtext('user')})
db_setting.update({'password': item.findtext('password')})
db_setting.update({'database': item.findtext('db_name')})
return db_setting
| 180
| 232
| 0
| 2,583
| 0
| 0
| 0
| 36
| 133
|
90c849bc94c70a99e4e373256b50144ecdba6cd3
| 13,498
|
py
|
Python
|
python38/Lib/site-packages/openpyxl/reader/worksheet.py
|
alextusinean/TikTokCommentScraper
|
94ae7d5e20129f33a18857197794758b62909b22
|
[
"MIT"
] | 28
|
2021-07-23T16:08:55.000Z
|
2022-03-15T16:19:32.000Z
|
python38/Lib/site-packages/openpyxl/reader/worksheet.py
|
alextusinean/TikTokCommentScraper
|
94ae7d5e20129f33a18857197794758b62909b22
|
[
"MIT"
] | 4
|
2021-10-08T03:28:30.000Z
|
2022-02-08T13:01:40.000Z
|
python38/Lib/site-packages/openpyxl/reader/worksheet.py
|
alextusinean/TikTokCommentScraper
|
94ae7d5e20129f33a18857197794758b62909b22
|
[
"MIT"
] | 11
|
2021-07-15T04:40:27.000Z
|
2022-03-19T14:01:12.000Z
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
"""Reader for a single worksheet."""
from io import BytesIO
# compatibility imports
# package imports
from openpyxl.worksheet import Worksheet
from openpyxl.worksheet.iter_worksheet import IterableWorksheet
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
pass
return xml_source
def read_worksheet(xml_source, parent, preset_title, shared_strings,
style_table, color_index=None, worksheet_path=None):
"""Read an xml worksheet"""
if worksheet_path:
ws = IterableWorksheet(parent, preset_title,
worksheet_path, xml_source, shared_strings, style_table)
else:
ws = Worksheet(parent, preset_title)
fast_parse(ws, xml_source, shared_strings, style_table, color_index)
return ws
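# A quick check of the contract documented in _get_xml_iter's docstring: whatever
# the input form (str, bytes or an already-open stream), a file-like object
# positioned at the start is returned.
def _example_get_xml_iter():
    assert _get_xml_iter("<root/>").read() == b"<root/>"
    assert _get_xml_iter(b"<root/>").read() == b"<root/>"
    assert _get_xml_iter(BytesIO(b"<root/>")).read() == b"<root/>"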
| 41.027356
| 106
| 0.602682
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
"""Reader for a single worksheet."""
from io import BytesIO
# compatibility imports
from openpyxl.xml.functions import iterparse
# package imports
from openpyxl.cell import Cell
from openpyxl.worksheet import Worksheet, ColumnDimension, RowDimension
from openpyxl.worksheet.iter_worksheet import IterableWorksheet
from openpyxl.worksheet.page import PageMargins, PrintOptions, PageSetup
from openpyxl.worksheet.protection import SheetProtection
from openpyxl.worksheet.views import SheetView
from openpyxl.xml.constants import SHEET_MAIN_NS, REL_NS
from openpyxl.xml.functions import safe_iterator
from openpyxl.styles import Color
from openpyxl.formatting import ConditionalFormatting
from openpyxl.worksheet.properties import parse_sheetPr
from openpyxl.utils import (
coordinate_from_string,
get_column_letter,
column_index_from_string
)
def _get_xml_iter(xml_source):
"""
Possible inputs: strings, bytes, members of zipfile, temporary file
Always return a file like object
"""
if not hasattr(xml_source, 'read'):
try:
xml_source = xml_source.encode("utf-8")
except (AttributeError, UnicodeDecodeError):
pass
return BytesIO(xml_source)
else:
try:
xml_source.seek(0)
except:
pass
return xml_source
class WorkSheetParser(object):
COL_TAG = '{%s}col' % SHEET_MAIN_NS
ROW_TAG = '{%s}row' % SHEET_MAIN_NS
CELL_TAG = '{%s}c' % SHEET_MAIN_NS
VALUE_TAG = '{%s}v' % SHEET_MAIN_NS
FORMULA_TAG = '{%s}f' % SHEET_MAIN_NS
MERGE_TAG = '{%s}mergeCell' % SHEET_MAIN_NS
INLINE_STRING = "{%s}is/{%s}t" % (SHEET_MAIN_NS, SHEET_MAIN_NS)
INLINE_RICHTEXT = "{%s}is/{%s}r/{%s}t" % (SHEET_MAIN_NS, SHEET_MAIN_NS, SHEET_MAIN_NS)
def __init__(self, ws, xml_source, shared_strings, style_table, color_index=None):
self.ws = ws
self.source = xml_source
self.shared_strings = shared_strings
self.style_table = style_table
self.color_index = color_index
self.guess_types = ws.parent._guess_types
self.data_only = ws.parent.data_only
self.styles = [dict(style) for style in self.ws.parent._cell_styles]
self.keep_vba = ws.parent.vba_archive is not None
def parse(self):
dispatcher = {
'{%s}mergeCells' % SHEET_MAIN_NS: self.parse_merge,
'{%s}col' % SHEET_MAIN_NS: self.parse_column_dimensions,
'{%s}row' % SHEET_MAIN_NS: self.parse_row_dimensions,
'{%s}printOptions' % SHEET_MAIN_NS: self.parse_print_options,
'{%s}pageMargins' % SHEET_MAIN_NS: self.parse_margins,
'{%s}pageSetup' % SHEET_MAIN_NS: self.parse_page_setup,
'{%s}headerFooter' % SHEET_MAIN_NS: self.parse_header_footer,
'{%s}conditionalFormatting' % SHEET_MAIN_NS: self.parser_conditional_formatting,
'{%s}autoFilter' % SHEET_MAIN_NS: self.parse_auto_filter,
'{%s}sheetProtection' % SHEET_MAIN_NS: self.parse_sheet_protection,
'{%s}dataValidations' % SHEET_MAIN_NS: self.parse_data_validation,
'{%s}sheetPr' % SHEET_MAIN_NS: self.parse_properties,
'{%s}legacyDrawing' % SHEET_MAIN_NS: self.parse_legacy_drawing,
'{%s}sheetViews' % SHEET_MAIN_NS: self.parse_sheet_views,
}
tags = dispatcher.keys()
stream = _get_xml_iter(self.source)
it = iterparse(stream, tag=tags)
for _, element in it:
tag_name = element.tag
if tag_name in dispatcher:
dispatcher[tag_name](element)
element.clear()
# Handle parsed conditional formatting rules together.
if len(self.ws.conditional_formatting.parse_rules):
self.ws.conditional_formatting.update(self.ws.conditional_formatting.parse_rules)
def parse_cell(self, element):
value = element.find(self.VALUE_TAG)
if value is not None:
value = value.text
formula = element.find(self.FORMULA_TAG)
data_type = element.get('t', 'n')
coordinate = element.get('r')
style_id = element.get('s')
# assign formula to cell value unless only the data is desired
if formula is not None and not self.data_only:
data_type = 'f'
if formula.text:
value = "=" + formula.text
else:
value = "="
formula_type = formula.get('t')
if formula_type:
self.ws.formula_attributes[coordinate] = {'t': formula_type}
si = formula.get('si') # Shared group index for shared formulas
if si:
self.ws.formula_attributes[coordinate]['si'] = si
ref = formula.get('ref') # Range for shared formulas
if ref:
self.ws.formula_attributes[coordinate]['ref'] = ref
style = {}
if style_id is not None:
style_id = int(style_id)
style = self.styles[style_id]
column, row = coordinate_from_string(coordinate)
cell = Cell(self.ws, column, row, **style)
self.ws._add_cell(cell)
if value is not None:
if data_type == 'n':
value = cell._cast_numeric(value)
elif data_type == 'b':
value = bool(int(value))
elif data_type == 's':
value = self.shared_strings[int(value)]
elif data_type == 'str':
data_type = 's'
else:
if data_type == 'inlineStr':
data_type = 's'
child = element.find(self.INLINE_STRING)
if child is None:
child = element.find(self.INLINE_RICHTEXT)
if child is not None:
value = child.text
if self.guess_types or value is None:
cell.value = value
else:
cell._value=value
cell.data_type=data_type
def parse_merge(self, element):
for mergeCell in safe_iterator(element, ('{%s}mergeCell' % SHEET_MAIN_NS)):
self.ws.merge_cells(mergeCell.get('ref'))
def parse_column_dimensions(self, col):
min = int(col.get('min')) if col.get('min') else 1
max = int(col.get('max')) if col.get('max') else 1
# Ignore ranges that go up to the max column 16384. Columns need to be extended to handle
# ranges without creating an entry for every single one.
if max != 16384:
for colId in range(min, max + 1):
column = get_column_letter(colId)
attrs = dict(col.attrib)
attrs['index'] = column
attrs['worksheet'] = self.ws
if column not in self.ws.column_dimensions:
dim = ColumnDimension(**attrs)
self.ws.column_dimensions[column] = dim
def parse_row_dimensions(self, row):
attrs = dict(row.attrib)
attrs['worksheet'] = self.ws
dim = RowDimension(**attrs)
self.ws.row_dimensions[dim.index] = dim
for cell in safe_iterator(row, self.CELL_TAG):
self.parse_cell(cell)
def parse_print_options(self, element):
self.ws.print_options = PrintOptions(**element.attrib)
def parse_margins(self, element):
self.page_margins = PageMargins(**element.attrib)
def parse_page_setup(self, element):
id_key = '{%s}id' % REL_NS
if id_key in element.attrib.keys():
element.attrib.pop(id_key)
self.ws.page_setup = PageSetup(**element.attrib)
def parse_header_footer(self, element):
oddHeader = element.find('{%s}oddHeader' % SHEET_MAIN_NS)
if oddHeader is not None and oddHeader.text is not None:
self.ws.header_footer.setHeader(oddHeader.text)
oddFooter = element.find('{%s}oddFooter' % SHEET_MAIN_NS)
if oddFooter is not None and oddFooter.text is not None:
self.ws.header_footer.setFooter(oddFooter.text)
def parser_conditional_formatting(self, element):
range_string = element.get('sqref')
cfRules = element.findall('{%s}cfRule' % SHEET_MAIN_NS)
if range_string not in self.ws.conditional_formatting.parse_rules:
self.ws.conditional_formatting.parse_rules[range_string] = []
for cfRule in cfRules:
if not cfRule.get('type') or cfRule.get('type') == 'dataBar':
# dataBar conditional formatting isn't supported, as it relies on the complex <extLst> tag
continue
rule = {'type': cfRule.get('type')}
for attr in ConditionalFormatting.rule_attributes:
if cfRule.get(attr) is not None:
if attr == 'priority':
rule[attr] = int(cfRule.get(attr))
else:
rule[attr] = cfRule.get(attr)
formula = cfRule.findall('{%s}formula' % SHEET_MAIN_NS)
for f in formula:
if 'formula' not in rule:
rule['formula'] = []
rule['formula'].append(f.text)
colorScale = cfRule.find('{%s}colorScale' % SHEET_MAIN_NS)
if colorScale is not None:
rule['colorScale'] = {'cfvo': [], 'color': []}
cfvoNodes = colorScale.findall('{%s}cfvo' % SHEET_MAIN_NS)
for node in cfvoNodes:
cfvo = {}
if node.get('type') is not None:
cfvo['type'] = node.get('type')
if node.get('val') is not None:
cfvo['val'] = node.get('val')
rule['colorScale']['cfvo'].append(cfvo)
colorNodes = colorScale.findall('{%s}color' % SHEET_MAIN_NS)
for color in colorNodes:
attrs = dict(color.items())
color = Color(**attrs)
rule['colorScale']['color'].append(color)
iconSet = cfRule.find('{%s}iconSet' % SHEET_MAIN_NS)
if iconSet is not None:
rule['iconSet'] = {'cfvo': []}
for iconAttr in ConditionalFormatting.icon_attributes:
if iconSet.get(iconAttr) is not None:
rule['iconSet'][iconAttr] = iconSet.get(iconAttr)
cfvoNodes = iconSet.findall('{%s}cfvo' % SHEET_MAIN_NS)
for node in cfvoNodes:
cfvo = {}
if node.get('type') is not None:
cfvo['type'] = node.get('type')
if node.get('val') is not None:
cfvo['val'] = node.get('val')
rule['iconSet']['cfvo'].append(cfvo)
self.ws.conditional_formatting.parse_rules[range_string].append(rule)
def parse_auto_filter(self, element):
self.ws.auto_filter.ref = element.get("ref")
for fc in safe_iterator(element, '{%s}filterColumn' % SHEET_MAIN_NS):
filters = fc.find('{%s}filters' % SHEET_MAIN_NS)
if filters is None:
continue
vals = [f.get("val") for f in safe_iterator(filters, '{%s}filter' % SHEET_MAIN_NS)]
blank = filters.get("blank")
self.ws.auto_filter.add_filter_column(fc.get("colId"), vals, blank=blank)
for sc in safe_iterator(element, '{%s}sortCondition' % SHEET_MAIN_NS):
self.ws.auto_filter.add_sort_condition(sc.get("ref"), sc.get("descending"))
def parse_sheet_protection(self, element):
values = element.attrib
self.ws.protection = SheetProtection(**values)
password = values.get("password")
if password is not None:
self.ws.protection.set_password(password, True)
def parse_data_validation(self, element):
from openpyxl.worksheet.datavalidation import parser
for tag in safe_iterator(element, "{%s}dataValidation" % SHEET_MAIN_NS):
dv = parser(tag)
self.ws._data_validations.append(dv)
def parse_properties(self, element):
self.ws.sheet_properties = parse_sheetPr(element)
def parse_legacy_drawing(self, element):
if self.keep_vba:
# Create an id that will not clash with any other ids that will
# be generated.
self.ws.vba_controls = 'vbaControlId'
def parse_sheet_views(self, element):
for el in element.findall("{%s}sheetView" % SHEET_MAIN_NS):
# according to the specification the last view wins
pass
self.ws.sheet_view = SheetView.from_tree(el)
def fast_parse(ws, xml_source, shared_strings, style_table, color_index=None, keep_vba=False):
parser = WorkSheetParser(ws, xml_source, shared_strings, style_table, color_index)
parser.parse()
del parser
def read_worksheet(xml_source, parent, preset_title, shared_strings,
style_table, color_index=None, worksheet_path=None):
"""Read an xml worksheet"""
if worksheet_path:
ws = IterableWorksheet(parent, preset_title,
worksheet_path, xml_source, shared_strings, style_table)
else:
ws = Worksheet(parent, preset_title)
fast_parse(ws, xml_source, shared_strings, style_table, color_index)
return ws
| 0
| 0
| 0
| 11,371
| 0
| 194
| 0
| 408
| 288
|
86cd014089a67861caf0503440063d428b748d8d
| 6,297
|
py
|
Python
|
tutorials/tutorial_4_adding_a_dataset.py
|
AliAbdulHussain/moabb
|
d308f024636802c21b06726457ce5ddacd418e16
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/tutorial_4_adding_a_dataset.py
|
AliAbdulHussain/moabb
|
d308f024636802c21b06726457ce5ddacd418e16
|
[
"BSD-3-Clause"
] | null | null | null |
tutorials/tutorial_4_adding_a_dataset.py
|
AliAbdulHussain/moabb
|
d308f024636802c21b06726457ce5ddacd418e16
|
[
"BSD-3-Clause"
] | null | null | null |
"""
=================================
Creating a dataset class in MOABB
=================================
"""
# Authors: Pedro L. C. Rodrigues, Sylvain Chevallier
#
# https://github.com/plcrodrigues/Workshop-MOABB-BCI-Graz-2019
import numpy as np
from scipy.io import savemat
from moabb.paradigms import LeftRightImagery
from moabb.evaluations import WithinSessionEvaluation
from pyriemann.classification import MDM
from pyriemann.estimation import Covariances
from sklearn.pipeline import make_pipeline
##############################################################################
# Creating some Data
# ------------------
#
# To illustrate the creation of a dataset class in MOABB, we first create an
# example dataset saved in .mat file. It contains a single fake recording on
# 8 channels lasting for 150 seconds (sampling frequency 256 Hz). We have
# included the script that creates this dataset and have uploaded it online.
# The fake dataset is available on the
# [Zenodo website](https://sandbox.zenodo.org/record/369543)
def create_example_dataset():
"""Create a fake example for a dataset"""
sfreq = 256
t_recording = 150
t_trial = 1 # duration of a trial
intertrial = 2 # time between end of a trial and the next one
n_chan = 8
x = np.zeros((n_chan + 1, t_recording * sfreq)) # electrodes + stimulus
stim = np.zeros(t_recording * sfreq)
t_offset = 1.0 # offset where the trials start
n_trials = 40
rep = np.linspace(0, 4 * t_trial, t_trial * sfreq)
signal = np.sin(2 * np.pi / t_trial * rep)
for n in range(n_trials):
label = n % 2 + 1 # alternate between class 0 and class 1
tn = int(t_offset * sfreq + n * (t_trial + intertrial) * sfreq)
stim[tn] = label
noise = 0.1 * np.random.randn(n_chan, len(signal))
x[:-1, tn:(tn + t_trial * sfreq)] = label * signal + noise
x[-1, :] = stim
return x, sfreq
# Create the fake data
for subject in [1, 2, 3]:
x, fs = create_example_dataset()
filename = 'subject_' + str(subject).zfill(2) + '.mat'
mdict = {}
mdict['x'] = x
mdict['fs'] = fs
savemat(filename, mdict)
##############################################################################
# Creating a Dataset Class
# ------------------------
#
# We will now create a dataset class using the fake data simulated with the
# code from above. For this, we first need to import the right classes from
# MOABB
# - `dl` is a very useful script that automatically downloads a dataset online
#   if it is not yet available on the user's computer. The script knows where
# to download the files because we create a global variable telling the URL
# where to fetch the data.
# - `BaseDataset` is the basic class that we overload to create our dataset.
#
# The global variable with the dataset's URL should specify an online
# repository where all the files are stored.
ExampleDataset_URL = 'https://sandbox.zenodo.org/record/369543/files/'
# The `ExampleDataset` needs to implement only 3 functions:
# - `__init__` for indicating the parameter of the dataset
# - `_get_single_subject_data` to define how to process the data once they
# have been downloaded
# - `data_path` to define how the data are downloaded.
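# A minimal sketch of such a class is given below. It assumes the imports used by
# the full tutorial (`BaseDataset` and `download as dl` from `moabb.datasets`,
# `mne`, and `loadmat` from `scipy.io`); the reference implementation shipped with
# the tutorial may differ in details such as the trial interval or the dataset code.
class ExampleDataset(BaseDataset):
    """Dataset wrapping the three fake .mat recordings created above."""
    def __init__(self):
        super().__init__(
            subjects=[1, 2, 3],
            sessions_per_subject=1,
            events={'left_hand': 1, 'right_hand': 2},
            code='Example dataset',
            interval=[0, 0.75],
            paradigm='imagery',
            doi='')
    def _get_single_subject_data(self, subject):
        """Return data as a nested dict: {session_name: {run_name: mne.io.Raw}}."""
        file_path_list = self.data_path(subject)
        data = loadmat(file_path_list[0])
        x = data['x']
        fs = data['fs'].item()
        ch_names = ['ch' + str(i) for i in range(8)] + ['stim']
        ch_types = ['eeg'] * 8 + ['stim']
        info = mne.create_info(ch_names, fs, ch_types)
        raw = mne.io.RawArray(x, info)
        return {'session_1': {'run_1': raw}}
    def data_path(self, subject, path=None, force_update=False,
                  update_path=None, verbose=None):
        """Download (if needed) the subject's .mat file and return its local path."""
        if subject not in self.subject_list:
            raise ValueError('Invalid subject number')
        url = '{:s}subject_0{:d}.mat'.format(ExampleDataset_URL, subject)
        return [dl.data_path(url, 'ExampleDataset', path, force_update,
                             update_path, verbose)]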
##############################################################################
# Using the ExampleDataset
# ------------------------
#
# Now that the `ExampleDataset` is defined, it can be instantiated directly.
# The rest of the code follows the steps described in the previous tutorials.
dataset = ExampleDataset()
paradigm = LeftRightImagery()
X, labels, meta = paradigm.get_data(dataset=dataset, subjects=[1])
evaluation = WithinSessionEvaluation(paradigm=paradigm, datasets=dataset,
overwrite=True)
pipelines = {}
pipelines['MDM'] = make_pipeline(Covariances('oas'), MDM(metric='riemann'))
scores = evaluation.process(pipelines)
print(scores)
##############################################################################
# Pushing on MOABB Github
# -----------------------
#
# If you want to make your dataset available to everyone, you could upload
# your data to a public server (like Zenodo or Figshare) and signal that you
# want to add your dataset to MOABB in the [dedicated issue](https://github.com/NeuroTechX/moabb/issues/1). # noqa: E501
# You could then follow the instructions on [how to contribute](https://github.com/NeuroTechX/moabb/blob/master/CONTRIBUTING.md) # noqa: E501
| 37.706587
| 143
| 0.610767
|
"""
=================================
Creating a dataset class in MOABB
=================================
"""
# Authors: Pedro L. C. Rodrigues, Sylvain Chevallier
#
# https://github.com/plcrodrigues/Workshop-MOABB-BCI-Graz-2019
import numpy as np
from scipy.io import savemat, loadmat
import mne
from moabb.datasets.base import BaseDataset
from moabb.datasets import download as dl
from moabb.paradigms import LeftRightImagery
from moabb.evaluations import WithinSessionEvaluation
from pyriemann.classification import MDM
from pyriemann.estimation import Covariances
from sklearn.pipeline import make_pipeline
##############################################################################
# Creating some Data
# ------------------
#
# To illustrate the creation of a dataset class in MOABB, we first create an
# example dataset saved in a .mat file. It contains a single fake recording on
# 8 channels lasting for 150 seconds (sampling frequency 256 Hz). We have
# included the script that creates this dataset and have uploaded it online.
# The fake dataset is available on the
# [Zenodo website](https://sandbox.zenodo.org/record/369543)
def create_example_dataset():
"""Create a fake example for a dataset"""
sfreq = 256
t_recording = 150
t_trial = 1 # duration of a trial
intertrial = 2 # time between end of a trial and the next one
n_chan = 8
x = np.zeros((n_chan + 1, t_recording * sfreq)) # electrodes + stimulus
stim = np.zeros(t_recording * sfreq)
t_offset = 1.0 # offset where the trials start
n_trials = 40
rep = np.linspace(0, 4 * t_trial, t_trial * sfreq)
signal = np.sin(2 * np.pi / t_trial * rep)
for n in range(n_trials):
label = n % 2 + 1 # alternate between class 0 and class 1
tn = int(t_offset * sfreq + n * (t_trial + intertrial) * sfreq)
stim[tn] = label
noise = 0.1 * np.random.randn(n_chan, len(signal))
x[:-1, tn:(tn + t_trial * sfreq)] = label * signal + noise
x[-1, :] = stim
return x, sfreq
# Create the fake data
for subject in [1, 2, 3]:
x, fs = create_example_dataset()
filename = 'subject_' + str(subject).zfill(2) + '.mat'
mdict = {}
mdict['x'] = x
mdict['fs'] = fs
savemat(filename, mdict)
##############################################################################
# Creating a Dataset Class
# ------------------------
#
# We will now create a dataset class using the fake data simulated with the
# code above. For this, we first need to import the right classes from
# MOABB:
# - `dl` is a very useful script that automatically downloads a dataset if it
#   is not yet available on the user's computer. The script knows where to
#   download the files because we create a global variable with the URL from
#   which to fetch the data.
# - `BaseDataset` is the basic class that we overload to create our dataset.
#
# The global variable with the dataset's URL should specify an online
# repository where all the files are stored.
ExampleDataset_URL = 'https://sandbox.zenodo.org/record/369543/files/'
# The `ExampleDataset` needs to implement only 3 functions:
# - `__init__` for indicating the parameters of the dataset
# - `_get_single_subject_data` to define how to process the data once they
# have been downloaded
# - `data_path` to define how the data are downloaded.
class ExampleDataset(BaseDataset):
'''
Dataset used to exemplify the creation of a dataset class in MOABB.
    The data samples have been simulated and have no physiological meaning
whatsoever.
'''
def __init__(self):
super().__init__(
subjects=[1, 2, 3],
sessions_per_subject=1,
events={'left_hand': 1, 'right_hand': 2},
code='Example dataset',
interval=[0, 0.75],
paradigm='imagery',
doi='')
def _get_single_subject_data(self, subject):
"""return data for a single subject"""
file_path_list = self.data_path(subject)
data = loadmat(file_path_list[0])
x = data['x']
fs = data['fs']
ch_names = ['ch' + str(i) for i in range(8)] + ['stim']
ch_types = ['eeg' for i in range(8)] + ['stim']
info = mne.create_info(ch_names, fs, ch_types)
raw = mne.io.RawArray(x, info)
sessions = {}
sessions['session_1'] = {}
sessions['session_1']['run_1'] = raw
return sessions
def data_path(self, subject, path=None, force_update=False,
update_path=None, verbose=None):
"""Download the data from one subject"""
if subject not in self.subject_list:
raise(ValueError("Invalid subject number"))
url = '{:s}subject_0{:d}.mat'.format(ExampleDataset_URL, subject)
path = dl.data_path(url, 'ExampleDataset')
return [path] # it has to return a list
##############################################################################
# Using the ExampleDataset
# ------------------------
#
# Now that the `ExampleDataset` is defined, it can be instantiated directly.
# The rest of the code follows the steps described in the previous tutorials.
dataset = ExampleDataset()
paradigm = LeftRightImagery()
X, labels, meta = paradigm.get_data(dataset=dataset, subjects=[1])
evaluation = WithinSessionEvaluation(paradigm=paradigm, datasets=dataset,
overwrite=True)
pipelines = {}
pipelines['MDM'] = make_pipeline(Covariances('oas'), MDM(metric='riemann'))
scores = evaluation.process(pipelines)
print(scores)
##############################################################################
# Pushing on MOABB Github
# -----------------------
#
# If you want to make your dataset available to everyone, you can upload
# your data to a public server (such as Zenodo or Figshare) and signal that you
# want to add your dataset to MOABB in the [dedicated issue](https://github.com/NeuroTechX/moabb/issues/1). # noqa: E501
# You could then follow the instructions on [how to contribute](https://github.com/NeuroTechX/moabb/blob/master/CONTRIBUTING.md) # noqa: E501
| 0
| 0
| 0
| 1,542
| 0
| 0
| 0
| 40
| 95
|
f4f59e9616efbd52c782c87011c67a4852f64da0
| 486
|
py
|
Python
|
test/sqlalchemy_filterparams_tests/database_test.py
|
cbrand/python-sqlalchemy-filterparams
|
6e555cfe9e2f0f2c5f6d6606485de50bc76aaf73
|
[
"MIT"
] | 2
|
2016-02-24T03:07:26.000Z
|
2016-05-22T22:00:40.000Z
|
test/sqlalchemy_filterparams_tests/database_test.py
|
cbrand/python-sqlalchemy-filterparams
|
6e555cfe9e2f0f2c5f6d6606485de50bc76aaf73
|
[
"MIT"
] | null | null | null |
test/sqlalchemy_filterparams_tests/database_test.py
|
cbrand/python-sqlalchemy-filterparams
|
6e555cfe9e2f0f2c5f6d6606485de50bc76aaf73
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
| 20.25
| 50
| 0.713992
|
# -*- encoding: utf-8 -*-
from unittest import TestCase
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy_filterparams_tests.models import (
Base,
)
class BaseDatabaseTest(TestCase):
engine = create_engine('sqlite:///:memory:')
Session = sessionmaker(bind=engine)
session = Session()
def setUp(self):
Base.metadata.create_all(self.engine)
def tearDown(self):
Base.metadata.drop_all(self.engine)
| 0
| 0
| 0
| 263
| 0
| 0
| 0
| 82
| 113
|
49aaf3536a9b3013f2535a7951571b5299a8099f
| 604
|
py
|
Python
|
heisen/core/__init__.py
|
HeisenCore/heisen
|
0cd4d27822960553a8e83a72c7dfeefa76e65c06
|
[
"MIT"
] | 5
|
2016-08-30T07:51:08.000Z
|
2021-09-13T11:30:05.000Z
|
heisen/core/__init__.py
|
HeisenCore/heisen
|
0cd4d27822960553a8e83a72c7dfeefa76e65c06
|
[
"MIT"
] | 15
|
2016-09-15T19:21:24.000Z
|
2016-10-22T16:22:15.000Z
|
heisen/core/__init__.py
|
HeisenCore/heisen
|
0cd4d27822960553a8e83a72c7dfeefa76e65c06
|
[
"MIT"
] | null | null | null |
rpc_call = get_rpc_connection()
| 27.454545
| 80
| 0.692053
|
from heisen.config import settings
from jsonrpclib.request import ConnectionPool
def get_rpc_connection():
if settings.CREDENTIALS:
        username, password = settings.CREDENTIALS[0]
    else:
        username = password = None
    servers = {'self': []}
    for instance_number in range(settings.INSTANCE_COUNT):
        servers['self'].append((
            'localhost', settings.RPC_PORT + instance_number, username, password
))
servers.update(getattr(settings, 'RPC_SERVERS', {}))
return ConnectionPool(servers, 'heisen', settings.APP_NAME)
rpc_call = get_rpc_connection()
| 0
| 0
| 0
| 0
| 0
| 465
| 0
| 37
| 67
|
5bc89390a9afb112693a9caadb722ffe80a659c2
| 1,331
|
py
|
Python
|
pytition/petition/management/commands/update.py
|
lpoujade/Pytition
|
b66a4b358dc4e7dc368ec30e34e124f21920371e
|
[
"BSD-3-Clause"
] | 1
|
2020-08-13T23:01:48.000Z
|
2020-08-13T23:01:48.000Z
|
pytition/petition/management/commands/update.py
|
lpoujade/Pytition
|
b66a4b358dc4e7dc368ec30e34e124f21920371e
|
[
"BSD-3-Clause"
] | 14
|
2020-08-05T17:27:54.000Z
|
2020-09-25T02:11:32.000Z
|
pytition/petition/management/commands/update.py
|
lpoujade/Pytition
|
b66a4b358dc4e7dc368ec30e34e124f21920371e
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
from functools import partial
run = partial(subprocess.run, shell=True, check=True)
| 34.128205
| 156
| 0.593539
|
import subprocess
from functools import partial
from django.core.management.base import BaseCommand
from django.conf import settings
from pathlib import Path
import os
run = partial(subprocess.run, shell=True, check=True)
class Command(BaseCommand):
"""Update pytition install
"""
def handle(self, *args, **options):
try:
git_path = Path(settings.BASE_DIR).parent
os.chdir(git_path)
run("git checkout master && git pull")
version_cmd = "curl -s https://api.github.com/repos/pytition/pytition/releases/latest | grep 'tag_name' | cut -d : -f2,3 | tr -d \\\" | tr -d ,"
version = run(version_cmd, capture_output=True).stdout.decode().strip()
checkout_cmd = f"git checkout {version}"
run(checkout_cmd)
run("pip3 install --upgrade -r requirements.txt")
os.chdir(settings.BASE_DIR)
run("python3 manage.py migrate")
run("python3 manage.py collectstatic --no-input")
run("python3 manage.py compilemessages")
except subprocess.CalledProcessError as e:
print(e)
            if e.stdout is not None:
                print(f"stdout: {e.stdout}")
            if e.stderr is not None:
print(f"stderr:{e.stderr}")
| 0
| 0
| 0
| 1,084
| 0
| 0
| 0
| 32
| 111
|
d8a9231416df4329b68705f67027a39e5c572589
| 3,401
|
py
|
Python
|
bcml4pheno/ttbarzp.py
|
sheride/bcml4pheno
|
c9629dafcdbee0a4c28ceb7b28c9862de8479a24
|
[
"Apache-2.0"
] | null | null | null |
bcml4pheno/ttbarzp.py
|
sheride/bcml4pheno
|
c9629dafcdbee0a4c28ceb7b28c9862de8479a24
|
[
"Apache-2.0"
] | null | null | null |
bcml4pheno/ttbarzp.py
|
sheride/bcml4pheno
|
c9629dafcdbee0a4c28ceb7b28c9862de8479a24
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: ttbarzp.ipynb (unless otherwise specified).
__all__ = ['get_elijah_ttbarzp_cs', 'get_manuel_ttbarzp_cs', 'import47Ddata', 'get47Dfeatures']
# Cell
import numpy as np
import tensorflow as tf
# Cell
def get_elijah_ttbarzp_cs():
r"""
Contains cross section information produced by Elijah for $pp \to t\overline{t} \; Z'$ collider phenomenology.
Returns list containing signal masses, signal cross sections (for those masses, in pb), and background cross sections
(also in pb)
"""
# Z' masses (GeV) for which Elijah created signal samples
elijah_masses = [10, 50, 100, 200, 350, 500, 1000, 2000, 5000]
# signal cross sections (pb)
elijah_sig_css = [9.801, 0.5445, 0.1442, 0.03622, 0.009998, 0.003802, 0.0003936, 2.034e-05, 2.748e-08]
# background cross sections (pb)
elijah_bg_css = [0.106, 0.0117, 5.58]
return [elijah_masses, elijah_sig_css, elijah_bg_css]
# Cell
def get_manuel_ttbarzp_cs():
r"""
Contains cross section information produced through MadGraph by Manuel for collider phenomenology regarding
the semihadronic, semileptonic $pp \to t\overline{t} \; Z', Z' \to b\overline{b}$ channel
"""
# Z' masses (GeV) for which I (Elijah) created signal samples
manuel_masses = [350, 500, 750, 1000, 2000, 3000, 4000]
# signal cross sections (pb)
manuel_sig_css = [0.001395, 0.0007823, 0.0003429, 0.0001692, 1.808e-05, 1.325e-06, 4.456e-07]
# background cross sections (pb)
manuel_bg_css = [0.1339, 0.01187, 5.603]
return [manuel_masses, manuel_sig_css, manuel_bg_css]
# Cell
def import47Ddata(name):
r"""
Imports `name.npy` file containing 47-dimensional data for training
Available files:
- bgh.npy (Standard Model background 1, $pp \to t\overline{t}h$)
- bg4t.npy (Standard Model background 2, $pp \to t\overline{t}t\overline{t}$)
- bgnoh.npy (Standard Model background 3, $pp \to t\overline{t} \; \setminus \; h$)
- sig350G.npy ($Z'$ signal, $m_{Z'} = 350$ GeV)
- sig500G.npy ($Z'$ signal, $m_{Z'} = 500$ GeV)
- sig1T.npy ($Z'$ signal, $m_{Z'} = 1$ TeV)
- sig2T.npy ($Z'$ signal, $m_{Z'} = 2$ TeV)
- sig4T.npy ($Z'$ signal, $m_{Z'} = 4$ TeV)
"""
if name[-4:] == '.npy':
name = name[:-4]
url = 'https://storage.googleapis.com/ttbarzp/47dim/'
try:
path = tf.keras.utils.get_file(f'{name}.npy', url + name + '.npy')
data = np.load(path)
return data
except:
print(f"{name}.npy doesn't appear to exist")
# Cell
def get47Dfeatures():
"""
Returns list containing the names of the 47 features found in the data accessible through
`ttbarzp.import47Ddata()`
"""
return [
'pT b1', 'pT b2', 'pT b3', 'pT b4',
'sdEta b1 b2', 'sdEta b1 b3', 'sdEta b1 b4', 'sdEta b2 b3', 'sdEta b2 b4', 'sdEta b3 b4',
'sdPhi b1 b2', 'sdPhi b1 b3', 'sdPhi b1 b4', 'sdPhi b2 b3', 'sdPhi b2 b4', 'sdPhi b3 b4',
'dR b1 b2', 'dR b1 b3', 'dR b1 b4', 'dR b2 b3', 'dR b2 b4', 'dR b3 b4',
'MET', 'pT l', 'MT l MET',
'M b1 b2', 'M b1 b3', 'M b1 b4', 'M b2 b3', 'M b2 b4', 'M b3 b4',
'MT b1 l MET', 'MT b2 l MET', 'MT b3 l MET', 'MT b4 l MET',
'M j1 j2', 'pT j1', 'pT j2', 'dR j1 j2',
'dR b1 l', 'dR b2 l', 'dR b3 l', 'dR b4 l',
'sdPhi b1 l', 'sdPhi b2 l', 'sdPhi b3 l', 'sdPhi b4 l']
| 41.987654
| 121
| 0.614525
|
# AUTOGENERATED! DO NOT EDIT! File to edit: ttbarzp.ipynb (unless otherwise specified).
__all__ = ['get_elijah_ttbarzp_cs', 'get_manuel_ttbarzp_cs', 'import47Ddata', 'get47Dfeatures']
# Cell
import numpy as np
import tensorflow as tf
# Cell
def get_elijah_ttbarzp_cs():
r"""
Contains cross section information produced by Elijah for $pp \to t\overline{t} \; Z'$ collider phenomenology.
Returns list containing signal masses, signal cross sections (for those masses, in pb), and background cross sections
(also in pb)
"""
# Z' masses (GeV) for which Elijah created signal samples
elijah_masses = [10, 50, 100, 200, 350, 500, 1000, 2000, 5000]
# signal cross sections (pb)
elijah_sig_css = [9.801, 0.5445, 0.1442, 0.03622, 0.009998, 0.003802, 0.0003936, 2.034e-05, 2.748e-08]
# background cross sections (pb)
elijah_bg_css = [0.106, 0.0117, 5.58]
return [elijah_masses, elijah_sig_css, elijah_bg_css]
# Cell
def get_manuel_ttbarzp_cs():
r"""
Contains cross section information produced through MadGraph by Manuel for collider phenomenology regarding
the semihadronic, semileptonic $pp \to t\overline{t} \; Z', Z' \to b\overline{b}$ channel
"""
# Z' masses (GeV) for which I (Elijah) created signal samples
manuel_masses = [350, 500, 750, 1000, 2000, 3000, 4000]
# signal cross sections (pb)
manuel_sig_css = [0.001395, 0.0007823, 0.0003429, 0.0001692, 1.808e-05, 1.325e-06, 4.456e-07]
# background cross sections (pb)
manuel_bg_css = [0.1339, 0.01187, 5.603]
return [manuel_masses, manuel_sig_css, manuel_bg_css]
# Cell
def import47Ddata(name):
r"""
Imports `name.npy` file containing 47-dimensional data for training
Available files:
- bgh.npy (Standard Model background 1, $pp \to t\overline{t}h$)
- bg4t.npy (Standard Model background 2, $pp \to t\overline{t}t\overline{t}$)
- bgnoh.npy (Standard Model background 3, $pp \to t\overline{t} \; \setminus \; h$)
- sig350G.npy ($Z'$ signal, $m_{Z'} = 350$ GeV)
- sig500G.npy ($Z'$ signal, $m_{Z'} = 500$ GeV)
- sig1T.npy ($Z'$ signal, $m_{Z'} = 1$ TeV)
- sig2T.npy ($Z'$ signal, $m_{Z'} = 2$ TeV)
- sig4T.npy ($Z'$ signal, $m_{Z'} = 4$ TeV)
"""
if name[-4:] == '.npy':
name = name[:-4]
url = 'https://storage.googleapis.com/ttbarzp/47dim/'
try:
path = tf.keras.utils.get_file(f'{name}.npy', url + name + '.npy')
data = np.load(path)
return data
except:
print(f"{name}.npy doesn't appear to exist")
# Cell
def get47Dfeatures():
"""
Returns list containing the names of the 47 features found in the data accessible through
`ttbarzp.import47Ddata()`
"""
return [
'pT b1', 'pT b2', 'pT b3', 'pT b4',
'sdEta b1 b2', 'sdEta b1 b3', 'sdEta b1 b4', 'sdEta b2 b3', 'sdEta b2 b4', 'sdEta b3 b4',
'sdPhi b1 b2', 'sdPhi b1 b3', 'sdPhi b1 b4', 'sdPhi b2 b3', 'sdPhi b2 b4', 'sdPhi b3 b4',
'dR b1 b2', 'dR b1 b3', 'dR b1 b4', 'dR b2 b3', 'dR b2 b4', 'dR b3 b4',
'MET', 'pT l', 'MT l MET',
'M b1 b2', 'M b1 b3', 'M b1 b4', 'M b2 b3', 'M b2 b4', 'M b3 b4',
'MT b1 l MET', 'MT b2 l MET', 'MT b3 l MET', 'MT b4 l MET',
'M j1 j2', 'pT j1', 'pT j2', 'dR j1 j2',
'dR b1 l', 'dR b2 l', 'dR b3 l', 'dR b4 l',
'sdPhi b1 l', 'sdPhi b2 l', 'sdPhi b3 l', 'sdPhi b4 l']
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
27d6a8f043d6d02fb9eb37d08984a8c17e445752
| 457
|
py
|
Python
|
backend_application/app/wikisearch/migrations/0005_article_urls.py
|
cdeler/woogle
|
5774ce380947734bf0ed82bd27cdc5359bc1a646
|
[
"MIT"
] | null | null | null |
backend_application/app/wikisearch/migrations/0005_article_urls.py
|
cdeler/woogle
|
5774ce380947734bf0ed82bd27cdc5359bc1a646
|
[
"MIT"
] | 54
|
2018-07-05T13:39:48.000Z
|
2018-09-24T09:58:40.000Z
|
backend_application/app/wikisearch/migrations/0005_article_urls.py
|
cdeler/woogle
|
5774ce380947734bf0ed82bd27cdc5359bc1a646
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.7 on 2018-08-06 02:02
| 24.052632
| 121
| 0.621444
|
# Generated by Django 2.0.7 on 2018-08-06 02:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wikisearch', '0004_auto_20180730_2216'),
]
operations = [
migrations.AddField(
model_name='article',
name='urls',
field=models.CharField(default='None', help_text='url of wiki article', max_length=2000, verbose_name='URL'),
),
]
| 0
| 0
| 0
| 343
| 0
| 0
| 0
| 19
| 46
|
aa0c45fc09b8a64070231bbbba2d3e138ed5f826
| 1,172
|
py
|
Python
|
setup.py
|
EKT/pyrundeck
|
cfc5140d6ef336a54efcf1915df202ff35a7a492
|
[
"BSD-3-Clause"
] | 2
|
2016-10-19T07:26:27.000Z
|
2021-02-04T10:26:15.000Z
|
setup.py
|
EKT/pyrundeck
|
cfc5140d6ef336a54efcf1915df202ff35a7a492
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
EKT/pyrundeck
|
cfc5140d6ef336a54efcf1915df202ff35a7a492
|
[
"BSD-3-Clause"
] | 2
|
2016-10-19T07:26:35.000Z
|
2017-07-24T10:14:29.000Z
|
from setuptools import setup, find_packages
setup(
name="PyRundeck",
version="0.3.7",
description="A thin, pure Python wrapper for the Rundeck API",
author="Panagiotis Koutsourakis",
author_email="[email protected]",
license='BSD',
url='https://github.com/EKT/pyrundeck',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Internet :: REST API client',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='rest api client rundeck',
packages=find_packages(exclude=['tests', '*_virtualenv', 'doc']),
install_requires=[
'lxml>=3.4.4',
'requests>=2.7.0',
'pyopenssl>=0.15.1',
'ndg-httpsclient>=0.4.0',
'pyasn1>=0.1.8',
'pyyaml>=3.11'
]
)
| 33.485714
| 71
| 0.583618
|
from setuptools import setup, find_packages
setup(
name="PyRundeck",
version="0.3.7",
description="A thin, pure Python wrapper for the Rundeck API",
author="Panagiotis Koutsourakis",
author_email="[email protected]",
license='BSD',
url='https://github.com/EKT/pyrundeck',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Internet :: REST API client',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='rest api client rundeck',
packages=find_packages(exclude=['tests', '*_virtualenv', 'doc']),
install_requires=[
'lxml>=3.4.4',
'requests>=2.7.0',
'pyopenssl>=0.15.1',
'ndg-httpsclient>=0.4.0',
'pyasn1>=0.1.8',
'pyyaml>=3.11'
]
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
335a7affe187d0655121af5cae9cd6180241399b
| 3,928
|
py
|
Python
|
src/product/migrations/0001_initial.py
|
gabriel-gn/magalu-favorites-api
|
c9e2e3c6752530272198ef8c12357016149c1d34
|
[
"MIT"
] | null | null | null |
src/product/migrations/0001_initial.py
|
gabriel-gn/magalu-favorites-api
|
c9e2e3c6752530272198ef8c12357016149c1d34
|
[
"MIT"
] | null | null | null |
src/product/migrations/0001_initial.py
|
gabriel-gn/magalu-favorites-api
|
c9e2e3c6752530272198ef8c12357016149c1d34
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.2 on 2021-05-16 03:23
import django.core.validators
| 46.761905
| 176
| 0.580193
|
# Generated by Django 3.2.2 on 2021-05-16 03:23
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False)),
('modified', models.DateTimeField(editable=False)),
('title', models.CharField(default='Novo Produto', max_length=256)),
('description', models.TextField(blank=True, default='')),
('price', models.FloatField(validators=[django.core.validators.MinValueValidator(0)])),
('image', models.CharField(default='https://www.pngkey.com/png/full/75-754812_question-mark-image-point-d-interrogation-png.png', max_length=256)),
],
options={
'ordering': ['-created'],
'abstract': False,
},
),
migrations.CreateModel(
name='ProductBrand',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False)),
('modified', models.DateTimeField(editable=False)),
('name', models.CharField(default='Nova Marca', max_length=256)),
('category', models.PositiveIntegerField(choices=[(0, 'Outros'), (1, 'Alimentação'), (2, 'Entretenimento'), (3, 'Informática'), (4, 'Jardinagem')], default=0)),
],
options={
'ordering': ['-created'],
'abstract': False,
},
),
migrations.CreateModel(
name='UserFavorites',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False)),
('modified', models.DateTimeField(editable=False)),
('products', models.ManyToManyField(blank=True, to='product.Product')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created'],
'abstract': False,
},
),
migrations.CreateModel(
name='ProductReview',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(editable=False)),
('modified', models.DateTimeField(editable=False)),
('rating', models.IntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
('description', models.TextField(blank=True, default='')),
('product', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.product')),
('user', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-created'],
'abstract': False,
},
),
migrations.AddField(
model_name='product',
name='brand',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.productbrand'),
),
]
| 6
| 0
| 0
| 3,715
| 0
| 0
| 0
| 41
| 90
|
2ccf31155892bd9aa8de51d4885b66f2bda3d3f0
| 12,401
|
py
|
Python
|
pysnmp/APPN-DLUR-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/APPN-DLUR-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/APPN-DLUR-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module APPN-DLUR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/APPN-DLUR-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:08:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
SnaControlPointName, = mibBuilder.importSymbols("APPN-MIB", "SnaControlPointName")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
snanauMIB, = mibBuilder.importSymbols("SNA-NAU-MIB", "snanauMIB")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Unsigned32, IpAddress, Integer32, Counter32, Bits, Gauge32, ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier, ModuleIdentity, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "IpAddress", "Integer32", "Counter32", "Bits", "Gauge32", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier", "ModuleIdentity", "iso", "TimeTicks")
DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue")
dlurMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 34, 5))
if mibBuilder.loadTexts: dlurMIB.setLastUpdated('9705101500Z')
if mibBuilder.loadTexts: dlurMIB.setOrganization('IETF SNA NAU MIB WG / AIW APPN/HPR MIBs SIG')
dlurObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1))
dlurNodeInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 1))
dlurNodeCapabilities = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1))
dlurNodeCpName = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 1), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurNodeCpName.setStatus('current')
dlurReleaseLevel = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurReleaseLevel.setStatus('current')
dlurAnsSupport = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("continueOrStop", 1), ("stopOnly", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurAnsSupport.setStatus('current')
dlurMultiSubnetSupport = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurMultiSubnetSupport.setStatus('current')
dlurDefaultDefPrimDlusName = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 5), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurDefaultDefPrimDlusName.setStatus('current')
dlurNetworkNameForwardingSupport = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurNetworkNameForwardingSupport.setStatus('current')
dlurNondisDlusDlurSessDeactSup = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurNondisDlusDlurSessDeactSup.setStatus('current')
dlurDefaultDefBackupDlusTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2), )
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusTable.setStatus('current')
dlurDefaultDefBackupDlusEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurDefaultDefBackupDlusIndex"))
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusEntry.setStatus('current')
dlurDefaultDefBackupDlusIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusIndex.setStatus('current')
dlurDefaultDefBackupDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2, 1, 2), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusName.setStatus('current')
dlurPuInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 2))
dlurPuTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1), )
if mibBuilder.loadTexts: dlurPuTable.setStatus('current')
dlurPuEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurPuName"))
if mibBuilder.loadTexts: dlurPuEntry.setStatus('current')
dlurPuName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 17)))
if mibBuilder.loadTexts: dlurPuName.setStatus('current')
dlurPuSscpSuppliedName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuSscpSuppliedName.setStatus('current')
dlurPuStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("reset", 1), ("pendReqActpuRsp", 2), ("pendActpu", 3), ("pendActpuRsp", 4), ("active", 5), ("pendLinkact", 6), ("pendDactpuRsp", 7), ("pendInop", 8), ("pendInopActpu", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuStatus.setStatus('current')
dlurPuAnsSupport = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("continue", 1), ("stop", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuAnsSupport.setStatus('current')
dlurPuLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("internal", 1), ("downstream", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuLocation.setStatus('current')
dlurPuLsName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuLsName.setStatus('current')
dlurPuDlusSessnStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reset", 1), ("pendingActive", 2), ("active", 3), ("pendingInactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuDlusSessnStatus.setStatus('current')
dlurPuActiveDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuActiveDlusName.setStatus('current')
dlurPuDefPrimDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuDefPrimDlusName.setStatus('current')
dlurPuDefBackupDlusTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2), )
if mibBuilder.loadTexts: dlurPuDefBackupDlusTable.setStatus('current')
dlurPuDefBackupDlusEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurPuDefBackupDlusPuName"), (0, "APPN-DLUR-MIB", "dlurPuDefBackupDlusIndex"))
if mibBuilder.loadTexts: dlurPuDefBackupDlusEntry.setStatus('current')
dlurPuDefBackupDlusPuName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 17)))
if mibBuilder.loadTexts: dlurPuDefBackupDlusPuName.setStatus('current')
dlurPuDefBackupDlusIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: dlurPuDefBackupDlusIndex.setStatus('current')
dlurPuDefBackupDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1, 3), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuDefBackupDlusName.setStatus('current')
dlurDlusInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 3))
dlurDlusTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1), )
if mibBuilder.loadTexts: dlurDlusTable.setStatus('current')
dlurDlusEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurDlusName"))
if mibBuilder.loadTexts: dlurDlusEntry.setStatus('current')
dlurDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1, 1, 1), SnaControlPointName())
if mibBuilder.loadTexts: dlurDlusName.setStatus('current')
dlurDlusSessnStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reset", 1), ("pendingActive", 2), ("active", 3), ("pendingInactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurDlusSessnStatus.setStatus('current')
dlurConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 2))
dlurCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 2, 1))
dlurGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 2, 2))
dlurCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 34, 5, 2, 1, 1)).setObjects(("APPN-DLUR-MIB", "dlurConfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlurCompliance = dlurCompliance.setStatus('current')
dlurConfGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 34, 5, 2, 2, 1)).setObjects(("APPN-DLUR-MIB", "dlurNodeCpName"), ("APPN-DLUR-MIB", "dlurReleaseLevel"), ("APPN-DLUR-MIB", "dlurAnsSupport"), ("APPN-DLUR-MIB", "dlurMultiSubnetSupport"), ("APPN-DLUR-MIB", "dlurNetworkNameForwardingSupport"), ("APPN-DLUR-MIB", "dlurNondisDlusDlurSessDeactSup"), ("APPN-DLUR-MIB", "dlurDefaultDefPrimDlusName"), ("APPN-DLUR-MIB", "dlurDefaultDefBackupDlusName"), ("APPN-DLUR-MIB", "dlurPuSscpSuppliedName"), ("APPN-DLUR-MIB", "dlurPuStatus"), ("APPN-DLUR-MIB", "dlurPuAnsSupport"), ("APPN-DLUR-MIB", "dlurPuLocation"), ("APPN-DLUR-MIB", "dlurPuLsName"), ("APPN-DLUR-MIB", "dlurPuDlusSessnStatus"), ("APPN-DLUR-MIB", "dlurPuActiveDlusName"), ("APPN-DLUR-MIB", "dlurPuDefPrimDlusName"), ("APPN-DLUR-MIB", "dlurPuDefBackupDlusName"), ("APPN-DLUR-MIB", "dlurDlusSessnStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlurConfGroup = dlurConfGroup.setStatus('current')
mibBuilder.exportSymbols("APPN-DLUR-MIB", dlurPuTable=dlurPuTable, dlurPuDefBackupDlusName=dlurPuDefBackupDlusName, dlurNodeCpName=dlurNodeCpName, dlurPuStatus=dlurPuStatus, dlurNetworkNameForwardingSupport=dlurNetworkNameForwardingSupport, dlurPuDefBackupDlusTable=dlurPuDefBackupDlusTable, dlurDlusInfo=dlurDlusInfo, dlurPuAnsSupport=dlurPuAnsSupport, dlurDefaultDefBackupDlusIndex=dlurDefaultDefBackupDlusIndex, dlurGroups=dlurGroups, dlurPuName=dlurPuName, dlurCompliances=dlurCompliances, dlurPuDefPrimDlusName=dlurPuDefPrimDlusName, dlurPuDefBackupDlusEntry=dlurPuDefBackupDlusEntry, dlurMultiSubnetSupport=dlurMultiSubnetSupport, dlurNodeInfo=dlurNodeInfo, dlurPuLsName=dlurPuLsName, dlurDlusName=dlurDlusName, dlurConformance=dlurConformance, dlurNodeCapabilities=dlurNodeCapabilities, dlurDlusTable=dlurDlusTable, dlurCompliance=dlurCompliance, dlurDefaultDefPrimDlusName=dlurDefaultDefPrimDlusName, dlurDlusSessnStatus=dlurDlusSessnStatus, dlurPuDefBackupDlusIndex=dlurPuDefBackupDlusIndex, dlurPuLocation=dlurPuLocation, dlurDefaultDefBackupDlusTable=dlurDefaultDefBackupDlusTable, dlurPuInfo=dlurPuInfo, PYSNMP_MODULE_ID=dlurMIB, dlurObjects=dlurObjects, dlurConfGroup=dlurConfGroup, dlurPuDefBackupDlusPuName=dlurPuDefBackupDlusPuName, dlurNondisDlusDlurSessDeactSup=dlurNondisDlusDlurSessDeactSup, dlurPuActiveDlusName=dlurPuActiveDlusName, dlurReleaseLevel=dlurReleaseLevel, dlurAnsSupport=dlurAnsSupport, dlurDefaultDefBackupDlusEntry=dlurDefaultDefBackupDlusEntry, dlurPuSscpSuppliedName=dlurPuSscpSuppliedName, dlurDefaultDefBackupDlusName=dlurDefaultDefBackupDlusName, dlurPuEntry=dlurPuEntry, dlurPuDlusSessnStatus=dlurPuDlusSessnStatus, dlurMIB=dlurMIB, dlurDlusEntry=dlurDlusEntry)
| 127.845361
| 1,703
| 0.756149
|
#
# PySNMP MIB module APPN-DLUR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/APPN-DLUR-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:08:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
SnaControlPointName, = mibBuilder.importSymbols("APPN-MIB", "SnaControlPointName")
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
snanauMIB, = mibBuilder.importSymbols("SNA-NAU-MIB", "snanauMIB")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Unsigned32, IpAddress, Integer32, Counter32, Bits, Gauge32, ObjectIdentity, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, MibIdentifier, ModuleIdentity, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "IpAddress", "Integer32", "Counter32", "Bits", "Gauge32", "ObjectIdentity", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "MibIdentifier", "ModuleIdentity", "iso", "TimeTicks")
DisplayString, TextualConvention, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "TruthValue")
dlurMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 34, 5))
if mibBuilder.loadTexts: dlurMIB.setLastUpdated('9705101500Z')
if mibBuilder.loadTexts: dlurMIB.setOrganization('IETF SNA NAU MIB WG / AIW APPN/HPR MIBs SIG')
dlurObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1))
dlurNodeInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 1))
dlurNodeCapabilities = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1))
dlurNodeCpName = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 1), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurNodeCpName.setStatus('current')
dlurReleaseLevel = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2)).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurReleaseLevel.setStatus('current')
dlurAnsSupport = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("continueOrStop", 1), ("stopOnly", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurAnsSupport.setStatus('current')
dlurMultiSubnetSupport = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 4), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurMultiSubnetSupport.setStatus('current')
dlurDefaultDefPrimDlusName = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 5), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurDefaultDefPrimDlusName.setStatus('current')
dlurNetworkNameForwardingSupport = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurNetworkNameForwardingSupport.setStatus('current')
dlurNondisDlusDlurSessDeactSup = MibScalar((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 1, 7), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurNondisDlusDlurSessDeactSup.setStatus('current')
dlurDefaultDefBackupDlusTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2), )
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusTable.setStatus('current')
dlurDefaultDefBackupDlusEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurDefaultDefBackupDlusIndex"))
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusEntry.setStatus('current')
dlurDefaultDefBackupDlusIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusIndex.setStatus('current')
dlurDefaultDefBackupDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 1, 2, 1, 2), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurDefaultDefBackupDlusName.setStatus('current')
dlurPuInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 2))
dlurPuTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1), )
if mibBuilder.loadTexts: dlurPuTable.setStatus('current')
dlurPuEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurPuName"))
if mibBuilder.loadTexts: dlurPuEntry.setStatus('current')
dlurPuName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 17)))
if mibBuilder.loadTexts: dlurPuName.setStatus('current')
dlurPuSscpSuppliedName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuSscpSuppliedName.setStatus('current')
dlurPuStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("reset", 1), ("pendReqActpuRsp", 2), ("pendActpu", 3), ("pendActpuRsp", 4), ("active", 5), ("pendLinkact", 6), ("pendDactpuRsp", 7), ("pendInop", 8), ("pendInopActpu", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuStatus.setStatus('current')
dlurPuAnsSupport = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("continue", 1), ("stop", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuAnsSupport.setStatus('current')
dlurPuLocation = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("internal", 1), ("downstream", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuLocation.setStatus('current')
dlurPuLsName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuLsName.setStatus('current')
dlurPuDlusSessnStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reset", 1), ("pendingActive", 2), ("active", 3), ("pendingInactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuDlusSessnStatus.setStatus('current')
dlurPuActiveDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuActiveDlusName.setStatus('current')
dlurPuDefPrimDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 17))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuDefPrimDlusName.setStatus('current')
dlurPuDefBackupDlusTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2), )
if mibBuilder.loadTexts: dlurPuDefBackupDlusTable.setStatus('current')
dlurPuDefBackupDlusEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurPuDefBackupDlusPuName"), (0, "APPN-DLUR-MIB", "dlurPuDefBackupDlusIndex"))
if mibBuilder.loadTexts: dlurPuDefBackupDlusEntry.setStatus('current')
dlurPuDefBackupDlusPuName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 17)))
if mibBuilder.loadTexts: dlurPuDefBackupDlusPuName.setStatus('current')
dlurPuDefBackupDlusIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: dlurPuDefBackupDlusIndex.setStatus('current')
dlurPuDefBackupDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 2, 2, 1, 3), SnaControlPointName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurPuDefBackupDlusName.setStatus('current')
dlurDlusInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 1, 3))
dlurDlusTable = MibTable((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1), )
if mibBuilder.loadTexts: dlurDlusTable.setStatus('current')
dlurDlusEntry = MibTableRow((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1, 1), ).setIndexNames((0, "APPN-DLUR-MIB", "dlurDlusName"))
if mibBuilder.loadTexts: dlurDlusEntry.setStatus('current')
dlurDlusName = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1, 1, 1), SnaControlPointName())
if mibBuilder.loadTexts: dlurDlusName.setStatus('current')
dlurDlusSessnStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 34, 5, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reset", 1), ("pendingActive", 2), ("active", 3), ("pendingInactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: dlurDlusSessnStatus.setStatus('current')
dlurConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 2))
dlurCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 2, 1))
dlurGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 34, 5, 2, 2))
dlurCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 34, 5, 2, 1, 1)).setObjects(("APPN-DLUR-MIB", "dlurConfGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlurCompliance = dlurCompliance.setStatus('current')
dlurConfGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 34, 5, 2, 2, 1)).setObjects(("APPN-DLUR-MIB", "dlurNodeCpName"), ("APPN-DLUR-MIB", "dlurReleaseLevel"), ("APPN-DLUR-MIB", "dlurAnsSupport"), ("APPN-DLUR-MIB", "dlurMultiSubnetSupport"), ("APPN-DLUR-MIB", "dlurNetworkNameForwardingSupport"), ("APPN-DLUR-MIB", "dlurNondisDlusDlurSessDeactSup"), ("APPN-DLUR-MIB", "dlurDefaultDefPrimDlusName"), ("APPN-DLUR-MIB", "dlurDefaultDefBackupDlusName"), ("APPN-DLUR-MIB", "dlurPuSscpSuppliedName"), ("APPN-DLUR-MIB", "dlurPuStatus"), ("APPN-DLUR-MIB", "dlurPuAnsSupport"), ("APPN-DLUR-MIB", "dlurPuLocation"), ("APPN-DLUR-MIB", "dlurPuLsName"), ("APPN-DLUR-MIB", "dlurPuDlusSessnStatus"), ("APPN-DLUR-MIB", "dlurPuActiveDlusName"), ("APPN-DLUR-MIB", "dlurPuDefPrimDlusName"), ("APPN-DLUR-MIB", "dlurPuDefBackupDlusName"), ("APPN-DLUR-MIB", "dlurDlusSessnStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
dlurConfGroup = dlurConfGroup.setStatus('current')
mibBuilder.exportSymbols("APPN-DLUR-MIB", dlurPuTable=dlurPuTable, dlurPuDefBackupDlusName=dlurPuDefBackupDlusName, dlurNodeCpName=dlurNodeCpName, dlurPuStatus=dlurPuStatus, dlurNetworkNameForwardingSupport=dlurNetworkNameForwardingSupport, dlurPuDefBackupDlusTable=dlurPuDefBackupDlusTable, dlurDlusInfo=dlurDlusInfo, dlurPuAnsSupport=dlurPuAnsSupport, dlurDefaultDefBackupDlusIndex=dlurDefaultDefBackupDlusIndex, dlurGroups=dlurGroups, dlurPuName=dlurPuName, dlurCompliances=dlurCompliances, dlurPuDefPrimDlusName=dlurPuDefPrimDlusName, dlurPuDefBackupDlusEntry=dlurPuDefBackupDlusEntry, dlurMultiSubnetSupport=dlurMultiSubnetSupport, dlurNodeInfo=dlurNodeInfo, dlurPuLsName=dlurPuLsName, dlurDlusName=dlurDlusName, dlurConformance=dlurConformance, dlurNodeCapabilities=dlurNodeCapabilities, dlurDlusTable=dlurDlusTable, dlurCompliance=dlurCompliance, dlurDefaultDefPrimDlusName=dlurDefaultDefPrimDlusName, dlurDlusSessnStatus=dlurDlusSessnStatus, dlurPuDefBackupDlusIndex=dlurPuDefBackupDlusIndex, dlurPuLocation=dlurPuLocation, dlurDefaultDefBackupDlusTable=dlurDefaultDefBackupDlusTable, dlurPuInfo=dlurPuInfo, PYSNMP_MODULE_ID=dlurMIB, dlurObjects=dlurObjects, dlurConfGroup=dlurConfGroup, dlurPuDefBackupDlusPuName=dlurPuDefBackupDlusPuName, dlurNondisDlusDlurSessDeactSup=dlurNondisDlusDlurSessDeactSup, dlurPuActiveDlusName=dlurPuActiveDlusName, dlurReleaseLevel=dlurReleaseLevel, dlurAnsSupport=dlurAnsSupport, dlurDefaultDefBackupDlusEntry=dlurDefaultDefBackupDlusEntry, dlurPuSscpSuppliedName=dlurPuSscpSuppliedName, dlurDefaultDefBackupDlusName=dlurDefaultDefBackupDlusName, dlurPuEntry=dlurPuEntry, dlurPuDlusSessnStatus=dlurPuDlusSessnStatus, dlurMIB=dlurMIB, dlurDlusEntry=dlurDlusEntry)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
43c22f960b133202f8a7111b1fe3f2235264bfa8
| 136
|
py
|
Python
|
planning/urls.py
|
essanpupil/moneytracker
|
506424ff287d4c093430e93169fedb7ee7da34f7
|
[
"MIT"
] | null | null | null |
planning/urls.py
|
essanpupil/moneytracker
|
506424ff287d4c093430e93169fedb7ee7da34f7
|
[
"MIT"
] | null | null | null |
planning/urls.py
|
essanpupil/moneytracker
|
506424ff287d4c093430e93169fedb7ee7da34f7
|
[
"MIT"
] | null | null | null |
from django.urls import path
from planning.views import main
app_name = 'planning'
urlpatterns = [
path('', main, name='main'),
]
| 15.111111
| 32
| 0.691176
|
from django.urls import path
from planning.views import main
app_name = 'planning'
urlpatterns = [
path('', main, name='main'),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
838292766d385db372fa0d4af4505071b2dc24b9
| 610
|
py
|
Python
|
baseConverter.py
|
Alex0Blackwell/python-projects
|
2abb51d1267913bcdd4807e1a10b931c90eb1bef
|
[
"MIT"
] | 2
|
2019-11-13T17:45:53.000Z
|
2020-02-23T02:25:30.000Z
|
baseConverter.py
|
Alex0Blackwell/python-projects
|
2abb51d1267913bcdd4807e1a10b931c90eb1bef
|
[
"MIT"
] | null | null | null |
baseConverter.py
|
Alex0Blackwell/python-projects
|
2abb51d1267913bcdd4807e1a10b931c90eb1bef
|
[
"MIT"
] | null | null | null |
# Base conversion algorithm
# Fastest way to convert bases
# Note this works in reverse, so the string and the string's elements
# must be reversed; the conversion function used below is sketched next
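# A sketch of the conversion function used below, following the approach
# described in the comments above (remainders are collected least-significant
# digit first and the whole string is reversed at the end); digits are
# comma-separated so bases above 10 remain readable.
def baseConvert(num, base):
    # Given a number and a base to convert to
    res = ''
    q = num
    while q > 0:
        r = q % base
        # Reverse each digit string because the whole string is reversed at the end
        res += ',' + str(r)[::-1]
        q = q // base
    return res[::-1]  # Return the whole string reversed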
usrNum = int(input("Enter a number to convert:\n"))
usrBase = int(input("Enter a base:\n"))
print(baseConvert(usrNum, usrBase))
| 27.727273
| 77
| 0.639344
|
# Base conversion algorithm
# Fastest way to convert bases
# Note this works in reverse, so the string and the string's elements
# must be reversed
def baseConvert(num, base):
# Given a number and a base to convert to
res = ''
q = num
while(q > 0):
r = q % base
# Reverse string because the whole string will eventually be reversed
res += ',' + str(r)[::-1]
q = q // base
return res[::-1] # Return the whole string reversed
usrNum = int(input("Enter a number to convert:\n"))
usrBase = int(input("Enter a base:\n"))
print(baseConvert(usrNum, usrBase))
| 0
| 0
| 0
| 0
| 0
| 307
| 0
| 0
| 23
|
0ed91efc5dacfde2c285ab9386a5f5ce32dd3da1
| 1,785
|
py
|
Python
|
backend/classification/features/extraction.py
|
PolyCortex/polydodo
|
473d5a8b89e9bdb68ba9592241e45d30b86b471c
|
[
"MIT"
] | 13
|
2020-06-02T03:17:10.000Z
|
2022-03-23T04:06:52.000Z
|
backend/classification/features/extraction.py
|
PolyCortex/polydodo
|
473d5a8b89e9bdb68ba9592241e45d30b86b471c
|
[
"MIT"
] | 43
|
2020-07-15T04:21:06.000Z
|
2022-03-06T00:32:19.000Z
|
backend/classification/features/extraction.py
|
PolyCortex/polydodo
|
473d5a8b89e9bdb68ba9592241e45d30b86b471c
|
[
"MIT"
] | 2
|
2020-12-27T07:21:18.000Z
|
2021-09-16T20:06:47.000Z
|
"""Feature extraction tools based off a two channel EEG recording"""
import numpy as np
from classification.config.constants import (EEG_CHANNELS, AGE_FEATURE_BINS)
from classification.features.pipeline import get_feature_union
def get_eeg_features(epochs, in_bed_seconds, out_of_bed_seconds):
"""Returns the continuous feature matrix
Input
-------
epochs: mne.Epochs object with signals with or without annotations
in_bed_seconds: timespan, in seconds, from which the subject started
the recording and went to bed
out_of_bed_seconds: timespan, in seconds, from which the subject
started the recording and got out of bed
Returns
-------
Array of size (nb_epochs, nb_continuous_features)
"""
features = []
feature_union = get_feature_union()
for channel in EEG_CHANNELS:
channel_epochs = epochs.copy().pick_channels({channel})
channel_features = feature_union.transform(channel_epochs)
features.append(channel_features)
return np.hstack(tuple(features))
def get_non_eeg_features(age, sex, nb_epochs):
"""Returns the categorical feature matrix
Input
-------
age: Age of the subject
sex: Sex of the subject
nb_epochs: corresponds to the nb of epochs which will be analyzed.
Returns
-------
Array of size (nb_epochs,nb_categorical_features), which contains
(duplicated) value for all epochs because it concerns the same subject.
"""
age_category = next(
category_index
for category_index, age_range in enumerate(AGE_FEATURE_BINS)
if age >= age_range[0] and age <= age_range[1]
)
X_categorical = [sex.value, age_category]
return np.array(X_categorical * nb_epochs).reshape(nb_epochs, -1)
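# Illustrative usage sketch (not part of the original module): building the
# categorical feature matrix for one subject. The `Sex` enum below is a local
# stand-in defined only for this example; the real project provides its own
# sex constant exposing a `.value` attribute, and the age is assumed to fall
# inside one of the configured AGE_FEATURE_BINS.
if __name__ == '__main__':
    from enum import Enum
    class Sex(Enum):
        MALE = 0
        FEMALE = 1
    # One row per epoch, each row holding [sex, age_category]
    X_categorical = get_non_eeg_features(age=32, sex=Sex.FEMALE, nb_epochs=4)
    print(X_categorical.shape)  # expected: (4, 2)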
| 30.775862
| 75
| 0.707003
|
"""Feature extraction tools based off a two channel EEG recording"""
import numpy as np
from classification.config.constants import (
EEG_CHANNELS,
AGE_FEATURE_BINS,
)
from classification.features.pipeline import get_feature_union
def get_eeg_features(epochs, in_bed_seconds, out_of_bed_seconds):
"""Returns the continuous feature matrix
Input
-------
epochs: mne.Epochs object with signals with or without annotations
in_bed_seconds: timespan, in seconds, from which the subject started
the recording and went to bed
out_of_bed_seconds: timespan, in seconds, from which the subject
started the recording and got out of bed
Returns
-------
Array of size (nb_epochs, nb_continuous_features)
"""
features = []
feature_union = get_feature_union()
for channel in EEG_CHANNELS:
channel_epochs = epochs.copy().pick_channels({channel})
channel_features = feature_union.transform(channel_epochs)
features.append(channel_features)
return np.hstack(tuple(features))
def get_non_eeg_features(age, sex, nb_epochs):
"""Returns the categorical feature matrix
Input
-------
age: Age of the subject
sex: Sex of the subject
nb_epochs: corresponds to the nb of epochs which will be analyzed.
Returns
-------
Array of size (nb_epochs,nb_categorical_features), which contains
(duplicated) value for all epochs because it concerns the same subject.
"""
age_category = next(
category_index
for category_index, age_range in enumerate(AGE_FEATURE_BINS)
if age >= age_range[0] and age <= age_range[1]
)
X_categorical = [sex.value, age_category]
return np.array(X_categorical * nb_epochs).reshape(nb_epochs, -1)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0
|
f052b9fc28af42e699049bdfe2b0ac01d467c316
| 187
|
py
|
Python
|
user_details/give_default.py
|
Shreyanshsachan/College-Predictor
|
87068aa1d1a889ced586ff155bc2b5d9a78340f7
|
[
"MIT"
] | null | null | null |
user_details/give_default.py
|
Shreyanshsachan/College-Predictor
|
87068aa1d1a889ced586ff155bc2b5d9a78340f7
|
[
"MIT"
] | null | null | null |
user_details/give_default.py
|
Shreyanshsachan/College-Predictor
|
87068aa1d1a889ced586ff155bc2b5d9a78340f7
|
[
"MIT"
] | null | null | null |
preference_list_of_user=[]
| 20.777778
| 31
| 0.84492
|
preference_list_of_user=[]
def give(def_list):
Def=def_list
global preference_list_of_user
preference_list_of_user=Def
return Def
def give_to_model():
return preference_list_of_user
| 0
| 0
| 0
| 0
| 0
| 116
| 0
| 0
| 45
|
58988f8f98d9e6ccb12c9b0191a886bc9755df2f
| 669
|
py
|
Python
|
OPTED2fiveletter.py
|
caranha/wordle_helper
|
731259e14f704a48bd84f528085dc8a70898adee
|
[
"RSA-MD"
] | null | null | null |
OPTED2fiveletter.py
|
caranha/wordle_helper
|
731259e14f704a48bd84f528085dc8a70898adee
|
[
"RSA-MD"
] | null | null | null |
OPTED2fiveletter.py
|
caranha/wordle_helper
|
731259e14f704a48bd84f528085dc8a70898adee
|
[
"RSA-MD"
] | null | null | null |
import re
from bs4 import BeautifulSoup
from string import ascii_lowercase
base_file = "OPTED/words_X.html"
fiveletter = []
for letter in ascii_lowercase:
file = base_file.replace("X",letter)
with open(file, "r") as f:
html = f.read()
words = BeautifulSoup(html, "html.parser").find_all("b")
for w in words:
wt = w.get_text().lower()
length = len(wt) == 5
duplicate = wt in fiveletter
        is_ascii = not re.search(r"\W", wt)
        if length and is_ascii and not duplicate:
fiveletter.append(wt)
with open("five_letter_words.txt", "w") as f:
for w in fiveletter:
f.write(w+"\n")
| 22.3
| 60
| 0.617339
|
import os
import re
from bs4 import BeautifulSoup
from string import ascii_lowercase
base_file = "OPTED/words_X.html"
fiveletter = []
for letter in ascii_lowercase:
file = base_file.replace("X",letter)
with open(file, "r") as f:
html = f.read()
words = BeautifulSoup(html, "html.parser").find_all("b")
for w in words:
wt = w.get_text().lower()
length = len(wt) == 5
duplicate = wt in fiveletter
ascii = not re.search(r"\W", wt)
if length and ascii and not duplicate:
fiveletter.append(wt)
with open("five_letter_words.txt", "w") as f:
for w in fiveletter:
f.write(w+"\n")
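The same five-letter filter, run on a tiny in-memory HTML fragment so it can be tried without the OPTED dump; the markup below is invented for illustration.
import re
from bs4 import BeautifulSoup

# Invented fragment mimicking OPTED's "<b>word</b>" dictionary entries.
html = "<p><b>Apple</b> (n.) ...</p><p><b>apple</b> (n.) ...</p><p><b>ab-c!</b></p><p><b>pear</b></p>"

fiveletter = []
for b in BeautifulSoup(html, "html.parser").find_all("b"):
    wt = b.get_text().lower()
    # keep 5-letter, word-character-only, not-yet-seen entries
    if len(wt) == 5 and not re.search(r"\W", wt) and wt not in fiveletter:
        fiveletter.append(wt)

print(fiveletter)  # ['apple']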
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -12
| 22
|
4861261a51c5c31e103d20e20adeece80de4bce7
| 1,198
|
py
|
Python
|
rotate/ciphers.py
|
Noxell-zs/ROT13
|
36f1b788616979d6644448e0a8cc728bb2c15ad9
|
[
"MIT"
] | null | null | null |
rotate/ciphers.py
|
Noxell-zs/ROT13
|
36f1b788616979d6644448e0a8cc728bb2c15ad9
|
[
"MIT"
] | null | null | null |
rotate/ciphers.py
|
Noxell-zs/ROT13
|
36f1b788616979d6644448e0a8cc728bb2c15ad9
|
[
"MIT"
] | null | null | null |
"""Realization of algorithms for char-by-char text encryption.
Functions:
rot_13(str) -> str
Character offset by 13 positions.
caesar_1(str) -> str
Character offset by 1 position.
"""
def rot_13(char_in: str) -> str:
"""Character offset by 13 positions.
Parameters:
char_in : function
Character of the source text.
Returns:
char_out : str
Character of the encrypted text.
"""
num = ord(char_in[0])
if (97 <= num <= 109) or (65 <= num <= 77):
char_out = chr(num + 13)
elif (110 <= num <= 122) or (78 <= num <= 90):
char_out = chr(num - 13)
else:
char_out = char_in
return char_out
def caesar_1(char_in: str) -> str:
"""Character offset by 1 position.
Parameters:
char_in : function
Character of the source text.
Returns:
char_out : str
Character of the encrypted text.
"""
num = ord(char_in[0])
if (97 <= num <= 121) or (65 <= num <= 89):
char_out = chr(num + 1)
elif num == 122 or num == 90:
char_out = chr(num - 25)
else:
char_out = char_in
return char_out
| 20.655172
| 62
| 0.551753
|
"""Realization of algorithms for char-by-char text encryption.
Functions:
rot_13(str) -> str
Character offset by 13 positions.
caesar_1(str) -> str
Character offset by 1 position.
"""
def rot_13(char_in: str) -> str:
"""Character offset by 13 positions.
Parameters:
char_in : function
Character of the source text.
Returns:
char_out : str
Character of the encrypted text.
"""
num = ord(char_in[0])
if (97 <= num <= 109) or (65 <= num <= 77):
char_out = chr(num + 13)
elif (110 <= num <= 122) or (78 <= num <= 90):
char_out = chr(num - 13)
else:
char_out = char_in
return char_out
def caesar_1(char_in: str) -> str:
"""Character offset by 1 position.
Parameters:
char_in : function
Character of the source text.
Returns:
char_out : str
Character of the encrypted text.
"""
num = ord(char_in[0])
if (97 <= num <= 121) or (65 <= num <= 89):
char_out = chr(num + 1)
elif num == 122 or num == 90:
char_out = chr(num - 25)
else:
char_out = char_in
return char_out
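A short round-trip check of the 13-place shift implemented above. It rebuilds the same mapping with str.maketrans and cross-checks against the standard library's rot_13 codec, so it runs on its own.
import codecs
import string

_rot13 = str.maketrans(
    string.ascii_lowercase + string.ascii_uppercase,
    string.ascii_lowercase[13:] + string.ascii_lowercase[:13]
    + string.ascii_uppercase[13:] + string.ascii_uppercase[:13],
)

def rot13(text: str) -> str:
    # translate() applies the same per-character offset as rot_13() above
    return text.translate(_rot13)

msg = "Attack at dawn!"
enc = rot13(msg)
print(enc)                                  # Nggnpx ng qnja!
assert rot13(enc) == msg                    # applying the shift twice is the identity
assert enc == codecs.encode(msg, "rot_13")  # agrees with the stdlib codec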
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6776cf0bd080254a676d541e382d4eb2f31a05f5
| 7,640
|
py
|
Python
|
pymatgen/analysis/tests/test_diffusion_analyzer.py
|
rajeshprasanth/pymatgen
|
eb6cd95230c11ac761a96ebf82b98f71177bb71f
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/tests/test_diffusion_analyzer.py
|
rajeshprasanth/pymatgen
|
eb6cd95230c11ac761a96ebf82b98f71177bb71f
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/tests/test_diffusion_analyzer.py
|
rajeshprasanth/pymatgen
|
eb6cd95230c11ac761a96ebf82b98f71177bb71f
|
[
"MIT"
] | 1
|
2018-10-28T01:41:38.000Z
|
2018-10-28T01:41:38.000Z
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest2 as unittest
import os
"""
TODO: Change the module doc.
"""
__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "5/2/13"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
if __name__ == '__main__':
unittest.main()
| 41.978022
| 85
| 0.610602
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import unittest2 as unittest
import os
import json
import random
import numpy as np
import csv
import scipy.constants as const
from pymatgen.analysis.diffusion_analyzer import DiffusionAnalyzer,\
get_conversion_factor, fit_arrhenius
from pymatgen.core.structure import Structure
from pymatgen.util.testing import PymatgenTest
from monty.tempfile import ScratchDir
"""
TODO: Change the module doc.
"""
__author__ = "shyuepingong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__status__ = "Beta"
__date__ = "5/2/13"
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class FuncTest(unittest.TestCase):
def test_get_conversion_factor(self):
filepath = os.path.join(test_dir, 'LiFePO4.cif')
s = Structure.from_file(filepath)
# large tolerance because scipy constants changed between 0.16.1 and 0.17
self.assertAlmostEqual(41370704.343540139,
get_conversion_factor(s, "Li", 600),
delta=20)
def test_fit_arrhenius(self):
Ea = 0.5
k = const.k / const.e
c = 12
temps = np.array([300, 1000, 500])
diffusivities = c * np.exp(-Ea/(k * temps))
diffusivities *= np.array([1.00601834013,
1.00803236262,
0.98609720824])
r = fit_arrhenius(temps, diffusivities)
self.assertAlmostEqual(r[0], Ea)
self.assertAlmostEqual(r[1], c)
self.assertAlmostEqual(r[2], 0.000895566)
# when not enough values for error estimate
r2 = fit_arrhenius([1, 2], [10, 10])
self.assertAlmostEqual(r2[0], 0)
self.assertAlmostEqual(r2[1], 10)
self.assertEqual(r2[2], None)
class DiffusionAnalyzerTest(PymatgenTest):
def test_init(self):
# Diffusion vasprun.xmls are rather large. We are only going to use a
# very small preprocessed run for testing. Note that the results are
# unreliable for short runs.
with open(os.path.join(test_dir, "DiffusionAnalyzer.json")) as f:
dd = json.load(f)
d = DiffusionAnalyzer.from_dict(dd)
# large tolerance because scipy constants changed between 0.16.1 and 0.17
self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4)
self.assertAlmostEqual(d.diffusivity, 1.16083658794e-06, 7)
self.assertAlmostEqual(d.conductivity_std_dev, 0.0097244677795984488, 7)
self.assertAlmostEqual(d.diffusivity_std_dev, 9.1013023085561779e-09, 7)
self.assertArrayAlmostEqual(
d.conductivity_components,
[45.9109703, 26.2856302, 150.5405727], 3)
self.assertArrayAlmostEqual(
d.diffusivity_components,
[7.49601236e-07, 4.90254273e-07, 2.24649255e-06])
self.assertArrayAlmostEqual(
d.conductivity_components_std_dev,
[0.0063579, 0.0180862, 0.0217917]
)
self.assertArrayAlmostEqual(
d.diffusivity_components_std_dev,
[8.9465670e-09, 2.4931224e-08, 2.2636384e-08]
)
self.assertArrayAlmostEqual(
d.max_ion_displacements,
[1.4620659693989553, 1.2787303484445025, 3.419618540097756,
2.340104469126246, 2.6080973517594233, 1.3928579365672844,
1.3561505956708932, 1.6699242923686253, 1.0352389639563648,
1.1662520093955808, 1.2322019205885841, 0.8094210554832534,
1.9917808504954169, 1.2684148391206396, 2.392633794162402,
2.566313049232671, 1.3175030435622759, 1.4628945430952793,
1.0984921286753002, 1.2864482076554093, 0.655567027815413,
0.5986961164605746, 0.5639091444309045, 0.6166004192954059,
0.5997911580422605, 0.4374606277579815, 1.1865683960470783,
0.9017064371676591, 0.6644840367853767, 1.0346375380664645,
0.6177630142863979, 0.7952002051914302, 0.7342686123054011,
0.7858047956905577, 0.5570732369065661, 1.0942937746885417,
0.6509372395308788, 1.0876687380413455, 0.7058162184725,
0.8298306317598585, 0.7813913747621343, 0.7337655232056153,
0.9057161616236746, 0.5979093093186919, 0.6830333586985015,
0.7926500894084628, 0.6765180009988608, 0.8555866032968998,
0.713087091642237, 0.7621007695790749])
self.assertEqual(d.sq_disp_ions.shape, (50, 206))
self.assertAlmostEqual(d.max_framework_displacement, 1.18656839605)
ss = list(d.get_drift_corrected_structures(10, 1000, 20))
self.assertEqual(len(ss), 50)
n = random.randint(0, 49)
n_orig = n * 20 + 10
self.assertArrayAlmostEqual(
ss[n].cart_coords - d.structure.cart_coords + d.drift[:, n_orig, :],
d.disp[:, n_orig, :])
d = DiffusionAnalyzer.from_dict(d.as_dict())
self.assertIsInstance(d, DiffusionAnalyzer)
#Ensure summary dict is json serializable.
json.dumps(d.get_summary_dict(include_msd_t=True))
d = DiffusionAnalyzer(d.structure, d.disp, d.specie, d.temperature,
d.time_step, d.step_skip, smoothed="max")
self.assertAlmostEqual(d.conductivity, 74.165372613735684, 4)
self.assertAlmostEqual(d.diffusivity, 1.14606446822e-06, 7)
d = DiffusionAnalyzer(d.structure, d.disp, d.specie, d.temperature,
d.time_step, d.step_skip, smoothed=False)
self.assertAlmostEqual(d.conductivity, 27.20479170406027, 4)
self.assertAlmostEqual(d.diffusivity, 4.25976905436e-07, 7)
d = DiffusionAnalyzer(d.structure, d.disp, d.specie, d.temperature,
d.time_step, d.step_skip,
smoothed="constant", avg_nsteps=100)
self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4)
self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7)
# Can't average over 2000 steps because this is a 1000-step run.
self.assertRaises(ValueError, DiffusionAnalyzer,
d.structure, d.disp, d.specie, d.temperature,
d.time_step, d.step_skip, smoothed="constant",
avg_nsteps=2000)
d = DiffusionAnalyzer.from_structures(
list(d.get_drift_corrected_structures()),
d.specie, d.temperature, d.time_step,
d.step_skip, d.smoothed, avg_nsteps=100)
self.assertAlmostEqual(d.conductivity, 47.404056230438741, 4)
self.assertAlmostEqual(d.diffusivity, 7.4226016496716148e-07, 7)
d.export_msdt("test.csv")
with open("test.csv") as f:
data = []
for row in csv.reader(f):
if row:
data.append(row)
data.pop(0)
data = np.array(data, dtype=np.float64)
self.assertArrayAlmostEqual(data[:, 1], d.msd)
os.remove("test.csv")
if __name__ == '__main__':
unittest.main()
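A pymatgen-free sketch of the Arrhenius fit exercised by test_fit_arrhenius above: D = c*exp(-Ea/(kT)) becomes a straight line, ln D = ln c - (Ea/k)(1/T), which numpy can fit directly. The numbers are illustrative, not taken from the test data.
import numpy as np
import scipy.constants as const

k = const.k / const.e            # Boltzmann constant in eV/K
Ea_true, c_true = 0.5, 12.0      # made-up activation energy (eV) and prefactor

temps = np.array([300.0, 500.0, 1000.0])
D = c_true * np.exp(-Ea_true / (k * temps))

# ln D = ln c - (Ea/k) * (1/T): linear in 1/T
slope, intercept = np.polyfit(1.0 / temps, np.log(D), 1)
Ea_fit, c_fit = -slope * k, np.exp(intercept)

print(round(Ea_fit, 6), round(c_fit, 6))   # ~0.5 and ~12.0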
| 0
| 0
| 0
| 6,710
| 0
| 0
| 0
| 131
| 245
|
d94e383e015801e9caaa411360f5d5c3c970581d
| 9,511
|
py
|
Python
|
checker/logic.py
|
ucam-cl-dtg/equality_checker
|
6a31d3dd360f821e36c4742e1d5139d7292f8319
|
[
"Apache-2.0"
] | 7
|
2020-07-18T08:04:27.000Z
|
2022-03-07T06:46:17.000Z
|
checker/logic.py
|
ucam-cl-dtg/equality_checker
|
6a31d3dd360f821e36c4742e1d5139d7292f8319
|
[
"Apache-2.0"
] | 1
|
2022-03-18T17:05:54.000Z
|
2022-03-18T17:05:54.000Z
|
checker/logic.py
|
ucam-cl-dtg/equality_checker
|
6a31d3dd360f821e36c4742e1d5139d7292f8319
|
[
"Apache-2.0"
] | 1
|
2020-07-18T08:04:28.000Z
|
2020-07-18T08:04:28.000Z
|
import sympy
from .utils import known_equal_pair, contains_incorrect_symbols
from .utils import EqualityType
from .parsing import logic_parser, UnsafeInputException
__all__ = ["check"]
KNOWN_PAIRS = dict()
def parse_expression(expression_str, *, local_dict=None):
"""Take a string containing a mathematical expression and return a sympy expression.
Wrap the parsing class function parse_expr(...) and catch any exceptions
that occur.
- 'local_dict' can be a dictionary of (name, sympy.Symbol(...)) pairs, where
the string 'name' will not be split up and will be turned into the symbol
specified. It may be None.
"""
try:
return logic_parser.parse_expr(expression_str, local_dict=local_dict)
except logic_parser.ParsingException:
print("Incorrectly formatted expression.")
print("Fail: '{}'.".format(expression_str))
return None
def exact_match(test_expr, target_expr):
"""Test if the entered expression exactly matches the known expression.
This performs as little simplification of the boolean expression as
possible, allowing only the commutativity of AND and OR.
Returns True if the sympy expressions have the same internal structure,
and False if not.
- 'test_expr' should be the untrusted sympy expression to check.
- 'target_expr' should be the trusted sympy expression to match against.
"""
print("[EXACT TEST]")
if test_expr == target_expr:
print("Exact Match (with '==')")
return True
elif sympy.srepr(test_expr) == sympy.srepr(target_expr):
# This is a (perfectly acceptable) hack for ordering the atoms of each
# term, but a more explicit method may be preferable in the future.
print("Exact Match (with 'srepr')")
return True
else:
return False
def symbolic_equality(test_expr, target_expr):
"""Test if two expressions are symbolically equivalent.
Use the sympy 'simplify_logic' function to simplify the two boolean
expressions as much as possible. Two equivalent expressions MUST simplify
to the same thing, and then they can be tested for equivalence again.
Returns True if sympy can determine that the two expressions are equal,
and returns False if they are not equal.
- 'test_expr' should be the untrusted sympy expression to check.
- 'target_expr' should be the trusted sympy expression to match against.
"""
print("[SYMBOLIC TEST]")
try:
simplified_target = sympy.simplify_logic(target_expr)
simplified_test = sympy.simplify_logic(test_expr)
if simplified_target == simplified_test or sympy.srepr(simplified_target) == sympy.srepr(simplified_test):
print("Symbolic match.")
print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr))
KNOWN_PAIRS[(target_expr, test_expr)] = EqualityType.SYMBOLIC
return True
else:
return False
except NotImplementedError as e:
print("{0}: {1} - Can't check symbolic equality!".format(type(e).__name__, str(e).capitalize()))
return False
def expr_equality(test_expr, target_expr):
"""Given two sympy expressions: test for exact, symbolic and numeric equality.
Check two sympy expressions for equality, throwing a TypeError if either
of the provided sympy objects is not an expression.
- 'test_expr' should be the untrusted sympy expression to check.
- 'target_expr' should be the trusted sympy expression to match against.
"""
equality_type = EqualityType.EXACT
equal = exact_match(test_expr, target_expr)
if not equal:
# Then try checking for symbolic equality:
equality_type = EqualityType.SYMBOLIC
equal = symbolic_equality(test_expr, target_expr)
return equal, equality_type
def general_equality(test_expr, target_expr):
"""Given two general sympy objects: test for exact, symbolic and numeric equality.
- 'test_expr' should be the untrusted sympy object to check.
- 'target_expr' should be the trusted sympy object to match against.
"""
equal, equality_type = known_equal_pair(KNOWN_PAIRS, test_expr, target_expr)
# If this is a known pair: return immediately:
if equal:
return equal, equality_type
else:
print("[[EXPRESSION CHECK]]")
return expr_equality(test_expr, target_expr)
def check(test_str, target_str, *, symbols=None, check_symbols=True, description=None,
_quiet=False):
"""The main checking function, calls each of the equality checking functions as required.
Returns a dict describing the equality; with important keys being 'equal',
and 'equality_type'. The key 'error' is added if something went wrong, and
this should always be checked for first.
- 'test_str' should be the untrusted string for sympy to parse.
- 'target_str' should be the trusted string to parse and match against.
- 'symbols' should be a string list or comma separated string of symbols
not to split during parsing.
- 'check_symbols' indicates whether to verify the symbols used in each
expression are exactly the same or not; setting this to False will
allow symbols which cancel out to be included (probably don't want this
in questions).
- 'description' is an optional description to print before the checker's
output to stdout which can be used to improve logging.
- '_quiet' is an internal argument used to suppress some output when
this function is called from plus_minus_checker().
"""
# Suppress this output if necessary:
if not _quiet:
print("=" * 50)
# For logging purposes, if we have a description: print it!
if description is not None:
print(description)
print("=" * 50)
print("[LOGIC]")
# If nothing to parse, fail. On server, this will be caught in check_endpoint()
if (target_str == "") or (test_str == ""):
print("ERROR: No input provided!")
if not _quiet:
print("=" * 50)
return dict(error="Empty string as argument.")
# Cleanup the strings before anything is done to them:
error_is_test = False
try:
target_str = logic_parser.cleanup_string(target_str, reject_unsafe_input=True)
error_is_test = True
test_str = logic_parser.cleanup_string(test_str, reject_unsafe_input=True)
except UnsafeInputException:
print("ERROR: Input contained non-whitelisted characters!")
result = dict(error="Bad input provided!")
if error_is_test:
print("Test string: '{}'".format(test_str))
result["syntax_error"] = str(True).lower()
if not _quiet:
print("=" * 50)
return result
print("Target string: '{}'".format(target_str))
print("Test string: '{}'".format(test_str))
print("[[PARSE EXPRESSIONS]]")
# Parse the trusted target expression:
target_expr = parse_expression(target_str)
# Parse the untrusted test expression:
test_expr = parse_expression(test_str)
result = dict(target=target_str, test=test_str)
if target_expr is None:
print("ERROR: TRUSTED EXPRESSION CANNOT BE PARSED!")
if not _quiet:
print("=" * 50)
result["error"] = "Parsing TARGET Expression Failed!"
result["code"] = 400 # This is fatal!
return result
if test_expr is None:
print("Incorrectly formatted ToCheck expression.")
if not _quiet:
print("=" * 50)
result["error"] = "Parsing Test Expression Failed!"
result["syntax_error"] = str(True).lower()
return result
result["parsed_target"] = str(target_expr)
result["parsed_test"] = str(test_expr)
# Now check for symbol match and equality:
try:
print("Parsed Target: {0}\nParsed ToCheck: {1}".format(target_expr, test_expr))
if check_symbols: # Do we have same set of symbols in each?
incorrect_symbols = contains_incorrect_symbols(test_expr, target_expr)
if incorrect_symbols is not None:
print("[[RESULT]]\nEquality: False")
if not _quiet:
print("=" * 50)
result["equal"] = str(False).lower()
result["equality_type"] = EqualityType.SYMBOLIC.value
result["incorrect_symbols"] = incorrect_symbols
return result
# Then check for equality proper:
equal, equality_type = general_equality(test_expr, target_expr)
except (SyntaxError, TypeError, AttributeError) as e:
print("Error when comparing expressions: '{}'.".format(e))
if not _quiet:
print("=" * 50)
result["error"] = "Comparison of expressions failed: '{}'".format(e)
return result
print("[[RESULT]]")
if equal and (equality_type is not EqualityType.EXACT) and ((target_expr, test_expr) not in KNOWN_PAIRS):
print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr))
KNOWN_PAIRS[(target_expr, test_expr)] = equality_type
print("Equality: {}".format(equal))
if not _quiet:
print("=" * 50)
result["equal"] = str(equal).lower()
result["equality_type"] = equality_type.value
return result
| 40.819742
| 114
| 0.66134
|
import sympy
from .utils import known_equal_pair, contains_incorrect_symbols
from .utils import EqualityType
from .parsing import logic_parser, UnsafeInputException
__all__ = ["check"]
KNOWN_PAIRS = dict()
def parse_expression(expression_str, *, local_dict=None):
"""Take a string containing a mathematical expression and return a sympy expression.
Wrap the parsing class function parse_expr(...) and catch any exceptions
that occur.
- 'local_dict' can be a dictionary of (name, sympy.Symbol(...)) pairs, where
the string 'name' will not be split up and will be turned into the symbol
specified. It may be None.
"""
try:
return logic_parser.parse_expr(expression_str, local_dict=local_dict)
except logic_parser.ParsingException:
print("Incorrectly formatted expression.")
print("Fail: '{}'.".format(expression_str))
return None
def exact_match(test_expr, target_expr):
"""Test if the entered expression exactly matches the known expression.
This performs as little simplification of the boolean expression as
possible, allowing only the commutativity of AND and OR.
Returns True if the sympy expressions have the same internal structure,
and False if not.
- 'test_expr' should be the untrusted sympy expression to check.
- 'target_expr' should be the trusted sympy expression to match against.
"""
print("[EXACT TEST]")
if test_expr == target_expr:
print("Exact Match (with '==')")
return True
elif sympy.srepr(test_expr) == sympy.srepr(target_expr):
# This is a (perfectly acceptable) hack for ordering the atoms of each
# term, but a more explicit method may be preferable in the future.
print("Exact Match (with 'srepr')")
return True
else:
return False
def symbolic_equality(test_expr, target_expr):
"""Test if two expressions are symbolically equivalent.
Use the sympy 'simplify_logic' function to simplify the two boolean
expressions as much as possible. Two equivalent expressions MUST simplify
to the same thing, and then they can be tested for equivalence again.
Returns True if sympy can determine that the two expressions are equal,
and returns False if they are not equal.
- 'test_expr' should be the untrusted sympy expression to check.
- 'target_expr' should be the trusted sympy expression to match against.
"""
print("[SYMBOLIC TEST]")
try:
simplified_target = sympy.simplify_logic(target_expr)
simplified_test = sympy.simplify_logic(test_expr)
if simplified_target == simplified_test or sympy.srepr(simplified_target) == sympy.srepr(simplified_test):
print("Symbolic match.")
print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr))
KNOWN_PAIRS[(target_expr, test_expr)] = EqualityType.SYMBOLIC
return True
else:
return False
except NotImplementedError as e:
print("{0}: {1} - Can't check symbolic equality!".format(type(e).__name__, str(e).capitalize()))
return False
def expr_equality(test_expr, target_expr):
"""Given two sympy expressions: test for exact, symbolic and numeric equality.
Check two sympy expressions for equality, throwing a TypeError if either
of the provided sympy objects is not an expression.
- 'test_expr' should be the untrusted sympy expression to check.
- 'target_expr' should be the trusted sympy expression to match against.
"""
equality_type = EqualityType.EXACT
equal = exact_match(test_expr, target_expr)
if not equal:
# Then try checking for symbolic equality:
equality_type = EqualityType.SYMBOLIC
equal = symbolic_equality(test_expr, target_expr)
return equal, equality_type
def general_equality(test_expr, target_expr):
"""Given two general sympy objects: test for exact, symbolic and numeric equality.
- 'test_expr' should be the untrusted sympy object to check.
- 'target_expr' should be the trusted sympy object to match against.
"""
equal, equality_type = known_equal_pair(KNOWN_PAIRS, test_expr, target_expr)
# If this is a known pair: return immediately:
if equal:
return equal, equality_type
else:
print("[[EXPRESSION CHECK]]")
return expr_equality(test_expr, target_expr)
def check(test_str, target_str, *, symbols=None, check_symbols=True, description=None,
_quiet=False):
"""The main checking function, calls each of the equality checking functions as required.
Returns a dict describing the equality; with important keys being 'equal',
and 'equality_type'. The key 'error' is added if something went wrong, and
this should always be checked for first.
- 'test_str' should be the untrusted string for sympy to parse.
- 'target_str' should be the trusted string to parse and match against.
- 'symbols' should be a string list or comma separated string of symbols
not to split during parsing.
- 'check_symbols' indicates whether to verify the symbols used in each
expression are exactly the same or not; setting this to False will
allow symbols which cancel out to be included (probably don't want this
in questions).
- 'description' is an optional description to print before the checker's
output to stdout which can be used to improve logging.
- '_quiet' is an internal argument used to suppress some output when
this function is called from plus_minus_checker().
"""
# Suppress this output if necessary:
if not _quiet:
print("=" * 50)
# For logging purposes, if we have a description: print it!
if description is not None:
print(description)
print("=" * 50)
print("[LOGIC]")
# If nothing to parse, fail. On server, this will be caught in check_endpoint()
if (target_str == "") or (test_str == ""):
print("ERROR: No input provided!")
if not _quiet:
print("=" * 50)
return dict(error="Empty string as argument.")
# Cleanup the strings before anything is done to them:
error_is_test = False
try:
target_str = logic_parser.cleanup_string(target_str, reject_unsafe_input=True)
error_is_test = True
test_str = logic_parser.cleanup_string(test_str, reject_unsafe_input=True)
except UnsafeInputException:
print("ERROR: Input contained non-whitelisted characters!")
result = dict(error="Bad input provided!")
if error_is_test:
print("Test string: '{}'".format(test_str))
result["syntax_error"] = str(True).lower()
if not _quiet:
print("=" * 50)
return result
print("Target string: '{}'".format(target_str))
print("Test string: '{}'".format(test_str))
print("[[PARSE EXPRESSIONS]]")
# Parse the trusted target expression:
target_expr = parse_expression(target_str)
# Parse the untrusted test expression:
test_expr = parse_expression(test_str)
result = dict(target=target_str, test=test_str)
if target_expr is None:
print("ERROR: TRUSTED EXPRESSION CANNOT BE PARSED!")
if not _quiet:
print("=" * 50)
result["error"] = "Parsing TARGET Expression Failed!"
result["code"] = 400 # This is fatal!
return result
if test_expr is None:
print("Incorrectly formatted ToCheck expression.")
if not _quiet:
print("=" * 50)
result["error"] = "Parsing Test Expression Failed!"
result["syntax_error"] = str(True).lower()
return result
result["parsed_target"] = str(target_expr)
result["parsed_test"] = str(test_expr)
# Now check for symbol match and equality:
try:
print("Parsed Target: {0}\nParsed ToCheck: {1}".format(target_expr, test_expr))
if check_symbols: # Do we have same set of symbols in each?
incorrect_symbols = contains_incorrect_symbols(test_expr, target_expr)
if incorrect_symbols is not None:
print("[[RESULT]]\nEquality: False")
if not _quiet:
print("=" * 50)
result["equal"] = str(False).lower()
result["equality_type"] = EqualityType.SYMBOLIC.value
result["incorrect_symbols"] = incorrect_symbols
return result
# Then check for equality proper:
equal, equality_type = general_equality(test_expr, target_expr)
except (SyntaxError, TypeError, AttributeError) as e:
print("Error when comparing expressions: '{}'.".format(e))
if not _quiet:
print("=" * 50)
result["error"] = "Comparison of expressions failed: '{}'".format(e)
return result
print("[[RESULT]]")
if equal and (equality_type is not EqualityType.EXACT) and ((target_expr, test_expr) not in KNOWN_PAIRS):
print("INFO: Adding known pair ({0}, {1})".format(target_expr, test_expr))
KNOWN_PAIRS[(target_expr, test_expr)] = equality_type
print("Equality: {}".format(equal))
if not _quiet:
print("=" * 50)
result["equal"] = str(equal).lower()
result["equality_type"] = equality_type.value
return result
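A minimal sketch of the symbolic step in isolation, using plain sympy symbols instead of the project's logic_parser: two formulas that differ syntactically but are logically equivalent simplify to the same canonical form, while an inequivalent one does not.
import sympy
from sympy import symbols
from sympy.logic.boolalg import And, Not, Or

A, B = symbols("A B")

expr1 = Or(A, And(A, B))   # A | (A & B)
expr2 = A                  # equivalent by the absorption law
expr3 = And(Not(A), B)     # ~A & B, not equivalent

print(sympy.simplify_logic(expr1) == sympy.simplify_logic(expr2))  # True
print(sympy.simplify_logic(expr1) == sympy.simplify_logic(expr3))  # False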
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
559890790ae2649fffcbdb6731e70ef5186638d7
| 222
|
py
|
Python
|
Kattis/How Many Digits/howmanydigits.py
|
DeepSpace2/Comptitive-Programming
|
13212d9dbc73ab87519b0596fdb0147d40c7eaa8
|
[
"MIT"
] | 1
|
2021-11-12T16:39:40.000Z
|
2021-11-12T16:39:40.000Z
|
Kattis/How Many Digits/howmanydigits.py
|
DeepSpace2/Comptitive-Programming
|
13212d9dbc73ab87519b0596fdb0147d40c7eaa8
|
[
"MIT"
] | null | null | null |
Kattis/How Many Digits/howmanydigits.py
|
DeepSpace2/Comptitive-Programming
|
13212d9dbc73ab87519b0596fdb0147d40c7eaa8
|
[
"MIT"
] | 3
|
2021-07-01T11:46:19.000Z
|
2021-09-12T13:49:04.000Z
|
from math import e, log10, pi
while True:
try:
n = int(input())
except EOFError:
break
if n == 0:
print(1)
else:
print(int(n * log10(n / e) + log10(2 * pi * n) / 2) + 1)
| 20.181818
| 64
| 0.463964
|
from math import e, log10, pi
while True:
try:
n = int(input())
except EOFError:
break
if n == 0:
print(1)
else:
print(int(n * log10(n / e) + log10(2 * pi * n) / 2) + 1)
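The one-liner above is a Stirling-approximation digit count for n! (often credited to Kamenetsky). A quick cross-check against math.lgamma, which gives ln(n!) accurately enough for this purpose, for a few sample values:
import math

def digits_kamenetsky(n: int) -> int:
    if n == 0:
        return 1
    return int(n * math.log10(n / math.e) + math.log10(2 * math.pi * n) / 2) + 1

def digits_lgamma(n: int) -> int:
    # lgamma(n + 1) == ln(n!), so digits = floor(ln(n!) / ln(10)) + 1
    return 1 if n == 0 else int(math.lgamma(n + 1) / math.log(10)) + 1

for n in (0, 1, 5, 10, 100, 1000):
    assert digits_kamenetsky(n) == digits_lgamma(n)
print("both digit counts agree on the sampled values")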
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
72eb43c27020f9c97d40a6a12b90946e9a888bc7
| 10,665
|
py
|
Python
|
game24/gameconsole.py
|
Adoyan-Grigor/game24
|
4619e953ed94248669759850b9efb812ecf54786
|
[
"Apache-2.0"
] | null | null | null |
game24/gameconsole.py
|
Adoyan-Grigor/game24
|
4619e953ed94248669759850b9efb812ecf54786
|
[
"Apache-2.0"
] | null | null | null |
game24/gameconsole.py
|
Adoyan-Grigor/game24
|
4619e953ed94248669759850b9efb812ecf54786
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
try:
import builtins
raw_input = getattr(builtins, 'input')
except ImportError:
pass
MSG_MENU_MAIN = '''1. Play (p)
2. Check answer (c)
3. Quit (q)'''
MSG_MENU_PLAY = '''1. Definitely no solutions (n)
2. Give me a hint (h)
3. I gave up, show me the answer (s)
4. Back to the main menu (b)
5. Quit the game (q)'''
MSG_MENU_SET_END = '''1. One more set (n)
2. Back to the main menu (b)
3. Quit the game (q)'''
MSG_MENU_PLAY_RIGHT = '''1. Try other solutions (t)
2. Next hand (n)
3. Show me the answers (s)
4. Quit the game (q)'''
MSG_SELECT = 'Your choice: '
MSG_INVALID_INPUT = 'Invalid input!'
MSG_MENU_HAND_END = '''1. One more hand (n)
2. Quit this set, back to the main menu (b)
3. Quit the game (q)'''
MSG_SELECT = 'Your choice: '
MSG_INVALID_INPUT = 'Invalid input!'
MSG_INVALID_INTEGER = 'Invalid integer: %s'
MSG_PLAY_NEW_SET = 'Set %d'
MSG_PLAY_NEW_HAND = 'Hand %d: %s'
MSG_PLAY_INPUT_EXPR = 'Input your solution or one of the above choices: '
MSG_PLAY_RIGHT = 'Good Job!'
MSG_PLAY_FIND_BUG = '''Great Job!
You not only solved the problem, but also found a bug!
Please report to me with the cards and your solution if you don't mind.'''
MSG_PLAY_WRONG = "Sorry! It's not correct!"
MSG_PLAY_NO_ANSWER = 'Seems no solutions'
MSG_PLAY_NO_CARDS = 'Set end, your result'
MSG_INPUT_NUMBERS = 'Please input %d integers: '
INPUT_EOF = '\x00'
if __name__ == '__main__':
main()
| 30.913043
| 83
| 0.509048
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import sys
import argparse
import readline
import random
import traceback
try:
import builtins
raw_input = getattr(builtins, 'input')
except ImportError:
pass
from game24 import calc, game
MSG_MENU_MAIN = '''1. Play (p)
2. Check answer (c)
3. Quit (q)'''
MSG_MENU_PLAY = '''1. Definitely no solutions (n)
2. Give me a hint (h)
3. I gave up, show me the answer (s)
4. Back to the main menu (b)
5. Quit the game (q)'''
MSG_MENU_SET_END = '''1. One more set (n)
2. Back to the main menu (b)
3. Quit the game (q)'''
MSG_MENU_PLAY_RIGHT = '''1. Try other solutions (t)
2. Next hand (n)
3. Show me the answers (s)
4. Quit the game (q)'''
MSG_SELECT = 'Your choice: '
MSG_INVALID_INPUT = 'Invalid input!'
MSG_MENU_HAND_END = '''1. One more hand (n)
2. Quit this set, back to the main menu (b)
3. Quit the game (q)'''
MSG_SELECT = 'Your choice: '
MSG_INVALID_INPUT = 'Invalid input!'
MSG_INVALID_INTEGER = 'Invalid integer: %s'
MSG_PLAY_NEW_SET = 'Set %d'
MSG_PLAY_NEW_HAND = 'Hand %d: %s'
MSG_PLAY_INPUT_EXPR = 'Input your solution or one of the above choices: '
MSG_PLAY_RIGHT = 'Good Job!'
MSG_PLAY_FIND_BUG = '''Great Job!
You not only solved the problem, but also found a bug!
Please report to me with the cards and your solution if you don't mind.'''
MSG_PLAY_WRONG = "Sorry! It's not correct!"
MSG_PLAY_NO_ANSWER = 'Seems no solutions'
MSG_PLAY_NO_CARDS = 'Set end, your result'
MSG_INPUT_NUMBERS = 'Please input %d integers: '
INPUT_EOF = '\x00'
class GameConsole(game.Game):
def __init__(self, target=24, count=4, face2ten=False, showcard=False):
super(GameConsole, self).__init__(target, count, face2ten)
self.showcard = showcard
@staticmethod
def raw_input_ex(prompt='', default=''):
'''enhance raw_input to support a default input and to flatten EOF into a sentinel value'''
try:
readline.set_startup_hook(lambda: readline.insert_text(default))
try:
return input(prompt)
finally:
readline.set_startup_hook()
except EOFError:
return INPUT_EOF
@staticmethod
def print_title(title, dechar='', delen=50):
print(dechar * delen)
print(title)
print(dechar * delen)
@staticmethod
def ui_menu(menu, choices, eof=True):
'''show a menu, and return the selection'''
GameConsole.print_title(menu, dechar='-')
while True:
r = GameConsole.raw_input_ex(MSG_SELECT).strip()
if r == '' or (r in choices and len(r) > 1):
print(MSG_INVALID_INPUT)
continue
elif r in choices or (eof and r == INPUT_EOF):
print()
return r
print(MSG_INVALID_INPUT)
def ui_check_answer(self):
'''show answers for user provided integers'''
while True:
r = self.raw_input_ex(MSG_INPUT_NUMBERS % self.count).strip()
try:
integers = [int(s) for s in r.strip().split()]
except ValueError:
print(MSG_INVALID_INPUT)
continue
if len(integers) != self.count:
print(MSG_INVALID_INPUT)
continue
break
answers = calc.solve(integers, self.target)
if answers:
s = '\n'.join([str(expr) for expr in answers])
else:
s = MSG_PLAY_NO_ANSWER
self.print_title(s)
def main(self):
'''the main entry of the game console'''
choices = '1p2c3q'
while True:
r = self.ui_menu(MSG_MENU_MAIN, choices)
if r in '1p':
self.play()
elif r in '2c':
self.ui_check_answer()
elif r in ('3q' + INPUT_EOF):
return
def print_result(self):
solved = 0
failed = 0
hinted = 0
for hand in self.hands:
if hand.result == game.HAND_RESULT_SOLVED:
solved += 1
elif hand.result == game.HAND_RESULT_HINTED:
hinted += 1
elif hand.result == game.HAND_RESULT_FAILED:
failed += 1
print()
print('Total %d hands solved' % solved)
print('Total %d hands solved with hint' % hinted)
print('Total %d hands failed to solve' % failed)
print()
def ui_menu_and_expr(self, menu, choices, eof=True):
hand_ints = self.hands[-1].integers
self.print_title(menu, dechar='-')
while True:
r = self.raw_input_ex(MSG_PLAY_INPUT_EXPR).strip()
if r == '' or (r in choices and len(r) > 1):
print(MSG_INVALID_INPUT)
continue
elif r in choices or (eof and r == INPUT_EOF):
print()
return r
try:
expr = calc.parse(r)
except ValueError as e:
print(str(e))
continue
integers = expr.get_integers()
for i in integers:
if i not in hand_ints:
print(MSG_INVALID_INTEGER % i)
break
else:
return expr
def play(self):
while True:
if not self.hands:
self.print_title(MSG_PLAY_NEW_SET % self.seti, dechar='*')
hand = self.new_hand()
if not hand:
# no enough cards for a new hand
self.print_title(MSG_PLAY_NO_CARDS, dechar='*')
self.print_result()
choices = '1n2b3q'
r = self.ui_menu(MSG_MENU_SET_END, choices)
if r in '1n':
# renew the set
self.reset()
continue
elif r in ('2b' + INPUT_EOF):
# back to the main menu
return
elif r in '3q':
sys.exit(0)
print()
if self.showcard:
sc = hand.str_cards()
else:
sc = ' '.join([str(i) for i in hand.integers])
self.print_title(MSG_PLAY_NEW_HAND % (len(self.hands), sc),
dechar='+')
print()
while True:
choices = '1n2h3s4b5q'
r = self.ui_menu_and_expr(MSG_MENU_PLAY, choices)
if isinstance(r, calc.Expr):
expr = r
check_r = str(r)
if expr.value == self.target:
hand.solved()
if not self.calculating_the_number_of_numbers(check_r, sc):
print(MSG_INVALID_INPUT)
continue
s = MSG_PLAY_RIGHT
self.print_title(s)
choices = '1t2n3s4q'
r = self.ui_menu(MSG_MENU_PLAY_RIGHT, choices, eof=False)
if r in '1t':
continue
elif r in '2n':
break
elif r in '3s':
self.print_title(hand.str_answer())
elif r in '4q':
sys.exit(0)
else:
self.print_title(MSG_PLAY_WRONG)
continue
elif r in '1n':
# no answer
if hand.answers:
self.print_title(MSG_PLAY_WRONG)
continue
else:
hand.solved()
self.print_title(MSG_PLAY_RIGHT)
elif r in '2h':
# show a hint
if hand.answers:
hand.hinted()
self.print_title(hand.str_hint())
continue
else:
self.print_title(MSG_PLAY_NO_ANSWER)
elif r in '3s':
# show the answer
if hand.answers:
s = hand.str_answer()
else:
s = MSG_PLAY_NO_ANSWER
self.print_title(s)
elif r in ('4b' + INPUT_EOF):
# back to the main menu
return
elif r in '5q':
sys.exit(0)
# this hand is end
break
def calculating_the_number_of_numbers(self, r, sc):
    """calculates how many numbers are in the user input"""
    # keep only the non-operator tokens and compare their count with the dealt integers
    tokens = [i for i in r.split() if i not in '+-*×/÷']
    if len(tokens) != len(sc.split()):
        return False
    return True
def arg_parse():
parser = argparse.ArgumentParser(description='Play the 24 Game')
parser.add_argument('-c', type=int, default=4, dest='count',
help='the number of integers to play with, default=4')
parser.add_argument('-C', action='store_true', dest='showcard',
help='show cards instead of integers under interactive mode')
parser.add_argument('-d', action='store_true', dest='debug',
help='enable debug output')
parser.add_argument('-i', action='store_true', dest='interactive',
help='interactive mode, all positional integers arguments omitted')
parser.add_argument('-N', action='store_true', dest='face2ten',
help='under interactive mode, set J Q K to 10, default=11,12,13')
parser.add_argument('-t', type=int, default=24, dest='target',
help='the game target, default=24')
parser.add_argument('integers', nargs='*')
r = parser.parse_args()
if not r.interactive and len(r.integers) == 0:
r.interactive = True
return r
def main():
args = arg_parse()
try:
if args.interactive:
gc = GameConsole(args.target, args.count,
args.face2ten, args.showcard)
gc.main()
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except Exception as e:
if args.debug:
traceback.print_exc()
else:
print(str(e), file=sys.stderr)
sys.exit(3)
if __name__ == '__main__':
main()
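The console above delegates the actual solving to game24.calc, which is not shown here. As a rough illustration of what such a solver does (and explicitly not the project's implementation), here is a generic brute-force sketch that combines pairs of numbers with every operator, using exact fractions to avoid floating-point surprises.
from fractions import Fraction
from itertools import combinations

def solve24(numbers, target=24):
    """Return one expression string that reaches `target`, or None."""
    items = [(Fraction(n), str(n)) for n in numbers]

    def search(items):
        if len(items) == 1:
            value, expr = items[0]
            return expr if value == target else None
        # pick any two entries, combine them with each operator, recurse on the rest
        for i, j in combinations(range(len(items)), 2):
            (a, ea), (b, eb) = items[i], items[j]
            rest = [items[k] for k in range(len(items)) if k not in (i, j)]
            candidates = [
                (a + b, "({0}+{1})".format(ea, eb)),
                (a - b, "({0}-{1})".format(ea, eb)),
                (b - a, "({0}-{1})".format(eb, ea)),
                (a * b, "({0}*{1})".format(ea, eb)),
            ]
            if b != 0:
                candidates.append((a / b, "({0}/{1})".format(ea, eb)))
            if a != 0:
                candidates.append((b / a, "({0}/{1})".format(eb, ea)))
            for value, expr in candidates:
                found = search(rest + [(value, expr)])
                if found:
                    return found
        return None

    return search(items)

print(solve24([4, 6, 8, 8]))   # one valid expression; exact form depends on search order
print(solve24([1, 1, 1, 1]))   # None: these four cannot reach 24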
| 4
| 983
| 0
| 6,546
| 0
| 1,440
| 0
| -28
| 203
|
ce74a3c506ce96d7da83678be3ac5f3605bd112f
| 839
|
py
|
Python
|
dynamodb-serverless/functions/put/handler.py
|
koki-nakamura22/serverless-framework-practice
|
b6fb96cc97ecb7a1fa167c7cccb143510466d350
|
[
"MIT"
] | null | null | null |
dynamodb-serverless/functions/put/handler.py
|
koki-nakamura22/serverless-framework-practice
|
b6fb96cc97ecb7a1fa167c7cccb143510466d350
|
[
"MIT"
] | null | null | null |
dynamodb-serverless/functions/put/handler.py
|
koki-nakamura22/serverless-framework-practice
|
b6fb96cc97ecb7a1fa167c7cccb143510466d350
|
[
"MIT"
] | null | null | null |
import os
import boto3
# DynamoDB object
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(f"TestUsersTable-{os.environ['STAGE']}")
| 20.463415
| 95
| 0.588796
|
import json
import os
import boto3
from faker import Faker
# DynamoDB object
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(f"TestUsersTable-{os.environ['STAGE']}")
def __truncate():
response = table.scan()
key_names = [ x["AttributeName"] for x in table.key_schema ]
delete_keys = [ { k:v for k,v in x.items() if k in key_names } for x in response["Items"] ]
with table.batch_writer() as batch:
for key in delete_keys:
batch.delete_item(Key = key)
def __put(id, name):
table.put_item(
Item = {
"id" : id,
"name" : name,
}
)
def put(event, context):
__truncate()
fake = Faker()
for n in range(1, 10 + 1):
__put(str(n).zfill(3), fake.name())
response = {
"statusCode": 200,
}
return response
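A hedged companion sketch that reads one of the seeded items back. It assumes the same TestUsersTable-{STAGE} naming convention, a STAGE environment variable, and valid AWS credentials and region, so treat it as illustrative rather than something to run blindly.
import os
import boto3

# Same naming convention as the handler above; STAGE (e.g. "dev") must be set.
table = boto3.resource("dynamodb").Table("TestUsersTable-{0}".format(os.environ["STAGE"]))

# Fetch the item that the seeding loop wrote with id "001".
response = table.get_item(Key={"id": "001"})
print(response.get("Item"))   # e.g. {'id': '001', 'name': '<some fake name>'} once put() has run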
| 0
| 0
| 0
| 0
| 0
| 586
| 0
| -8
| 113
|
8411765f0f08514141bd4c621bce5644bb0156cd
| 398
|
py
|
Python
|
Chapter07/4985_07_01-logs.py
|
mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition
|
1b2fefdb09f614a2005976a451f882a198c6c9c5
|
[
"MIT"
] | 43
|
2017-03-27T18:58:26.000Z
|
2022-03-25T15:29:45.000Z
|
Chapter07/4985_07_01-logs.py
|
mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition
|
1b2fefdb09f614a2005976a451f882a198c6c9c5
|
[
"MIT"
] | 2
|
2018-07-02T09:23:47.000Z
|
2018-08-23T13:57:41.000Z
|
Chapter07/4985_07_01-logs.py
|
mapenthusiast/QGIS-Python-Programming-Cookbook-Second-Edition
|
1b2fefdb09f614a2005976a451f882a198c6c9c5
|
[
"MIT"
] | 31
|
2017-03-08T06:37:22.000Z
|
2021-12-17T21:51:30.000Z
|
# Using Log Files
# Settings/Options/System/Environment (use custom variables)
# QGIS_LOG_FILE=/qgis_data/log.txt
# Restart QGIS
# Message to log file:
QgsLogger.logMessageToFile("This is a message to a log file.")
# Message to QGIS Log Window ( yellow triangle icon in the lower right)
QgsMessageLog.logMessage("This is a message from the Python Console", "Python Console", QgsMessageLog.INFO)
| 36.181818
| 107
| 0.776382
|
# Using Log Files
# Settings/Options/System/Environment (use custom variables)
# QGIS_LOG_FILE=/qgis_data/log.txt
# Restart QGIS
# Message to log file:
QgsLogger.logMessageToFile("This is a message to a log file.")
# Message to QGIS Log Window ( yellow triangle icon in the lower right)
QgsMessageLog.logMessage("This is a message from the Python Console", "Python Console", QgsMessageLog.INFO)
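The calls above are the QGIS 2.x API. On QGIS 3.x the message-log call changed shape, roughly as sketched below, though the exact names should be verified against the API docs of the installed version.
# QGIS 3.x flavour of the same message-log call (run inside the QGIS Python console)
from qgis.core import Qgis, QgsMessageLog

QgsMessageLog.logMessage("This is a message from the Python Console",
                        "Python Console", level=Qgis.Info)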
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fad016a754f61df9c72c04956901d978db0b6df6
| 1,500
|
py
|
Python
|
paddleslim/nas/ofa/utils/utils.py
|
zhuguiqian/PaddleSlim
|
c363c91c36bb9ada41f755c0ec4df3282ccdd6f0
|
[
"Apache-2.0"
] | null | null | null |
paddleslim/nas/ofa/utils/utils.py
|
zhuguiqian/PaddleSlim
|
c363c91c36bb9ada41f755c0ec4df3282ccdd6f0
|
[
"Apache-2.0"
] | null | null | null |
paddleslim/nas/ofa/utils/utils.py
|
zhuguiqian/PaddleSlim
|
c363c91c36bb9ada41f755c0ec4df3282ccdd6f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 31.914894
| 74
| 0.692667
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def compute_start_end(kernel_size, sub_kernel_size):
center = kernel_size // 2
sub_center = sub_kernel_size // 2
start = center - sub_center
end = center + sub_center + 1
assert end - start == sub_kernel_size
return start, end
def get_same_padding(kernel_size):
assert isinstance(kernel_size, int)
assert kernel_size % 2 > 0, "kernel size must be odd number"
return kernel_size // 2
def convert_to_list(value, n):
return [value, ] * n
def search_idx(num, sorted_nestlist):
max_num = -1
max_idx = -1
for idx in range(len(sorted_nestlist)):
task_ = sorted_nestlist[idx]
max_num = task_[-1]
max_idx = len(task_) - 1
for phase_idx in range(len(task_)):
if num <= task_[phase_idx]:
return idx, phase_idx
assert num > max_num
return len(sorted_nestlist) - 1, max_idx
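A small self-contained illustration of the arithmetic in compute_start_end and get_same_padding above: slicing a 3x3 patch centred inside a 7x7 kernel.
import numpy as np

kernel_size, sub_kernel_size = 7, 3
center, sub_center = kernel_size // 2, sub_kernel_size // 2
start, end = center - sub_center, center + sub_center + 1   # same arithmetic as compute_start_end(7, 3)

kernel = np.arange(kernel_size * kernel_size).reshape(kernel_size, kernel_size)
sub_kernel = kernel[start:end, start:end]                    # centred 3x3 patch

print((start, end))       # (2, 5)
print(sub_kernel.shape)   # (3, 3)
# get_same_padding(3) similarly returns 3 // 2 == 1, the "same" padding for an odd kernel.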
| 0
| 0
| 0
| 0
| 0
| 793
| 0
| 0
| 92
|
9e42668859a3e942dd8cf341d42cb36a048ac54f
| 3,715
|
py
|
Python
|
backend/openbd.py
|
n-yU/shisho
|
dc99a2d90dde3599af62a6a59a4aabf6b5a72011
|
[
"MIT"
] | 1
|
2021-08-20T05:34:31.000Z
|
2021-08-20T05:34:31.000Z
|
backend/openbd.py
|
n-yU/shisho
|
dc99a2d90dde3599af62a6a59a4aabf6b5a72011
|
[
"MIT"
] | null | null | null |
backend/openbd.py
|
n-yU/shisho
|
dc99a2d90dde3599af62a6a59a4aabf6b5a72011
|
[
"MIT"
] | null | null | null |
from logging import getLogger, StreamHandler, DEBUG, Formatter
#
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
handler.setFormatter(Formatter('[openBD] %(asctime)s - %(message)s'))
| 35.04717
| 155
| 0.602692
|
from logging import getLogger, StreamHandler, DEBUG, Formatter
from typing import Dict, Union
import re
import json
import requests
import MeCab
from neologdn import normalize
# logger configuration
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
handler.setFormatter(Formatter('[openBD] %(asctime)s - %(message)s'))
class OpenBD:
# openBD: https://openbd.jp/
def __init__(self, isbn10: int, mecab: MeCab.Tagger):
""""インスタンス生成時の初期化処理
Args:
isbn10 (int): OpenBDへリクエストする書籍のISBN-10
mecab (MeCab.Tagger): MeCab設定(辞書等)
"""
self.isbn10 = isbn10 # ISBN-10 of the book
self.result = self.get_json_from_openbd() # result of the openBD request
self.mecab = mecab # MeCab configuration
def get_json_from_openbd(self) -> str:
"""openBDから書籍情報取得
Returns:
str: openBDリクエスト結果
"""
# 指定ISBN-10の書籍情報を取得する, openBDエンドポイント
openbd_endpoint = 'https://api.openbd.jp/v1/get?isbn={0}'.format(self.isbn10)
try:
response = requests.get(openbd_endpoint)
response.raise_for_status()
except requests.RequestException as e:
# non-2xx status code -> log the error
logger.debug(e)
return 'FAILED'
openbd = json.loads(response.text)[0] # book information from openBD
# case where openBD has no record for this book
if openbd is None:
return 'NOT FOUND'
self.openbd = openbd
return 'OK'
def get_std_info(self) -> Union[Dict[str, str], bool]:
if self.result != 'OK':
logger.debug('openBDからの書籍情報取得に失敗しているため基本情報を取得できません')
return False
# fetch the basic information
title = self.openbd['summary']['title'] # title
publisher = self.openbd['summary']['publisher'] # publisher
authors = self.openbd['summary']['author'] # authors
cover = self.openbd['summary']['cover'] # cover image URL
# information derived from the ISBN-10
isbn10 = self.isbn10
amazon = 'https://www.amazon.co.jp/dp/{0}'.format(isbn10)
# publication date: formats differ, so store in a temporary variable and process afterwards
tmp_pubdate = self.openbd['summary']['pubdate']
if len(tmp_pubdate) == 8:
# pubdate: yyyyMMdd
pubdate = '{0}-{1}-{2}'.format(tmp_pubdate[:4], tmp_pubdate[4:6], tmp_pubdate[6:])
else:
# pubdare: yyyy-MM
pubdate = '{0}-01'.format(tmp_pubdate)
# fetch the book details (table of contents, summary, etc.)
if self.openbd['onix']['CollateralDetail'].get('TextContent'):
# concatenate when there are multiple entries
detail = ' '.join([text_content['Text'].replace('\n', ' ') for text_content in self.openbd['onix']['CollateralDetail']['TextContent']])
else:
# when no details exist -> mark as unregistered
detail = '未登録'
# book description text (title, publisher, authors and details concatenated), before processing
# normalize with neologdn -> strip digits (to cope with tables of contents)
tmp_description = re.sub(r'[0-9]+', ' ', normalize('{0} {1} {2} {3}'.format(title, publisher, authors, detail)))
# tokenize the description text and filter by part of speech
description_word_list = [] # words kept from the description text
for line in self.mecab.parse(tmp_description).splitlines():
chunks = line.split('\t')
if len(chunks) > 3 and (chunks[3].startswith('動詞') or chunks[3].startswith('形容詞') or chunks[3].startswith('名詞')):
# verb, adjective or noun -> used for training
description_word_list.append(chunks[0])
# processed description text: used as the document representing this book when training Doc2Vec
description = ' '.join(description_word_list)
info = dict(amazon=amazon, isbn10=isbn10, cover=cover, title=title, publisher=publisher, authors=authors, pubdate=pubdate, description=description)
return info
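A bare-bones sketch of the same openBD lookup without the MeCab/Doc2Vec machinery. The endpoint comes from the class above; the ISBN is an arbitrary example and the request needs network access.
import requests

isbn = "4873117380"   # arbitrary example ISBN; any valid ISBN works
response = requests.get("https://api.openbd.jp/v1/get?isbn={0}".format(isbn))
response.raise_for_status()

record = response.json()[0]   # None when openBD has no entry for this ISBN
if record is None:
    print("NOT FOUND")
else:
    summary = record["summary"]
    print(summary["title"], summary["publisher"], summary["pubdate"])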
| 1,134
| 0
| 0
| 2,909
| 0
| 0
| 0
| -19
| 155
|
f7fe38c9a4b8c5796670a8aa33b5cb1b8bbd7c39
| 5,246
|
py
|
Python
|
src/jetson/Sensors/sensors_simple.py
|
ichalkiad/VW_challenge
|
333222010ecf3d1ca4a0e181239f761c975453e9
|
[
"Apache-2.0"
] | 1
|
2017-08-16T08:42:49.000Z
|
2017-08-16T08:42:49.000Z
|
src/jetson/Sensors/sensors_simple.py
|
ichalkiad/VW_challenge
|
333222010ecf3d1ca4a0e181239f761c975453e9
|
[
"Apache-2.0"
] | 4
|
2017-08-09T23:01:30.000Z
|
2017-08-24T16:44:13.000Z
|
src/jetson/Sensors/sensors_simple.py
|
yhalk/vw_challenge_ECR
|
c1ff50070d0f7367ccfbf473c69e90fd2be5e85e
|
[
"Apache-2.0"
] | null | null | null |
import ev3dev.ev3 as ev3
import sys
#Create camera sensor object
camera = OnBoardCamera()
| 43
| 280
| 0.609989
|
import paho.mqtt.client as mqtt
import ev3dev.ev3 as ev3
import ctypes
import numpy as np
import sys
import cv2
from Sensors.mpu6050.mpu6050 import MPU6050
import smbus
from Sensors.odometry import Odometry
import sys, serial
from serial.tools import list_ports
class Sensor(object):
def __init__(self, *args, **kwargs):
pass
def read(self):
raise ValueError('This function must be implemented by a subclass')
class IR_teensy(Sensor):
def __init__(self):
self.ports = list(list_ports.comports()) # get all the connected serial devices
self.serial_port = serial.Serial('/dev/'+self.ports[0].name) # connect to the first
def debug(self):
'''
Use if cannot connect to the port
This function will print all found serial devices and prints the name and index of the port
'''
for i, item in enumerate(self.ports):
print(i + ' : ' + item.name)
def read(self):
'''
Reads the current value from the teensy
Returns:
Distance in cm
'''
measurement = self.serial_port.readline() # read the measurement
measurement = measurement.decode('utf-8').split('\r') # change it to utf and split it on funny characters
return measurement[0] # only return the actual measurment
class IMU2(Sensor):
def __init__(self, bus='/dev/i2c-1', address=0x68):
self.bus = smbus.SMBus(1)
self.address = address
self.mpu = MPU6050(self.bus,self.address, 'IMU')
def read(self):
'''
Reads the current values from the IMU using the mpu library
Returns:
tuple containing: pitch, roll, gyro x,y,z, accel x,y,z these values are scaled and NOT raw
'''
return self.mpu.read_all()
class IMU(Sensor):
def __init__(self, path_to_shared_lib_mpu='/home/nvidia/jetson-robot/IOInterface/jetson/Sensors/mpu/libmpu.so', bus_filename='/dev/i2c-1', bus_adresses=[0x68, 0x69]):
bus_filename = bus_filename.encode('ascii')
self.libmpu = ctypes.cdll.LoadLibrary(path_to_shared_lib_mpu)
self.file_descriptors = [self.libmpu.initIMU(bus_filename, bus_adress) for bus_adress in bus_adresses]
self.data_c_arrays = [(ctypes.c_int16*7)() for _ in range(len(bus_adresses))]
self.name = 'imu'
self.data_sources = ["temperature", "acceleration", "gyro"]
def read(self):
data_dict = {}
for idx, (file_descriptor, data_c_array) in enumerate(zip(self.file_descriptors, self.data_c_arrays)):
self.libmpu.readIMU(file_descriptor, data_c_array)
data_np_array = np.array(data_c_array)
data_dict['temperature_{}'.format(idx)] = data_np_array[0] / 340.0 + 36.53
data_dict['acceleration_{}'.format(idx)] = np.array([int(data_np_array[1]),
int(data_np_array[2]),
int(data_np_array[3]),
])
data_dict['gyro_{}'.format(idx)] = np.array([int(data_np_array[4]),
int(data_np_array[5]),
int(data_np_array[6]),
])
return data_dict
def read_sensor_nr(self, sensor_nr):
# TODO: Ask Max, if the magic values for temperature conversion are correct.
data_dict = {}
self.libmpu.readIMU(self.file_descriptors[sensor_nr], self.data_c_arrays[sensor_nr])
data_np_array = np.array(self.data_c_arrays[sensor_nr])
data_dict['temperature'] = data_np_array[0] / 340.0 + 36.53
data_dict['acceleration'] = np.array([int(data_np_array[1]), int(data_np_array[2]), int(data_np_array[3])])
data_dict['gyro'] = np.array([int(data_np_array[4]), int(data_np_array[5]), int(data_np_array[6])])
return data_dict
def get_data_sources(self):
return self.data_sources
class OnBoardCamera(Sensor):
def __init__(self):
self.name = 'onBoardCamera'
self.cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)160, height=(int)120, format=(string)I420, framerate=(fraction)30/1 ! nvvidconv flip-method=2 ! video/x-raw, format=(string)I420 ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
#self.cap = cv2.VideoCapture("nvcamerasrc ! video/x-raw(memory:NVMM), width=(int)(160), height=(int)(120),format=(string)I420, framerate=(fraction)2/1 ! nvvidconv flip-method=0 ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink")
def read(self):
if self.cap.isOpened():
ret_val, frame = self.cap.read()
frame = cv2.flip(frame,0)
frame = cv2.flip(frame,1)
else:
raise ValueError('Camera not opened. Sorry this message is not really helpful, blame openCV :-) ')
return {'onBoardCamera':frame}
def clean_buf(self):
for i in range(5):
self.cap.grab()
#Create camera sensor object
camera = OnBoardCamera()
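A minimal sketch of the serial part of IR_teensy on its own: list the connected serial devices with pyserial and read one line from the first. It assumes a device is attached; the baud rate and timeout are illustrative choices, not taken from the original.
import serial
from serial.tools import list_ports

ports = list(list_ports.comports())
for i, port in enumerate(ports):
    print("{0} : {1} ({2})".format(i, port.device, port.description))

if ports:
    # open the first detected port and read a single line, as IR_teensy.read() does
    with serial.Serial(ports[0].device, baudrate=9600, timeout=1.0) as conn:
        line = conn.readline().decode("utf-8", errors="replace").strip()
        print("measurement:", line)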
| 0
| 0
| 0
| 4,810
| 0
| 0
| 0
| 28
| 313
|
790f64346cac505157953135acdaf67a66ffe6fe
| 23,905
|
py
|
Python
|
mphys/integrated_forces.py
|
timryanb/mphys
|
74560a163034a0006a17811ba1206bab00f1f775
|
[
"Apache-2.0"
] | 8
|
2022-02-22T18:08:56.000Z
|
2022-03-14T13:32:46.000Z
|
mphys/integrated_forces.py
|
timryanb/mphys
|
74560a163034a0006a17811ba1206bab00f1f775
|
[
"Apache-2.0"
] | 15
|
2022-02-22T15:10:15.000Z
|
2022-03-23T16:15:09.000Z
|
mphys/integrated_forces.py
|
timryanb/mphys
|
74560a163034a0006a17811ba1206bab00f1f775
|
[
"Apache-2.0"
] | 8
|
2022-02-22T18:08:35.000Z
|
2022-03-17T16:21:08.000Z
|
if __name__ == '__main__':
check_integrated_surface_force_partials()
| 53.004435
| 110
| 0.474001
|
import numpy as np
import openmdao.api as om
class IntegratedSurfaceForces(om.ExplicitComponent):
def setup(self):
self.add_input('aoa',desc = 'angle of attack', units='rad',tags=['mphys_input'])
self.add_input('yaw',desc = 'yaw angle',units='rad',tags=['mphys_input'])
self.add_input('ref_area', val = 1.0,tags=['mphys_input'])
self.add_input('moment_center',shape=3,tags=['mphys_input'])
self.add_input('ref_length', val = 1.0,tags=['mphys_input'])
self.add_input('q_inf', val = 1.0,tags=['mphys_input'])
self.add_input('x_aero', shape_by_conn=True,
distributed=True,
desc = 'surface coordinates',
tags=['mphys_coupling'])
self.add_input('f_aero', shape_by_conn=True,
distributed=True,
desc = 'dimensional forces at nodes',
tags=['mphys_coupling'])
self.add_output('C_L', desc = 'Lift coefficient', tags=['mphys_result'])
self.add_output('C_D', desc = 'Drag coefficient', tags=['mphys_result'])
self.add_output('C_X', desc = 'X Force coefficient', tags=['mphys_result'])
self.add_output('C_Y', desc = 'Y Force coefficient', tags=['mphys_result'])
self.add_output('C_Z', desc = 'Z Force coefficient', tags=['mphys_result'])
self.add_output('CM_X', desc = 'X Moment coefficient', tags=['mphys_result'])
self.add_output('CM_Y', desc = 'Y Moment coefficient', tags=['mphys_result'])
self.add_output('CM_Z', desc = 'Z Moment coefficient', tags=['mphys_result'])
self.add_output('Lift', desc = 'Total Lift', tags=['mphys_result'])
self.add_output('Drag', desc = 'Total Drag', tags=['mphys_result'])
self.add_output('F_X', desc = 'Total X Force', tags=['mphys_result'])
self.add_output('F_Y', desc = 'Total Y Force', tags=['mphys_result'])
self.add_output('F_Z', desc = 'Total Z Force', tags=['mphys_result'])
self.add_output('M_X', desc = 'Total X Moment', tags=['mphys_result'])
self.add_output('M_Y', desc = 'Total Y Moment', tags=['mphys_result'])
self.add_output('M_Z', desc = 'Total Z Moment', tags=['mphys_result'])
def compute(self,inputs,outputs):
aoa = inputs['aoa']
yaw = inputs['yaw']
area = inputs['ref_area']
q_inf = inputs['q_inf']
xc = inputs['moment_center'][0]
yc = inputs['moment_center'][1]
zc = inputs['moment_center'][2]
c = inputs['ref_length']
x = inputs['x_aero'][0::3]
y = inputs['x_aero'][1::3]
z = inputs['x_aero'][2::3]
fx = inputs['f_aero'][0::3]
fy = inputs['f_aero'][1::3]
fz = inputs['f_aero'][2::3]
fx_total = self.comm.allreduce(np.sum(fx))
fy_total = self.comm.allreduce(np.sum(fy))
fz_total = self.comm.allreduce(np.sum(fz))
outputs['F_X'] = fx_total
outputs['F_Y'] = fy_total
outputs['F_Z'] = fz_total
outputs['C_X'] = fx_total / (q_inf * area)
outputs['C_Y'] = fy_total / (q_inf * area)
outputs['C_Z'] = fz_total / (q_inf * area)
outputs['Lift'] = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa)
outputs['Drag'] = ( fx_total * np.cos(aoa) * np.cos(yaw)
- fy_total * np.sin(yaw)
+ fz_total * np.sin(aoa) * np.cos(yaw)
)
outputs['C_L'] = outputs['Lift'] / (q_inf * area)
outputs['C_D'] = outputs['Drag'] / (q_inf * area)
m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc)))
m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc)))
m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc)))
outputs['M_X'] = m_x
outputs['M_Y'] = m_y
outputs['M_Z'] = m_z
outputs['CM_X'] = m_x / (q_inf * area * c)
outputs['CM_Y'] = m_y / (q_inf * area * c)
outputs['CM_Z'] = m_z / (q_inf * area * c)
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
aoa = inputs['aoa']
yaw = inputs['yaw']
area = inputs['ref_area']
q_inf = inputs['q_inf']
xc = inputs['moment_center'][0]
yc = inputs['moment_center'][1]
zc = inputs['moment_center'][2]
c = inputs['ref_length']
x = inputs['x_aero'][0::3]
y = inputs['x_aero'][1::3]
z = inputs['x_aero'][2::3]
fx = inputs['f_aero'][0::3]
fy = inputs['f_aero'][1::3]
fz = inputs['f_aero'][2::3]
fx_total = self.comm.allreduce(np.sum(fx))
fy_total = self.comm.allreduce(np.sum(fy))
fz_total = self.comm.allreduce(np.sum(fz))
lift = -fx_total * np.sin(aoa) + fz_total * np.cos(aoa)
drag = ( fx_total * np.cos(aoa) * np.cos(yaw)
- fy_total * np.sin(yaw)
+ fz_total * np.sin(aoa) * np.cos(yaw)
)
m_x = self.comm.allreduce( np.dot(fz,(y-yc)) - np.dot(fy,(z-zc)))
m_y = self.comm.allreduce(-np.dot(fz,(x-xc)) + np.dot(fx,(z-zc)))
m_z = self.comm.allreduce( np.dot(fy,(x-xc)) - np.dot(fx,(y-yc)))
if mode == 'fwd':
if 'aoa' in d_inputs:
daoa_rad = d_inputs['aoa']
if 'Lift' in d_outputs or 'C_L' in d_outputs:
d_lift_d_aoa = ( - fx_total * np.cos(aoa) * daoa_rad
- fz_total * np.sin(aoa) * daoa_rad )
if 'Lift' in d_outputs:
d_outputs['Lift'] += d_lift_d_aoa
if 'C_L' in d_outputs:
d_outputs['C_L'] += d_lift_d_aoa / (q_inf * area)
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag_d_aoa = ( fx_total * (-np.sin(aoa) * daoa_rad) * np.cos(yaw)
+ fz_total * ( np.cos(aoa) * daoa_rad) * np.cos(yaw))
if 'Drag' in d_outputs:
d_outputs['Drag'] += d_drag_d_aoa
if 'C_D' in d_outputs:
d_outputs['C_D'] += d_drag_d_aoa / (q_inf * area)
if 'yaw' in d_inputs:
dyaw_rad = d_inputs['yaw']
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag_d_yaw = ( fx_total * np.cos(aoa) * (-np.sin(yaw) * dyaw_rad)
- fy_total * np.cos(yaw) * dyaw_rad
+ fz_total * np.sin(aoa) * (-np.sin(yaw) * dyaw_rad)
)
if 'Drag' in d_outputs:
d_outputs['Drag'] += d_drag_d_yaw
if 'C_D' in d_outputs:
d_outputs['C_D'] += d_drag_d_yaw / (q_inf * area)
if 'ref_area' in d_inputs:
d_nondim = - d_inputs['ref_area'] / (q_inf * area**2.0)
if 'C_X' in d_outputs:
d_outputs['C_X'] += fx_total * d_nondim
if 'C_Y' in d_outputs:
d_outputs['C_Y'] += fy_total * d_nondim
if 'C_Z' in d_outputs:
d_outputs['C_Z'] += fz_total * d_nondim
if 'C_L' in d_outputs:
d_outputs['C_L'] += lift * d_nondim
if 'C_D' in d_outputs:
d_outputs['C_D'] += drag * d_nondim
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += m_z * d_nondim / c
if 'moment_center' in d_inputs:
dxc = d_inputs['moment_center'][0]
dyc = d_inputs['moment_center'][1]
dzc = d_inputs['moment_center'][2]
if 'M_X' in d_outputs:
d_outputs['M_X'] += -fz_total * dyc + fy_total * dzc
if 'M_Y' in d_outputs:
d_outputs['M_Y'] += fz_total * dxc - fx_total * dzc
if 'M_Z' in d_outputs:
d_outputs['M_Z'] += -fy_total * dxc + fx_total * dyc
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += (-fz_total * dyc + fy_total * dzc) / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += ( fz_total * dxc - fx_total * dzc) / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += (-fy_total * dxc + fx_total * dyc) / (q_inf * area * c)
if 'ref_length' in d_inputs:
d_nondim = - d_inputs['ref_length'] / (q_inf * area * c**2.0)
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += m_x * d_nondim
                if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += m_y * d_nondim
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += m_z * d_nondim
if 'q_inf' in d_inputs:
d_nondim = - d_inputs['q_inf'] / (q_inf**2.0 * area)
if 'C_X' in d_outputs:
d_outputs['C_X'] += fx_total * d_nondim
if 'C_Y' in d_outputs:
d_outputs['C_Y'] += fy_total * d_nondim
if 'C_Z' in d_outputs:
d_outputs['C_Z'] += fz_total * d_nondim
if 'C_L' in d_outputs:
d_outputs['C_L'] += lift * d_nondim
if 'C_D' in d_outputs:
d_outputs['C_D'] += drag * d_nondim
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += m_z * d_nondim / c
if 'x_aero' in d_inputs:
dx = d_inputs['x_aero'][0::3]
dy = d_inputs['x_aero'][1::3]
dz = d_inputs['x_aero'][2::3]
if 'M_X' in d_outputs:
d_outputs['M_X'] += np.dot(fz,dy) - np.dot(fy,dz)
if 'M_Y' in d_outputs:
d_outputs['M_Y'] += -np.dot(fz,dx) + np.dot(fx,dz)
if 'M_Z' in d_outputs:
d_outputs['M_Z'] += np.dot(fy,dx) - np.dot(fx,dy)
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += ( np.dot(fz,dy) - np.dot(fy,dz)) / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += (-np.dot(fz,dx) + np.dot(fx,dz)) / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += ( np.dot(fy,dx) - np.dot(fx,dy)) / (q_inf * area * c)
if 'f_aero' in d_inputs:
dfx = d_inputs['f_aero'][0::3]
dfy = d_inputs['f_aero'][1::3]
dfz = d_inputs['f_aero'][2::3]
dfx_total = np.sum(dfx)
dfy_total = np.sum(dfy)
dfz_total = np.sum(dfz)
if 'F_X' in d_outputs:
d_outputs['F_X'] += dfx_total
if 'F_Y' in d_outputs:
d_outputs['F_Y'] += dfy_total
if 'F_Z' in d_outputs:
d_outputs['F_Z'] += dfz_total
if 'C_X' in d_outputs:
d_outputs['C_X'] += dfx_total / (q_inf * area)
if 'C_Y' in d_outputs:
d_outputs['C_Y'] += dfy_total / (q_inf * area)
if 'C_Z' in d_outputs:
d_outputs['C_Z'] += dfz_total / (q_inf * area)
if 'Lift' in d_outputs:
d_outputs['Lift'] += -dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)
if 'Drag' in d_outputs:
d_outputs['Drag'] += ( dfx_total * np.cos(aoa) * np.cos(yaw)
- dfy_total * np.sin(yaw)
+ dfz_total * np.sin(aoa) * np.cos(yaw)
)
if 'C_L' in d_outputs:
d_outputs['C_L'] += (-dfx_total * np.sin(aoa) + dfz_total * np.cos(aoa)) / (q_inf * area)
if 'C_D' in d_outputs:
d_outputs['C_D'] += ( dfx_total * np.cos(aoa) * np.cos(yaw)
- dfy_total * np.sin(yaw)
+ dfz_total * np.sin(aoa) * np.cos(yaw)
) / (q_inf * area)
if 'M_X' in d_outputs:
d_outputs['M_X'] += np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))
if 'M_Y' in d_outputs:
d_outputs['M_Y'] += -np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))
if 'M_Z' in d_outputs:
d_outputs['M_Z'] += np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))
if 'CM_X' in d_outputs:
d_outputs['CM_X'] += ( np.dot(dfz,(y-yc)) - np.dot(dfy,(z-zc))) / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_outputs['CM_Y'] += (-np.dot(dfz,(x-xc)) + np.dot(dfx,(z-zc))) / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_outputs['CM_Z'] += ( np.dot(dfy,(x-xc)) - np.dot(dfx,(y-yc))) / (q_inf * area * c)
elif mode == 'rev':
if 'aoa' in d_inputs:
if 'Lift' in d_outputs or 'C_L' in d_outputs:
d_lift = d_outputs['Lift'] if 'Lift' in d_outputs else 0.0
d_cl = d_outputs['C_L'] if 'C_L' in d_outputs else 0.0
d_inputs['aoa'] += ( - fx_total * np.cos(aoa)
- fz_total * np.sin(aoa)
) * (d_lift + d_cl / (q_inf * area))
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0
d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0
d_inputs['aoa'] += ( fx_total * (-np.sin(aoa)) * np.cos(yaw)
+ fz_total * ( np.cos(aoa)) * np.cos(yaw)
) * (d_drag + d_cd / (q_inf * area))
if 'yaw' in d_inputs:
if 'Drag' in d_outputs or 'C_D' in d_outputs:
d_drag = d_outputs['Drag'] if 'Drag' in d_outputs else 0.0
d_cd = d_outputs['C_D'] if 'C_D' in d_outputs else 0.0
d_inputs['yaw'] += ( fx_total * np.cos(aoa) * (-np.sin(yaw))
- fy_total * np.cos(yaw)
+ fz_total * np.sin(aoa) * (-np.sin(yaw))
) * (d_drag + d_cd / (q_inf * area))
if 'ref_area' in d_inputs:
d_nondim = - 1.0 / (q_inf * area**2.0)
if 'C_X' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_X'] * fx_total * d_nondim
if 'C_Y' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_Y'] * fy_total * d_nondim
if 'C_Z' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_Z'] * fz_total * d_nondim
if 'C_L' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_L'] * lift * d_nondim
if 'C_D' in d_outputs:
d_inputs['ref_area'] += d_outputs['C_D'] * drag * d_nondim
if 'CM_X' in d_outputs:
d_inputs['ref_area'] += d_outputs['CM_X'] * m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_inputs['ref_area'] += d_outputs['CM_Y'] * m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_inputs['ref_area'] += d_outputs['CM_Z'] * m_z * d_nondim / c
if 'moment_center' in d_inputs:
if 'M_X' in d_outputs:
d_inputs['moment_center'][1] += -fz_total * d_outputs['M_X']
d_inputs['moment_center'][2] += fy_total * d_outputs['M_X']
if 'M_Y' in d_outputs:
d_inputs['moment_center'][0] += fz_total * d_outputs['M_Y']
d_inputs['moment_center'][2] += -fx_total * d_outputs['M_Y']
if 'M_Z' in d_outputs:
d_inputs['moment_center'][0] += -fy_total * d_outputs['M_Z']
d_inputs['moment_center'][1] += fx_total * d_outputs['M_Z']
if 'CM_X' in d_outputs:
d_inputs['moment_center'][1] += -fz_total * d_outputs['CM_X'] / (q_inf * area * c)
d_inputs['moment_center'][2] += fy_total * d_outputs['CM_X'] / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_inputs['moment_center'][0] += fz_total * d_outputs['CM_Y'] / (q_inf * area * c)
d_inputs['moment_center'][2] += -fx_total * d_outputs['CM_Y'] / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_inputs['moment_center'][0] += -fy_total * d_outputs['CM_Z'] / (q_inf * area * c)
d_inputs['moment_center'][1] += fx_total * d_outputs['CM_Z'] / (q_inf * area * c)
if 'ref_length' in d_inputs:
d_nondim = - 1.0 / (q_inf * area * c**2.0)
if 'CM_X' in d_outputs:
d_inputs['ref_length'] += m_x * d_nondim * d_outputs['CM_X']
                if 'CM_Y' in d_outputs:
d_inputs['ref_length'] += m_y * d_nondim * d_outputs['CM_Y']
if 'CM_Z' in d_outputs:
d_inputs['ref_length'] += m_z * d_nondim * d_outputs['CM_Z']
if 'q_inf' in d_inputs:
d_nondim = - 1.0 / (q_inf**2.0 * area)
if 'C_X' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_X'] * fx_total * d_nondim
if 'C_Y' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_Y'] * fy_total * d_nondim
if 'C_Z' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_Z'] * fz_total * d_nondim
if 'C_L' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_L'] * lift * d_nondim
if 'C_D' in d_outputs:
d_inputs['q_inf'] += d_outputs['C_D'] * drag * d_nondim
if 'CM_X' in d_outputs:
d_inputs['q_inf'] += d_outputs['CM_X'] * m_x * d_nondim / c
                if 'CM_Y' in d_outputs:
d_inputs['q_inf'] += d_outputs['CM_Y'] * m_y * d_nondim / c
if 'CM_Z' in d_outputs:
d_inputs['q_inf'] += d_outputs['CM_Z'] * m_z * d_nondim / c
if 'x_aero' in d_inputs:
nondim = 1.0 / (q_inf * area * c)
dm_x = d_outputs['M_X'] if 'M_X' in d_outputs else 0.0
dm_y = d_outputs['M_Y'] if 'M_Y' in d_outputs else 0.0
dm_z = d_outputs['M_Z'] if 'M_Z' in d_outputs else 0.0
dcm_x = d_outputs['CM_X']*nondim if 'CM_X' in d_outputs else 0.0
dcm_y = d_outputs['CM_Y']*nondim if 'CM_Y' in d_outputs else 0.0
dcm_z = d_outputs['CM_Z']*nondim if 'CM_Z' in d_outputs else 0.0
d_inputs['x_aero'][0::3] += -fz * (dm_y + dcm_y) + fy * (dm_z + dcm_z)
d_inputs['x_aero'][1::3] += fz * (dm_x + dcm_x) - fx * (dm_z + dcm_z)
d_inputs['x_aero'][2::3] += -fy * (dm_x + dcm_x) + fx * (dm_y + dcm_y)
if 'f_aero' in d_inputs:
if 'F_X' in d_outputs:
d_inputs['f_aero'][0::3] += d_outputs['F_X']
if 'F_Y' in d_outputs:
d_inputs['f_aero'][1::3] += d_outputs['F_Y']
if 'F_Z' in d_outputs:
d_inputs['f_aero'][2::3] += d_outputs['F_Z']
if 'C_X' in d_outputs:
d_inputs['f_aero'][0::3] += d_outputs['C_X'] / (q_inf * area)
if 'C_Y' in d_outputs:
d_inputs['f_aero'][1::3] += d_outputs['C_Y'] / (q_inf * area)
if 'C_Z' in d_outputs:
d_inputs['f_aero'][2::3] += d_outputs['C_Z'] / (q_inf * area)
if 'Lift' in d_outputs:
d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['Lift']
d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['Lift']
if 'Drag' in d_outputs:
d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['Drag']
d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['Drag']
d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['Drag']
if 'C_L' in d_outputs:
d_inputs['f_aero'][0::3] += -np.sin(aoa) * d_outputs['C_L'] / (q_inf * area)
d_inputs['f_aero'][2::3] += np.cos(aoa) * d_outputs['C_L'] / (q_inf * area)
if 'C_D' in d_outputs:
d_inputs['f_aero'][0::3] += np.cos(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area)
d_inputs['f_aero'][1::3] += -np.sin(yaw) * d_outputs['C_D'] / (q_inf * area)
d_inputs['f_aero'][2::3] += np.sin(aoa) * np.cos(yaw) * d_outputs['C_D'] / (q_inf * area)
if 'M_X' in d_outputs:
d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['M_X']
d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['M_X']
if 'M_Y' in d_outputs:
d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['M_Y']
d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['M_Y']
if 'M_Z' in d_outputs:
d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['M_Z']
d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['M_Z']
if 'CM_X' in d_outputs:
d_inputs['f_aero'][1::3] += -(z-zc) * d_outputs['CM_X'] / (q_inf * area * c)
d_inputs['f_aero'][2::3] += (y-yc) * d_outputs['CM_X'] / (q_inf * area * c)
if 'CM_Y' in d_outputs:
d_inputs['f_aero'][0::3] += (z-zc) * d_outputs['CM_Y'] / (q_inf * area * c)
d_inputs['f_aero'][2::3] += -(x-xc) * d_outputs['CM_Y'] / (q_inf * area * c)
if 'CM_Z' in d_outputs:
d_inputs['f_aero'][0::3] += -(y-yc) * d_outputs['CM_Z'] / (q_inf * area * c)
d_inputs['f_aero'][1::3] += (x-xc) * d_outputs['CM_Z'] / (q_inf * area * c)
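# The 'fwd' and 'rev' branches above are hand-written matrix-free partial derivatives
# of the integrated force/moment outputs. The helper below builds a tiny problem and
# verifies them against complex-step derivatives via prob.check_partials(method='cs').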
def check_integrated_surface_force_partials():
nnodes = 3
prob = om.Problem()
ivc = om.IndepVarComp()
ivc.add_output('aoa',val=45.0, units='deg')
ivc.add_output('yaw',val=135.0, units='deg')
ivc.add_output('ref_area',val=0.2)
ivc.add_output('moment_center',shape=3,val=np.zeros(3))
ivc.add_output('ref_length', val = 3.0)
ivc.add_output('q_inf',val=10.0)
ivc.add_output('x_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True)
ivc.add_output('f_aero',shape=3*nnodes,val=np.random.rand(3*nnodes),distributed=True)
prob.model.add_subsystem('ivc',ivc,promotes_outputs=['*'])
prob.model.add_subsystem('forces',IntegratedSurfaceForces(),
promotes_inputs=['*'])
prob.setup(force_alloc_complex=True)
prob.run_model()
prob.check_partials(compact_print=True, method='cs')
if __name__ == '__main__':
check_integrated_surface_force_partials()
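# Note (assumption): since x_aero/f_aero are distributed and the component reduces with
# comm.allreduce, the same check should also run under MPI, e.g.
#   mpirun -n 2 python <this_file>.py
# (the exact file name depends on where this component lives in the source tree).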
| 0
| 0
| 0
| 22,882
| 0
| 858
| 0
| 1
| 90
|
7c3b77cba219a97b12762ac1a37f632c5f68d380
| 11,331
|
py
|
Python
|
platformio/project/commands/init.py
|
ufo2011/platformio-core
|
0ceae62701731f8b32c34d7993a34dea34aea59c
|
[
"Apache-2.0"
] | null | null | null |
platformio/project/commands/init.py
|
ufo2011/platformio-core
|
0ceae62701731f8b32c34d7993a34dea34aea59c
|
[
"Apache-2.0"
] | null | null | null |
platformio/project/commands/init.py
|
ufo2011/platformio-core
|
0ceae62701731f8b32c34d7993a34dea34aea59c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long,too-many-arguments,too-many-locals
| 31.828652
| 119
| 0.662519
|
# Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long,too-many-arguments,too-many-locals
import json
import os
import click
from platformio import fs
from platformio.package.commands.install import install_project_dependencies
from platformio.package.manager.platform import PlatformPackageManager
from platformio.platform.exception import UnknownBoard
from platformio.project.config import ProjectConfig
from platformio.project.generator import ProjectGenerator
from platformio.project.helpers import is_platformio_project
def validate_boards(ctx, param, value): # pylint: disable=W0613
pm = PlatformPackageManager()
for id_ in value:
try:
pm.board_config(id_)
except UnknownBoard:
raise click.BadParameter(
"`%s`. Please search for board ID using `platformio boards` "
"command" % id_
)
return value
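# Example usage of the callback above (illustrative): an unknown ID raises BadParameter,
# while a valid one can first be looked up with `pio boards uno` and then passed as
# `pio project init -b uno`.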
@click.command("init", short_help="Initialize a project or update existing")
@click.option(
"--project-dir",
"-d",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option("-b", "--board", multiple=True, metavar="ID", callback=validate_boards)
@click.option("--ide", type=click.Choice(ProjectGenerator.get_supported_ides()))
@click.option("-e", "--environment", help="Update existing environment")
@click.option("-O", "--project-option", multiple=True)
@click.option("--env-prefix", default="")
@click.option("--no-install-dependencies", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
def project_init_cmd(
project_dir,
board,
ide,
environment,
project_option,
env_prefix,
no_install_dependencies,
silent,
):
is_new_project = not is_platformio_project(project_dir)
if is_new_project:
if not silent:
print_header(project_dir)
init_base_project(project_dir)
if environment:
update_project_env(project_dir, environment, project_option)
elif board:
update_board_envs(project_dir, board, project_option, env_prefix)
# resolve project dependencies
if not no_install_dependencies and (environment or board):
install_project_dependencies(
options=dict(
project_dir=project_dir,
environments=[environment] if environment else [],
silent=silent,
)
)
if ide:
if not silent:
click.echo(
"Updating metadata for the %s IDE..." % click.style(ide, fg="cyan")
)
with fs.cd(project_dir):
config = ProjectConfig.get_instance(
os.path.join(project_dir, "platformio.ini")
)
config.validate()
ProjectGenerator(config, environment, ide, board).generate()
if is_new_project:
init_cvs_ignore(project_dir)
if not silent:
print_footer(is_new_project)
def print_header(project_dir):
if project_dir == os.getcwd():
click.secho("\nThe current working directory ", fg="yellow", nl=False)
try:
click.secho(project_dir, fg="cyan", nl=False)
except UnicodeEncodeError:
click.secho(json.dumps(project_dir), fg="cyan", nl=False)
click.secho(" will be used for the project.", fg="yellow")
click.echo("")
click.echo("The next files/directories have been created in ", nl=False)
try:
click.secho(project_dir, fg="cyan")
except UnicodeEncodeError:
click.secho(json.dumps(project_dir), fg="cyan")
click.echo("%s - Put project header files here" % click.style("include", fg="cyan"))
click.echo(
"%s - Put here project specific (private) libraries"
% click.style("lib", fg="cyan")
)
click.echo("%s - Put project source files here" % click.style("src", fg="cyan"))
click.echo(
"%s - Project Configuration File" % click.style("platformio.ini", fg="cyan")
)
def print_footer(is_new_project):
if is_new_project:
return click.secho(
"\nProject has been successfully initialized! Useful commands:\n"
"`pio run` - process/build project from the current directory\n"
"`pio run --target upload` or `pio run -t upload` "
"- upload firmware to a target\n"
"`pio run --target clean` - clean project (remove compiled files)"
"\n`pio run --help` - additional information",
fg="green",
)
return click.secho(
"Project has been successfully updated!",
fg="green",
)
def init_base_project(project_dir):
with fs.cd(project_dir):
config = ProjectConfig()
config.save()
dir_to_readme = [
(config.get("platformio", "src_dir"), None),
(config.get("platformio", "include_dir"), init_include_readme),
(config.get("platformio", "lib_dir"), init_lib_readme),
(config.get("platformio", "test_dir"), init_test_readme),
]
for (path, cb) in dir_to_readme:
if os.path.isdir(path):
continue
os.makedirs(path)
if cb:
cb(path)
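# After init_base_project a fresh project typically contains (directory names come from
# the ProjectConfig defaults, e.g. src/include/lib/test):
#   platformio.ini
#   src/
#   include/README, lib/README, test/README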
def init_include_readme(include_dir):
with open(os.path.join(include_dir, "README"), mode="w", encoding="utf8") as fp:
fp.write(
"""
This directory is intended for project header files.
A header file is a file containing C declarations and macro definitions
to be shared between several project source files. You request the use of a
header file in your project source file (C, C++, etc) located in `src` folder
by including it, with the C preprocessing directive `#include'.
```src/main.c
#include "header.h"
int main (void)
{
...
}
```
Including a header file produces the same results as copying the header file
into each source file that needs it. Such copying would be time-consuming
and error-prone. With a header file, the related declarations appear
in only one place. If they need to be changed, they can be changed in one
place, and programs that include the header file will automatically use the
new version when next recompiled. The header file eliminates the labor of
finding and changing all the copies as well as the risk that a failure to
find one copy will result in inconsistencies within a program.
In C, the usual convention is to give header files names that end with `.h'.
It is most portable to use only letters, digits, dashes, and underscores in
header file names, and at most one dot.
Read more about using header files in official GCC documentation:
* Include Syntax
* Include Operation
* Once-Only Headers
* Computed Includes
https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html
""",
)
def init_lib_readme(lib_dir):
with open(os.path.join(lib_dir, "README"), mode="w", encoding="utf8") as fp:
fp.write(
"""
This directory is intended for project specific (private) libraries.
PlatformIO will compile them to static libraries and link them into the executable file.
The source code of each library should be placed in its own separate directory
("lib/your_library_name/[here are source files]").
For example, see a structure of the following two libraries `Foo` and `Bar`:
|--lib
| |
| |--Bar
| | |--docs
| | |--examples
| | |--src
| | |- Bar.c
| | |- Bar.h
| | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html
| |
| |--Foo
| | |- Foo.c
| | |- Foo.h
| |
| |- README --> THIS FILE
|
|- platformio.ini
|--src
|- main.c
and a contents of `src/main.c`:
```
#include <Foo.h>
#include <Bar.h>
int main (void)
{
...
}
```
PlatformIO Library Dependency Finder will find automatically dependent
libraries scanning project source files.
More information about PlatformIO Library Dependency Finder
- https://docs.platformio.org/page/librarymanager/ldf.html
""",
)
def init_test_readme(test_dir):
with open(os.path.join(test_dir, "README"), mode="w", encoding="utf8") as fp:
fp.write(
"""
This directory is intended for PlatformIO Test Runner and project tests.
Unit Testing is a software testing method by which individual units of
source code, sets of one or more MCU program modules together with associated
control data, usage procedures, and operating procedures, are tested to
determine whether they are fit for use. Unit testing finds problems early
in the development cycle.
More information about PlatformIO Unit Testing:
- https://docs.platformio.org/en/latest/advanced/unit-testing/index.html
""",
)
def init_cvs_ignore(project_dir):
conf_path = os.path.join(project_dir, ".gitignore")
if os.path.isfile(conf_path):
return
with open(conf_path, mode="w", encoding="utf8") as fp:
fp.write(".pio\n")
def update_board_envs(project_dir, board_ids, project_option, env_prefix):
config = ProjectConfig(
os.path.join(project_dir, "platformio.ini"), parse_extra=False
)
used_boards = []
for section in config.sections():
cond = [section.startswith("env:"), config.has_option(section, "board")]
if all(cond):
used_boards.append(config.get(section, "board"))
pm = PlatformPackageManager()
modified = False
for id_ in board_ids:
board_config = pm.board_config(id_)
if id_ in used_boards:
continue
used_boards.append(id_)
modified = True
envopts = {"platform": board_config["platform"], "board": id_}
# find default framework for board
frameworks = board_config.get("frameworks")
if frameworks:
envopts["framework"] = frameworks[0]
for item in project_option:
if "=" not in item:
continue
_name, _value = item.split("=", 1)
envopts[_name.strip()] = _value.strip()
section = "env:%s%s" % (env_prefix, id_)
config.add_section(section)
for option, value in envopts.items():
config.set(section, option, value)
if modified:
config.save()
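# Illustrative example of a section this can add to platformio.ini (actual values come
# from the board manifest; shown here for an Arduino Uno):
#   [env:uno]
#   platform = atmelavr
#   board = uno
#   framework = arduino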
def update_project_env(project_dir, environment, project_option):
if not project_option:
return
config = ProjectConfig(
os.path.join(project_dir, "platformio.ini"), parse_extra=False
)
section = "env:%s" % environment
if not config.has_section(section):
config.add_section(section)
for item in project_option:
if "=" not in item:
continue
_name, _value = item.split("=", 1)
config.set(section, _name.strip(), _value.strip())
config.save()
| 0
| 2,060
| 0
| 0
| 0
| 7,890
| 0
| 215
| 476
|
c91b624711d1778d78556d13356f05fa1dcaaef7
| 701
|
py
|
Python
|
exercise_monitoring_camera.py
|
Guvalif/aidor-acceleration-02
|
afa7aa45bf26f1c2b7f189b6320599357f1e17d3
|
[
"MIT"
] | 1
|
2018-08-20T02:14:24.000Z
|
2018-08-20T02:14:24.000Z
|
exercise_monitoring_camera.py
|
Guvalif/imedio_0801
|
afa7aa45bf26f1c2b7f189b6320599357f1e17d3
|
[
"MIT"
] | null | null | null |
exercise_monitoring_camera.py
|
Guvalif/imedio_0801
|
afa7aa45bf26f1c2b7f189b6320599357f1e17d3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = 'Kazuyuki TAKASE'
__copyright__ = 'PLEN Project Company Inc, and all authors.'
__license__ = 'The MIT License (http://opensource.org/licenses/mit-license.php)'
#
# =============================================================================
from cv2 import VideoCapture
#
# =============================================================================
CAMERA_INDEX = 0
MOTION_PIN = 26
camera = VideoCapture(CAMERA_INDEX)
wiringPiSetupGpio()
#
# =============================================================================
while True:
#
pass
| 23.366667
| 82
| 0.476462
|
# -*- coding: utf-8 -*-
__author__ = 'Kazuyuki TAKASE'
__copyright__ = 'PLEN Project Company Inc, and all authors.'
__license__ = 'The MIT License (http://opensource.org/licenses/mit-license.php)'
# Import external libraries
# =============================================================================
from time import sleep
from cv2 import VideoCapture, imwrite
from wiringpi import *
# Constant definitions and initialization
# =============================================================================
CAMERA_INDEX = 0
MOTION_PIN = 26
camera = VideoCapture(CAMERA_INDEX)
wiringPiSetupGpio()
# Main loop
# =============================================================================
while True:
    # Implement the rest yourself from here
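    # One possible completion (sketch only, not part of the original exercise):
    #   ret, frame = camera.read()            # grab a frame from the camera
    #   if ret and digitalRead(MOTION_PIN):   # motion detected on the GPIO pin?
    #       imwrite('motion.jpg', frame)      # save the frame to disk
    #   sleep(0.1)
    # (digitalRead comes from `from wiringpi import *`; the pin may also need
    #  pinMode(MOTION_PIN, 0), i.e. INPUT, before the loop.)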
pass
| 114
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 44
|
4cc967e9e9d1ac88abda9c1076b57abe84fc47bc
| 32,902
|
py
|
Python
|
src/timeseries.py
|
AmberCrafter/pythonlib_statistic
|
0fd49283c8dd75c5d1ade064be3318eabf74bdfe
|
[
"MIT"
] | null | null | null |
src/timeseries.py
|
AmberCrafter/pythonlib_statistic
|
0fd49283c8dd75c5d1ade064be3318eabf74bdfe
|
[
"MIT"
] | null | null | null |
src/timeseries.py
|
AmberCrafter/pythonlib_statistic
|
0fd49283c8dd75c5d1ade064be3318eabf74bdfe
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# if used ubuntu 20.10 or later, interpreter set as #!/bin/python and use pip instead of pip3
# =================================================================== #
# platform check
# dateutil check and import
try:
from dateutil.relativedelta import relativedelta
except:
import os, sys, subprocess
if os.name=='nt':
subprocess.check_call([sys.executable, "-m", "pip", "install", "dateutil"])
elif os.name=='posix':
subprocess.check_call([sys.executable, "-m", "pip3", "install", "dateutil"])
else:
raise "Unknow platform, please install 'dateutil' by yourself."
# =================================================================== #
# platform check
# numpy check and import
try:
import numpy as np
except:
import os, sys, subprocess
if os.name=='nt':
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy"])
elif os.name=='posix':
subprocess.check_call([sys.executable, "-m", "pip3", "install", "numpy"])
else:
raise "Unknow platform, please install 'numpy' by yourself."
# =================================================================== #
import datetime
if __name__ == "__main__":
# Implement the object
myobj = Time()
# Input data
import datetime, random
st = datetime.datetime(2020,1,1)
number = 50000
time = [st+datetime.timedelta(hours=val) for val in range(number)]
data = [[random.gauss(10,5) for _ in range(4)] for _ in range(number)]
myobj.input(time,data,header=['a','b','c','d'])
# Calculate and Get result
# myobj.hour(1,500)
myobj.set_config(outputPara_list=['mean','std','max','quartile'])
myobj.season()
myobj.set_config(asDict=True)
result = myobj.get()
print(result)
| 43.578808
| 159
| 0.554951
|
#!/bin/python3
# if used ubuntu 20.10 or later, interpreter set as #!/bin/python and use pip instead of pip3
# =================================================================== #
# platform check
# dateutil check and import
try:
from dateutil.relativedelta import relativedelta
except:
import os,sys,subprocess
if os.name=='nt':
subprocess.check_call([sys.executable, "-m", "pip", "install", "dateutil"])
elif os.name=='posix':
subprocess.check_call([sys.executable, "-m", "pip3", "install", "dateutil"])
else:
raise "Unknow platform, please install 'dateutil' by yourself."
from dateutil.relativedelta import relativedelta
# =================================================================== #
# platform check
# numpy check and import
try:
import numpy as np
except:
import os,sys,subprocess
if os.name=='nt':
subprocess.check_call([sys.executable, "-m", "pip", "install", "numpy"])
elif os.name=='posix':
subprocess.check_call([sys.executable, "-m", "pip3", "install", "numpy"])
else:
raise "Unknow platform, please install 'numpy' by yourself."
import numpy as np
# =================================================================== #
import datetime
from typing import Union
class Time(object):
'''
storageForward: True -> storage value in starttime <br>
storageForward: False -> storage value in endtime
'''
def __init__(self):
self.set_config(init=True)
def _check_data(self):
if not "self.time" in locals(): raise "Please check input time."
if not "self.data" in locals(): raise "Please check input data."
return True
@staticmethod
def _set_ndarray(data):
if isinstance(data,dict):
for key in data.keys():
data[key]=np.array(data[key])
else:
data=np.array(data)
return data
@staticmethod
def _set_header(data,header=None):
'''
only used to format output data
'''
# ----------------------------------------------------------- #
# here i'm not sure what data type i need to use.
# thus, if data=np.array(obj(dict)), then we need
# to use data.item() to get the data
try:
data=data.item()
except:
pass
# ----------------------------------------------------------- #
if header!=None:
dummy={}
for i,head in enumerate(header):
if isinstance(data,dict):
for key in data.keys():
if i==0: dummy[key]={}
dummy[key][head]=data[key][:,i]
else:
dummy[head]=data[:,i]
return dummy
return data
@staticmethod
def _fixTime(time,data,timeStep:dict,zeroPara:dict,storageForward:bool,outputPara_list:list,
starttime:datetime.datetime=None,endtime:datetime.datetime=None):
# def _fixTime(time,data,timeStep:dict,ratio:int,zeroPara:dict,storageForward:bool,starttime:datetime.datetime=None,endtime:datetime.datetime=None):
'''
zeroPara: set start datetime para
season enum:
            1: winter
            2: spring
            3: summer
            4: autumn
'''
minTime = np.nanmin(time) if starttime==None else starttime
maxTime = np.nanmax(time) if endtime==None else endtime
# get data_value
if isinstance(data,dict):
if 'mean' in data.keys(): data=data['mean']
if 'season' in timeStep.keys():
dt = relativedelta(months=3)
if not storageForward: time+=dt; time+=datetime.timedelta(microseconds=-1)
maxTime+=dt
if zeroPara!=None: minTime=minTime.replace(**zeroPara)
dummy={}
for para in outputPara_list:
if para=='quartile':
dummy['lower']=[]
dummy['median']=[]
dummy['upper']=[]
else:
dummy[para]=[]
tummy = []
count = []
# deal with perfix date before a new start
i = Time._get_season(minTime.month)
year = minTime.year if minTime.month!=12 else minTime.year+1
mask=np.where(time<datetime.datetime(year,3*i,1))[0]
t,d,c = Time._nofixTime(time[mask],data[mask],parameter='season',outputPara_list=outputPara_list)
tummy+=list(t); count+=list(c)
for key in dummy.keys(): dummy[key]+=list(d[key])
minTime=datetime.datetime(year,3*i,1)
while minTime<=maxTime:
if minTime>max(time): break
mask=np.where((time>=minTime) & (time<minTime+dt))[0]
t,d,c = Time._nofixTime(time[mask],data[mask],parameter='season',outputPara_list=outputPara_list)
tummy+=list(t); count+=list(c)
for key in dummy.keys(): dummy[key]+=list(d[key])
minTime+=dt
else:
dt = relativedelta(**timeStep)
if not storageForward: time+=dt; time+=datetime.timedelta(microseconds=-1)
maxTime+=dt
if zeroPara!=None: minTime=minTime.replace(**zeroPara)
# if ratio==None: ratio=0
dummy = {}
for para in outputPara_list:
if para=='quartile':
dummy['lower']=[]
dummy['median']=[]
dummy['upper']=[]
else:
dummy[para]=[]
tummy = []
count = []
while minTime<=maxTime:
mask = np.where((time>=minTime) & (time<minTime+dt))[0]
if mask.size==0: minTime+=dt; continue
tummy.append(minTime)
count.append(np.sum(np.isfinite(data[mask])))
if 'mean' in outputPara_list: dummy['mean'].append(np.nanmean(data[mask],axis=0))
if 'std' in outputPara_list: dummy['std'].append(np.nanstd(data[mask],axis=0))
if 'max' in outputPara_list: dummy['max'].append(np.nanmax(data[mask],axis=0))
if 'min' in outputPara_list: dummy['min'].append(np.nanmin(data[mask],axis=0))
if 'maxTime' in outputPara_list: dummy['maxTime'].append(time[mask][np.argmax(data[mask],axis=0)])
                if 'minTime' in outputPara_list: dummy['minTime'].append(time[mask][np.argmin(data[mask],axis=0)])
if 'quartile' in outputPara_list: dummy['lower'].append(np.nanpercentile(data[mask],25,axis=0))
if ('quartile' in outputPara_list) | ('median' in outputPara_list):
dummy['median'].append(np.nanpercentile(data[mask],50,axis=0))
if 'quartile' in outputPara_list: dummy['upper'].append(np.nanpercentile(data[mask],75,axis=0))
# dummy.append(np.nanmean(data[mask],axis=0) if count[-1]>=ratio else np.array([np.nan]*len(data[0])))
minTime+=dt
dummy = Time._set_ndarray(dummy)
return tummy,dummy,count
@staticmethod
def _nofixTime(time,data,parameter:str,outputPara_list:list):
# def _nofixTime(time,data,parameter:str,ratio:int):
'''
parameter: set the datetime parameter (second, minute ...etc) will be used to calculate
season enum:
1: winter
2: spring
3: summer
4: autumn
'''
season_dict = {
1: 'Winter',
2: 'Spring',
3: 'Summer',
4: 'Autumn',
}
if parameter.lower()=='season':
time_para_list = [Time._get_season(val.month) for val in time]
else:
time_para_list = [eval(f"val.{parameter}") for val in time]
time_para_list = np.array(time_para_list)
if time_para_list.size==0: return np.array(np.nan),np.array(np.nan),np.array(np.nan)
minTime = np.nanmin(time_para_list)
maxTime = np.nanmax(time_para_list)
# if ratio==None: ratio=0
# get data_value
if isinstance(data,dict):
if 'mean' in data.keys(): data=data['mean']
dummy = {}
for para in outputPara_list:
if para=='quartile':
dummy['lower']=[]
dummy['median']=[]
dummy['upper']=[]
else:
dummy[para]=[]
tummy = []
count = []
for i in range(minTime,maxTime+1):
mask = np.where(time_para_list==i)[0]
tummy.append(i if parameter.lower()!='season' else [time[mask[0]].year,season_dict[i]])
count.append(np.sum(np.isfinite(data[mask])))
if 'mean' in outputPara_list: dummy['mean'].append(np.nanmean(data[mask],axis=0))
if 'std' in outputPara_list: dummy['std'].append(np.nanstd(data[mask],axis=0))
if 'max' in outputPara_list: dummy['max'].append(np.nanmax(data[mask],axis=0))
if 'min' in outputPara_list: dummy['min'].append(np.nanmin(data[mask],axis=0))
if 'maxTime' in outputPara_list: dummy['maxTime'].append(time[mask][np.argmax(data[mask],axis=0)])
            if 'minTime' in outputPara_list: dummy['minTime'].append(time[mask][np.argmin(data[mask],axis=0)])
if 'quartile' in outputPara_list: dummy['lower'].append(np.nanpercentile(data[mask],25,axis=0))
if ('quartile' in outputPara_list) | ('median' in outputPara_list):
dummy['median'].append(np.nanpercentile(data[mask],50,axis=0))
if 'quartile' in outputPara_list: dummy['upper'].append(np.nanpercentile(data[mask],75,axis=0))
# dummy.append(np.nanmean(data[mask],axis=0) if count[-1]>=ratio else np.array([np.nan]*len(data[0])))
dummy = Time._set_ndarray(dummy)
return tummy,dummy,count
@staticmethod
def _get_season(month):
'''
enum:
1: winter
2: spring
3: summer
4: autumn
'''
return (month%12+3)//3
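    # Worked examples: _get_season(12) == (12%12+3)//3 == 1 (winter),
    # _get_season(4) == 2 (spring), _get_season(7) == 3 (summer), _get_season(10) == 4 (autumn).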
@staticmethod
def _QC_numbers(data,count,threshold):
if threshold==None: return data
count = np.array(count)
data = np.array(data)
mask = np.where(count<threshold)[0]
data[mask,:]=np.nan
return data
def set_config(self,init:bool=False,**kwargs) -> None:
'''
config['storageForward']: save the value at the start time or not<br>
        config['outputPara_list']: select output parameters [mean, std, max, min, maxTime, minTime, quartile, median]
Arguments:
init: Is the initialize status? Default is False
If set True, will using the init state.
**kwargs:
Optional, this work only init set false.
config: {
asDict: bool,
storage: bool,
fixTime: bool,
zeroStart: bool,
selfUpdate: bool,
outputPara_list: list = [
mean,
std,
max,
min,
maxTime,
minTime,
quartile,
median
]
}
'''
if init==True:
self.config = dict(
asDict=False,
storageForward=True,
fixTime=True,
zeroStart=True,
selfUpdate=True,
                outputPara_list=['mean','std'] # ['mean','std','max','min','maxTime','minTime','quartile','median'],
)
else:
for key in kwargs.keys():
self.config[key] = kwargs[key]
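    # Usage example (mirrors the __main__ block at the bottom of this file):
    #   obj = Time()
    #   obj.set_config(outputPara_list=['mean', 'std', 'max', 'quartile'], asDict=True)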
def input(self,time: Union[list, np.ndarray],data: Union[list, np.ndarray],dtype:object =float,
ratio: Union[int, float]=None,header: list=None,starttime:datetime.datetime=None,endtime:datetime.datetime=None) -> str:
'''
time <datetime> : input timelist of data <br>
data <numerical>: input data array
Arguments:
time: list of time series
data: list of data set depend on time series
dtype: convert type of data elements
ratio: require of the data numbers(int) or ratio(float)
header: export tag of data header
starttime: start of the time
endtime: end of the time
Returns:
return 'Successfully' when process success.
'''
self.time = np.array(time)
self.data = np.array(data,dtype=dtype)
self.ratio = ratio
self.header = header
self.starttime = starttime
self.endtime = endtime
self.counts = []
return "Successfully"
def isrepeat(self) -> bool:
'''
        Check whether the data contains repeated (duplicate) timestamps.
        Returns:
            True if any datetime appears more than once in the data set, otherwise False.
'''
if len(self.time.reshape(-1))==len(set(self.time)):
return False
else:
return True
def second(self,ratio: Union[int, float]=None,base: int=1000) -> Union[None, tuple, list, dict]:
'''
        Compute per-second statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(seconds=1),
zeroPara=dict(microsecond=0),storageForward=self.config['storageForward'],
outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(seconds=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='second',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def minute(self,ratio: Union[int, float]=None,base: int=60) -> Union[None, tuple, list, dict]:
'''
        Compute per-minute statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(minutes=1),
zeroPara=dict(second=0,microsecond=0),storageForward=self.config['storageForward'],
outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(minutes=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='minute',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def hour(self,ratio: Union[int, float]=None,base: int=60) -> Union[None, tuple, list, dict]:
'''
        Compute per-hour statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(hours=1)
,zeroPara=dict(minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'],
outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(hours=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='hour',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def day(self,ratio: Union[int, float]=None,base: int=24) -> Union[None, tuple, list, dict]:
'''
        Compute per-day statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(days=1),
zeroPara=dict(hour=0,minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'],
outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(days=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='day',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def month(self,ratio: Union[int, float]=None,base: int=30) -> Union[None, tuple, list, dict]:
'''
        Compute per-month statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(months=1),
zeroPara=dict(day=1,hour=0,minute=0,second=0,microsecond=0),
outputPara_list=self.config['outputPara_list'],storageForward=self.config['storageForward'], starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(months=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='month',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def season(self,ratio: Union[int, float]=None,base: int=3) -> Union[None, tuple, list, dict]:
'''
        Compute per-season statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
'''
Spring: March, April, May <br>
Summer: June, July, August <br>
Autumn: September, October, November <br>
Winter: December, January, February
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(season=1),
zeroPara=dict(day=1,hour=0,minute=0,second=0,microsecond=0),
outputPara_list=self.config['outputPara_list'],storageForward=self.config['storageForward'], starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(season=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='season',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def year(self,ratio:Union[int, float]=None,base:int=12) -> Union[None, tuple, list, dict]:
'''
        Compute per-year statistics, based on the config settings.
Arguments:
ratio: require of the data numbers(int) or ratio(float)
base: base number of required data, use on ratio<=1
Returns:
structure of return data
None: if config.selfUpdate==True, then export data by self.get()
tuple or list: if config.selfUpdate==False & config.asDict==False, then return the data as tuple.
( time, data, count )
dict: if config.selfUpdate==False & config.asDict==True, then return the data as dictionary.
{
time: np.ndarray,
data: np.ndarray,
count: np.ndarray
}
'''
if ratio!=None:
ratio=int(base*ratio) if ratio<=1 else int(ratio)
else:
if self.ratio!=None:
ratio=int(base*self.ratio) if self.ratio<=1 else int(self.ratio)
if self.config['fixTime']:
if self.config['zeroStart']:
tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(years=1),
zeroPara=dict(month=1,day=1,hour=0,minute=0,second=0,microsecond=0),storageForward=self.config['storageForward'],
outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else:
                tummy,dummy,count = self._fixTime(self.time,self.data,timeStep=dict(years=1),
                    zeroPara=None,storageForward=self.config['storageForward'],
                    outputPara_list=self.config['outputPara_list'],starttime=self.starttime,endtime=self.endtime)
else: # self.config['fixTime']==False
tummy,dummy,count = self._nofixTime(self.time,self.data,parameter='year',outputPara_list=self.config['outputPara_list'])
dummy = self._QC_numbers(dummy,count,ratio)
if self.config['selfUpdate']:
self.data = np.array(dummy)
self.time = np.array(tummy)
self.counts = np.array(count)
else:
print("This is not object standard operation!")
print("You need to set config[selfUpdate]=True and use get method to get the result.")
dummy = Time._set_header(dummy,header=self.header)
if self.config['asDict']:
return dict(time=tummy,data=dummy,counts=count)
else:
return tummy,dummy,count
def get(self,parameter: str=None) -> Union[list, dict, np.ndarray]:
'''
export the data from Time factory.
Arguments:
parameter: select the return parameter.
enum:
None: {
time,
data,
counts
},
config,
time,
data,
counts
Returns:
select parameter data set.
'''
if parameter=='config': return self.config
if (parameter==None) and (self.config['asDict']): return dict(time=self.time, data=Time._set_header(self.data,header=self.header), counts=self.counts)
if parameter=='time': return self.time
if parameter=='data': return Time._set_header(self.data,header=self.header)
if parameter=='counts': return self.counts
print("Please select the return parameter or set config['asDict']=True.")
if __name__ == "__main__":
# Implement the object
myobj = Time()
# Input data
import datetime, random
st = datetime.datetime(2020,1,1)
number = 50000
time = [st+datetime.timedelta(hours=val) for val in range(number)]
data = [[random.gauss(10,5) for _ in range(4)] for _ in range(number)]
myobj.input(time,data,header=['a','b','c','d'])
# Calculate and Get result
# myobj.hour(1,500)
myobj.set_config(outputPara_list=['mean','std','max','quartile'])
myobj.season()
myobj.set_config(asDict=True)
result = myobj.get()
print(result)
| 0
| 8,533
| 0
| 22,470
| 0
| 0
| 0
| 23
| 97
|
f191d89902854c6a45383db6b705fc612cf47791
| 784
|
py
|
Python
|
mmdet/models/__init__.py
|
FelixZhang7/miemiedetection
|
ca44f33255e0bb9d6150044983a344fb9a288c08
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/__init__.py
|
FelixZhang7/miemiedetection
|
ca44f33255e0bb9d6150044983a344fb9a288c08
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/__init__.py
|
FelixZhang7/miemiedetection
|
ca44f33255e0bb9d6150044983a344fb9a288c08
|
[
"Apache-2.0"
] | 1
|
2022-02-16T08:35:00.000Z
|
2022-02-16T08:35:00.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
| 27.034483
| 63
| 0.811224
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
from .backbones.darknet import CSPDarknet, Darknet
from .backbones.resnet_vd import Resnet18Vd, Resnet50Vd
from .backbones.resnet_vb import Resnet50Vb
from .losses.yolov3_loss import YOLOv3Loss
from .losses.losses import IOUloss
from .losses.iou_losses import MyIOUloss, IouLoss, IouAwareLoss
from .losses.fcos_loss import FCOSLoss
from .heads.yolov3_head import YOLOv3Head
from .heads.yolox_head import YOLOXHead
from .heads.fcos_head import FCOSHead
from .necks.yolo_pafpn import YOLOPAFPN
from .necks.yolo_fpn import YOLOFPN
from .necks.fpn import FPN
from .architectures.yolo import PPYOLO
from .architectures.yolox import YOLOX
from .architectures.fcos import FCOS
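# These re-exports make the main building blocks importable directly from mmdet.models,
# e.g. `from mmdet.models import PPYOLO, YOLOX, FCOS`.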
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 318
| 357
|
0468d6c246b239a4fae46a385cc87c22edb5790e
| 282
|
py
|
Python
|
egs/voxceleb/v2.voxceleb1/scp_ark2npy.py
|
zeek-han/kaldi
|
e3ed0812db7abd3c266d5616babfd0adff8260ac
|
[
"Apache-2.0"
] | null | null | null |
egs/voxceleb/v2.voxceleb1/scp_ark2npy.py
|
zeek-han/kaldi
|
e3ed0812db7abd3c266d5616babfd0adff8260ac
|
[
"Apache-2.0"
] | null | null | null |
egs/voxceleb/v2.voxceleb1/scp_ark2npy.py
|
zeek-han/kaldi
|
e3ed0812db7abd3c266d5616babfd0adff8260ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import kaldiio
id2mfcc = kaldiio.load_scp('/home/sangjik/kaldi/egs/voxceleb/v2.smallest/mfcc/raw_mfcc_train.10.scp')
for utt_id, mfcc in id2mfcc.items():
#print(utt_id, mfcc.shape)
np.save('./tmp_mfcc/{}.npy'.format(utt_id), mfcc)
| 28.2
| 101
| 0.72695
|
#!/usr/bin/env python
import numpy as np
import kaldiio
id2mfcc = kaldiio.load_scp('/home/sangjik/kaldi/egs/voxceleb/v2.smallest/mfcc/raw_mfcc_train.10.scp')
for utt_id, mfcc in id2mfcc.items():
#print(utt_id, mfcc.shape)
np.save('./tmp_mfcc/{}.npy'.format(utt_id), mfcc)
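# Notes (assumptions): kaldiio.load_scp returns a lazy dict-like reader keyed by utterance
# id, so each MFCC matrix is only read when accessed in the loop above; the ./tmp_mfcc
# directory must already exist, since np.save does not create missing directories.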
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b75db27a80b0122a92a95241b891c75aed56b87b
| 38,348
|
py
|
Python
|
pipeline/publication.py
|
Yi-61/map-ephys
|
8eacd84f67678b05bcc379c7d5a9560ea7a87e46
|
[
"MIT"
] | null | null | null |
pipeline/publication.py
|
Yi-61/map-ephys
|
8eacd84f67678b05bcc379c7d5a9560ea7a87e46
|
[
"MIT"
] | null | null | null |
pipeline/publication.py
|
Yi-61/map-ephys
|
8eacd84f67678b05bcc379c7d5a9560ea7a87e46
|
[
"MIT"
] | null | null | null |
import logging
import datajoint as dj
from . import experiment
from . import ephys
from . import get_schema_name
PUBLICATION_TRANSFER_TIMEOUT = 10000
schema = dj.schema(get_schema_name('publication'))
log = logging.getLogger(__name__)
__all__ = [experiment, ephys]
| 34.861818
| 80
| 0.49854
|
import logging
import pathlib
import re
import os
from fnmatch import fnmatch
from textwrap import dedent
from collections import defaultdict
import datajoint as dj
from . import lab
from . import experiment
from . import ephys
from . import tracking
from .ingest.tracking import TrackingIngest
from pipeline.globus import GlobusStorageManager
from . import get_schema_name
PUBLICATION_TRANSFER_TIMEOUT = 10000
schema = dj.schema(get_schema_name('publication'))
log = logging.getLogger(__name__)
__all__ = [experiment, ephys]
@schema
class GlobusStorageLocation(dj.Lookup):
""" globus storage locations """
definition = """
globus_alias: varchar(32) # name for location (e.g. 'raw-ephys')
---
globus_endpoint: varchar(255) # globus endpoint (user#endpoint)
globus_path: varchar(1024) # unix-style path within endpoint
"""
@property
def contents(self):
custom = dj.config.get('custom', None)
if custom and 'globus.storage_locations' in custom: # test config
return custom['globus.storage_locations']
return (('raw-ephys', '5b875fda-4185-11e8-bb52-0ac6873fc732', '/'),
('raw-video', '5b875fda-4185-11e8-bb52-0ac6873fc732', '/'),)
@classmethod
def local_endpoint(cls, globus_alias=None):
'''
return local endpoint for globus_alias from dj.config
expects:
globus.local_endpoints: {
globus_alias: {
'endpoint': uuid, # UUID of local endpoint
'endpoint_subdir': str, # unix-style path within endpoint
'endpoint_path': str # corresponding local path
}
'''
le = dj.config.get('custom', {}).get('globus.local_endpoints', None)
if le is None or globus_alias not in le:
raise dj.DataJointError(
"globus_local_endpoints for {} not configured".format(
globus_alias))
return le[globus_alias]
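# Illustrative only -- a minimal sketch of the dj.config entry that
# GlobusStorageLocation.local_endpoint() expects; the endpoint UUID and the
# local paths below are placeholders, not values from this project:
#
#   dj.config['custom'] = {
#       'globus.local_endpoints': {
#           'raw-ephys': {
#               'endpoint': '00000000-0000-0000-0000-000000000000',
#               'endpoint_subdir': 'map/raw-ephys',
#               'endpoint_path': '/data/map/raw-ephys'
#           }
#       }
#   }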
@schema
class ArchivedSession(dj.Imported):
definition = """
-> experiment.Session
---
-> GlobusStorageLocation
"""
@schema
class DataSetType(dj.Lookup):
definition = """
dataset_type: varchar(64)
"""
contents = zip(['ephys-raw-trialized',
'ephys-raw-continuous',
'ephys-sorted',
'tracking-video'])
@schema
class FileType(dj.Lookup):
definition = """
file_type: varchar(32) # file type short name
---
file_glob: varchar(64) # file match pattern
file_descr: varchar(255) # file type long description
"""
@property
def contents(self):
data = [('ephys-raw-3a-ap-trial',
'*_g0_t[0-9]*.imec.ap.bin',
'''
3A Probe per-trial AP channels high pass filtered at
300Hz and sampled at 30kHz - recording file
'''),
('ephys-raw-3a-ap-trial-meta',
'*_g0_t[0-9]*.imec.ap.meta',
'''
3A Probe per-trial AP channels high pass
filtered at 300Hz and sampled at 30kHz - file metadata
'''),
('ephys-raw-3a-lf-trial',
'*_g0_t[0-9]*.imec.lf.bin',
'''
3A Probe per-trial AP channels low pass filtered at
300Hz and sampled at 2.5kHz - recording file
'''),
('ephys-raw-3a-lf-trial-meta',
'*_g0_t[0-9]*.imec.lf.meta',
'''
3A Probe per-trial AP channels low pass filtered at
300Hz and sampled at 2.5kHz - file metadata
'''),
('ephys-raw-3b-ap-trial',
'*_????????_g?_t[0-9]*.imec.ap.bin',
'''
3B Probe per-trial AP channels high pass filtered at
300Hz and sampled at 30kHz - recording file
'''),
('ephys-raw-3b-ap-trial-meta',
'*_????????_g?_t[0-9]*.imec.ap.meta',
'''
3B Probe per-trial AP channels high pass
filtered at 300Hz and sampled at 30kHz - file metadata
'''),
('ephys-raw-3b-lf-trial',
'*_????????_g?_t[0-9]*.imec.lf.bin',
'''
3B Probe per-trial AP channels low pass filtered at
300Hz and sampled at 2.5kHz - recording file
'''),
('ephys-raw-3b-lf-trial-meta',
'*_????????_g?_t[0-9]*.imec.lf.meta',
'''
3B Probe per-trial AP channels low pass filtered at
300Hz and sampled at 2.5kHz - file metadata
'''),
('ephys-raw-3b-ap-concat',
'*_????????_g?_tcat.imec.ap.bin',
'''
3B Probe concatenated AP channels high pass filtered at
300Hz and sampled at 30kHz - recording file
'''),
('ephys-raw-3b-ap-concat-meta',
                 '*_????????_g?_tcat.imec.ap.meta',
'''
3B Probe concatenated AP channels high pass
filtered at 300Hz and sampled at 30kHz - file metadata
'''),
('ephys-raw-3b-lf-concat',
'*_????????_g?_tcat.imec.lf.bin',
'''
3B Probe concatenated AP channels low pass filtered at
300Hz and sampled at 2.5kHz - recording file
'''),
('ephys-raw-3b-lf-concat-meta',
'*_????????_g?_tcat.imec.lf.meta',
'''
3B Probe concatenated AP channels low pass filtered at
300Hz and sampled at 2.5kHz - file metadata
'''),
('tracking-video-trial',
'*_*_[0-9]*-*.[am][vp][i4]',
'''
Video Tracking per-trial file at 300fps
'''),
('tracking-video-map',
'*_????????_*.txt',
'''
Video Tracking file-to-trial mapping
''')]
return [[dedent(i).replace('\n', ' ').strip(' ') for i in r]
for r in data]
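# Illustrative only -- example filenames the globs above are intended to
# match (subject and date components are made up):
#   dl55_g0_t12.imec.ap.bin            -> ephys-raw-3a-ap-trial
#   dl55_20190108_g0_t12.imec.ap.bin   -> ephys-raw-3b-ap-trial
#   dl55_20190108_g0_tcat.imec.lf.bin  -> ephys-raw-3b-lf-concat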
@schema
class DataSet(dj.Manual):
definition = """
-> GlobusStorageLocation
dataset_name: varchar(128)
---
-> DataSetType
"""
class PhysicalFile(dj.Part):
definition = """
-> master
file_subpath: varchar(128)
---
-> FileType
"""
@schema
class ArchivedRawEphys(dj.Imported):
definition = """
-> ArchivedSession
-> DataSet
probe_folder: tinyint
"""
key_source = experiment.Session
gsm = None # for GlobusStorageManager
class RawEphysTrial(dj.Part):
""" file:trial mapping if applicable """
definition = """
-> master
-> experiment.SessionTrial
-> DataSet.PhysicalFile
"""
def get_gsm(self):
log.debug('ArchivedRawEphysTrial.get_gsm()')
if self.gsm is None:
self.gsm = GlobusStorageManager()
self.gsm.wait_timeout = PUBLICATION_TRANSFER_TIMEOUT
return self.gsm
@classmethod
def discover(cls):
"""
Discover files on globus and attempt to register them.
"""
self = cls()
globus_alias = 'raw-ephys'
ra, rep, rep_sub = (GlobusStorageLocation()
& {'globus_alias': globus_alias}).fetch1().values()
smap = {'{}/{}'.format(s['water_restriction_number'],
s['session_date']).replace('-', ''): s
for s in (experiment.Session()
* (lab.WaterRestriction() * lab.Subject.proj()))}
ftmap = {t['file_type']: t for t
in (FileType() & "file_type like 'ephys%%'")}
skey = None
sskip = set()
sfiles = [] # {file_subpath:, trial:, file_type:,}
def commit(skey, sfiles):
log.info('commit. skey: {}, sfiles: {}'.format(skey, sfiles))
if not sfiles:
log.info('skipping. no files in set')
return
h2o, sdate, ftypes = set(), set(), set()
ptmap = defaultdict(lambda: defaultdict(list)) # probe:trial:file
for s in sfiles:
ptmap[s['probe']][s['trial']].append(s)
h2o.add(s['water_restriction_number'])
sdate.add(s['session_date'])
ftypes.add(s['file_type'])
if len(h2o) != 1 or len(sdate) != 1:
log.info('skipping. bad h2o {} or session date {}'.format(
h2o, sdate))
return
h2o, sdate = next(iter(h2o)), next(iter(sdate))
            ptmap = {k: {kk: vv for kk, vv in v.items()}
                     for k, v in ptmap.items()}
if all('trial' in f for f in ftypes):
# DataSet
ds_type = 'ephys-raw-trialized'
ds_name = '{}_{}_{}'.format(h2o, sdate, ds_type)
ds_key = {'dataset_name': ds_name,
'globus_alias': globus_alias}
if (DataSet & ds_key):
log.info('DataSet: {} already exists. Skipping.'.format(
ds_key))
return
DataSet.insert1({**ds_key, 'dataset_type': ds_type},
allow_direct_insert=True)
# ArchivedSession
as_key = {k: v for k, v in smap[skey].items()
if k in ArchivedSession.primary_key}
ArchivedSession.insert1(
{**as_key, 'globus_alias': globus_alias},
allow_direct_insert=True,
skip_duplicates=True)
for p in ptmap:
# ArchivedRawEphys
ep_key = {**as_key, **ds_key, 'probe_folder': p}
ArchivedRawEphys.insert1(ep_key, allow_direct_insert=True)
for t in ptmap[p]:
for f in ptmap[p][t]:
DataSet.PhysicalFile.insert1(
{**ds_key, **f}, allow_direct_insert=True,
ignore_extra_fields=True)
ArchivedRawEphys.RawEphysTrial.insert1(
{**ep_key, **ds_key,
'trial': t,
'file_subpath': f['file_subpath']},
allow_direct_insert=True)
elif all('concat' in f for f in ftypes):
raise NotImplementedError('concatenated not yet implemented')
else:
log.info('skipping. mixed filetypes detected')
return
gsm = self.get_gsm()
gsm.activate_endpoint(rep)
for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)):
log.debug('checking: {}:{}/{}'.format(
ep, dirname, node.get('name', '')))
edir = re.match('([a-z]+[0-9]+)/([0-9]{8})/([0-9]+)', dirname)
if not edir or node['DATA_TYPE'] != 'file':
continue
log.debug('dir match: {}'.format(dirname))
h2o, sdate, probe = edir[1], edir[2], edir[3]
skey_i = '{}/{}'.format(h2o, sdate)
if skey_i != skey:
if skey and skey in smap:
with dj.conn().transaction:
try:
commit(skey, sfiles)
except Exception as e:
log.error(
'Exception {} committing {}. files: {}'.format(
repr(e), skey, sfiles))
skey, sfiles = skey_i, []
if skey not in smap:
if skey not in sskip:
log.debug('session {} not known. skipping.'.format(skey))
sskip.add(skey)
continue
fname = node['name']
log.debug('found file {}'.format(fname))
if '.' not in fname:
log.debug('skipping {} - no dot in fname'.format(fname))
continue
froot, fext = fname.split('.', 1)
ftype = {g['file_type']: g for g in ftmap.values()
if fnmatch(fname, g['file_glob'])}
if len(ftype) != 1:
log.debug('skipping {} - incorrect type matches: {}'.format(
fname, ftype))
continue
ftype = next(iter(ftype.values()))['file_type']
trial = None
if 'trial' in ftype:
trial = int(froot.split('_t')[1])
file_subpath = '{}/{}'.format(dirname, fname)
sfiles.append({'water_restriction_number': h2o,
'session_date': '{}-{}-{}'.format(
sdate[:4], sdate[4:6], sdate[6:]),
'probe': int(probe),
'trial': int(trial),
'file_subpath': file_subpath,
'file_type': ftype})
if skey:
with dj.conn().transaction:
commit(skey, sfiles)
def make(self, key):
"""
discover files in local endpoint and transfer/register
"""
log.debug(key)
globus_alias = 'raw-ephys'
le = GlobusStorageLocation.local_endpoint(globus_alias)
lep, lep_sub, lep_dir = (le['endpoint'],
le['endpoint_subdir'],
le['endpoint_path'])
        ra, rep, rep_sub = (GlobusStorageLocation()
                            & {'globus_alias': globus_alias}).fetch1().values()
log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir))
# Get session related information needed for filenames/records
sinfo = (lab.WaterRestriction
* lab.Subject.proj()
* experiment.Session() & key).fetch1()
tinfo = ((lab.WaterRestriction
* lab.Subject.proj()
* experiment.Session()
* experiment.SessionTrial) & key).fetch()
h2o = sinfo['water_restriction_number']
sdate = sinfo['session_date']
subdir = pathlib.Path(h2o, str(sdate).replace('-', '')) # + probeno
lep_subdir = pathlib.Path(lep_dir, subdir)
probechoice = [str(i) for i in range(1, 10)] # XXX: hardcoded
file_globs = {i['file_glob']: i['file_type']
for i in FileType & "file_type like 'ephys%%'"}
# Process each probe folder
for lep_probedir in lep_subdir.glob('*'):
lep_probe = str(lep_probedir.relative_to(lep_subdir))
if lep_probe not in probechoice:
log.info('skipping lep_probedir: {} - unexpected name'.format(
lep_probedir))
continue
lep_matchfiles = {}
lep_probefiles = lep_probedir.glob('*.*')
for pf in lep_probefiles:
pfbase = pf.relative_to(lep_probedir)
pfmatch = {k: pfbase.match(k) for k in file_globs}
if any(pfmatch.values()):
log.debug('found valid file: {}'.format(pf))
lep_matchfiles[pf] = tuple(k for k in pfmatch if pfmatch[k])
else:
log.debug('skipping non-match file: {}'.format(pf))
continue
# Build/Validate file records
if not all([len(lep_matchfiles[i]) == 1 for i in lep_matchfiles]):
# TODO: handle trial + concatenated match case...
                log.warning('files matched multiple types: {}'.format(
                    lep_matchfiles))
continue
type_to_file = {file_globs[lep_matchfiles[mf][0]]: mf
for mf in lep_matchfiles}
            ds_key, ds_name, ds_files, ds_trials = (
                None, None, [], [])
if all(['trial' in t for t in type_to_file]):
dataset_type = 'ephys-raw-trialized'
ds_name = '{}_{}_{}'.format(h2o, sdate.isoformat(),
dataset_type)
ds_key = {'dataset_name': ds_name,
'globus_storage_location': globus_alias}
for t in type_to_file:
fsp = type_to_file[t].relative_to(lep_dir)
dsf = {**ds_key, 'file_subpath': str(fsp)}
# e.g : 'tw34_g0_t0.imec.ap.meta' -> *_t(trial).*
trial = int(fsp.name.split('_t')[1].split('.')[0])
if trial not in tinfo['trial']:
log.warning('unknown trial file: {}. skipping'.format(
dsf))
continue
ds_trials.append({**dsf, 'trial': trial})
ds_files.append({**dsf, 'file_type': t})
elif all(['concat' in t for t in type_to_file]):
dataset_type = 'ephys-raw-continuous'
ds_name = '{}_{}_{}'.format(h2o, sdate.isoformat(),
dataset_type)
ds_key = {'dataset_name': ds_name,
'globus_storage_location': globus_alias}
for t in type_to_file:
fsp = type_to_file[t].relative_to(lep_dir)
ds_files.append({**ds_key,
'file_subpath': str(fsp),
'file_type': t})
else:
log.warning("couldn't determine dataset type for {}".format(
lep_probedir))
continue
# Transfer Files
gsm = self.get_gsm()
gsm.activate_endpoint(lep) # XXX: cache / prevent duplicate RPC?
gsm.activate_endpoint(rep) # XXX: cache / prevent duplicate RPC?
DataSet.insert1({**ds_key, 'dataset_type': dataset_type},
allow_direct_insert=True)
for f in ds_files:
                fsp = f['file_subpath']
srcp = '{}:{}/{}'.format(lep, lep_sub, fsp)
dstp = '{}:{}/{}'.format(rep, rep_sub, fsp)
log.info('transferring {} to {}'.format(srcp, dstp))
# XXX: check if exists 1st?
if not gsm.cp(srcp, dstp):
emsg = "couldn't transfer {} to {}".format(srcp, dstp)
log.error(emsg)
raise dj.DataJointError(emsg)
                DataSet.PhysicalFile.insert1({**ds_key, **f},
                                             allow_direct_insert=True)
# Add Records
ArchivedSession.insert1(
{**key, 'globus_storage_location': globus_alias},
skip_duplicates=True, allow_direct_insert=True)
ArchivedRawEphys.insert1(
{**key, **ds_key, 'probe_folder': int(str(lep_probe))},
allow_direct_insert=True)
if dataset_type == 'ephys-raw-trialized':
                ArchivedRawEphys.RawEphysTrial.insert(
[{**key, **t} for t in ds_trials],
allow_direct_insert=True)
@classmethod
def retrieve(cls):
self = cls()
for key in self:
self.retrieve1(key)
@classmethod
def retrieve1(cls, key):
'''
retrieve related files for a given key
'''
self = cls()
raise NotImplementedError('retrieve not yet implemented')
# Old / to be updated:
# >>> list(key.keys())
# ['subject_id', 'session', 'trial', 'electrode_group', 'globus_alia
log.debug(key)
lep, lep_sub, lep_dir = GlobusStorageLocation().local_endpoint
log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir))
# get session related information needed for filenames/records
sinfo = ((lab.WaterRestriction
* lab.Subject.proj()
* experiment.Session()
* experiment.SessionTrial) & key).fetch1()
h2o = sinfo['water_restriction_number']
sdate = sinfo['session_date']
eg = key['electrode_group']
trial = key['trial']
# build file locations:
# fpat: base file pattern for this sessions files
# gbase: globus-url base path for this sessions files
fpat = '{}_{}_{}_g0_t{}'.format(h2o, sdate, eg, trial)
gbase = '/'.join((h2o, str(sdate), str(eg), fpat))
repname, rep, rep_sub = (GlobusStorageLocation() & key).fetch()[0]
gsm = self.get_gsm()
gsm.activate_endpoint(lep) # XXX: cache this / prevent duplicate RPC?
gsm.activate_endpoint(rep) # XXX: cache this / prevent duplicate RPC?
sfxmap = {'.imec.ap.bin': ArchivedRawEphysTrial.ArchivedApChannel,
'.imec.ap.meta': ArchivedRawEphysTrial.ArchivedApMeta,
'.imec.lf.bin': ArchivedRawEphysTrial.ArchivedLfChannel,
'.imec.lf.meta': ArchivedRawEphysTrial.ArchivedLfMeta}
for sfx, cls in sfxmap.items():
if cls & key:
log.debug('record found for {} & {}'.format(cls.__name__, key))
gname = '{}{}'.format(gbase, sfx)
srcp = '{}:/{}/{}'.format(rep, rep_sub, gname)
dstp = '{}:/{}/{}'.format(lep, lep_sub, gname)
log.info('transferring {} to {}'.format(srcp, dstp))
# XXX: check if exists 1st? (manually or via API copy-checksum)
if not gsm.cp(srcp, dstp):
emsg = "couldn't transfer {} to {}".format(srcp, dstp)
log.error(emsg)
raise dj.DataJointError(emsg)
@schema
class ArchivedSortedEphys(dj.Imported):
definition = """
-> ArchivedSession
-> DataSet
probe_folder: tinyint
---
sorting_time=null: datetime
"""
key_source = experiment.Session
def make(self, key):
"""
discover files in local endpoint and transfer/register
"""
raise NotImplementedError('ArchivedSortedEphys.make to be implemented')
@schema
class ArchivedTrackingVideo(dj.Imported):
'''
ArchivedTrackingVideo storage
    Note: video_file_name is tracked here because the trial->file map is
    non-deterministic.
Directory locations of the form:
{Water restriction number}\{Session Date}\video
with file naming convention of the form:
{Water restriction number}_{camera-position-string}_NNN-NNNN.avi
Where 'NNN' is determined from the 'tracking map file' which maps
trials to videos as outlined in tracking.py
XXX:
    Using key-source based lookup as is currently done,
may have trials for which there is no tracking,
so camera cannot be determined to do file lookup, thus videos are missed.
This could be resolved via schema adjustment, or file-traversal
based 'opportunistic' registration strategy.
'''
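    # Illustrative only -- example layout implied by the docstring above; the
    # subject, date and camera names are made up:
    #   dl55/20190108/video/dl55_20190108_side.txt    (trial map file)
    #   dl55/20190108/video/dl55_side_998-0000.avi    (per-trial video)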
definition = """
-> ArchivedSession
-> tracking.TrackingDevice
---
-> DataSet
"""
key_source = tracking.TrackingDevice * experiment.Session
ingest = None # ingest module reference
gsm = None # for GlobusStorageManager
class TrialVideo(dj.Part):
definition = """
-> master
-> experiment.SessionTrial
---
-> DataSet.PhysicalFile
"""
@classmethod
def get_ingest(cls):
'''
return tracking_ingest module
not imported globally to prevent ingest schema creation for client case
'''
log.debug('ArchivedVideoFile.get_ingest()')
if cls.ingest is None:
from .ingest import tracking as tracking_ingest
cls.ingest = tracking_ingest
return cls.ingest
def get_gsm(self):
log.debug('ArchivedVideoFile.get_gsm()')
if self.gsm is None:
self.gsm = GlobusStorageManager()
self.gsm.wait_timeout = PUBLICATION_TRANSFER_TIMEOUT
return self.gsm
@classmethod
def discover(cls):
"""
discover files on globus and attempt to register them
"""
self = cls()
globus_alias = 'raw-video'
le = GlobusStorageLocation.local_endpoint(globus_alias)
lep, lep_sub, lep_dir = (le['endpoint'],
le['endpoint_subdir'],
le['endpoint_path'])
ra, rep, rep_sub = (GlobusStorageLocation()
& {'globus_alias': globus_alias}).fetch1().values()
smap = {'{}/{}'.format(s['water_restriction_number'],
s['session_date']).replace('-', ''): s
for s in (experiment.Session()
* (lab.WaterRestriction() * lab.Subject.proj()))}
tpos_dev = {s['tracking_position']: s['tracking_device']
for s in tracking.TrackingDevice()} # position:device
ftmap = {t['file_type']: t for t
in (FileType() & "file_type like 'tracking%%'")}
skey = None
sskip = set()
sfiles = [] # {file_subpath:, trial:, file_type:,}
gsm = self.get_gsm()
gsm.activate_endpoint(lep)
gsm.activate_endpoint(rep)
def commit(skey, sfiles):
log.info('commit. skey: {}'.format(skey))
if not sfiles:
                log.info('commit skipping {}. no files in set'.format(skey))
                return
# log.debug('sfiles: {}'.format(sfiles))
h2o, sdate, ftypes = set(), set(), set()
dftmap = {} # device:file:trial via load_campath mapping files
dvfmap = defaultdict(lambda: defaultdict(list)) # device:video:file
dtfmap = defaultdict(lambda: defaultdict(list)) # device:trial:file
for s in sfiles:
if s['file_type'] == 'tracking-video-trial':
dvfmap[s['position']][s['video']].append(s)
h2o.add(s['water_restriction_number'])
sdate.add(s['session_date'])
ftypes.add(s['file_type'])
if s['file_type'] == 'tracking-video-map':
# xfer & load camera:trial map ex: dl55_20190108_side.txtb
fsp = s['file_subpath']
lsp = '/tmp/' + s['file_subpath'].split('/')[-1]
srcp = '{}:{}/{}'.format(rep, rep_sub, fsp)
dstp = '{}:{}/{}'.format(lep, lep_sub, lsp)
log.info('transferring {} to {}'.format(srcp, dstp))
if not gsm.cp(srcp, dstp): # XXX: check if exists 1st?
emsg = "couldn't transfer {} to {}".format(srcp, dstp)
log.error(emsg)
raise dj.DataJointError(emsg)
lfname = lep_dir + lsp # local filesysem copy location
dftmap[s['position']] = TrackingIngest.load_campath(lfname)
if len(h2o) != 1 or len(sdate) != 1:
log.info('skipping. bad h2o {} or session date {}'.format(
h2o, sdate))
return
h2o, sdate = next(iter(h2o)), next(iter(sdate))
for d in dvfmap:
if d in dftmap: # remap video no -> trial
dtfmap[d] = {dftmap[d][v]:
dict(dvfmap[d][v], trial=dftmap[d][v])
for v in dvfmap[d]}
else: # assign video no -> trial
dtfmap[d] = {k: dict(v, trial=v['video'])
for k, v in dvfmap[d].items()}
# DataSet
ds_type = 'tracking-video'
ds_name = '{}_{}_{}'.format(h2o, sdate, ds_type)
ds_key = {'dataset_name': ds_name, 'globus_alias': globus_alias}
if (DataSet & ds_key):
log.info('DataSet: {} already exists. Skipping.'.format(
ds_key))
return
DataSet.insert1({**ds_key, 'dataset_type': ds_type},
allow_direct_insert=True)
# ArchivedSession
as_key = {k: v for k, v in smap[skey].items()
if k in ArchivedSession.primary_key}
ArchivedSession.insert1(
{**as_key, 'globus_alias': globus_alias},
allow_direct_insert=True,
skip_duplicates=True)
for d in dtfmap:
# ArchivedTrackingVideo
atv_key = {**as_key, **ds_key, 'tracking_device': tpos_dev[d]}
ArchivedTrackingVideo.insert1(
atv_key, allow_direct_insert=True)
for t in dtfmap[d]:
for f in dtfmap[d][t]:
DataSet.PhysicalFile.insert1(
{**ds_key, **f}, allow_direct_insert=True,
ignore_extra_fields=True)
ArchivedTrackingVideo.TrialVideo.insert1(
{**atv_key, **ds_key,
'trial': t,
'file_subpath': f['file_subpath']},
allow_direct_insert=True)
# end commit()
for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)):
vdir = re.match('([a-z]+[0-9]+)/([0-9]{8})/video', dirname)
if not vdir or node['DATA_TYPE'] != 'file':
continue
h2o, sdate = vdir[1], vdir[2]
skey_i = '{}/{}'.format(h2o, sdate)
if skey_i != skey:
if skey and skey in smap:
with dj.conn().transaction:
try:
commit(skey, sfiles)
except Exception as e:
log.error(
'Exception {} committing {}. files: {}'.format(
repr(e), skey, sfiles))
skey, sfiles = skey_i, []
if skey not in smap:
if skey not in sskip:
log.debug('session {} not known. skipping'.format(skey))
sskip.add(skey)
continue
fname = node['name']
log.debug('checking {}/{}'.format(dirname, fname))
if '.' not in fname:
log.debug('skipping {} - no dot in fname'.format(fname))
continue
froot, fext = fname.split('.', 1)
ftype = {g['file_type']: g for g in ftmap.values()
if fnmatch(fname, g['file_glob'])}
if len(ftype) != 1:
log.debug('skipping {} - incorrect type matches: {}'.format(
fname, ftype))
continue
ftype = next(iter(ftype.values()))['file_type']
log.debug('processing as {}'.format(ftype))
file_subpath = '{}/{}'.format(dirname, fname)
if ftype == 'tracking-video-map':
# e.g. dl55_20190108_side.txt
h2o_f, fdate, pos = froot.split('_')
sfiles.append({'water_restriction_number': h2o,
'session_date': '{}-{}-{}'.format(
sdate[:4], sdate[4:6], sdate[6:]),
'position': pos,
'file_subpath': file_subpath,
'file_type': ftype})
            else:  # tracking-video-trial
# e.g. dl41_side_998-0000.avi or dl41_side_998-0000_00.avi
h2o_f, pos, video = froot.replace('-', '_').split('_')[:3]
sfiles.append({'water_restriction_number': h2o,
'session_date': '{}-{}-{}'.format(
sdate[:4], sdate[4:6], sdate[6:]),
'position': pos,
'video': int(video),
'file_subpath': file_subpath,
'file_type': ftype})
def make(self, key):
"""
discover files in local endpoint and transfer/register
"""
log.info('ArchivedVideoFile.make(): {}'.format(key))
# {'tracking_device': 'Camera 0', 'subject_id': 432572, 'session': 1}
globus_alias = 'raw-video'
le = GlobusStorageLocation.local_endpoint(globus_alias)
lep, lep_sub, lep_dir = (le['endpoint'],
le['endpoint_subdir'],
le['endpoint_path'])
re = (GlobusStorageLocation & {'globus_alias': globus_alias}).fetch1()
rep, rep_sub = re['globus_endpoint'], re['globus_path']
log.info('local_endpoint: {}:{} -> {}'.format(lep, lep_sub, lep_dir))
log.info('remote_endpoint: {}:{}'.format(rep, rep_sub))
h2o = (lab.WaterRestriction & key).fetch1('water_restriction_number')
session = (experiment.Session & key).fetch1()
sdate = session['session_date']
sdate_sml = "{}{:02d}{:02d}".format(sdate.year, sdate.month, sdate.day)
dev = (tracking.TrackingDevice & key).fetch1()
trls = (experiment.SessionTrial & key).fetch(
order_by='trial', as_dict=True)
tracking_ingest = self.get_ingest()
tdev = dev['tracking_device'] # NOQA: notused
tpos = dev['tracking_position']
camtrial = '{}_{}_{}.txt'.format(h2o, sdate_sml, tpos)
vbase = pathlib.Path(lep_dir, h2o, sdate_sml, 'video')
campath = vbase / camtrial
if not campath.exists(): # XXX: uses 1st found
log.warning('trial map {} n/a! skipping.'.format(campath))
return
log.info('loading trial map: {}'.format(campath))
vmap = {v: k for k, v in
tracking_ingest.TrackingIngest.load_campath(campath).items()}
log.debug('loaded video map: {}'.format(vmap))
# add ArchivedSession
as_key = {k: v for k, v in key.items()
if k in experiment.Session.primary_key}
as_rec = {**as_key, 'globus_alias': globus_alias}
ArchivedSession.insert1(as_rec, allow_direct_insert=True,
skip_duplicates=True)
# add DataSet
ds_type = 'tracking-video'
ds_name = '{}_{}_{}_{}'.format(h2o, sdate.isoformat(), ds_type, tpos)
ds_key = {'globus_alias': globus_alias, 'dataset_name': ds_name}
ds_rec = {**ds_key, 'dataset_type': ds_type}
DataSet.insert1(ds_rec, allow_direct_insert=True)
# add ArchivedVideoTracking
vt_key = {**as_key, 'tracking_device': tdev}
vt_rec = {**vt_key, 'globus_alias': globus_alias,
'dataset_name': ds_name}
self.insert1(vt_rec)
filetype = 'tracking-video-trial'
for t in trls:
trial = t['trial']
log.info('.. tracking trial {} ({})'.format(trial, t))
if t['trial'] not in vmap:
log.warning('trial {} not in video map. skipping!'.format(t))
continue
vmatch = '{}_{}_{}-*'.format(h2o, tpos, vmap[trial])
log.debug('vbase: {}, vmatch: {}'.format(vbase, vmatch))
vglob = list(vbase.glob(vmatch))
if len(vglob) != 1:
emsg = 'incorrect videos found in {}: {}'.format(vbase, vglob)
log.warning(emsg)
raise dj.DataJointError(emsg)
vfile = vglob[0].name
gfile = '{}/{}/{}/{}'.format(
h2o, sdate_sml, 'video', vfile) # subpath
srcp = '{}:{}/{}'.format(lep, lep_sub, gfile) # source path
dstp = '{}:{}/{}'.format(rep, rep_sub, gfile) # dest path
gsm = self.get_gsm()
gsm.activate_endpoint(lep) # XXX: cache / prevent duplicate RPC?
gsm.activate_endpoint(rep) # XXX: cache / prevent duplicate RPC?
log.info('transferring {} to {}'.format(srcp, dstp))
if not gsm.cp(srcp, dstp):
emsg = "couldn't transfer {} to {}".format(srcp, dstp)
log.error(emsg)
raise dj.DataJointError(emsg)
pf_key = {**ds_key, 'file_subpath': vfile}
pf_rec = {**pf_key, 'file_type': filetype}
DataSet.PhysicalFile.insert1({**pf_rec}, allow_direct_insert=True)
trk_key = {k: v for k, v in {**key, 'trial': trial}.items()
if k in experiment.SessionTrial.primary_key}
tv_rec = {**vt_key, **trk_key, **pf_key}
self.TrialVideo.insert1({**tv_rec})
def test_flist(fname='globus-index-full.txt'):
'''
spoof tester for discover methods
expects:
f: ep:/path/to/file
d: ep:/path/to/direct
etc. (aka: globus-shell 'find' output)
replace the line:
for ep, dirname, node in gsm.fts('{}:{}'.format(rep, rep_sub)):
with:
for ep, dirname, node in test_flist('globus-list.txt'):
to test against the file 'globus-list.txt'
'''
with open(fname, 'r') as infile:
for l in infile:
try:
t, fp = l.split(' ')
fp = fp.split(':')[1].lstrip('/').rstrip('\n')
dn, bn = os.path.split(fp)
if t == 'f:':
yield ('ep', dn, {'DATA_TYPE': 'file', 'name': bn})
else:
yield ('ep', dn, {'DATA_TYPE': 'dunno', 'path': bn})
except ValueError as e:
if 'too many values' in repr(e):
pass
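# Illustrative only -- a few input lines in the format test_flist() expects in
# 'globus-list.txt' (endpoint name and paths are made up):
#   f: ep:/dl55/20190108/1/dl55_g0_t0.imec.ap.bin
#   f: ep:/dl55/20190108/video/dl55_side_998-0000.avi
#   d: ep:/dl55/20190108/video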
| 0
| 36,662
| 0
| 0
| 936
| 0
| 0
| 41
| 430
|
31ae962fdd5c782121ff85fe6854a2b889e2cbfd
| 2,334
|
py
|
Python
|
docs/source/examples/7/sample.py
|
kumar-pratik/hi-ml
|
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
|
[
"MIT"
] | 34
|
2021-08-18T13:27:36.000Z
|
2022-03-26T01:25:36.000Z
|
docs/source/examples/7/sample.py
|
kumar-pratik/hi-ml
|
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
|
[
"MIT"
] | 111
|
2021-08-18T13:19:46.000Z
|
2022-03-30T05:57:01.000Z
|
docs/source/examples/7/sample.py
|
kumar-pratik/hi-ml
|
a108cf4ea244a76127adedc0ca60f0a5afdfb3e8
|
[
"MIT"
] | 6
|
2021-09-13T12:07:58.000Z
|
2022-03-24T16:31:06.000Z
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# From:
# https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py
if __name__ == "__main__":
main()
| 39.559322
| 169
| 0.654242
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# From:
# https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/ml-frameworks/scikit-learn/train-hyperparameter-tune-deploy-with-sklearn/train_iris.py
import argparse
from pathlib import Path
import numpy as np
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from health_azure import submit_to_azure_if_needed
def main() -> None:
run_info = submit_to_azure_if_needed(
compute_cluster_name="lite-testing-ds2",
default_datastore="himldatasets",
input_datasets=["himl_sample7_input"],
wait_for_completion=True,
wait_for_completion_show_output=True)
parser = argparse.ArgumentParser()
parser.add_argument('--kernel', type=str, default='linear',
help='Kernel type to be used in the algorithm')
parser.add_argument('--penalty', type=float, default=1.0,
help='Penalty parameter of the error term')
args = parser.parse_args()
print(f'Kernel type:{args.kernel}')
print(f'Penalty: {args.penalty}')
# X -> features, y -> label
input_folder = run_info.input_datasets[0] or Path("dataset")
X = np.loadtxt(fname=input_folder / "X.csv", delimiter=',', skiprows=1)
y = np.loadtxt(fname=input_folder / "y.csv", dtype='str', delimiter=',', skiprows=1)
# dividing X, y into train and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# training a linear SVM classifier
from sklearn.svm import SVC
svm_model_linear = SVC(kernel=args.kernel, C=args.penalty).fit(X_train, y_train)
svm_predictions = svm_model_linear.predict(X_test)
# model accuracy for X_test
accuracy = svm_model_linear.score(X_test, y_test)
print('Accuracy of SVM classifier on test set: {:.2f}'.format(accuracy))
# creating a confusion matrix
cm = confusion_matrix(y_test, svm_predictions)
print(cm)
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 1,536
| 0
| 77
| 157
|
6599733b0213579573a907d9fc5ab78c8a716ed8
| 510
|
py
|
Python
|
tests/test_cli.py
|
vfranca/pp
|
db9e15a490e5b28a177cdcd8f448d21fd5bec8d7
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
vfranca/pp
|
db9e15a490e5b28a177cdcd8f448d21fd5bec8d7
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
vfranca/pp
|
db9e15a490e5b28a177cdcd8f448d21fd5bec8d7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
| 26.842105
| 74
| 0.668627
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from click.testing import CliRunner
from pivotpoint import pp
from pivotpoint import cli
class TestPivotPoint(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_command_line_interface(self):
result = self.runner.invoke(cli.main, ["34.80", "32.80", "33.40"])
assert "35.67\n34.54\n33.67\n32.54\n31.67\n" in result.output
| 0
| 0
| 0
| 335
| 0
| 0
| 0
| 17
| 111
|
6c59951f383e22b4b6dd672512b717c4ad1ef094
| 1,086
|
py
|
Python
|
rackio/dao/controls.py
|
crivero7/rackio-framework
|
d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c
|
[
"MIT"
] | null | null | null |
rackio/dao/controls.py
|
crivero7/rackio-framework
|
d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c
|
[
"MIT"
] | null | null | null |
rackio/dao/controls.py
|
crivero7/rackio-framework
|
d3362041b1fc4c3af7eb51ac06b1f0f1b5aa497c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""rackio/dao/controls.py
This module implements Controls Data Objects Access.
"""
| 18.40678
| 52
| 0.57919
|
# -*- coding: utf-8 -*-
"""rackio/dao/controls.py
This module implements Controls Data Objects Access.
"""
from .core import RackioDAO
class ControlsDAO(RackioDAO):
def get_all(self):
app = self.get_app()
manager = app.get_manager("control")
result = list()
for control in manager.get_controls():
result.append(control.serialize())
return result
def get(self, name):
app = self.get_app()
manager = app.get_manager("control")
control = manager.get_control(name)
if control:
return control.serialize()
class RulesDAO(RackioDAO):
def get_all(self):
app = self.get_app()
manager = app.get_manager("control")
result = list()
for rule in manager.get_rules():
result.append(rule.serialize())
return result
def get(self, name):
app = self.get_app()
manager = app.get_manager("control")
rule = manager.get_rule(name)
if rule:
return rule.serialize()
| 0
| 0
| 0
| 902
| 0
| 0
| 0
| 6
| 68
|
b3ba1dada5fdca8c0505e224c5e297d351338eaf
| 721
|
py
|
Python
|
AddPDFBookmarks/handle_pdf.py
|
wanghuohuo0716/py-project
|
b771b8005d72843df1653ce68ddb67ccf77a57a8
|
[
"MIT"
] | 92
|
2018-02-26T07:59:27.000Z
|
2022-03-31T08:57:51.000Z
|
AddPDFBookmarks/handle_pdf.py
|
Linkeer365/py-project
|
b771b8005d72843df1653ce68ddb67ccf77a57a8
|
[
"MIT"
] | 2
|
2020-08-19T00:55:52.000Z
|
2021-03-08T07:37:32.000Z
|
AddPDFBookmarks/handle_pdf.py
|
Linkeer365/py-project
|
b771b8005d72843df1653ce68ddb67ccf77a57a8
|
[
"MIT"
] | 53
|
2018-09-07T14:26:33.000Z
|
2022-03-31T08:57:53.000Z
|
# coding:utf-8
# Add PDF bookmarks
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
if __name__ == '__main__':
main()
| 31.347826
| 88
| 0.71706
|
# coding:utf-8
# Add PDF bookmarks
from pdf_utils import MyPDFHandler,PDFHandleMode as mode
import ConfigParser
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
def main():
    # Read configuration from the config file
cf = ConfigParser.SafeConfigParser()
cf.read('./info.conf')
pdf_path = cf.get('info','pdf_path')
bookmark_file_path = cf.get('info','bookmark_file_path')
page_offset = cf.getint('info','page_offset')
new_pdf_file_name = cf.get('info','new_pdf_file_name')
pdf_handler = MyPDFHandler(pdf_path,mode = mode.NEWLY)
pdf_handler.add_bookmarks_by_read_txt(bookmark_file_path,page_offset = page_offset)
pdf_handler.save2file(new_pdf_file_name)
if __name__ == '__main__':
main()
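# Illustrative only -- a minimal sketch of the './info.conf' file this script
# reads; all values are placeholders, not from the original project:
#   [info]
#   pdf_path = ./book.pdf
#   bookmark_file_path = ./bookmarks.txt
#   page_offset = 12
#   new_pdf_file_name = book_with_bookmarks.pdf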
| 48
| 0
| 0
| 0
| 0
| 480
| 0
| 33
| 71
|
28cd58401f73165d35b3541ef644b2fdcb572816
| 1,638
|
py
|
Python
|
openff/utilities/testing.py
|
openforcefield/openff-utilities
|
89255d6cc9513df6ad5293841e86ab1f968f7e3b
|
[
"MIT"
] | null | null | null |
openff/utilities/testing.py
|
openforcefield/openff-utilities
|
89255d6cc9513df6ad5293841e86ab1f968f7e3b
|
[
"MIT"
] | 17
|
2021-06-09T06:46:20.000Z
|
2022-03-02T00:30:41.000Z
|
openff/utilities/testing.py
|
openforcefield/openff-utilities
|
89255d6cc9513df6ad5293841e86ab1f968f7e3b
|
[
"MIT"
] | null | null | null |
from typing import List, Optional, Union
import pytest
from openff.utilities.utilities import has_executable, has_package
def skip_if_missing(package_name: str, reason: Optional[str] = None):
"""
Helper function to generate a pytest.mark.skipif decorator
for any package. This allows tests to be skipped if some
optional dependency is not found.
Parameters
----------
package_name : str
The name of the package that is required for a test(s)
reason : str, optional
        Explanation of why the test is to be skipped
Returns
-------
requires_package : _pytest.mark.structures.MarkDecorator
A pytest decorator that will skip tests if the package is not available
"""
if not reason:
reason = f"Package {package_name} is required, but was not found."
requires_package = pytest.mark.skipif(not has_package(package_name), reason=reason)
return requires_package
def skip_if_missing_exec(exec: Union[str, List[str]]):
"""Helper function to generate a pytest.mark.skipif decorator
if an executable(s) is not found."""
if isinstance(exec, str):
execs: List = [exec]
elif isinstance(exec, list):
execs: List = exec # type: ignore[no-redef]
else:
raise ValueError(
"Bad type passed to skip_if_missing_exec. " f"Found type {type(exec)}"
)
found_exec = False
for exec_ in execs:
found_exec = found_exec or has_executable(exec_)
reason = f"Package {str(exec)} is required, but was not found."
mark = pytest.mark.skipif(not found_exec, reason=reason)
return mark
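# Illustrative usage sketch only (a hypothetical test module, not part of this
# file); the package and executable names are placeholders:
#
#   @skip_if_missing("some_optional_package")
#   def test_needs_package():
#       ...
#
#   @skip_if_missing_exec(["some_required_tool"])
#   def test_needs_executable():
#       ...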
| 32.117647
| 87
| 0.681319
|
from typing import List, Optional, Union
import pytest
from openff.utilities.utilities import has_executable, has_package
def skip_if_missing(package_name: str, reason: Optional[str] = None):
"""
Helper function to generate a pytest.mark.skipif decorator
for any package. This allows tests to be skipped if some
optional dependency is not found.
Parameters
----------
package_name : str
The name of the package that is required for a test(s)
reason : str, optional
        Explanation of why the test is to be skipped
Returns
-------
requires_package : _pytest.mark.structures.MarkDecorator
A pytest decorator that will skip tests if the package is not available
"""
if not reason:
reason = f"Package {package_name} is required, but was not found."
requires_package = pytest.mark.skipif(not has_package(package_name), reason=reason)
return requires_package
def skip_if_missing_exec(exec: Union[str, List[str]]):
"""Helper function to generate a pytest.mark.skipif decorator
if an executable(s) is not found."""
if isinstance(exec, str):
execs: List = [exec]
elif isinstance(exec, list):
execs: List = exec # type: ignore[no-redef]
else:
raise ValueError(
"Bad type passed to skip_if_missing_exec. " f"Found type {type(exec)}"
)
found_exec = False
for exec_ in execs:
found_exec = found_exec or has_executable(exec_)
reason = f"Package {str(exec)} is required, but was not found."
mark = pytest.mark.skipif(not found_exec, reason=reason)
return mark
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8ef5b889f2b72b0ee8d71a6019e587e98cab7064
| 5,122
|
py
|
Python
|
projects/Show me the Data Structures/problem_1.py
|
gmendozah/Data-Structures-and-Algorithms
|
07474db45acfe42855cc0f4cc968c0564b2cb91a
|
[
"MIT"
] | 5
|
2021-10-08T11:21:08.000Z
|
2022-01-24T22:40:03.000Z
|
projects/Show me the Data Structures/problem_1.py
|
gmendozah/Data-Structures-and-Algorithms
|
07474db45acfe42855cc0f4cc968c0564b2cb91a
|
[
"MIT"
] | null | null | null |
projects/Show me the Data Structures/problem_1.py
|
gmendozah/Data-Structures-and-Algorithms
|
07474db45acfe42855cc0f4cc968c0564b2cb91a
|
[
"MIT"
] | 3
|
2021-12-13T06:50:58.000Z
|
2022-02-05T03:38:49.000Z
|
if __name__ == '__main__':
test_case_1()
test_case_2()
test_case_3()
| 31.423313
| 115
| 0.557595
|
class Node:
def __init__(self, key=None, value=None):
self.key = key
self.value = value
self.next = None
self.prev = None
class LRU_Cache(object):
def __init__(self, capacity):
if capacity < 1:
print('LRUCache should have capacity > 0')
return
# Initialize class variables
self.capacity = capacity
self.size = 0
self.map = dict()
self.head = None # this node represents the least recently used
self.tail = None # this node represents the most recently used
def get_capacity(self):
return self.capacity
def get(self, key):
# Retrieve item from provided key. Return -1 if nonexistent.
if key is None:
return -1
elif key not in self.map:
return -1
else:
node = self.map[key]
self.move_to_front(node)
return node.value
def set(self, key, value):
# Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item.
"""
        First, we validate the input key.
        Second, we check whether the key is already in the map.
        If the key is in the map:
            we update the node value and move it to the front.
        If it is not in the map:
            we create a new node and set it in the map.
        Third, we check whether the cache capacity has been exceeded.
"""
if key is None or value is None:
return -1
elif key not in self.map:
node = Node(key, value)
self.map[key] = node
self.add(node)
else:
node = self.map[key]
node.value = value
self.move_to_front(node)
if self.capacity < 0:
self.remove_lru()
def move_to_front(self, node):
self.remove(node)
self.add(node)
def add(self, node):
# add data to the next attribute of the tail (i.e. the end of the queue)
# if head and tail have no values
if self.head is None or self.tail is None:
self.head = node
self.tail = node
# if the linked list has values already
else:
node.next = self.head
node.prev = None
self.head.prev = node
self.head = node
self.capacity = self.capacity - 1
def remove(self, node):
# if the node we want to delete is the head
if self.head.key == node.key:
next_node = self.head.next
self.head = next_node
# if the node we want to delete is the tail
elif self.tail.key == node.key:
prev_node = self.tail.prev
self.tail = prev_node
# if none of the above happens
else:
prev_node = node.prev
next_node = node.next
prev_node.next = next_node
next_node.prev = prev_node
self.capacity += 1
def remove_lru(self):
node = self.tail
self.remove(node)
del self.map[node.key]
if __name__ == '__main__':
def test_case_1(): # invalid length cache test case
our_cache = LRU_Cache(-1)
# should show an invalid capacity value
def test_case_2(): # normal length cache test case
our_cache = LRU_Cache(5)
our_cache.set(1, 11)
our_cache.set(2, 22)
our_cache.set(3, 33)
our_cache.set(4, 44)
our_cache.set(5, 55)
our_cache.set(6, 66)
our_cache.set(7, 77)
print(our_cache.get(1)) # returns -1
print(our_cache.get(2)) # returns -1
print(our_cache.get(3)) # returns 33
print(our_cache.get(7)) # returns 77
print(our_cache.get(6)) # returns 66
print(our_cache.get(4)) # returns 44
our_cache.set(8, 88)
print(our_cache.get(5)) # returns -1
def test_case_3(): # short cache test case
our_cache = LRU_Cache(3)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(our_cache.get(4)) # Expected Value = 4
print(our_cache.get(1)) # Expected Value = -1
our_cache.set(2, 4)
print(our_cache.get(2)) # Expected Value = 4
our_cache.set(5, 5)
print(our_cache.get(3)) # Expected Value = -1
print(our_cache.get(5)) # Expected Value = 5
our_cache.set(2, 6)
print(our_cache.get(2)) # Expected Value = 6
our_cache.set(6, 6)
print(our_cache.get(4)) # Expected Value = -1
print(our_cache.get(6)) # Expected Value = 6
our_cache.set(5, 10)
our_cache.set(7, 7)
print(our_cache.get(2)) # Expected Value = -1
print(our_cache.get(7)) # Expected Value = 7
print(our_cache.get(6)) # Expected Value = 6
print(our_cache.get(5)) # Expected Value = 10
print(our_cache.get(5)) # Expected Value = 10
our_cache.set(8, 8)
print(our_cache.get(7)) # Expected Value = -1
test_case_1()
test_case_2()
test_case_3()
| 0
| 0
| 0
| 3,064
| 0
| 1,858
| 0
| 0
| 125
|
8246e38f21bcb46e020f248157401bcc92b6f2e7
| 45,802
|
py
|
Python
|
hydrus/tests/test_app.py
|
vcode11/hydrus
|
4ed8ada7ed8fd7d8897e744bae410b312f4cfb83
|
[
"MIT"
] | 1
|
2019-12-04T12:54:21.000Z
|
2019-12-04T12:54:21.000Z
|
hydrus/tests/test_app.py
|
vcode11/hydrus
|
4ed8ada7ed8fd7d8897e744bae410b312f4cfb83
|
[
"MIT"
] | 3
|
2019-12-21T04:15:23.000Z
|
2020-04-07T05:11:05.000Z
|
hydrus/tests/test_app.py
|
vcode11/hydrus
|
4ed8ada7ed8fd7d8897e744bae410b312f4cfb83
|
[
"MIT"
] | null | null | null |
"""Test for checking if the response format is proper. Run test_crud before running this."""
import unittest
import random
import string
from hydra_python_core.doc_writer import HydraLink
def gen_dummy_object(class_title, doc):
"""Create a dummy object based on the definitions in the API Doc.
:param class_title: Title of the class whose object is being created.
:param doc: ApiDoc.
:return: A dummy object of class `class_title`.
"""
object_ = {
"@type": class_title
}
for class_path in doc.parsed_classes:
if class_title == doc.parsed_classes[class_path]["class"].title:
for prop in doc.parsed_classes[class_path]["class"].supportedProperty:
if isinstance(prop.prop, HydraLink) or prop.write is False:
continue
if "vocab:" in prop.prop:
prop_class = prop.prop.replace("vocab:", "")
object_[prop.title] = gen_dummy_object(prop_class, doc)
else:
object_[prop.title] = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(6))
return object_
if __name__ == '__main__':
message = """
Running tests for the app. Checking if all responses are in proper order.
"""
unittest.main()
| 51.929705
| 100
| 0.550696
|
"""Test for checking if the response format is proper. Run test_crud before running this."""
import unittest
import random
import string
import json
import re
import uuid
from hydrus.app_factory import app_factory
from hydrus.socketio_factory import create_socket
from hydrus.utils import set_session, set_doc, set_api_name, set_page_size
from hydrus.data import doc_parse, crud
from hydra_python_core import doc_maker
from hydra_python_core.doc_writer import HydraLink
from hydrus.samples import doc_writer_sample
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from hydrus.data.db_models import Base
def gen_dummy_object(class_title, doc):
"""Create a dummy object based on the definitions in the API Doc.
:param class_title: Title of the class whose object is being created.
:param doc: ApiDoc.
:return: A dummy object of class `class_title`.
"""
object_ = {
"@type": class_title
}
for class_path in doc.parsed_classes:
if class_title == doc.parsed_classes[class_path]["class"].title:
for prop in doc.parsed_classes[class_path]["class"].supportedProperty:
if isinstance(prop.prop, HydraLink) or prop.write is False:
continue
if "vocab:" in prop.prop:
prop_class = prop.prop.replace("vocab:", "")
object_[prop.title] = gen_dummy_object(prop_class, doc)
else:
object_[prop.title] = ''.join(random.choice(
string.ascii_uppercase + string.digits) for _ in range(6))
return object_
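# Illustrative only -- rough shape of what gen_dummy_object() returns for a
# class with one literal and one nested (vocab:) property; titles and values
# below are placeholders:
#   {"@type": "SomeClass",
#    "SomeLiteralProp": "X7K2QD",
#    "SomeNestedProp": {"@type": "OtherClass", ...}}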
class ViewsTestCase(unittest.TestCase):
"""Test Class for the app."""
@classmethod
def setUpClass(self):
"""Database setup before the tests."""
print("Creating a temporary database...")
engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))
self.session = session
self.API_NAME = "demoapi"
self.page_size = 1
self.HYDRUS_SERVER_URL = "http://hydrus.com/"
self.app = app_factory(self.API_NAME)
self.socketio = create_socket(self.app, self.session)
print("going for create doc")
self.doc = doc_maker.create_doc(
doc_writer_sample.api_doc.generate(),
self.HYDRUS_SERVER_URL,
self.API_NAME)
test_classes = doc_parse.get_classes(self.doc.generate())
test_properties = doc_parse.get_all_properties(test_classes)
doc_parse.insert_classes(test_classes, self.session)
doc_parse.insert_properties(test_properties, self.session)
print("Classes and properties added successfully.")
print("Setting up hydrus utilities... ")
self.api_name_util = set_api_name(self.app, self.API_NAME)
self.session_util = set_session(self.app, self.session)
self.doc_util = set_doc(self.app, self.doc)
self.page_size_util = set_page_size(self.app, self.page_size)
self.client = self.app.test_client()
print("Creating utilities context... ")
self.api_name_util.__enter__()
self.session_util.__enter__()
self.doc_util.__enter__()
self.client.__enter__()
print("Setup done, running tests...")
@classmethod
def tearDownClass(self):
"""Tear down temporary database and exit utilities"""
self.client.__exit__(None, None, None)
self.doc_util.__exit__(None, None, None)
self.session_util.__exit__(None, None, None)
self.api_name_util.__exit__(None, None, None)
self.session.close()
def setUp(self):
for class_ in self.doc.parsed_classes:
link_props = {}
class_title = self.doc.parsed_classes[class_]["class"].title
dummy_obj = gen_dummy_object(class_title, self.doc)
for supportedProp in self.doc.parsed_classes[class_]['class'].supportedProperty:
if isinstance(supportedProp.prop, HydraLink):
class_name = supportedProp.prop.range.replace("vocab:", "")
for collection_path in self.doc.collections:
coll_class = self.doc.collections[
collection_path]['collection'].class_.title
if class_name == coll_class:
id_ = str(uuid.uuid4())
crud.insert(
gen_dummy_object(class_name, self.doc),
id_=id_,
session=self.session)
link_props[supportedProp.title] = id_
dummy_obj[supportedProp.title] = "{}/{}/{}".format(
self.API_NAME, collection_path, id_)
crud.insert(
dummy_obj,
id_=str(
uuid.uuid4()),
link_props=link_props,
session=self.session)
# If it's a collection class then add an extra object so
# we can test pagination thoroughly.
if class_ in self.doc.collections:
crud.insert(
dummy_obj,
id_=str(
uuid.uuid4()),
session=self.session)
def test_Index(self):
"""Test for the index."""
response_get = self.client.get("/{}".format(self.API_NAME))
endpoints = json.loads(response_get.data.decode('utf-8'))
response_post = self.client.post(
"/{}".format(self.API_NAME), data=dict(foo="bar"))
response_put = self.client.put(
"/{}".format(self.API_NAME), data=dict(foo="bar"))
response_delete = self.client.delete("/{}".format(self.API_NAME))
assert "@context" in endpoints
assert endpoints["@id"] == "/{}".format(self.API_NAME)
assert endpoints["@type"] == "EntryPoint"
assert response_get.status_code == 200
assert response_post.status_code == 405
assert response_put.status_code == 405
assert response_delete.status_code == 405
def test_EntryPoint_context(self):
"""Test for the EntryPoint context."""
response_get = self.client.get(
"/{}/contexts/EntryPoint.jsonld".format(self.API_NAME))
response_get_data = json.loads(response_get.data.decode('utf-8'))
response_post = self.client.post(
"/{}/contexts/EntryPoint.jsonld".format(self.API_NAME), data={})
response_delete = self.client.delete(
"/{}/contexts/EntryPoint.jsonld".format(self.API_NAME))
assert response_get.status_code == 200
assert "@context" in response_get_data
assert response_post.status_code == 405
assert response_delete.status_code == 405
def test_Vocab(self):
"""Test the vocab."""
response_get = self.client.get("/{}/vocab#".format(self.API_NAME))
response_get_data = json.loads(response_get.data.decode('utf-8'))
assert "@context" in response_get_data
assert response_get_data["@type"] == "ApiDocumentation"
assert response_get_data["@id"] == "{}{}/vocab".format(
self.HYDRUS_SERVER_URL, self.API_NAME)
assert response_get.status_code == 200
response_delete = self.client.delete(
"/{}/vocab#".format(self.API_NAME))
assert response_delete.status_code == 405
response_put = self.client.put(
"/{}/vocab#".format(self.API_NAME), data=json.dumps(dict(foo='bar')))
assert response_put.status_code == 405
response_post = self.client.post(
"/{}/vocab#".format(self.API_NAME), data=json.dumps(dict(foo='bar')))
assert response_post.status_code == 405
def test_Collections_GET(self):
"""Test GET on collection endpoints."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
response_get = self.client.get(endpoints[endpoint])
# pdb.set_trace()
assert response_get.status_code == 200
response_get_data = json.loads(
response_get.data.decode('utf-8'))
assert "@context" in response_get_data
assert "@id" in response_get_data
assert "@type" in response_get_data
assert "members" in response_get_data
# Check the item URI has the valid format, so it can be dereferenced
if len(response_get_data["members"]) > 0:
for item in response_get_data["members"]:
class_type = item["@type"]
if class_type in self.doc.parsed_classes:
class_ = self.doc.parsed_classes[class_type]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "GET" in class_methods:
item_response = self.client.get(
response_get_data["members"][0]["@id"])
assert item_response.status_code == 200
def test_pagination(self):
"""Test basic pagination"""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
response_get = self.client.get(endpoints[endpoint])
assert response_get.status_code == 200
response_get_data = json.loads(
response_get.data.decode('utf-8'))
assert "view" in response_get_data
assert "first" in response_get_data["view"]
assert "last" in response_get_data["view"]
if "next" in response_get_data["view"]:
response_next = self.client.get(response_get_data["view"]["next"])
assert response_next.status_code == 200
response_next_data = json.loads(
response_next.data.decode('utf-8'))
assert "previous" in response_next_data["view"]
break
def test_Collections_PUT(self):
"""Test insert data to the collection."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
collection = self.doc.collections[collection_name]["collection"]
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
good_response_put = self.client.put(
endpoints[endpoint], data=json.dumps(dummy_object))
assert good_response_put.status_code == 201
def test_object_POST(self):
"""Test replace of a given object using ID."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
collection = self.doc.collections[collection_name]["collection"]
class_ = self.doc.parsed_classes[collection.class_.title]["class"]
class_methods = [x.method for x in class_.supportedOperation]
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
initial_put_response = self.client.put(
endpoints[endpoint], data=json.dumps(dummy_object))
assert initial_put_response.status_code == 201
response = json.loads(
initial_put_response.data.decode('utf-8'))
regex = r'(.*)ID (.{36})* (.*)'
matchObj = re.match(regex, response["description"])
assert matchObj is not None
id_ = matchObj.group(2)
if "POST" in class_methods:
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
post_replace_response = self.client.post(
'{}/{}'.format(endpoints[endpoint], id_), data=json.dumps(dummy_object))
assert post_replace_response.status_code == 200
def test_object_DELETE(self):
"""Test DELETE of a given object using ID."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
collection = self.doc.collections[collection_name]["collection"]
class_ = self.doc.parsed_classes[collection.class_.title]["class"]
class_methods = [x.method for x in class_.supportedOperation]
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
initial_put_response = self.client.put(
endpoints[endpoint], data=json.dumps(dummy_object))
assert initial_put_response.status_code == 201
response = json.loads(
initial_put_response.data.decode('utf-8'))
regex = r'(.*)ID (.{36})* (.*)'
matchObj = re.match(regex, response["description"])
assert matchObj is not None
id_ = matchObj.group(2)
if "DELETE" in class_methods:
delete_response = self.client.delete(
'{}/{}'.format(endpoints[endpoint], id_))
assert delete_response.status_code == 200
def test_object_PUT_at_id(self):
"""Create object in collection using PUT at specific ID."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
collection = self.doc.collections[collection_name]["collection"]
class_ = self.doc.parsed_classes[collection.class_.title]["class"]
class_methods = [x.method for x in class_.supportedOperation]
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
if "PUT" in class_methods:
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
put_response = self.client.put('{}/{}'.format(
endpoints[endpoint], uuid.uuid4()), data=json.dumps(dummy_object))
assert put_response.status_code == 201
def test_object_PUT_at_ids(self):
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
collection = self.doc.collections[collection_name]["collection"]
class_ = self.doc.parsed_classes[collection.class_.title]["class"]
class_methods = [x.method for x in class_.supportedOperation]
data_ = {"data": list()}
objects = list()
ids = ""
for index in range(3):
objects.append(gen_dummy_object(
collection.class_.title, self.doc))
                    ids += "{},".format(uuid.uuid4())
data_["data"] = objects
if "PUT" in class_methods:
put_response = self.client.put(
'{}/add/{}'.format(endpoints[endpoint], ids),
data=json.dumps(data_))
assert put_response.status_code == 201
def test_endpointClass_PUT(self):
"""Check non collection Class PUT."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "PUT" in class_methods:
dummy_object = gen_dummy_object(class_.title, self.doc)
put_response = self.client.put(
endpoints[endpoint], data=json.dumps(dummy_object))
assert put_response.status_code == 201
def test_endpointClass_POST(self):
"""Check non collection Class POST."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "POST" in class_methods:
dummy_object = gen_dummy_object(class_.title, self.doc)
post_response = self.client.post(
endpoints[endpoint], data=json.dumps(dummy_object))
assert post_response.status_code == 200
def test_endpointClass_DELETE(self):
"""Check non collection Class DELETE."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "DELETE" in class_methods:
delete_response = self.client.delete(
endpoints[endpoint])
assert delete_response.status_code == 200
def test_endpointClass_GET(self):
"""Check non collection Class GET."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "GET" in class_methods:
response_get = self.client.get(endpoints[endpoint])
assert response_get.status_code == 200
response_get_data = json.loads(
response_get.data.decode('utf-8'))
assert "@context" in response_get_data
assert "@id" in response_get_data
assert "@type" in response_get_data
def test_IriTemplate(self):
"""Test structure of IriTemplates attached to collections"""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
response_get = self.client.get(endpoints[endpoint])
assert response_get.status_code == 200
response_get_data = json.loads(
response_get.data.decode('utf-8'))
assert "search" in response_get_data
assert "mapping" in response_get_data["search"]
collection = self.doc.collections[collection_name]["collection"]
class_ = self.doc.parsed_classes[collection.class_.title]["class"]
class_props = [x.prop for x in class_.supportedProperty]
for mapping in response_get_data["search"]["mapping"]:
if mapping["property"] not in ["limit", "offset", "pageIndex"]:
assert mapping["property"] in class_props
def test_client_controlled_pagination(self):
"""Test pagination controlled by client with help of pageIndex,
offset and limit parameters."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
response_get = self.client.get(endpoints[endpoint])
assert response_get.status_code == 200
response_get_data = json.loads(
response_get.data.decode('utf-8'))
assert "search" in response_get_data
assert "mapping" in response_get_data["search"]
# Test with pageIndex and limit
params = {"pageIndex": 1, "limit": 2}
response_for_page_param = self.client.get(endpoints[endpoint], query_string=params)
assert response_for_page_param.status_code == 200
response_for_page_param_data = json.loads(
response_for_page_param.data.decode('utf-8'))
assert "first" in response_for_page_param_data["view"]
assert "last" in response_for_page_param_data["view"]
if "next" in response_for_page_param_data["view"]:
assert "pageIndex=2" in response_for_page_param_data["view"]["next"]
next_response = self.client.get(response_for_page_param_data["view"]["next"])
assert next_response.status_code == 200
next_response_data = json.loads(
next_response.data.decode('utf-8'))
assert "previous" in next_response_data["view"]
assert "pageIndex=1" in next_response_data["view"]["previous"]
# Test with offset and limit
params = {"offset": 1, "limit": 2}
response_for_offset_param = self.client.get(endpoints[endpoint],
query_string=params)
assert response_for_offset_param.status_code == 200
response_for_offset_param_data = json.loads(
response_for_offset_param.data.decode('utf-8'))
assert "first" in response_for_offset_param_data["view"]
assert "last" in response_for_offset_param_data["view"]
if "next" in response_for_offset_param_data["view"]:
assert "offset=3" in response_for_offset_param_data["view"]["next"]
next_response = self.client.get(
response_for_offset_param_data["view"]["next"])
assert next_response.status_code == 200
next_response_data = json.loads(
next_response.data.decode('utf-8'))
assert "previous" in next_response_data["view"]
assert "offset=1" in next_response_data["view"]["previous"]
def test_GET_for_nested_class(self):
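        """Test GET for a non-collection class, including nested and linked class properties."""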
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "GET" in class_methods:
response_get = self.client.get(endpoints[endpoint])
assert response_get.status_code == 200
response_get_data = json.loads(
response_get.data.decode('utf-8'))
assert "@context" in response_get_data
assert "@id" in response_get_data
assert "@type" in response_get_data
class_props = [x for x in class_.supportedProperty]
for prop_name in class_props:
if isinstance(prop_name.prop, HydraLink) and prop_name.read is True:
nested_obj_resp = self.client.get(
response_get_data[prop_name.title])
assert nested_obj_resp.status_code == 200
nested_obj = json.loads(
nested_obj_resp.data.decode('utf-8'))
assert "@type" in nested_obj
elif "vocab:" in prop_name.prop:
assert "@type" in response_get_data[prop_name.title]
def test_required_props(self):
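        """Check that PUT fails with 400 when a required property is missing."""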
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "PUT" in class_methods:
dummy_object = gen_dummy_object(class_.title, self.doc)
required_prop = ""
for prop in class_.supportedProperty:
if prop.required:
required_prop = prop.title
break
if required_prop:
del dummy_object[required_prop]
put_response = self.client.put(
endpoints[endpoint], data=json.dumps(dummy_object))
assert put_response.status_code == 400
def test_writeable_props(self):
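        """Check writeable and non-writeable properties for POST operations."""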
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "POST" in class_methods:
dummy_object = gen_dummy_object(class_.title, self.doc)
# Test for writeable properties
post_response = self.client.post(
endpoints[endpoint], data=json.dumps(dummy_object))
assert post_response.status_code == 200
# Test for properties with writeable=False
non_writeable_prop = ""
for prop in class_.supportedProperty:
if prop.write is False:
non_writeable_prop = prop.title
break
if non_writeable_prop != "":
dummy_object[non_writeable_prop] = "xyz"
post_response = self.client.post(
endpoints[endpoint], data=json.dumps(dummy_object))
assert post_response.status_code == 405
def test_readable_props(self):
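        """Check that properties with read=False are not exposed in GET responses."""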
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "GET" in class_methods:
not_readable_prop = ""
for prop in class_.supportedProperty:
if prop.read is False:
not_readable_prop = prop.title
break
if not_readable_prop:
get_response = self.client.get(
endpoints[endpoint])
get_response_data = json.loads(
get_response.data.decode('utf-8'))
assert not_readable_prop not in get_response_data
def test_bad_objects(self):
"""Checks if bad objects are added or not."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
bad_response_put = self.client.put(
endpoints[endpoint],
data=json.dumps(
dict(
foo='bar')))
assert bad_response_put.status_code == 400
def test_bad_requests(self):
"""Checks if bad requests are handled or not."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
collection = self.doc.collections[collection_name]["collection"]
class_ = self.doc.parsed_classes[collection.class_.title]["class"]
class_methods = [x.method for x in class_.supportedOperation]
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
initial_put_response = self.client.put(
endpoints[endpoint], data=json.dumps(dummy_object))
assert initial_put_response.status_code == 201
response = json.loads(
initial_put_response.data.decode('utf-8'))
regex = r'(.*)ID (.{36})* (.*)'
matchObj = re.match(regex, response["description"])
assert matchObj is not None
id_ = matchObj.group(2)
if "POST" not in class_methods:
dummy_object = gen_dummy_object(
collection.class_.title, self.doc)
post_replace_response = self.client.post(
'{}/{}'.format(endpoints[endpoint], id_), data=json.dumps(dummy_object))
assert post_replace_response.status_code == 405
if "DELETE" not in class_methods:
delete_response = self.client.delete(
'{}/{}'.format(endpoints[endpoint], id_))
assert delete_response.status_code == 405
def test_Endpoints_Contexts(self):
"""Test all endpoints contexts are generated properly."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
collection_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if collection_name in self.doc.collections:
response_get = self.client.get(endpoints[endpoint])
assert response_get.status_code == 200
context = json.loads(
response_get.data.decode('utf-8'))["@context"]
response_context = self.client.get(context)
response_context_data = json.loads(
response_context.data.decode('utf-8'))
assert response_context.status_code == 200
assert "@context" in response_context_data
class SocketTestCase(unittest.TestCase):
"""Test Class for socket events and operations."""
@classmethod
def setUpClass(self):
"""Database setup before the tests."""
print("Creating a temporary database...")
engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = scoped_session(sessionmaker(bind=engine))
self.session = session
self.API_NAME = "demoapi"
self.page_size = 1
self.HYDRUS_SERVER_URL = "http://hydrus.com/"
self.app = app_factory(self.API_NAME)
self.socketio = create_socket(self.app, self.session)
print("going for create doc")
self.doc = doc_maker.create_doc(
doc_writer_sample.api_doc.generate(),
self.HYDRUS_SERVER_URL,
self.API_NAME)
test_classes = doc_parse.get_classes(self.doc.generate())
test_properties = doc_parse.get_all_properties(test_classes)
doc_parse.insert_classes(test_classes, self.session)
doc_parse.insert_properties(test_properties, self.session)
print("Classes and properties added successfully.")
print("Setting up hydrus utilities... ")
self.api_name_util = set_api_name(self.app, self.API_NAME)
self.session_util = set_session(self.app, self.session)
self.doc_util = set_doc(self.app, self.doc)
self.page_size_util = set_page_size(self.app, self.page_size)
self.client = self.app.test_client()
self.socketio_client = self.socketio.test_client(self.app, namespace='/sync')
print("Creating utilities context... ")
self.api_name_util.__enter__()
self.session_util.__enter__()
self.doc_util.__enter__()
self.client.__enter__()
print("Setup done, running tests...")
@classmethod
def tearDownClass(self):
"""Tear down temporary database and exit utilities"""
self.client.__exit__(None, None, None)
self.doc_util.__exit__(None, None, None)
self.session_util.__exit__(None, None, None)
self.api_name_util.__exit__(None, None, None)
self.session.close()
def setUp(self):
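        """Insert dummy objects and modification records before each test."""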
for class_ in self.doc.parsed_classes:
class_title = self.doc.parsed_classes[class_]["class"].title
dummy_obj = gen_dummy_object(class_title, self.doc)
crud.insert(
dummy_obj,
id_=str(
uuid.uuid4()),
session=self.session)
# If it's a collection class then add an extra object so
# we can test pagination thoroughly.
if class_ in self.doc.collections:
crud.insert(
dummy_obj,
id_=str(
uuid.uuid4()),
session=self.session)
# Add two dummy modification records
crud.insert_modification_record(method="POST",
resource_url="", session=self.session)
crud.insert_modification_record(method="DELETE",
resource_url="", session=self.session)
def test_connect(self):
"""Test connect event."""
socket_client = self.socketio.test_client(self.app, namespace='/sync')
data = socket_client.get_received('/sync')
assert len(data) > 0
event = data[0]
assert event['name'] == 'connect'
last_job_id = crud.get_last_modification_job_id(self.session)
assert event['args'][0]['last_job_id'] == last_job_id
socket_client.disconnect(namespace='/sync')
def test_reconnect(self):
"""Test reconnect event."""
socket_client = self.socketio.test_client(self.app, namespace='/sync')
# Flush data of first connect event
socket_client.get_received('/sync')
# Client reconnects by emitting 'reconnect' event.
socket_client.emit('reconnect', namespace='/sync')
# Get update received on reconnecting to the server
data = socket_client.get_received('/sync')
assert len(data) > 0
# Extract the event information
event = data[0]
assert event['name'] == 'connect'
last_job_id = crud.get_last_modification_job_id(self.session)
# Check last job id with last_job_id received by client in the update.
assert event['args'][0]['last_job_id'] == last_job_id
socket_client.disconnect(namespace='/sync')
def test_modification_table_diff(self):
"""Test 'modification-table-diff' events."""
# Flush old received data at socket client
self.socketio_client.get_received('/sync')
# Set last_job_id as the agent_job_id
agent_job_id = crud.get_last_modification_job_id(self.session)
# Add an extra modification record newer than the agent_job_id
new_latest_job_id = crud.insert_modification_record(method="POST",
resource_url="", session=self.session)
self.socketio_client.emit('get_modification_table_diff',
{'agent_job_id': agent_job_id}, namespace='/sync')
data = self.socketio_client.get_received('/sync')
assert len(data) > 0
event = data[0]
assert event['name'] == 'modification_table_diff'
# Check received event contains data of newly added modification record.
assert event['args'][0][0]['method'] == "POST"
assert event['args'][0][0]['resource_url'] == ""
assert event['args'][0][0]['job_id'] == new_latest_job_id
def test_socketio_POST_updates(self):
"""Test 'update' event emitted by socketio for POST operations."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "POST" in class_methods:
dummy_object = gen_dummy_object(class_.title, self.doc)
# Flush old socketio updates
self.socketio_client.get_received('/sync')
post_response = self.client.post(
endpoints[endpoint], data=json.dumps(dummy_object))
assert post_response.status_code == 200
# Get new socketio update
update = self.socketio_client.get_received('/sync')
assert len(update) != 0
assert update[0]['args'][0]['method'] == "POST"
resource_name = update[0]['args'][0]['resource_url'].split('/')[-1]
assert resource_name == endpoints[endpoint].split('/')[-1]
def test_socketio_DELETE_updates(self):
"""Test 'update' event emitted by socketio for DELETE operations."""
index = self.client.get("/{}".format(self.API_NAME))
assert index.status_code == 200
endpoints = json.loads(index.data.decode('utf-8'))
for endpoint in endpoints:
if endpoint not in ["@context", "@id", "@type"]:
class_name = "/".join(endpoints[endpoint].split(
"/{}/".format(self.API_NAME))[1:])
if class_name not in self.doc.collections:
class_ = self.doc.parsed_classes[class_name]["class"]
class_methods = [
x.method for x in class_.supportedOperation]
if "DELETE" in class_methods:
# Flush old socketio updates
self.socketio_client.get_received('/sync')
delete_response = self.client.delete(
endpoints[endpoint])
assert delete_response.status_code == 200
# Get new update event
update = self.socketio_client.get_received('/sync')
assert len(update) != 0
assert update[0]['args'][0]['method'] == 'DELETE'
resource_name = update[0]['args'][0]['resource_url'].split('/')[-1]
assert resource_name == endpoints[endpoint].split('/')[-1]
if __name__ == '__main__':
message = """
Running tests for the app. Checking if all responses are in proper order.
"""
unittest.main()
| 0
| 4,067
| 0
| 39,830
| 0
| 0
| 0
| 195
| 326
|
c06b33ca0266576dd77b1443051d6971f9e82077
| 801
|
py
|
Python
|
environment.py
|
LCBRU/hic_covid
|
eb5a37339185ed71246235e307a81d91dc91f9ec
|
[
"MIT"
] | null | null | null |
environment.py
|
LCBRU/hic_covid
|
eb5a37339185ed71246235e307a81d91dc91f9ec
|
[
"MIT"
] | null | null | null |
environment.py
|
LCBRU/hic_covid
|
eb5a37339185ed71246235e307a81d91dc91f9ec
|
[
"MIT"
] | null | null | null |
"""Environment Variables
"""
import os
from dotenv import load_dotenv
load_dotenv()
HIC_DB_USERNAME = os.environ["HIC_DB_USERNAME"]
HIC_DB_PASSWORD = os.environ["HIC_DB_PASSWORD"]
HIC_DB_HOST = os.environ["HIC_DB_HOST"]
HIC_DB_DATABASE = os.environ["HIC_DB_DATABASE"]
MS_SQL_ODBC_DRIVER = os.environ["MS_SQL_ODBC_DRIVER"]
MS_SQL_UHL_DWH_HOST = os.environ["MS_SQL_UHL_DWH_HOST"]
MS_SQL_UHL_DWH_USER = os.environ["MS_SQL_UHL_DWH_USER"]
MS_SQL_UHL_DWH_PASSWORD = os.environ["MS_SQL_UHL_DWH_PASSWORD"]
IDENTITY_API_KEY = os.environ["IDENTITY_API_KEY"]
IDENTITY_HOST = os.environ["IDENTITY_HOST"]
HIC_CONNECTION_STRING = os.environ["HIC_CONNECTION_STRING"]
HIC_HOST = os.environ["HIC_HOST"]
HIC_USERNAME = os.environ["HIC_USERNAME"]
HIC_PASSWORD = os.environ["HIC_PASSWORD"]
| 30.807692
| 64
| 0.781523
|
"""Environment Variables
"""
import os
from dotenv import load_dotenv
load_dotenv()
HIC_DB_USERNAME = os.environ["HIC_DB_USERNAME"]
HIC_DB_PASSWORD = os.environ["HIC_DB_PASSWORD"]
HIC_DB_HOST = os.environ["HIC_DB_HOST"]
HIC_DB_DATABASE = os.environ["HIC_DB_DATABASE"]
MS_SQL_ODBC_DRIVER = os.environ["MS_SQL_ODBC_DRIVER"]
MS_SQL_UHL_DWH_HOST = os.environ["MS_SQL_UHL_DWH_HOST"]
MS_SQL_UHL_DWH_USER = os.environ["MS_SQL_UHL_DWH_USER"]
MS_SQL_UHL_DWH_PASSWORD = os.environ["MS_SQL_UHL_DWH_PASSWORD"]
IDENTITY_API_KEY = os.environ["IDENTITY_API_KEY"]
IDENTITY_HOST = os.environ["IDENTITY_HOST"]
HIC_CONNECTION_STRING = os.environ["HIC_CONNECTION_STRING"]
HIC_HOST = os.environ["HIC_HOST"]
HIC_USERNAME = os.environ["HIC_USERNAME"]
HIC_PASSWORD = os.environ["HIC_PASSWORD"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f0682b750b96bf2f312eb6ff9f2bea5aef2c5958
| 390
|
py
|
Python
|
nbd_app/migrations/0007_alter_socialamenities_hotline_number.py
|
Kevson102/Nbd-Phoenix
|
509a9cf026d24827dccc9a5ec67819ecd86fbf03
|
[
"MIT"
] | null | null | null |
nbd_app/migrations/0007_alter_socialamenities_hotline_number.py
|
Kevson102/Nbd-Phoenix
|
509a9cf026d24827dccc9a5ec67819ecd86fbf03
|
[
"MIT"
] | null | null | null |
nbd_app/migrations/0007_alter_socialamenities_hotline_number.py
|
Kevson102/Nbd-Phoenix
|
509a9cf026d24827dccc9a5ec67819ecd86fbf03
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2022-01-03 14:32
| 20.526316
| 47
| 0.605128
|
# Generated by Django 3.2.9 on 2022-01-03 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('nbd_app', '0006_generalposts'),
]
operations = [
migrations.AlterField(
model_name='socialamenities',
name='hotline_number',
field=models.BigIntegerField(),
),
]
| 0
| 0
| 0
| 276
| 0
| 0
| 0
| 19
| 46
|
a68bccebe211588992d72d7335645e18121c4bf1
| 1,039
|
py
|
Python
|
python/icp.py
|
nowtechnologies/ridi_imu
|
2d8a8e54d0491c44de7edac662b101db47ad3cfc
|
[
"MIT"
] | 140
|
2018-05-27T16:11:40.000Z
|
2022-03-28T15:49:28.000Z
|
python/icp.py
|
nowtechnologies/ridi_imu
|
2d8a8e54d0491c44de7edac662b101db47ad3cfc
|
[
"MIT"
] | 13
|
2018-07-16T20:59:58.000Z
|
2021-12-09T08:35:43.000Z
|
python/icp.py
|
nowtechnologies/ridi_imu
|
2d8a8e54d0491c44de7edac662b101db47ad3cfc
|
[
"MIT"
] | 58
|
2018-02-14T03:53:51.000Z
|
2022-03-07T15:59:41.000Z
|
import numpy as np
def fit_transformation(source, target):
"""
This function computes the best rigid transformation between two point sets. It assumes that "source" and
"target" are with the same length and "source[i]" corresponds to "target[i]".
:param source: Nxd array.
:param target: Nxd array.
:return: A transformation as (d+1)x(d+1) matrix; the rotation part as a dxd matrix and the translation
part as a dx1 vector.
"""
assert source.shape == target.shape
center_source = np.mean(source, axis=0)
center_target = np.mean(target, axis=0)
m = source.shape[1]
source_zeromean = source - center_source
target_zeromean = target - center_target
W = np.dot(source_zeromean.T, target_zeromean)
U, S, Vt = np.linalg.svd(W)
R = np.dot(Vt.T, U.T)
if np.linalg.det(R) < 0:
Vt[m - 1, :] *= -1
R = np.dot(Vt.T, U.T)
t = center_target.T - np.dot(R, center_source.T)
T = np.identity(m + 1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
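# ---------------------------------------------------------------------------
# Hedged usage sketch (added by the editor, not part of the original file):
# a quick self-check of fit_transformation. We build a random 3-D point cloud,
# apply a known rotation and translation, and verify that the recovered (R, t)
# maps the source points onto the target points. The names below (rng, R_true,
# t_true, _example_fit_transformation) are illustrative assumptions; the
# example relies on the module's `import numpy as np` and the function above.
# ---------------------------------------------------------------------------
def _example_fit_transformation():
    rng = np.random.default_rng(0)
    source = rng.random((10, 3))
    theta = np.pi / 6  # rotate 30 degrees about the z-axis
    R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                       [np.sin(theta),  np.cos(theta), 0.0],
                       [0.0,            0.0,           1.0]])
    t_true = np.array([1.0, -2.0, 0.5])
    target = source @ R_true.T + t_true
    T, R, t = fit_transformation(source, target)
    # target_i ~= R @ source_i + t, i.e. in row-vector form: source @ R.T + t
    assert np.allclose(source @ R.T + t, target)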
| 33.516129
| 109
| 0.627526
|
import numpy as np
def fit_transformation(source, target):
"""
This function computes the best rigid transformation between two point sets. It assumes that "source" and
"target" are with the same length and "source[i]" corresponds to "target[i]".
:param source: Nxd array.
:param target: Nxd array.
:return: A transformation as (d+1)x(d+1) matrix; the rotation part as a dxd matrix and the translation
part as a dx1 vector.
"""
assert source.shape == target.shape
center_source = np.mean(source, axis=0)
center_target = np.mean(target, axis=0)
m = source.shape[1]
source_zeromean = source - center_source
target_zeromean = target - center_target
W = np.dot(source_zeromean.T, target_zeromean)
U, S, Vt = np.linalg.svd(W)
R = np.dot(Vt.T, U.T)
if np.linalg.det(R) < 0:
Vt[m - 1, :] *= -1
R = np.dot(Vt.T, U.T)
t = center_target.T - np.dot(R, center_source.T)
T = np.identity(m + 1)
T[:m, :m] = R
T[:m, m] = t
return T, R, t
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d22e0e78871a5ab8c68a346843dd049461897cb0
| 1,887
|
py
|
Python
|
codechef/long-challenge/may21/MODEQ.py
|
ramanaditya/data-structure-and-algorithms
|
8dcfeb011e76b2b38b54842e8ccc7a59728141f8
|
[
"MIT"
] | 81
|
2020-05-22T14:22:04.000Z
|
2021-12-18T10:11:23.000Z
|
codechef/long-challenge/may21/MODEQ.py
|
techhub-community/data-structure-and-algorithms
|
8dcfeb011e76b2b38b54842e8ccc7a59728141f8
|
[
"MIT"
] | 4
|
2020-08-06T21:08:00.000Z
|
2021-03-31T16:07:50.000Z
|
codechef/long-challenge/may21/MODEQ.py
|
techhub-community/data-structure-and-algorithms
|
8dcfeb011e76b2b38b54842e8ccc7a59728141f8
|
[
"MIT"
] | 37
|
2020-05-22T14:25:21.000Z
|
2021-12-30T03:13:13.000Z
|
"""
[Modular Equation](https://www.codechef.com/MAY21C/problems/MODEQ)
Given integers N and M, find the number of ordered pairs (a,b)
such that 1≤a<b≤N and ((M mod a) mod b)=((M mod b) mod a).
Input
The first line contains an integer T, the number of test cases. Then the test cases follow.
The only line of each test case contains two integers N, M.
Output
For each testcase, output in a single line the answer to the problem.
Constraints
1≤T≤1000
2≤N≤106
1≤M≤5⋅105
The sum of N over all test cases does not exceed 106.
Note: Multiplier for JAVA for this problem is reduced to 1.25 instead of usual 2.
Subtasks
Subtask #1 (10 points):
1≤T≤10
2≤N≤103
1≤M≤105
Subtask #2 (40 points):
1≤T≤100
2≤N≤105
1≤M≤105
The sum of N over all test cases does not exceed 106.
Subtask #3 (50 points): Original Constraints
Sample Input
3
3 5
3 6
3 10
Sample Output
2
3
2
Explanation
Test Case 1: The valid pairs are {(1,2),(1,3)}.
Test Case 2: The valid pairs are {(1,2),(1,3),(2,3)}.
Test Case 3: The valid pairs are {(1,2),(1,3)}.
"""
# Brute Force
"""
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
T = data[0]
idx = 1
while T > 0:
N, M = data[idx: idx + 2]
res = 0
for i in range(1, N):
for j in range(i + 1, N + 1):
if (M % i) % j == (M % j) % i:
res += 1
print(res)
T -= 1
idx += 2
# Time : 0.58s
"""
if __name__ == '__main__':
T = int(input())
idx = 1
while T > 0:
N, M = list(map(int, input().split()))
res = 0
mod = dict()
for a in range(2, N+1):
mod_with_a = M % a
res += mod.get(mod_with_a, 1)
for b in range(mod_with_a, N+1, a):
mod[b] = mod.get(b, 1) + 1
print(res)
T -= 1
# Time : 4.92s
| 19.255102
| 91
| 0.569687
|
"""
[Modular Equation](https://www.codechef.com/MAY21C/problems/MODEQ)
Given integers N and M, find the number of ordered pairs (a,b)
such that 1≤a<b≤N and ((M mod a) mod b)=((M mod b) mod a).
Input
The first line contains an integer T, the number of test cases. Then the test cases follow.
The only line of each test case contains two integers N, M.
Output
For each testcase, output in a single line the answer to the problem.
Constraints
1≤T≤1000
2≤N≤106
1≤M≤5⋅105
The sum of N over all test cases does not exceed 106.
Note: Multiplier for JAVA for this problem is reduced to 1.25 instead of usual 2.
Subtasks
Subtask #1 (10 points):
1≤T≤10
2≤N≤103
1≤M≤105
Subtask #2 (40 points):
1≤T≤100
2≤N≤105
1≤M≤105
The sum of N over all test cases does not exceed 106.
Subtask #3 (50 points): Original Constraints
Sample Input
3
3 5
3 6
3 10
Sample Output
2
3
2
Explanation
Test Case 1: The valid pairs are {(1,2),(1,3)}.
Test Case 2: The valid pairs are {(1,2),(1,3),(2,3)}.
Test Case 3: The valid pairs are {(1,2),(1,3)}.
"""
import sys
# Brute Force
"""
if __name__ == '__main__':
input = sys.stdin.read()
data = list(map(int, input.split()))
T = data[0]
idx = 1
while T > 0:
N, M = data[idx: idx + 2]
res = 0
for i in range(1, N):
for j in range(i + 1, N + 1):
if (M % i) % j == (M % j) % i:
res += 1
print(res)
T -= 1
idx += 2
# Time : 0.58s
"""
if __name__ == '__main__':
T = int(input())
idx = 1
while T > 0:
N, M = list(map(int, input().split()))
res = 0
mod = dict()
for a in range(2, N+1):
mod_with_a = M % a
res += mod.get(mod_with_a, 1)
for b in range(mod_with_a, N+1, a):
mod[b] = mod.get(b, 1) + 1
print(res)
T -= 1
# Time : 4.92s
| 63
| 0
| 0
| 0
| 0
| 0
| 0
| -11
| 23
|