| column | type | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 6 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 247 |
| max_stars_repo_name | string | lengths 4 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 368k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | lengths 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | lengths 24 to 24 |
| max_issues_repo_path | string | lengths 4 to 247 |
| max_issues_repo_name | string | lengths 4 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | lengths 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | lengths 24 to 24 |
| max_forks_repo_path | string | lengths 4 to 247 |
| max_forks_repo_name | string | lengths 4 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | lengths 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | lengths 24 to 24 |
| content | string | lengths 1 to 1.04M |
| avg_line_length | float64 | 1.53 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | lengths 6 to 1.04M |
| filtered:remove_non_ascii | int64 | 0 to 538k |
| filtered:remove_decorators | int64 | 0 to 917k |
| filtered:remove_async | int64 | 0 to 722k |
| filtered:remove_classes | int64 | -45 to 1M |
| filtered:remove_generators | int64 | 0 to 814k |
| filtered:remove_function_no_docstring | int64 | -102 to 850k |
| filtered:remove_class_no_docstring | int64 | -3 to 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 to 52.4k |
| filtered:remove_delete_markers | int64 | 0 to 59.6k |
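Each record below follows the column order of the schema table above: repository metadata first, then the filtered `content`, its line statistics (`avg_line_length`, `max_line_length`, `alphanum_fraction`), the `original_content`, and finally the per-filter `filtered:*` values. As a minimal, illustrative sketch of how records with this schema could be inspected, the snippet below assumes the rows are available locally as a JSON-lines file named `filtered_python.jsonl`; that file name, the JSON-lines layout, and the use of the Hugging Face `datasets` library are assumptions made for illustration only, not something specified by the data itself.

```python
# Minimal inspection sketch. The local file name "filtered_python.jsonl" and the
# JSON-lines layout are assumptions made here purely for illustration.
from datasets import load_dataset

ds = load_dataset("json", data_files="filtered_python.jsonl", split="train")

# Look at one record's repository metadata and size.
row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])

# Compare the filtered `content` with the `original_content` of the same record.
delta = len(row["original_content"]) - len(row["content"])
print(f"length difference between original and filtered content: {delta} characters")

# Select records whose filtered:remove_classes value is 0
# (read here, as an assumption, as "the class-removal pass changed nothing").
unchanged = ds.filter(lambda r: r["filtered:remove_classes"] == 0)
print(f"{len(unchanged)} of {len(ds)} records have filtered:remove_classes == 0")
```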
72d3b2344864c102c1f3172f717ed4ec52b97b5c | 2,050 | py | Python | tools/dataset_building/limit_density.py | IQTLabs/WITW | 36154fb9388dbdc5b2776fc9d49699b26a08f8ae | ["Apache-2.0"] | null | null | null | tools/dataset_building/limit_density.py | IQTLabs/WITW | 36154fb9388dbdc5b2776fc9d49699b26a08f8ae | ["Apache-2.0"] | null | null | null | tools/dataset_building/limit_density.py | IQTLabs/WITW | 36154fb9388dbdc5b2776fc9d49699b26a08f8ae | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import argparse
import numpy as np
# Modified from: CosmiQ Solaris
# https://github.com/CosmiQ/solaris/blob/master/solaris/preproc/sar.py
def haversine(lat1, lon1, lat2, lon2, rad=False, radius=6.371E6):
"""
Haversine formula for distance between two points given their
latitude and longitude, assuming a spherical earth.
"""
if not rad:
lat1 = np.radians(lat1)
lon1 = np.radians(lon1)
lat2 = np.radians(lat2)
lon2 = np.radians(lon2)
dlat = lat2 - lat1
dlon = lon2 - lon1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
return 2 * radius * np.arcsin(np.sqrt(a))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_path')
parser.add_argument('output_path')
parser.add_argument('threshold', nargs='?', type=float, default=10.)
args = parser.parse_args()
main(args.input_path, args.output_path, args.threshold)
| 32.03125 | 75 | 0.609756 |
#!/usr/bin/env python
import csv
import argparse
import numpy as np
import pandas as pd
import tqdm
# Modified from: CosmiQ Solaris
# https://github.com/CosmiQ/solaris/blob/master/solaris/preproc/sar.py
def haversine(lat1, lon1, lat2, lon2, rad=False, radius=6.371E6):
"""
Haversine formula for distance between two points given their
latitude and longitude, assuming a spherical earth.
"""
if not rad:
lat1 = np.radians(lat1)
lon1 = np.radians(lon1)
lat2 = np.radians(lat2)
lon2 = np.radians(lon2)
dlat = lat2 - lat1
dlon = lon2 - lon1
a = np.sin(dlat/2)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2)**2
return 2 * radius * np.arcsin(np.sqrt(a))
def main(input_path, output_path, threshold, randomize=True):
# Input and output dataframes
dfi = pd.read_csv(input_path, sep=',', header=0, dtype={'id':str})
dfo = dfi.iloc[0:0,:].copy()
# Loop through AOIs
aois = np.sort(dfi['aoi'].unique())
for aoi in aois:
print('AOI', aoi)
dfai = dfi[dfi.aoi == aoi]
dfao = dfai.iloc[0:0,:].copy()
if randomize:
dfai = dfai.sample(frac=1).reset_index(drop=True)
for index, row in tqdm.tqdm(dfai.iterrows(), total=len(dfai)):
lat = np.array(row['lat'])
lon = np.array(row['lon'])
dists = haversine(lat, lon, dfao['lat'], dfao['lon'])
if len(dists) > 0:
min_dist = np.min(dists)
else:
min_dist = np.inf
if min_dist >= threshold:
dfao = dfao.append(row)
dfo = dfo.append(dfao)
# Write output to disk
dfo.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_path')
parser.add_argument('output_path')
parser.add_argument('threshold', nargs='?', type=float, default=10.)
args = parser.parse_args()
main(args.input_path, args.output_path, args.threshold)
| 0 | 0 | 0 | 0 | 0 | 996 | 0 | -23 | 90 |
a926adeeae5e6a18471aa287cd2d0a24f03b3693 | 1,595 | py | Python | __init__.py | HappyRay/anki-addon-test | f947070fa7eb47f95d84a9f6b5707a822ea75161 | ["Apache-2.0"] | null | null | null | __init__.py | HappyRay/anki-addon-test | f947070fa7eb47f95d84a9f6b5707a822ea75161 | ["Apache-2.0"] | null | null | null | __init__.py | HappyRay/anki-addon-test | f947070fa7eb47f95d84a9f6b5707a822ea75161 | ["Apache-2.0"] | null | null | null |
# import the main window object (mw) from aqt
from aqt import mw
# import the "show info" tool from utils.py
# import all of the Qt GUI library
# We're going to add a menu item below. First we want to create a function to
# be called when the menu item is activated.
# create a new menu item, "test"
action = QAction("test", mw)
# set it to call testFunction when it's clicked
action.triggered.connect(add_note)
# and add it to the tools menu
mw.form.menuTools.addAction(action)
action.setShortcut(QKeySequence("Ctrl+t"))
| 31.27451 | 93 | 0.684013 |
# import the main window object (mw) from aqt
from aqt import mw
# import the "show info" tool from utils.py
from aqt.utils import showInfo
# import all of the Qt GUI library
from aqt.qt import *
# We're going to add a menu item below. First we want to create a function to
# be called when the menu item is activated.
def add_note():
col = mw.col
did = col.decks.id_for_name("test")
m = col.models.byName("cc Chinese")
# mid = m['id']
# showInfo("deck id for the deck test: {}. Model id for cc Chinese: {}".format(did, mid))
col.models.setCurrent(m)
n = col.newNote()
test_simplified = "ๆต่ฏ"
n['Pinyin'] = "ce4 shi4"
simplified_field_name = "Simplified"
n[simplified_field_name] = test_simplified
n['English'] = "test"
# showInfo(deck.keys())
node_ids = col.find_notes("{}:{}".format(simplified_field_name, test_simplified))
if node_ids:
showInfo("The note with the question {} already exists".format(test_simplified))
else:
col.add_note(n, did)
showInfo("Added a note with the question {}.".format(test_simplified))
def show_card_count():
# get the number of cards in the current collection, which is stored in
# the main window
card_count = mw.col.cardCount()
# show a message box
showInfo("Card count: %d" % card_count)
# create a new menu item, "test"
action = QAction("test", mw)
# set it to call testFunction when it's clicked
action.triggered.connect(add_note)
# and add it to the tools menu
mw.form.menuTools.addAction(action)
action.setShortcut(QKeySequence("Ctrl+t"))
| 6 | 0 | 0 | 0 | 0 | 967 | 0 | 8 | 90 |
a5b597dda0e02678d4bc53de2fd6dc1b66489dbe | 4,210 | py | Python | changes/listeners/build_revision.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | ["Apache-2.0"] | null | null | null | changes/listeners/build_revision.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | ["Apache-2.0"] | null | null | null | changes/listeners/build_revision.py | bowlofstew/changes | ebd393520e0fdb07c240a8d4e8747281b6186e28 | ["Apache-2.0"] | null | null | null |
from __future__ import absolute_import
| 36.608696 | 148 | 0.595724 |
from __future__ import absolute_import
import logging
from flask import current_app
from changes.api.build_index import BuildIndexAPIView
from changes.models import ProjectStatus, Project, ProjectConfigError, ProjectOptionsHelper, Revision
from changes.utils.diff_parser import DiffParser
from changes.utils.project_trigger import files_changed_should_trigger_project
from changes.vcs.base import UnknownRevision
def revision_created_handler(revision_sha, repository_id, **kwargs):
revision = Revision.query.filter(
Revision.sha == revision_sha,
Revision.repository_id == repository_id,
).first()
if not revision:
return
handler = CommitTrigger(revision)
handler.run()
class CommitTrigger(object):
logger = logging.getLogger('build_revision')
def __init__(self, revision):
self.repository = revision.repository
self.revision = revision
def get_project_list(self):
return list(Project.query.filter(
Project.repository_id == self.revision.repository_id,
Project.status == ProjectStatus.active,
))
def get_changed_files(self):
vcs = self.repository.get_vcs()
if not vcs:
raise NotImplementedError
# Make sure the repo exists on disk.
if not vcs.exists():
vcs.clone()
diff = None
try:
diff = vcs.export(self.revision.sha)
except UnknownRevision:
# Maybe the repo is stale; update.
vcs.update()
# If it doesn't work this time, we have
# a problem. Let the exception escape.
diff = vcs.export(self.revision.sha)
diff_parser = DiffParser(diff)
return diff_parser.get_changed_files()
def run(self):
revision = self.revision
project_list = self.get_project_list()
if not project_list:
return
options = ProjectOptionsHelper.get_options(project_list, [
'build.branch-names',
'build.commit-trigger',
'build.file-whitelist',
])
files_changed = self.get_changed_files()
projects_to_build = []
for project in project_list:
if options[project.id].get('build.commit-trigger', '1') != '1':
self.logger.info('build.commit-trigger is disabled for project %s', project.slug)
continue
branch_names = filter(bool, options[project.id].get('build.branch-names', '*').split(' '))
if not revision.should_build_branch(branch_names):
self.logger.info('No branches matched build.branch-names for project %s', project.slug)
continue
try:
if not files_changed_should_trigger_project(files_changed, project, options[project.id], revision.sha):
self.logger.info('No changed files matched project trigger for project %s', project.slug)
continue
except ProjectConfigError:
author_name = '(unknown)'
if revision.author_id:
author_name = revision.author.name
self.logger.error('Project config for project %s is not in a valid format. Author is %s.', project.slug, author_name, exc_info=True)
projects_to_build.append(project.slug)
for project_slug in projects_to_build:
data = {
'sha': revision.sha,
'project': project_slug,
'tag': 'commit',
}
with current_app.test_request_context('/api/0/builds/', method='POST', data=data):
try:
response = BuildIndexAPIView().post()
except Exception as e:
self.logger.exception('Failed to create build: %s' % (e,))
else:
if isinstance(response, (list, tuple)):
response, status = response
if status != 200:
self.logger.error('Failed to create build: %s' % (response,), extra={
'data': data,
})
| 0 | 0 | 0 | 3,468 | 0 | 279 | 0 | 220 | 202 |
39145ba22026f38331d59b821d26f97cac5a0876 | 2,332 | py | Python | TestScripts/TestWebScraping.py | HansFriedrichSchwanecke/EquityRiseAndFallTearSheet | a65ceeb04c4cdacafd8eb3dcc1e52b25654c3e19 | ["MIT"] | null | null | null | TestScripts/TestWebScraping.py | HansFriedrichSchwanecke/EquityRiseAndFallTearSheet | a65ceeb04c4cdacafd8eb3dcc1e52b25654c3e19 | ["MIT"] | null | null | null | TestScripts/TestWebScraping.py | HansFriedrichSchwanecke/EquityRiseAndFallTearSheet | a65ceeb04c4cdacafd8eb3dcc1e52b25654c3e19 | ["MIT"] | null | null | null |
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
driver_path = 'msedgedriver.exe'
constituents_url = 'https://www.stoxx.com/index-details?symbol=SXXP'
table_id = "stoxx_index_detail_component"
constituents = {}
driver = webdriver.Edge(driver_path)
driver.get(url=constituents_url)
components = driver.find_element_by_link_text('Components')
components.click()
driver.implicitly_wait(2)
table = driver.find_element_by_id('component-table')
for row in table.find_elements_by_xpath(".//tr"):
try:
href = row.find_element_by_xpath("./td[1]/input")
constituents[row.text] = href.get_property('value')
except:
# TODO: Add Logger
continue
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,'//*[@id="onetrust-accept-btn-handler"]'))).click()
button_list = driver.find_elements_by_xpath("//*/li[contains(@onclick,'paginate')]")
counter = len(button_list)
driver.implicitly_wait(2)
idx = 0
while idx < counter:
print("Loading page {0}".format(idx))
button_list = driver.find_elements_by_xpath("//*/li[contains(@onclick,'paginate')]")
button_list[idx].click()
time.sleep(2)
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID,'component-table')))
table = driver.find_element_by_id('component-table')
rows = table.find_elements_by_xpath(".//tr")
print(len(rows))
for row in rows:
driver.implicitly_wait(2)
try:
href = row.find_element_by_xpath("./td[1]/input")
constituents[row.text] = href.get_property('value')
except Exception as err:
print("Issue: {0}".format(err))# TODO: Add Logger
driver.implicitly_wait(2)
continue
idx = idx+1
href = constituents.popitem()[1]
driver.get(href)
table = driver.find_element_by_class_name('flat-table')
static_data = table.text.split('\n')
output = []
for key_value in static_data:
key, value = key_value.split(': ', 1)
if not output or key in output[-1]:
output.append({})
output[-1][key] = value
| 29.518987 | 120 | 0.706261 |
import timeit
import selenium.webdriver
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
import time
import pandas as pd
driver_path = 'msedgedriver.exe'
constituents_url = 'https://www.stoxx.com/index-details?symbol=SXXP'
table_id = "stoxx_index_detail_component"
constituents = {}
driver = webdriver.Edge(driver_path)
driver.get(url=constituents_url)
components = driver.find_element_by_link_text('Components')
components.click()
driver.implicitly_wait(2)
table = driver.find_element_by_id('component-table')
for row in table.find_elements_by_xpath(".//tr"):
try:
href = row.find_element_by_xpath("./td[1]/input")
constituents[row.text] = href.get_property('value')
except:
# TODO: Add Logger
continue
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH,'//*[@id="onetrust-accept-btn-handler"]'))).click()
button_list = driver.find_elements_by_xpath("//*/li[contains(@onclick,'paginate')]")
counter = len(button_list)
driver.implicitly_wait(2)
idx = 0
while idx < counter:
print("Loading page {0}".format(idx))
button_list = driver.find_elements_by_xpath("//*/li[contains(@onclick,'paginate')]")
button_list[idx].click()
time.sleep(2)
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID,'component-table')))
table = driver.find_element_by_id('component-table')
rows = table.find_elements_by_xpath(".//tr")
print(len(rows))
for row in rows:
driver.implicitly_wait(2)
try:
href = row.find_element_by_xpath("./td[1]/input")
constituents[row.text] = href.get_property('value')
except Exception as err:
print("Issue: {0}".format(err))# TODO: Add Logger
driver.implicitly_wait(2)
continue
idx = idx+1
href = constituents.popitem()[1]
driver.get(href)
table = driver.find_element_by_class_name('flat-table')
static_data = table.text.split('\n')
output = []
for key_value in static_data:
key, value = key_value.split(': ', 1)
if not output or key in output[-1]:
output.append({})
output[-1][key] = value
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 90 |
bd182b2ee422cb743cc750e17448d7ac07f848a6 | 5,191 | py | Python | src/runners/tfa_runner.py | ChenyangTang/bark-ml | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | ["MIT"] | null | null | null | src/runners/tfa_runner.py | ChenyangTang/bark-ml | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | ["MIT"] | null | null | null | src/runners/tfa_runner.py | ChenyangTang/bark-ml | 1d2ab1957bf49929e27d718dd4bd3912162197b8 | ["MIT"] | null | null | null |
import logging
import tensorflow as tf
tf.compat.v1.enable_v2_behavior()
logger = logging.getLogger()
# NOTE(@hart): this will print all statements
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
| 37.890511 | 108 | 0.681372 |
import sys
import logging
import time
import tensorflow as tf
tf.compat.v1.enable_v2_behavior()
from tf_agents.drivers import dynamic_step_driver
from tf_agents.drivers import dynamic_episode_driver
from modules.runtime.commons.parameters import ParameterServer
from tf_agents.metrics import tf_metrics
from tf_agents.eval import metric_utils
from tf_agents.utils import common
from tf_agents.trajectories import time_step as ts
from src.runners.base_runner import BaseRunner
logger = logging.getLogger()
# NOTE(@hart): this will print all statements
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class TFARunner(BaseRunner):
"""Runner that takes the runtime and agent
and runs the training and evaluation as specified.
"""
def __init__(self,
runtime=None,
agent=None,
params=ParameterServer(),
unwrapped_runtime=None):
BaseRunner.__init__(self,
runtime=runtime,
agent=agent,
params=params)
self._eval_metrics = [
tf_metrics.AverageReturnMetric(
buffer_size=self._params["ML"]["Runner"]["evaluation_steps"]),
tf_metrics.AverageEpisodeLengthMetric(
buffer_size=self._params["ML"]["Runner"]["evaluation_steps"])
]
self._summary_writer = None
self._unwrapped_runtime = unwrapped_runtime
self.get_initial_collection_driver()
self.get_collection_driver()
def setup_writer(self):
if self._params["ML"]["Runner"]["summary_path"] is not None:
self._summary_writer = tf.summary.create_file_writer(
self._params["ML"]["Runner"]["summary_path"])
def get_initial_collection_driver(self):
"""Sets the initial collection driver for tf-agents.
"""
self._initial_collection_driver = []
for agent in self._agent:
self._initial_collection_driver.append(dynamic_episode_driver.DynamicEpisodeDriver(
env=self._runtime,
policy=agent._agent.collect_policy,
observers=[agent._replay_buffer.add_batch],
num_episodes=self._params["ML"]["Runner"]["initial_collection_steps"]))
def get_collection_driver(self):
"""Sets the collection driver for tf-agents.
"""
self._collection_driver = []
for agent in self._agent:
self._collection_driver.append(dynamic_step_driver.DynamicStepDriver(
env=self._runtime,
policy=agent._agent.collect_policy, # this is the agents policy
observers=[agent._replay_buffer.add_batch],
num_steps = 1
))
def collect_initial_episodes(self):
"""Function that collects the initial episodes
"""
for i in range(len(self._initial_collection_driver)):
self._initial_collection_driver[i].run()
def train(self):
"""Wrapper that sets the summary writer.
This enables a seamingless integration with TensorBoard.
"""
# collect initial episodes
self.collect_initial_episodes()
# main training cycle
if self._summary_writer is not None:
with self._summary_writer.as_default():
self._train()
else:
self._train()
def _train(self):
"""Trains the agent as specified in the parameter file
"""
pass
def evaluate(self):
"""Evaluates the agent
"""
global_iteration = self._agent._agent._train_step_counter.numpy()
logger.info("Evaluating the agent's performance in {} episodes."
.format(str(self._params["ML"]["Runner"]["evaluation_steps"])))
metric_utils.eager_compute(
self._eval_metrics,
self._runtime,
self._agent._agent.policy,
num_episodes=self._params["ML"]["Runner"]["evaluation_steps"])
metric_utils.log_metrics(self._eval_metrics)
tf.summary.scalar("mean_reward",
self._eval_metrics[0].result().numpy(),
step=global_iteration)
tf.summary.scalar("mean_steps",
self._eval_metrics[1].result().numpy(),
step=global_iteration)
logger.info(
"The agent achieved on average {} reward and {} steps in \
{} episodes." \
.format(str(self._eval_metrics[0].result().numpy()),
str(self._eval_metrics[1].result().numpy()),
str(self._params["ML"]["Runner"]["evaluation_steps"])))
def visualize(self, num_episodes=1):
# Ticket (https://github.com/tensorflow/agents/issues/59) recommends
# to do the rendering in the original environment
if self._unwrapped_runtime is not None:
for _ in range(0, num_episodes):
state = self._unwrapped_runtime.reset()
is_terminal = False
suc_time = self._params["ML"]["Maneuver"]["success"]
while not is_terminal:
action_step_0 = self._agent[0]._eval_policy.action(ts.transition(state, reward=0.0, discount=1.0))
action_step_1 = self._agent[1]._eval_policy.action(ts.transition(state, reward=0.0, discount=1.0))
state, reward, is_terminal, _ = self._unwrapped_runtime.step(action_step_0.action.numpy())
state, reward, is_terminal, _ = self._unwrapped_runtime.step(action_step_1.action.numpy())
self._unwrapped_runtime.render()
| 0 | 0 | 0 | 4,552 | 0 | 0 | 0 | 183 | 246 |
a56fdb86ec479a0b307f3343c329ab6ccf839751 | 760 | py | Python | setup.py | BuildJet/lagtraj | a49bff9c165b225b37e212dec4c1d319452cc3f3 | ["MIT"] | 4 | 2020-04-16T22:57:00.000Z | 2021-10-05T02:37:58.000Z | setup.py | BuildJet/lagtraj | a49bff9c165b225b37e212dec4c1d319452cc3f3 | ["MIT"] | 112 | 2020-05-21T09:47:14.000Z | 2022-03-20T16:00:27.000Z | setup.py | BuildJet/lagtraj | a49bff9c165b225b37e212dec4c1d319452cc3f3 | ["MIT"] | 5 | 2020-05-14T11:04:07.000Z | 2022-03-11T16:38:35.000Z |
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
INSTALL_REQUIRES = open("requirements.txt").readlines()
setup(
name="lagtraj",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Python trajectory code for Lagrangian simulations",
url="https://github.com/EUREC4A-UK/lagtraj",
maintainer="Leif Denby",
maintainer_email="[email protected]",
py_modules=["lagtraj"],
packages=find_packages(),
package_data={"": ["*.csv", "*.yml", "*.html", "*.dat", "*.yaml"]},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
| 31.666667 | 71 | 0.703947 |
#!/usr/bin/env python
from setuptools import setup, find_packages
import versioneer
INSTALL_REQUIRES = open("requirements.txt").readlines()
setup(
name="lagtraj",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Python trajectory code for Lagrangian simulations",
url="https://github.com/EUREC4A-UK/lagtraj",
maintainer="Leif Denby",
maintainer_email="[email protected]",
py_modules=["lagtraj"],
packages=find_packages(),
package_data={"": ["*.csv", "*.yml", "*.html", "*.dat", "*.yaml"]},
include_package_data=True,
install_requires=INSTALL_REQUIRES,
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
zip_safe=False,
)
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aee8828cea0fd749235f9d7e36d30e4e14ddf27e | 138 | py | Python | Pyon exercicios/Exercicios/021.py | alefbispo/Exercicios-do-curso-de-Python | 16cd569ab16542135b834ac8d0cfb0ae84836d53 | ["MIT"] | null | null | null | Pyon exercicios/Exercicios/021.py | alefbispo/Exercicios-do-curso-de-Python | 16cd569ab16542135b834ac8d0cfb0ae84836d53 | ["MIT"] | null | null | null | Pyon exercicios/Exercicios/021.py | alefbispo/Exercicios-do-curso-de-Python | 16cd569ab16542135b834ac8d0cfb0ae84836d53 | ["MIT"] | null | null | null |
#executar um audio mp3
import pygame
pygame.init()
pygame.mixer.music.load('BlackDog.mp3')
pygame.mixer.music.play()
pygame.event.wait()
| 17.25 | 39 | 0.768116 |
#executar um audio mp3
import pygame
pygame.init()
pygame.mixer.music.load('BlackDog.mp3')
pygame.mixer.music.play()
pygame.event.wait()
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6c7b3d8dec1fd101207e35c912d98c7301395c27 | 273 | py | Python | catalog/bindings/gmd/geometric_complex.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | ["Apache-2.0"] | null | null | null | catalog/bindings/gmd/geometric_complex.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | ["Apache-2.0"] | null | null | null | catalog/bindings/gmd/geometric_complex.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | ["Apache-2.0"] | null | null | null |
__NAMESPACE__ = "http://www.opengis.net/gml"
| 24.818182 | 68 | 0.783883 |
from dataclasses import dataclass
from bindings.gmd.geometric_complex_type import GeometricComplexType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class GeometricComplex(GeometricComplexType):
class Meta:
namespace = "http://www.opengis.net/gml"
| 0 | 100 | 0 | 0 | 0 | 0 | 0 | 59 | 67 |
0bf3ff960e9ba03544b4330daf97aa30ac87da93 | 2,041 | py | Python | run.py | NCBI-Codeathons/Identifying-bulk-RNA-seq-derived-biomarkers-of-cancer-risk-within-single-cell-populations | fda26f5cfe41f3e64bff4602dc010d6f6be183f8 | ["MIT"] | 3 | 2020-01-15T03:17:52.000Z | 2020-09-30T20:12:53.000Z | run.py | NCBI-Codeathons/Identifying-bulk-RNA-seq-derived-biomarkers-of-cancer-risk-within-single-cell-populations | fda26f5cfe41f3e64bff4602dc010d6f6be183f8 | ["MIT"] | null | null | null | run.py | NCBI-Codeathons/Identifying-bulk-RNA-seq-derived-biomarkers-of-cancer-risk-within-single-cell-populations | fda26f5cfe41f3e64bff4602dc010d6f6be183f8 | ["MIT"] | 2 | 2021-05-17T20:59:33.000Z | 2021-05-27T07:30:42.000Z |
import sys
# # Load data
# scRNAdata = H5COUNTS('data/GSE103224.h5')
# # Preprocess data
# scRNAdata.preprocess_data(log_normalize=True, filter_genes=False, n_neighbors=False, umap=False)
# # Add clustering results
# scRNAdata.add_clustering_results(path='data/interim/', tumor_ids=[1, 2, 3, 4, 5, 6, 7, 8])
#
# # Get a list of biomarkers associated to Glioma survival
# BIOMARKER_F = "data/glioma_survival_associated_genes_Fatai.csv"
# biomarkers_df = pd.read_table(BIOMARKER_F, )
# biomarkers = pd.Index(scRNAdata.GENE_NAMES) & biomarkers_df["Gene"].unique()
#
# # Aggregate all cell expressions to find clusters with the biomarkers expressed
# scRNAdata.get_aggregated_cluster_expression(biomarkers, quantile_threshold=0.75,)
#
# # Run GSEA on all the DE genes for each cluster
# from src.analysis.gsea_analysis import GSEA_Analysis
# gsea = GSEA_Analysis(scRNAdata, path='data/interim/', threshold=0.05,) # path leads the file with the DE genes list for each cluster
# gsea.get_gsea_result()
#
# # Get the GSEA results of only the clusters which have a query biomarker expressed
# query_biomarker = ["CDC6"]
# result = gsea.get_gsea_result_by_cluster(scRNAdata.get_clusters_with_biomarker_expression(query_biomarker))
#
# # Visualize
# from src.visualization import heatmap
# heatmap(result, height=1000, width=600)
if __name__== "__main__":
main(sys.argv[1:])
| 37.109091 | 134 | 0.711906 |
import sys, getopt, subprocess
from src.common.load_h5 import H5COUNTS
from src.preprocess.build_h5_GSE103224 import build_h5
import pandas as pd
# # Load data
# scRNAdata = H5COUNTS('data/GSE103224.h5')
# # Preprocess data
# scRNAdata.preprocess_data(log_normalize=True, filter_genes=False, n_neighbors=False, umap=False)
# # Add clustering results
# scRNAdata.add_clustering_results(path='data/interim/', tumor_ids=[1, 2, 3, 4, 5, 6, 7, 8])
#
# # Get a list of biomarkers associated to Glioma survival
# BIOMARKER_F = "data/glioma_survival_associated_genes_Fatai.csv"
# biomarkers_df = pd.read_table(BIOMARKER_F, )
# biomarkers = pd.Index(scRNAdata.GENE_NAMES) & biomarkers_df["Gene"].unique()
#
# # Aggregate all cell expressions to find clusters with the biomarkers expressed
# scRNAdata.get_aggregated_cluster_expression(biomarkers, quantile_threshold=0.75,)
#
# # Run GSEA on all the DE genes for each cluster
# from src.analysis.gsea_analysis import GSEA_Analysis
# gsea = GSEA_Analysis(scRNAdata, path='data/interim/', threshold=0.05,) # path leads the file with the DE genes list for each cluster
# gsea.get_gsea_result()
#
# # Get the GSEA results of only the clusters which have a query biomarker expressed
# query_biomarker = ["CDC6"]
# result = gsea.get_gsea_result_by_cluster(scRNAdata.get_clusters_with_biomarker_expression(query_biomarker))
#
# # Visualize
# from src.visualization import heatmap
# heatmap(result, height=1000, width=600)
def main(argv):
try:
opts, args = getopt.getopt(argv, "hg:r:p:", ["ifile1=", "ifile2="])
print(args, opts)
except getopt.GetoptError:
print('run.py -g <genes> -r <resolution>')
sys.exit(3)
for opt, arg in opts:
if opt == '-h':
print('python run.py -p')
sys.exit()
elif opt in ("-p", "--preprocess"):
print('Building h5 file for {} outputing at data/GSE103224.h5'.format(arg))
build_h5(ROOT=arg, OUT_F="data/GSE103224.h5")
if __name__== "__main__":
main(sys.argv[1:])
| 0 | 0 | 0 | 0 | 0 | 511 | 0 | 69 | 89 |
3677aecafa8af2453264a152c0aa94e0c15da665 | 186 | py | Python | main.py | ZzAZz4/recon_app | 00a430e8cf3657b923286fe13d39f0706290608c | ["MIT"] | null | null | null | main.py | ZzAZz4/recon_app | 00a430e8cf3657b923286fe13d39f0706290608c | ["MIT"] | null | null | null | main.py | ZzAZz4/recon_app | 00a430e8cf3657b923286fe13d39f0706290608c | ["MIT"] | 1 | 2020-12-16T03:55:02.000Z | 2020-12-16T03:55:02.000Z |
if __name__ == "__main__":
MainApp().run()
| 16.909091 | 34 | 0.682796 |
from kivy.app import App
from kivy.uix.button import Button
from kivy import utils
class MainApp(App):
def build(self):
pass
if __name__ == "__main__":
MainApp().run()
| 0 | 0 | 0 | 32 | 0 | 0 | 0 | 17 | 89 |
b27777f3d4622fee2e631a02cb9de8c559c5e33d | 227 | py | Python | tuiuiu/contrib/sitemaps/apps.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | ["BSD-3-Clause"] | 3 | 2019-08-08T09:09:35.000Z | 2020-12-15T18:04:17.000Z | tuiuiu/contrib/sitemaps/apps.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | ["BSD-3-Clause"] | null | null | null | tuiuiu/contrib/sitemaps/apps.py | caputomarcos/tuiuiu.io | d8fb57cf95487e7fe1454b2130ef18acc916da46 | ["BSD-3-Clause"] | 1 | 2017-09-09T20:10:40.000Z | 2017-09-09T20:10:40.000Z |
from __future__ import absolute_import, unicode_literals
| 22.7 | 56 | 0.770925 |
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
class SitemapsAppConfig(AppConfig):
name = 'tuiuiu.contrib.sitemaps'
label = 'sitemaps'
verbose_name = "Tuiuiu sitemaps"
| 0 | 0 | 0 | 111 | 0 | 0 | 0 | 12 | 46 |
a9c0784c20aa7324f070f9a0f1a0cc3f287c3b63 | 10,391 | py | Python | scripts/Agents.py | Youngl41/A3C | e82a93eca37ded7814be58ee253abd7d08e27355 | ["Apache-2.0"] | null | null | null | scripts/Agents.py | Youngl41/A3C | e82a93eca37ded7814be58ee253abd7d08e27355 | ["Apache-2.0"] | null | null | null | scripts/Agents.py | Youngl41/A3C | e82a93eca37ded7814be58ee253abd7d08e27355 | ["Apache-2.0"] | null | null | null |
# env.unwrapped.get_action_meanings()
#======================================================
# Agent classes
#======================================================
'''
Info:
Version: 1.0
Author: Young Lee
Created: Friday, 16 August 2019
'''
# Import modules
import os
import sys
try:
get_ipython().system('pip install gym')
get_ipython().system('pip install tqdm')
get_ipython().system('pip install dropbox')
get_ipython().system('pip install gym[atari]')
except NameError:
pass
# get_ipython().system('apt-get install -y cmake libopenmpi-dev python3-dev zlib1g-dev')
# get_ipython().system('apt-get install -y python-mpi4py')
# get_ipython().system('pip install stable-baselines')
# get_ipython().system('brew install cmake openmpi')
# !pip install pandas
# !pip install keras
# !pip install matplotlib
# !pip install gym[atari]
try:
from stable_baselines.common.atari_wrappers import WarpFrame
except ModuleNotFoundError:
try:
from stable_baselines.common.atari_wrappers import WarpFrame
except ModuleNotFoundError:
# %matplotlib inline
# Import custom modules
try:
sys.path.append(os.path.dirname(os.path.abspath(os.path.join(__file__, '..')))) # 1 level upper dir
sys.path.append(os.path.dirname(os.path.abspath(os.path.join(__file__, '..', '..')))) # 2 levels upper dir
except NameError:
sys.path.append('.') # current dir
sys.path.append('..') # 1 level upper dir
sys.path.append(os.path.join(os.getcwd(), '..')) # 1 levels upper dir
sys.path.append(os.path.join(os.getcwd(), '..', '..')) # 2 levels upper dir
# dtype = 'float16'
# K.set_floatx(dtype)
# K.set_epsilon(1e-4)
# print(tf.__version__)
# config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=12, device_count = {'CPU': 12 })
# session = tf.compat.v1.Session(config=config)
# K.set_session(session)
# Suppress warnings
import logging, os
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
#------------------------------
# DQN Agent
#------------------------------
# Define agent
| 47.884793 | 295 | 0.590992 |
# env.unwrapped.get_action_meanings()
#======================================================
# Agent classes
#======================================================
'''
Info:
Version: 1.0
Author: Young Lee
Created: Friday, 16 August 2019
'''
# Import modules
import os
import re
import sys
try:
get_ipython().system('pip install gym')
get_ipython().system('pip install tqdm')
get_ipython().system('pip install dropbox')
get_ipython().system('pip install gym[atari]')
except NameError:
pass
# get_ipython().system('apt-get install -y cmake libopenmpi-dev python3-dev zlib1g-dev')
# get_ipython().system('apt-get install -y python-mpi4py')
# get_ipython().system('pip install stable-baselines')
# get_ipython().system('brew install cmake openmpi')
# !pip install pandas
# !pip install keras
# !pip install matplotlib
# !pip install gym[atari]
try:
from stable_baselines.common.atari_wrappers import WarpFrame
except ModuleNotFoundError:
try:
from stable_baselines.common.atari_wrappers import WarpFrame
except ModuleNotFoundError:
from baselines.common.atari_wrappers import WarpFrame
import gym
from gym import spaces
from gym.wrappers.atari_preprocessing import AtariPreprocessing
from gym import envs
from tqdm import tqdm
import numpy as np
import pandas as pd
import random
import dropbox
from datetime import datetime
from scipy.special import softmax
import matplotlib.pyplot as plt
from copy import deepcopy
# %matplotlib inline
# Import custom modules
try:
sys.path.append(os.path.dirname(os.path.abspath(os.path.join(__file__, '..')))) # 1 level upper dir
sys.path.append(os.path.dirname(os.path.abspath(os.path.join(__file__, '..', '..')))) # 2 levels upper dir
except NameError:
sys.path.append('.') # current dir
sys.path.append('..') # 1 level upper dir
sys.path.append(os.path.join(os.getcwd(), '..')) # 1 levels upper dir
sys.path.append(os.path.join(os.getcwd(), '..', '..')) # 2 levels upper dir
from config.paths import main_dir
import utility.util_general as gen
from collections import deque
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import Conv3D, MaxPooling3D, AveragePooling3D
from tensorflow.keras.optimizers import Adam, Nadam, RMSprop, SGD
from tensorflow.keras.models import clone_model
from tensorflow.keras import backend as K
import tensorflow as tf
# dtype = 'float16'
# K.set_floatx(dtype)
# K.set_epsilon(1e-4)
# print(tf.__version__)
# config = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=12, device_count = {'CPU': 12 })
# session = tf.compat.v1.Session(config=config)
# K.set_session(session)
# Suppress warnings
import logging, os
logging.disable(logging.WARNING)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
#------------------------------
# DQN Agent
#------------------------------
# Define agent
class DQNAgent:
# Initialise
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=500)
self.train_interval = 5
self.memory_size = 0
self.gamma = 0.95
self.learning_rate = 0.001
self.model_primary = self._build_model_primary() # primary network
self.update_target_network_freq = 10000
self.polyak_weight = 0.95
# Epsilon - greedy algorithm
self.policy_method = 'epsilon-greedy'
self.epsilon_start = 1.0
self.epsilon_decay_steps = 100000
self.epsilon_min = 0.1
# Model predicts the action values (Q-values)
def _build_model_primary(self):
pass
# Target network
def _build_model_target(self):
self.model_target = clone_model(self.model_primary)
self.model_target.set_weights(self.model_primary.get_weights())
# Update target model
def update_target_network(self):
# Number of layers
n_layers = len(self.model_primary.get_weights())
# Polyak averaging weights
weights = [self.polyak_weight, 1-self.polyak_weight]
# Allocate models
models = [self.model_primary, self.model_target]
avg_model_weights = []
# For each layer get Polyak avg weights
for layer in range(n_layers):
# Get layer weights
layer_weights = np.array([model_.get_weights()[layer] for model_ in models])
# Weighted average of weights for the layer
avg_layer_weights = np.average(layer_weights, axis=0, weights=weights)
avg_model_weights.append(avg_layer_weights)
# Update target model
self.model_target = clone_model(self.model_primary)
self.model_target.set_weights(avg_model_weights)
def _initialise_decay(self):
self.epsilon = deepcopy(self.epsilon_start)
self.lambda_ = -1*np.log(self.epsilon_min)/(self.epsilon_decay_steps)
# Story in memory
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
self.memory_size = self.memory_size+1
# Epsilon greedy or Boltzmann action
def act(self, state):
if self.policy_method.lower()=='epsilon-greedy':
# Random action
if np.random.rand() <= self.epsilon:
action = random.sample(list(np.arange(self.action_size)), 1)[0]
# Best action w.r.t. q-values
else:
act_values = self.model_primary.predict(state)
action = np.nanargmax(act_values[0])
# Decay epsilon (exponential)
if self.epsilon>=self.epsilon_min:
self.epsilon = max(self.epsilon * np.exp(-1*self.lambda_), self.epsilon_min)
# Return action
return action
elif self.policy_method.lower()=='boltzmann':
act_values = self.model_primary.predict(state)[0]
# Softmax
softmax_val = softmax(act_values)
# softmax_val = np.around(softmax_val, 3)
try:
random_choice = np.random.choice(np.arange(len(softmax_val)), p=softmax_val)
return random_choice
except ValueError as e:
# print(e, '\n', softmax_val)
softmax_val = np.array(softmax_val) * (1./ np.array(softmax_val).sum())
random_choice = np.random.choice(np.arange(len(softmax_val)), p=softmax_val)
return random_choice
# Replay memory
def replay(self, batch_size):
random_idx = np.random.choice(range(len(self.memory)), size=batch_size, replace=True)
# minibatch = random.sample(self.memory, batch_size) + list(self.transition)
minibatch = [self.memory[idx] for idx in random_idx] + list(self.memory)[-20000:-20000+self.train_interval]
states, q_valuess = [], []
for state, action, reward, next_state, done in minibatch:
q_update = reward
if not done:
best_action = np.argmax(self.model_primary.predict(next_state)[0])
q_update = (reward + self.gamma * self.model_target.predict(next_state)[0][best_action])
q_values = self.model_primary.predict(state)
q_values[0][action] = q_update
states.append(state)
q_valuess.append(q_values)
self.model_primary.fit(np.reshape(np.array(states), [self.train_interval+self.batch_size,self.state_size[0],self.state_size[1],self.state_size[2],1]), np.reshape(np.array(q_valuess), [self.train_interval+self.batch_size, self.action_size]), epochs=1, verbose=0, use_multiprocessing=True)
# Replay memory
def fast_replay(self, batch_size):
random_idx = np.random.choice(range(len(self.memory)), size=batch_size, replace=True)
# minibatch = random.sample(self.memory, batch_size) + list(self.transition)
minibatch = [self.memory[idx] for idx in random_idx]# + list(self.memory)[-20000:-20000+self.train_interval]
states, q_valuess = [], []
for state, action, reward, next_state, done in minibatch:
q_update = reward
if not done:
best_action = np.argmax(self.model_primary.predict(next_state)[0])
q_update = (reward + self.gamma * self.model_target.predict(next_state)[0][best_action])
q_values = self.model_primary.predict(state)
q_values[0][action] = q_update
states.append(state)
q_valuess.append(q_values)
self.model_primary.fit(np.reshape(np.array(states), [self.batch_size,self.state_size[0],self.state_size[1],self.state_size[2],1]), np.reshape(np.array(q_valuess), [self.batch_size, self.action_size]), epochs=1, verbose=0, use_multiprocessing=True)
# Load
def load(self, name):
self.model_primary.load_weights(name)
# Save
def save(self, name):
self.model_primary.save_weights(name)
| 0 | 0 | 0 | 7,160 | 0 | 0 | 0 | 425 | 670 |
319df75b5ec5c80ff285a5dd8607c14cab95ac51 | 351 | py | Python | examples/fastapi_integration/src/fastapi_integration/congratulations/role_checking.py | maximsakhno/galo-ioc | d300cc0e63e6ad375b7d2e75ac2b2e2fda30da4f | ["MIT"] | 9 | 2022-01-16T11:45:00.000Z | 2022-03-23T07:42:24.000Z | examples/fastapi_integration/src/fastapi_integration/congratulations/role_checking.py | maximsakhno/galo-ioc | d300cc0e63e6ad375b7d2e75ac2b2e2fda30da4f | ["MIT"] | 2 | 2022-01-16T12:03:14.000Z | 2022-01-16T12:11:27.000Z | examples/fastapi_integration/src/fastapi_integration/congratulations/role_checking.py | maximsakhno/galo-ioc | d300cc0e63e6ad375b7d2e75ac2b2e2fda30da4f | ["MIT"] | null | null | null |
__all__ = [
"load",
]
| 27 | 87 | 0.777778 |
from fastapi_integration.current_user_resolvers.role_checkers import RoleCheckerFactory
from galo_ioc import get_factory
__all__ = [
"load",
]
def load() -> None:
role_checker_factory = get_factory(RoleCheckerFactory)
role_checker = role_checker_factory()
role_checker.register_roles_for_route("POST", "/happy_birthday", ["admin"])
| 0 | 0 | 0 | 0 | 0 | 179 | 0 | 77 | 67 |
12c32d59d3ae193352b3e3043ee04147f412d795 | 2,783 | py | Python | src/build/android/pylib/instrumentation/test_package.py | bopopescu/MQUIC | 703e944ec981366cfd2528943b1def2c72b7e49d | ["MIT"] | 1 | 2018-01-02T15:42:08.000Z | 2018-01-02T15:42:08.000Z | src/build/android/pylib/instrumentation/test_package.py | bopopescu/MQUIC | 703e944ec981366cfd2528943b1def2c72b7e49d | ["MIT"] | null | null | null | src/build/android/pylib/instrumentation/test_package.py | bopopescu/MQUIC | 703e944ec981366cfd2528943b1def2c72b7e49d | ["MIT"] | 1 | 2020-07-25T02:05:49.000Z | 2020-07-25T02:05:49.000Z |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class representing instrumentation test apk and jar."""
| 35.679487 | 80 | 0.740927 |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Class representing instrumentation test apk and jar."""
import os
from devil.android import apk_helper
from pylib.instrumentation import test_jar
from pylib.local.device import local_device_test_run
class TestPackage(test_jar.TestJar):
def __init__(self, apk_path, jar_path, test_support_apk_path,
additional_apks=None, apk_under_test=None,
test_apk_incremental_install_script=None,
apk_under_test_incremental_install_script=None):
test_jar.TestJar.__init__(self, jar_path)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
self._additional_apks = additional_apks or []
self._apk_name = os.path.splitext(os.path.basename(apk_path))[0]
if apk_under_test:
self._apk_under_test = apk_helper.ApkHelper(apk_under_test)
else:
self._apk_under_test = None
self._test_apk = apk_helper.ApkHelper(apk_path)
self._test_support_apk_path = test_support_apk_path
self._test_apk_incremental_install_script = (
test_apk_incremental_install_script)
self._apk_under_test_incremental_install_script = (
apk_under_test_incremental_install_script)
def GetApkPath(self):
"""Returns the absolute path to the APK."""
return self._test_apk.path
def GetApkUnderTest(self):
"""Returns an ApkHelper instance for the apk under test.
Note that --apk-under-test is not required, so this can be None.
"""
return self._apk_under_test
def GetApkName(self):
"""Returns the name of the apk without the suffix."""
return self._apk_name
def GetPackageName(self):
"""Returns the package name of this APK."""
return self._test_apk.GetPackageName()
def GetTestApk(self):
"""Returns an ApkHelper instance for the test apk."""
return self._test_apk
# Override.
def Install(self, device):
if self._test_apk_incremental_install_script:
local_device_test_run.IncrementalInstall(device, self._test_apk,
self._test_apk_incremental_install_script)
else:
device.Install(self._test_apk)
if self._apk_under_test_incremental_install_script:
local_device_test_run.IncrementalInstall(device,
self._apk_under_test, self._apk_under_test_incremental_install_script)
elif self._apk_under_test:
device.Install(self._apk_under_test)
if (self._test_support_apk_path and
os.path.exists(self._test_support_apk_path)):
device.Install(self._test_support_apk_path)
for apk in (a for a in self._additional_apks if os.path.exists(a)):
device.Install(apk)
| 0 | 0 | 0 | 2,388 | 0 | 0 | 0 | 55 | 113 |
7310a4fa6daa6cfe1b119f1478c9d97c6d3e9123 | 1,303 | py | Python | cliboa/test/util/test_cache.py | chiru1221/cliboa | 0aad84f237b7c0d8a5ae0cbd27b9d70f97acbee1 | ["MIT"] | null | null | null | cliboa/test/util/test_cache.py | chiru1221/cliboa | 0aad84f237b7c0d8a5ae0cbd27b9d70f97acbee1 | ["MIT"] | null | null | null | cliboa/test/util/test_cache.py | chiru1221/cliboa | 0aad84f237b7c0d8a5ae0cbd27b9d70f97acbee1 | ["MIT"] | 1 | 2020-12-20T10:59:16.000Z | 2020-12-20T10:59:16.000Z |
#
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
| 37.228571 | 86 | 0.72218 |
#
# Copyright 2019 BrainPad Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
import os
from cliboa.util.cache import StorageIO
class TestStorageIO(object):
def setup_method(self, method):
self.__tmp_valid_cache_file = "/tmp/cliboa_cache_" + str(os.getpid()) + ".tmp"
self.__tmp_invalid_cache_file = "/tmp/cliboa_cache.tmp"
if os.path.exists(self.__tmp_valid_cache_file):
os.remove(self.__tmp_valid_cache_file)
def test_save_ok(self):
s = StorageIO()
s.save(["spam"])
assert os.path.exists(self.__tmp_valid_cache_file) is True
def test_save_ng(self):
s = StorageIO()
s.save("spam")
assert os.path.exists(self.__tmp_invalid_cache_file) is False
| 0 | 0 | 0 | 592 | 0 | 0 | 0 | 6 | 68 |
2033b6dddfb77b16a6c23592bad60a247462cec3 | 7,719 | py | Python | src/analysis_diagrams.py | seedatnabeel/Data-Imputation-Uncertainty | ffde47089546702b42045a92f9796bc1b5b7a662 | ["Apache-2.0"] | null | null | null | src/analysis_diagrams.py | seedatnabeel/Data-Imputation-Uncertainty | ffde47089546702b42045a92f9796bc1b5b7a662 | ["Apache-2.0"] | null | null | null | src/analysis_diagrams.py | seedatnabeel/Data-Imputation-Uncertainty | ffde47089546702b42045a92f9796bc1b5b7a662 | ["Apache-2.0"] | null | null | null |
from utils import myrmse
from sklearn.metrics import (accuracy_score, roc_auc_score, mean_squared_error)
import numpy as np
import random
import matplotlib.pyplot as plt
def performance_vs_confidence(
original_data,
imp_data,
missing_data,
testY,
test_idx,
total_uncertainty,
coeff_variation,
clf=None,
):
"""
Computes the performance vs confidence (i.e exclusions)
Args:
analysis_scores (dict): dict of different analysis scores
"""
df_mis = missing_data
testX = original_data
percents = np.linspace(0.01, 0.9, 10)
amounts = percents * testX.shape[0]
# sort based on variance
uncert = np.argsort(total_uncertainty)
# sort based on CV
cv_uncert = np.argsort(coeff_variation)[::-1]
uncert_rmses_retention = []
cv_rmses_retention = []
random_rmses_retention = []
y_score_retention = []
auc_retention = []
gt_y = []
acc_scores = []
# apply mask
true = testX[~missing_data.astype(bool)]
preds = imp_data[~missing_data.astype(bool)]
# oracle error
errors = np.abs(preds - true)
# sort based on error - oracle
uncert_oracle = np.argsort(errors)
rmse_oracle = []
for count, amount in enumerate(amounts):
idx = int(amount)
# Calculations and exclusions based on variance
excl = uncert[:-idx]
ori_data = testX[excl, :]
imputed_data = imp_data[excl, :]
data_m = np.array(df_mis != df_mis)[excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
uncert_rmses_retention.append(rmse)
# Calculations for oracle
if count > 0:
excl_oracle = uncert_oracle[: -int(amount)]
rmseval = mean_squared_error(true[excl_oracle], preds[excl_oracle])
rmse_oracle.append(rmseval)
else:
rmse_oracle.append(rmse)
excl_oracle = uncert_oracle[: -int(amount)]
rmseval = mean_squared_error(true[excl_oracle], preds[excl_oracle])
rmse_oracle.append(rmseval)
# if a classifier is specified apply the sortings for diff acc and auc
if clf:
y_preds = clf.predict(imputed_data[:, 0:-1])
y_scores = clf.predict_proba(imputed_data[:, 0:-1])[:, 1]
if len(np.unique(testY)) == 2:
auc_retention.append(
roc_auc_score(testY[excl], y_scores, multi_class="ovr")
)
y_score_retention.append(y_scores)
gt_y.append(testY[excl])
acc_scores.append(accuracy_score(testY[excl], y_preds))
# Calculations and exclusions based on CV
excl = cv_uncert[:-idx]
ori_data = testX[excl, :]
imputed_data = imp_data[excl, :]
data_m = np.array(df_mis != df_mis)[excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
cv_rmses_retention.append(rmse)
# Calculations and exclusions based on random
rand_excl = random.sample(range(len(uncert)), idx)
ori_data = testX[rand_excl, :]
imputed_data = imp_data[rand_excl, :]
data_m = np.array(df_mis != df_mis)[rand_excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
random_rmses_retention.append(rmse)
return (
uncert_rmses_retention,
cv_rmses_retention,
random_rmses_retention,
y_score_retention,
auc_retention,
gt_y,
acc_scores,
rmse_oracle[:-1],
)
def plot_rmse_conf_curve(analysis_scores, dataset, filename):
"""
Plots the RMSE Confidence-Exclusion curve
"""
plt.style.reload_library()
plt.style.use(["science", "ieee", "no-latex", "notebook", "grid", "vibrant"])
mean_uncert = np.mean(analysis_scores["uncert_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["uncert_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="Variance", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["random_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["random_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="Random", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["cv_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["cv_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="CV", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["rmse_oracle"], axis=0)
std_uncert = np.std(analysis_scores["rmse_oracle"], axis=0)
plt.plot(np.linspace(0, 1, 11), mean_uncert, label="Oracle", marker="o")
plt.fill_between(
np.linspace(0, 1, 11),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
plt.xlabel("Proportion Data Excluded")
plt.ylabel("RMSE")
plt.legend()
plt.savefig(f"data/results/{dataset}/{filename}.png")
def plot_reliability_diagram(uncertainty_list, rmses, dataset, filename):
"""
Plots the Reliability diagram
"""
f, ax = plt.subplots(figsize=(6, 6))
ax.scatter(uncertainty_list, rmses, c=".3")
(diag_line,) = ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
plt.xlabel("Uncertainty")
plt.ylabel("RMSE")
plt.savefig(f"data/results/{dataset}/reliability_{filename}.png")
def plot_auc_sparsification(
aoc_uncert_lists,
aoc_uncert_rand_lists,
aoc_uncert_cv_lists,
auc_uncerts,
auc_rands,
auc_cvs,
dataset,
filename,
):
"""
Plots the Sparsification curve
"""
plt.figure()
xx = np.linspace(0, 1, 10)
mean_uncert = np.mean(aoc_uncert_lists, axis=0)
std_uncert = np.std(aoc_uncert_lists, axis=0)
auc_label = f"Variances Scores AUC: {round(np.mean(auc_uncerts),2)}+-{round(np.std(auc_uncerts),2)}"
plt.plot(np.linspace(0, 1, 10), mean_uncert, marker="o", label=auc_label)
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(aoc_uncert_rand_lists, axis=0)
std_uncert = np.std(aoc_uncert_rand_lists, axis=0)
auc_label = f"Random Scores AUC: {round(np.mean(auc_rands),2)}+-{round(np.std(auc_rands),2)}"
plt.plot(np.linspace(0, 1, 10), mean_uncert, marker="o", label=auc_label)
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(aoc_uncert_cv_lists, axis=0)
std_uncert = np.std(aoc_uncert_cv_lists, axis=0)
auc_label = (
f"Coeff Variation AUC: {round(np.mean(auc_cvs),2)}+-{round(np.std(auc_cvs),2)}"
)
plt.plot(np.linspace(0, 1, 10), mean_uncert, marker="o", label=auc_label)
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
plt.legend()
plt.title("Area under the sparsification curve")
plt.savefig(f"data/results/{dataset}/{filename}.png")
| 29.018797 | 104 | 0.62832 |
from utils import normdata, myrmse
from sklearn.metrics import (
accuracy_score,
roc_curve,
auc,
roc_auc_score,
mean_squared_error,
)
import numpy as np
import random
import matplotlib.pyplot as plt
def performance_vs_confidence(
original_data,
imp_data,
missing_data,
testY,
test_idx,
total_uncertainty,
coeff_variation,
clf=None,
):
"""
Computes the performance vs confidence (i.e exclusions)
Args:
analysis_scores (dict): dict of different analysis scores
"""
df_mis = missing_data
testX = original_data
percents = np.linspace(0.01, 0.9, 10)
amounts = percents * testX.shape[0]
# sort based on variance
uncert = np.argsort(total_uncertainty)
# sort based on CV
cv_uncert = np.argsort(coeff_variation)[::-1]
uncert_rmses_retention = []
cv_rmses_retention = []
random_rmses_retention = []
y_score_retention = []
auc_retention = []
gt_y = []
acc_scores = []
# apply mask
true = testX[~missing_data.astype(bool)]
preds = imp_data[~missing_data.astype(bool)]
# oracle error
errors = np.abs(preds - true)
# sort based on error - oracle
uncert_oracle = np.argsort(errors)
rmse_oracle = []
for count, amount in enumerate(amounts):
idx = int(amount)
# Calculations and exclusions based on variance
excl = uncert[:-idx]
ori_data = testX[excl, :]
imputed_data = imp_data[excl, :]
data_m = np.array(df_mis != df_mis)[excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
uncert_rmses_retention.append(rmse)
# Calculations for oracle
if count > 0:
excl_oracle = uncert_oracle[: -int(amount)]
rmseval = mean_squared_error(true[excl_oracle], preds[excl_oracle])
rmse_oracle.append(rmseval)
else:
rmse_oracle.append(rmse)
excl_oracle = uncert_oracle[: -int(amount)]
rmseval = mean_squared_error(true[excl_oracle], preds[excl_oracle])
rmse_oracle.append(rmseval)
# if a classifier is specified apply the sortings for diff acc and auc
if clf:
y_preds = clf.predict(imputed_data[:, 0:-1])
y_scores = clf.predict_proba(imputed_data[:, 0:-1])[:, 1]
if len(np.unique(testY)) == 2:
auc_retention.append(
roc_auc_score(testY[excl], y_scores, multi_class="ovr")
)
y_score_retention.append(y_scores)
gt_y.append(testY[excl])
acc_scores.append(accuracy_score(testY[excl], y_preds))
# Calculations and exclusions based on CV
excl = cv_uncert[:-idx]
ori_data = testX[excl, :]
imputed_data = imp_data[excl, :]
data_m = np.array(df_mis != df_mis)[excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
cv_rmses_retention.append(rmse)
# Calculations and exclusions based on random
rand_excl = random.sample(range(len(uncert)), idx)
ori_data = testX[rand_excl, :]
imputed_data = imp_data[rand_excl, :]
data_m = np.array(df_mis != df_mis)[rand_excl, :]
rmse = myrmse(
actual=ori_data, predicted=imputed_data, mask=~data_m.astype(bool)
)
random_rmses_retention.append(rmse)
return (
uncert_rmses_retention,
cv_rmses_retention,
random_rmses_retention,
y_score_retention,
auc_retention,
gt_y,
acc_scores,
rmse_oracle[:-1],
)
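# Illustrative sketch (assumption, not part of the original file): the core
# confidence-exclusion idea used above, reduced to its essentials -- rank rows by an
# uncertainty score and recompute RMSE while excluding the most uncertain fraction.
def _sketch_rmse_retention(errors, uncertainty, fractions=(0.1, 0.3, 0.5)):
    """Hypothetical helper: RMSE over the retained (most confident) rows."""
    order = np.argsort(uncertainty)  # most confident (lowest uncertainty) first
    rmses = []
    for frac in fractions:
        keep = order[: int(len(order) * (1 - frac))]
        rmses.append(float(np.sqrt(np.mean(np.asarray(errors)[keep] ** 2))))
    return rmses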
def plot_rmse_conf_curve(analysis_scores, dataset, filename):
"""
Plots the RMSE Confidence-Exclusion curve
"""
plt.style.reload_library()
plt.style.use(["science", "ieee", "no-latex", "notebook", "grid", "vibrant"])
mean_uncert = np.mean(analysis_scores["uncert_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["uncert_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="Variance", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["random_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["random_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="Random", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["cv_rmses_retention"], axis=0)
std_uncert = np.std(analysis_scores["cv_rmses_retention"], axis=0)
plt.plot(np.linspace(0, 1, 10), mean_uncert, label="CV", marker="o")
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(analysis_scores["rmse_oracle"], axis=0)
std_uncert = np.std(analysis_scores["rmse_oracle"], axis=0)
plt.plot(np.linspace(0, 1, 11), mean_uncert, label="Oracle", marker="o")
plt.fill_between(
np.linspace(0, 1, 11),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
plt.xlabel("Proportion Data Excluded")
plt.ylabel("RMSE")
plt.legend()
plt.savefig(f"data/results/{dataset}/{filename}.png")
def plot_reliability_diagram(uncertainty_list, rmses, dataset, filename):
"""
Plots the Reliability diagram
"""
f, ax = plt.subplots(figsize=(6, 6))
ax.scatter(uncertainty_list, rmses, c=".3")
(diag_line,) = ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
plt.xlabel("Uncertainty")
plt.ylabel("RMSE")
plt.savefig(f"data/results/{dataset}/reliability_{filename}.png")
def plot_auc_sparsification(
aoc_uncert_lists,
aoc_uncert_rand_lists,
aoc_uncert_cv_lists,
auc_uncerts,
auc_rands,
auc_cvs,
dataset,
filename,
):
"""
Plots the Sparsification curve
"""
plt.figure()
xx = np.linspace(0, 1, 10)
mean_uncert = np.mean(aoc_uncert_lists, axis=0)
std_uncert = np.std(aoc_uncert_lists, axis=0)
auc_label = f"Variances Scores AUC: {round(np.mean(auc_uncerts),2)}+-{round(np.std(auc_uncerts),2)}"
plt.plot(np.linspace(0, 1, 10), mean_uncert, marker="o", label=auc_label)
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(aoc_uncert_rand_lists, axis=0)
std_uncert = np.std(aoc_uncert_rand_lists, axis=0)
auc_label = f"Random Scores AUC: {round(np.mean(auc_rands),2)}+-{round(np.std(auc_rands),2)}"
plt.plot(np.linspace(0, 1, 10), mean_uncert, marker="o", label=auc_label)
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
mean_uncert = np.mean(aoc_uncert_cv_lists, axis=0)
std_uncert = np.std(aoc_uncert_cv_lists, axis=0)
auc_label = (
f"Coeff Variation AUC: {round(np.mean(auc_cvs),2)}+-{round(np.std(auc_cvs),2)}"
)
plt.plot(np.linspace(0, 1, 10), mean_uncert, marker="o", label=auc_label)
plt.fill_between(
np.linspace(0, 1, 10),
mean_uncert - std_uncert,
mean_uncert + std_uncert,
alpha=0.25,
)
plt.legend()
plt.title("Area under the sparsification curve")
plt.savefig(f"data/results/{dataset}/{filename}.png")
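# Illustrative sketch (assumption, not in the original file): the "area under the
# sparsification curve" values plotted above can be approximated from the
# per-fraction error values with a simple trapezoidal rule.
def _sketch_sparsification_auc(errors_per_fraction):
    xs = np.linspace(0, 1, len(errors_per_fraction))
    return float(np.trapz(errors_per_fraction, xs))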
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 49
| 0
|
7be1a277da21b142d3eccde4f5166dabe54f8d13
| 9,710
|
py
|
Python
|
bblogger/deserialize.py
|
lohmega/jamble
|
ca7d2788c584cfb1c86ae766d06f6a9d57a60974
|
[
"Apache-2.0"
] | null | null | null |
bblogger/deserialize.py
|
lohmega/jamble
|
ca7d2788c584cfb1c86ae766d06f6a9d57a60974
|
[
"Apache-2.0"
] | 3
|
2020-05-27T13:00:45.000Z
|
2020-09-29T12:42:23.000Z
|
bblogger/deserialize.py
|
lohmega/jamble
|
ca7d2788c584cfb1c86ae766d06f6a9d57a60974
|
[
"Apache-2.0"
] | null | null | null |
import logging
# not needed in python >= 3.6? as default dict keeps order
try:
except ImportError:
# not in debian stretch dpkg/apt version of the pb lib
from bblogger.defs import BlueBerryLogEntryFields
logger = logging.getLogger(__name__)
TXT_COL_WIDTH = 10
_COLNAME_TO_FLD = {}
_COLNAME_TO_UNITS = {}
_COLNAME_TO_TXTFMT = {}
_PBNAME_TO_FLD = {}
for x in BlueBerryLogEntryFields:
fld = x.value
_PBNAME_TO_FLD[fld.pbname] = fld
for colname in fld.colnames:
_COLNAME_TO_FLD[colname] = fld
_COLNAME_TO_UNITS[colname] = fld.unit
_COLNAME_TO_TXTFMT[colname]= fld.txtfmt
| 29.603659
| 89
| 0.564367
|
import logging
import csv
import json
from platform import system
from sys import stderr, stdout
# not needed in python >= 3.6? as default dict keeps order
from collections import OrderedDict, deque
try:
from google.protobuf.json_format import MessageToDict
except ImportError:
# not in debian stretch dpkg/apt version of the pb lib
from google.protobuf.json_format import MessageToJson
def MessageToDict(pb):
# super inefficient - yes!
tmpjs = MessageToJson(pb)
return json.loads(tmpjs)
from google.protobuf.message import DecodeError
from bblogger import bb_log_entry_pb2
from bblogger.defs import BlueBerryLogEntryFields
from bblogger.outputwriter import mk_OutputWriter
logger = logging.getLogger(__name__)
TXT_COL_WIDTH = 10
_COLNAME_TO_FLD = {}
_COLNAME_TO_UNITS = {}
_COLNAME_TO_TXTFMT = {}
_PBNAME_TO_FLD = {}
for x in BlueBerryLogEntryFields:
fld = x.value
_PBNAME_TO_FLD[fld.pbname] = fld
for colname in fld.colnames:
_COLNAME_TO_FLD[colname] = fld
_COLNAME_TO_UNITS[colname] = fld.unit
_COLNAME_TO_TXTFMT[colname]= fld.txtfmt
class _PacketBuffer:
"""
    FIFO buffer preserving BLE packets. Can handle packets out of order and
    drop individual packets.
    'pkt' - bluetooth packet (chunk of bytes)
"""
def __init__(self):
self._q = deque(maxlen=128)
def write(self, data):
if len(self._q) >= self._q.maxlen:
raise RuntimeError("buf to small")
self._q.append(data)
def peek(self, size, pkt_order=None):
""" returns a bytearray of len size or less """
res = bytearray()
if not size:
return res
if pkt_order is None:
pkt_order = range(0, len(self._q))
for i in pkt_order:
remains = size - len(res)
if remains <= 0:
break
try:
pkt = self._q[i]
except IndexError:
break
# remains could be out of range (no error raised)
chunk = pkt[0 : remains]
res.extend(chunk)
return res
def getc(self):
""" read a single char/byte """
try:
c = self._q[0][0]
except IndexError:
raise EOFError()
self._q[0] = self._q[0][1:] # pop left
return int(c)
def seek_fwd(self, size, pkt_order=None):
""" Move "read cursor" forward N bytes """
if not size:
return
if pkt_order is None:
pkt_order = range(0, len(self._q))
remains = size
to_del = []
for i in pkt_order:
try:
pkt = self._q[i]
except IndexError:
break
if remains < len(pkt):
self._q[i] = pkt[remains:]
remains = 0
break
to_del.append(i)
remains -= len(pkt)
if remains <= 0:
break
if remains > 0:
raise EOFError()
# reverse sort to preserve index while deleting
for i in sorted(to_del, reverse=True):
del self._q[i]
def drop_pkt(self, n=0):
r = self._q[n]
del self._q[n]
return r
class BlueBerryDeserializer:
"""
reads a stream of protobuf data with the format
<len><protobuf message of size len><len>,...
    abbreviations and definitions used:
    'msg' - bytes or pb object for a complete message
    'pkt' - bluetooth packet (chunk of bytes)
"""
def __init__(self, outfile=stdout, fmt="txt", raw=False, msg_hist_len=32):
self._pb = bb_log_entry_pb2.bb_log_entry() # protobuf message
self._raw = raw
self._msg_hist = deque(maxlen=msg_hist_len)
self._msg_count = 0
self._pkt_buf = _PacketBuffer()
self._msg_size = None
self._fail_count = 0
self._debug_dump = False
self._out = mk_OutputWriter(
outfile=outfile,
fmt=fmt,
colwidth=10,
units=_COLNAME_TO_UNITS,
formats=_COLNAME_TO_TXTFMT)
@property
def nentries(self):
return self._msg_count
def _MessageToOrderedDict(self, pb, columnize=False):
"""
mimic name from protobuf lib.
assumption: all values can be converted to float or list of floats.
        if the protobuf format changes, the built-in MessageToDict() function
        can be used. Requires Python >= 3.7, where the default dict remembers
        insertion order (already an implementation detail in CPython 3.6).
"""
od = OrderedDict()
for descr in pb.DESCRIPTOR.fields:
fld = _PBNAME_TO_FLD[descr.name]
val = getattr(pb, descr.name)
if descr.label == descr.LABEL_REPEATED:
# HasField() do not work on repeated, use len instead. hack
if not len(val):
continue
if columnize:
for i in range(0, len(val)):
name = fld.colnames[i]
od[name] = val[i]
else:
name = fld.colnames[0]
od[name] = list(val) # [x for x in val]
else:
if not pb.HasField(descr.name):
continue
name = fld.colnames[0]
od[name] = val
return od
def _print_msg_bytes(self, msg_count, msg_size, msg_bytes, err_str=""):
if isinstance(msg_bytes, (bytes, bytearray)):
msg_bytes = msg_bytes.hex()
msg_count = "{:04x}".format(msg_count)
msg_size = "{:02x}".format(msg_size)
err_str = "'{}'".format(err_str)
print(msg_count, msg_size, msg_bytes, err_str, sep=",", file=stderr)
def _dump_msg_hist(self, max_len=4):
print("==== MSG HISTORY DUMP (count, size, bytes, err) ====", file=stderr)
for entry in self._msg_hist:
msg_count, msg_size, msg_bytes, err_str = entry
self._print_msg_bytes(msg_count, msg_size, msg_bytes, err_str)
msg_bytes = ','.join([ba.hex() for ba in self._pkt_buf._q])
msg_bytes = "({})".format(msg_bytes)
err_str = "Failed pakets"
self._print_msg_bytes(self._msg_count, self._msg_size, msg_bytes, err_str)
print("==== END: MSG HISTORY ====", file=stderr)
def _is_end_of_log_msg(self, odm):
""" end of log "EOF" is a empty messagge with only the required
timestamp field """
if len(odm) == 1:
if "TS" not in odm:
logger.warning("unexpected last msg keys {}".format(odm.keys()))
return True
else:
return False
def parse_msg_bytes(self, msg_bytes):
self._pb.Clear()
# ignore E1101: Instance of 'bb_log_entry' has no 'FromString' member (no-member)
pb_msg = self._pb.FromString(msg_bytes) # pylint: disable=E1101
odmsg = self._MessageToOrderedDict(pb_msg, columnize=True)
done = self._is_end_of_log_msg(odmsg)
if done:
logger.debug("End of log msg received")
return done
# convert to tuple as odict_keys object rejected by json module etc
keys = tuple(odmsg.keys())
if self._raw:
vals = tuple(odmsg.values())
else:
vals = [_COLNAME_TO_FLD[k].tounit(v) for k, v in odmsg.items()]
assert len(keys) == len(vals)
self._out.write_sensordata(keys, vals)
return done
def _parse_pkt_buf(self, pkt_order=None):
""" parse data previously added to pkt_buf """
if self._msg_size is None:
self._msg_size = self._pkt_buf.getc() # raises EOFError if no data
if self._msg_size == 0:
raise RuntimeError("msg_size is zero. Where to start?")
msg_bytes = self._pkt_buf.peek(self._msg_size, pkt_order)
if len(msg_bytes) < self._msg_size:
raise EOFError("Need more data")
if self._debug_dump:
self._print_msg_bytes(self._msg_count, self._msg_size, msg_bytes)
done = False
else:
done = self.parse_msg_bytes(msg_bytes)
entry = (self._msg_count, self._msg_size, msg_bytes, "")
self._msg_hist.append(entry)
self._msg_count += 1
# reset
self._pkt_buf.seek_fwd(self._msg_size, pkt_order)
self._msg_size = None
return done # might have more msg in pkt_buf
def putb(self, chunk):
if not isinstance(chunk, bytearray):
chunk = bytearray(chunk)
self._pkt_buf.write(chunk)
while True:
try:
done = self._parse_pkt_buf()
if self._fail_count:
self._fail_count = 0
logger.debug("Successfully recovered")
if done:
return True
except EOFError as e:
return False # Need more data
except DecodeError as e:
self._fail_count += 1
if self._fail_count < 3:
pkt = self._pkt_buf.drop_pkt(0)
logger.error("Dropping invalid pkt '%s' N=%d, msg_size=%d",
pkt.hex(), self._msg_count, self._msg_size)
self._msg_size = None
self._msg_count += 1
continue
logger.error("Failed to parse msg N=%d. '%s'", self._msg_count, str(e))
self._dump_msg_hist()
raise e
return False # try recover on next call
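# Illustrative usage sketch (assumed; only the classes defined above are referenced,
# `ble_notifications` is a hypothetical iterable of notification payloads):
#
#   bbd = BlueBerryDeserializer(fmt="txt")
#   for chunk in ble_notifications:
#       if bbd.putb(chunk):   # True once the end-of-log message is seen
#           break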
| 0
| 43
| 0
| 8,493
| 0
| 103
| 0
| 149
| 303
|
c448d924e39cdd3b511d47b105bb773ffd02d4fe
| 121
|
py
|
Python
|
error_handlers/__init__.py
|
NikitolProject/idm_lp
|
a1eeb1c12e1918a715beb63c3bee97b7e1404801
|
[
"MIT"
] | 2
|
2020-11-04T15:16:08.000Z
|
2020-11-04T15:55:29.000Z
|
error_handlers/__init__.py
|
Dlol0ne/idm_lp
|
3a5563024a3062d74b8c47259c4241554f073b39
|
[
"MIT"
] | null | null | null |
error_handlers/__init__.py
|
Dlol0ne/idm_lp
|
3a5563024a3062d74b8c47259c4241554f073b39
|
[
"MIT"
] | 1
|
2021-03-04T03:00:06.000Z
|
2021-03-04T03:00:06.000Z
|
from error_handlers import captha
from error_handlers import rps
error_handlers_bp = (
rps.user,
captha.user,
)
| 15.125
| 33
| 0.752066
|
from error_handlers import captha
from error_handlers import rps
error_handlers_bp = (
rps.user,
captha.user,
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4a43febd8c2f697fbbc2dd319b189c165f503979
| 5,422
|
py
|
Python
|
SimTools/test_RNA_describe.py
|
ShepherdCode/Soars2021
|
ab4f304eaa09e52d260152397a6c53d7a05457da
|
[
"MIT"
] | 1
|
2021-08-16T14:49:04.000Z
|
2021-08-16T14:49:04.000Z
|
SimTools/test_RNA_describe.py
|
ShepherdCode/Soars2021
|
ab4f304eaa09e52d260152397a6c53d7a05457da
|
[
"MIT"
] | null | null | null |
SimTools/test_RNA_describe.py
|
ShepherdCode/Soars2021
|
ab4f304eaa09e52d260152397a6c53d7a05457da
|
[
"MIT"
] | null | null | null |
# The following unix command will run all tests.
# $ pytest
# The -v option will list each test and show progress.
# $ pytest -v
# By default, pytest captures stdout unless the tests fail.
# Use this option to see the output of print() statements.
# $ pytest --capture=tee-sys
| 42.359375
| 83
| 0.610107
|
import pytest
from RNA_describe import RNA_describer
from RNA_describe import ORF_counter
from RNA_describe import ORF_RE
# The following unix command will run all tests.
# $ pytest
# The -v option will list each test and show progress.
# $ pytest -v
# By default, pytest captures stdout unless the tests fail.
# Use this option to see the output of print() statements.
# $ pytest --capture=tee-sys
class Test_ORF_RE():
def test_get_all_orfs(self):
ore = ORF_RE()
rna = 'CCCATGAAATGACCTGATGCCCTGACCC'
orfs = ore.get_all_orfs(rna)
ans = ['ATGAAATGA', 'ATGACCTGA', 'ATGCCCTGA']
msg="Overlapping ORFs"
assert orfs==ans,msg
    def test_get_three_lengths(self):
ore = ORF_RE()
rna = 'ATGCCCTGA'+'ATGCCCCCCTAG'+'CC'
orfs = ore.get_three_lengths(rna)
ans = (9,9,2)
msg="Overlapping ORFs"
assert orfs==ans,msg
class Test_ORF_counter():
def test_three_codon_orf(self):
oc = ORF_counter()
msg= "Detects START CODON STOP"
oc.set_sequence('C'+'ATG'+'CAC'+'TAG'+'C')
assert oc.get_max_orf_len()==6,msg
assert oc.count_maximal_orfs()==1,msg
def test_no_codon_orf(self):
oc = ORF_counter()
msg = "Counts bases ATG thru TAA"
oc.set_sequence('ATG'+'TAA'+'G')
assert oc.get_max_orf_len()==3,msg
oc.set_sequence('A'+'ATG'+'TAA'+'G')
assert oc.get_max_orf_len()==3,msg
oc.set_sequence('CA'+'ATG'+'TAA'+'G')
assert oc.get_max_orf_len()==3,msg
def test_no_start_codon(self):
oc = ORF_counter()
msg = "Detects ATG not found"
oc.set_sequence('TGTAAGC')
assert oc.get_max_orf_len()==0,msg
assert oc.count_maximal_orfs()==0,msg
def test_no_stop_codon(self):
oc = ORF_counter()
msg = "Detects if TAG not found"
oc.set_sequence('ATGTACCTA')
assert oc.get_max_orf_len()==0,msg
assert oc.count_maximal_orfs()==0,msg
def test_three_frames(self):
oc = ORF_counter()
msg = "Counts bases ATG thru TAA in frame 1"
oc.set_sequence('CCC'+'ATG'+'AAA'+'TAA')
assert oc.get_max_orf_len()==6,msg
msg = "Counts bases ATG thru TAG in frame 2"
oc.set_sequence('CC'+'ATG'+'AAA'+'TAG')
assert oc.get_max_orf_len()==6,msg
msg = "Counts bases ATG thru TGA in frame 3"
oc.set_sequence('C'+'ATG'+'AAA'+'TGA')
assert oc.get_max_orf_len()==6,msg
def test_multiple_ORFs(self):
oc = ORF_counter()
msg = "Gets longest of overlapping ORFs in different frames"
oc.set_sequence('ATG'+'AAA'+'TGA'+'AACCC'+'TGA')
assert oc.get_max_orf_len()==9,msg
assert oc.count_maximal_orfs()==2,msg
msg = "Gets longest of consecutive ORFs in same frame"
oc.set_sequence('ATG'+'TGA'+'ATG'+'AAA'+'TGA')
assert oc.get_max_orf_len()==6,msg
assert oc.count_maximal_orfs()==2,msg
def test_contained_ORFs(self):
oc = ORF_counter()
msg = "Recognizes contained ORFs in same frame"
oc.set_sequence('ATG'+'AAA'+'ATG'+'CCC'+'TGA')
assert oc.get_max_orf_len()==12,msg
assert oc.count_maximal_orfs()==1,msg
assert oc.count_contained_orfs()==1,msg
class Test_RNA_describer():
def test_orf_length(self):
rn = RNA_describer()
msg= "Require sequence starts with ATG"
assert rn.get_orf_length('TGATGTGA')==0,msg
msg = "Minimum requirement is start and stop"
assert rn.get_orf_length('ATG'+'TGA')==3,msg
msg = "Start + codon + stop = 3+3=6"
assert rn.get_orf_length('ATG'+'AAA'+'TGA')==6,msg
msg = "polyA tail or any 3'UTR does not count"
assert rn.get_orf_length('ATG'+'AAA'+'TGA'+'AAAA')==6,msg
msg = "No in-frame stop? Then no ORF"
assert rn.get_orf_length('ATG'+'AA'+'TGA')==0,msg
def test_longest_orf(self):
rn = RNA_describer()
msg = "Counts bases ATG thru TAA in frame 0"
assert rn.get_longest_orf('ATG'+'TAA'+'G')==(0,3),msg
msg = "Returns (0,0) if ATG not found"
assert rn.get_longest_orf('TGTAAGC')==(0,0),msg
msg = "Returns (0,0) if TAG not found"
assert rn.get_longest_orf('ATGTACCTA')==(0,0),msg
msg = "Counts bases ATG thru TAA in frame 1"
assert rn.get_longest_orf('CCC'+'ATG'+'AAA'+'TAA')==(3,6),msg
msg = "Counts bases ATG thru TAG in frame 2"
assert rn.get_longest_orf('CC'+'ATG'+'AAA'+'TAG')==(2,6),msg
msg = "Counts bases ATG thru TGA in frame 3"
assert rn.get_longest_orf('C'+'ATG'+'AAA'+'TGA')==(1,6),msg
msg = "Gets longest of two ORFs in same frame"
assert rn.get_longest_orf('ATG'+'TGA'+'ATG'+'AAA'+'TGA')==(6,6),msg
msg = "Gets longest of two ORFs in different frames"
assert rn.get_longest_orf('ATG'+'AAA'+'TGA'+'AACCC'+'TGA')==(5,9),msg
def test_orf_lengths(self):
rn = RNA_describer()
msg = "Return list of lengths"
assert rn.get_orf_lengths(['ATG'+'TGA','ATG'+'AAA'+'TGA'])==[3,6],msg
def test_three_lengths(self):
rn = RNA_describer()
msg = "ORF? Return lengths [ (5'UTR,ORF,3'UTR) ]"
assert rn.get_three_lengths(['CAT'+'ATG'+'GGG'+'TGA'+'AAA'])==[(3,6,3)],msg
msg = "No ORF? Return lengths [ (half,0,half) ]"
assert rn.get_three_lengths(['CCC'+'AAA'])==[(3,0,3)],msg
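# Illustrative sketch (not the real RNA_describe implementation, which is not included
# in this file): a minimal regex-based ORF finder that reproduces the behaviour the
# test above expects from ORF_RE.get_all_orfs().
def _sketch_get_all_orfs(rna):
    """Return every ATG..stop substring (length a multiple of 3), allowing overlaps."""
    import re
    pattern = re.compile(r'(?=(ATG(?:[ACGT]{3})*?(?:TAA|TAG|TGA)))')
    return [m.group(1) for m in pattern.finditer(rna)]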
| 0
| 0
| 0
| 4,953
| 0
| 0
| 0
| 34
| 157
|
980eb881b9183a85e8a0d89ebe3875d7adc604ce
| 4,544
|
py
|
Python
|
scripts/gen_negative_agreements.py
|
aistairc/lm_syntax_negative
|
19889a84d6ce32531fe82dfeea7a48df233d7f50
|
[
"MIT"
] | 3
|
2020-05-07T06:58:53.000Z
|
2021-02-19T13:37:57.000Z
|
scripts/gen_negative_agreements.py
|
aistairc/lm_syntax_negative
|
19889a84d6ce32531fe82dfeea7a48df233d7f50
|
[
"MIT"
] | null | null | null |
scripts/gen_negative_agreements.py
|
aistairc/lm_syntax_negative
|
19889a84d6ce32531fe82dfeea7a48df233d7f50
|
[
"MIT"
] | null | null | null |
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate negative examples for LM agreement task.')
parser.add_argument('--source', required=True, type=str)
parser.add_argument('--output', default='verb_negative_examples.txt')
args = parser.parse_args()
run(args)
| 37.553719
| 97
| 0.559419
|
import argparse
import corenlp
import gzip
import inflect
from tqdm import tqdm
def open_f(fn, mode='rt'):
if fn.endswith('.gz'):
return gzip.open(fn, mode)
else:
return open(fn, mode)
class VerbFinder(object):
def __init__(self):
# each position is a pair (idx, simple or not)
# simple means the last word is an (agreed) noun
self.vbz_positions = [] # singular
self.vbp_positions = [] # plural
self.vbz_set = set() # all vbz appearing in the corpus
self.vbp_set = set() # all vbp appearing in the corpus
self.infl = inflect.engine()
def get_converter(self):
conv = {}
for vbz in self.vbz_set:
vbp = self.infl.plural_verb(vbz)
if vbp in self.vbp_set:
conv[vbz] = vbp
conv[vbp] = vbz
return conv
def find_all_verbs(self, fn):
props = {"tokenize.whitespace": "true",
"ssplit.eolonly": "true",
"tokenize.options": "\"normalizeParentheses=true,normalizeOtherBrackets=true\""}
num_lines = sum(1 for line in open(fn, 'r'))
with corenlp.CoreNLPClient(annotators="tokenize ssplit pos".split(),
properties=props) as client, \
                open_f(fn) as source:
# To reduce network overhead we call corenlp on every chunk of 100 sentences.
sents = []
chunk_size = 100
for line in tqdm(source, total=num_lines):
if len(sents) >= chunk_size:
ann = client.annotate('\n'.join(sents))
assert(len(ann.sentence) == chunk_size)
self.record_positions(ann)
sents = []
sents.append(line[:-1])
if sents:
ann = client.annotate('\n'.join(sents))
self.record_positions(ann)
def record_positions(self, annotation):
sents = annotation.sentence
for sent in sents:
poses = [t.pos for t in sent.token]
words = [t.word.lower() for t in sent.token]
def is_singular_noun(p):
return p == 'NN' or p == 'NNP'
def is_plural_noun(p):
return p == 'NNS' or p == 'NNPS'
def is_third_pronoun(w):
return w == 'he' or w == 'she' or w == 'it' or w == 'this'
def is_nonthird_pronoun(w):
return w == 'we' or w == 'they' or w == 'all' or w == 'i' or w == 'you'
def simple_vbz(i):
return i > 0 and (is_singular_noun(poses[i-1]) or is_third_pronoun(words[i-1]))
def simple_vbp(i):
return i > 0 and (is_plural_noun(poses[i-1]) or is_nonthird_pronoun(words[i-1]))
vbz_idx = [(i, simple_vbz(i)) for i, p in enumerate(poses) if p == 'VBZ']
vbp_idx = [(i, simple_vbp(i)) for i, p in enumerate(poses) if p == 'VBP']
self.vbz_positions.append(vbz_idx)
self.vbp_positions.append(vbp_idx)
for idx, simple in vbz_idx: self.vbz_set.add(sent.token[idx].word)
for idx, simple in vbp_idx: self.vbp_set.add(sent.token[idx].word)
def run(args):
verb_finder = VerbFinder()
verb_finder.find_all_verbs(args.source)
conv = verb_finder.get_converter()
vbz_positions = verb_finder.vbz_positions
vbp_positions = verb_finder.vbp_positions
def filter_cands(sent, positions):
items = [(idx, conv.get(sent[idx]), simple) for (idx, simple) in positions]
return [item for item in items if item[1] and item[1] != sent[item[0]]]
with open_f(args.source) as source, open_f(args.output, 'wt') as target:
for i, line in enumerate(source):
sent = line[:-1].split()
vbz = vbz_positions[i]
vbp = vbp_positions[i]
vbz = filter_cands(sent, vbz)
vbp = filter_cands(sent, vbp)
examples = sorted(vbz + vbp, key=lambda x: x[0])
line = '\t'.join("{} {} {}".format(e[0], e[1], e[2]) for e in examples)
target.write('{} {}'.format(i, line))
target.write('\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Generate negative examples for LM agreement task.')
parser.add_argument('--source', required=True, type=str)
parser.add_argument('--output', default='verb_negative_examples.txt')
args = parser.parse_args()
run(args)
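# Illustrative note (inferred from the write-out code above, not from project docs):
# each output line is "<sent_id>" followed by "<idx> <flipped_verb> <is_simple>"
# triples joined by tabs, e.g. "17 4 are True\t9 runs False" for a sentence with
# two candidate verbs.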
| 0
| 0
| 0
| 3,028
| 0
| 1,066
| 0
| -24
| 157
|
50236fdf6467d13205dc115c04971b4092f2ea4f
| 5,125
|
py
|
Python
|
snake.py
|
0Franky/snAIke
|
ddabb04c68e81d21b6ad23454ea2b8d67357aefb
|
[
"MIT"
] | null | null | null |
snake.py
|
0Franky/snAIke
|
ddabb04c68e81d21b6ad23454ea2b8d67357aefb
|
[
"MIT"
] | 1
|
2020-05-16T14:33:39.000Z
|
2020-05-16T14:33:39.000Z
|
snake.py
|
0Franky/ai-battleship
|
ddabb04c68e81d21b6ad23454ea2b8d67357aefb
|
[
"MIT"
] | 1
|
2020-11-08T17:08:10.000Z
|
2020-11-08T17:08:10.000Z
|
# Valentin Mac
# [email protected]
# Developed for fun
# Feel free to use this code as you wish as long as you quote me as author
"""
snake.py
~~~~~~~~~~
This module is for building the snake itself in the snake game
The snake:
- Takes the form of a list, one element per body block (containing its coordinates)
- Has a head pointing to the first block, a direction and also a neural network (brain)
- Has vision given by the map (Map.scan method)
- Is in charge of moving its blocks, aging, growing by adding a block in the right place
and making decisions with the neural net
- Gives its fitness based on its age and length
"""
| 37.962963
| 116
| 0.58478
|
# Valentin Macรฉ
# [email protected]
# Developed for fun
# Feel free to use this code as you wish as long as you quote me as author
"""
snake.py
~~~~~~~~~~
This module is for building the snake itself in the snake game
The snake:
- Takes the form of a list, one element per body block (containing its coordinates)
- Has a head pointing to the first block, a direction and also a neural network (brain)
- Has vision given by the map (Map.scan method)
- Is in charge of moving its blocks, aging, growing by adding a block in the right place
and making decisions with the neural net
- Gives its fitness based on its age and length
"""
from neural_network import *
class Snake:
"""Snake Class"""
def __init__(self, neural_net=None, xMaxSize = 20, yMaxSize = 20):
"""
:param neural_net: NeuralNet given to the snake in charge of decisions (AI)
"""
self.body = [[10, 10], [9, 10], [9, 11], [9, 12]] # the snake is in fact a list of coordinates
self.head = self.body[0][:] # first body block
self.old_tail = self.head[:] # useful to grow
self.direction = RIGHT
self.age = 0
self.starve = 500 # useful to avoid looping AI snakes
self.alive = True
self.neural_net = neural_net
self.vision = [] # holds the map.scan() and is used by the neural net
self.xMaxSize = xMaxSize
self.yMaxSize = yMaxSize
def update(self):
"""
Actualize the snake through time, making it older and more hungryat each game iteration,
sorry snek
"""
self.age += 1
self.starve -= 1
if self.starve < 1:
self.alive = False
self.move()
def grow(self):
"""
Makes snake grow one block longer
Called by map.update() when snake's head is in collision with food
"""
self.starve = 500 # useful to avoid looping AI snakes (they die younger -> bad fitness)
self.body.append(self.old_tail) # that's why I keep old_tail
def move(self):
"""
Makes the snake move, head moves in current direction and each blocks replace its predecessor
"""
self.old_tail = self.body[-1][:] # save old position of last block
self.head[0] += self.direction[0] # moves head
self.head[1] += self.direction[1]
self.head[0] = (self.head[0] + self.xMaxSize) % self.xMaxSize
self.head[1] = (self.head[1] + self.yMaxSize) % self.yMaxSize
if self.head in self.body[1:]: # if snakes hits himself
self.alive = False
self.body.insert(0, self.body.pop()) # each block is replace by predecessor
self.body[0] = self.head[:] # first block is head
def turn_right(self):
"""
        Turns the snake's direction to the right of the current direction
Current direction = [x,y], turn_right gives [-y,x]
Example:
If [0,1] (down) is current direction, [-1,0] (right) is new direction
"""
temp = self.direction[0]
self.direction[0] = -self.direction[1]
self.direction[1] = temp
def turn_left(self):
"""
        Turns the snake's direction to the left of the current direction
        Current direction = [x,y], turn_left gives [y,-x]
"""
temp = self.direction[0]
self.direction[0] = self.direction[1]
self.direction[1] = -temp
def AI(self):
"""
Makes decision for the snake direction according to its current vision
Vision is given to the NeuralNetwork and most activated output neuron is considered as decision
"""
decision = np.argmax(self.neural_net.feed_forward(self.vision))
if decision == 1:
self.turn_right()
elif decision == 2:
self.turn_left()
def fitness(self):
"""
Measures how well the snake is doing as a function of its length and age
Note:
- You can be creative with the formula and find a better solution
- It has a big impact on the genetic algorithm
:return: integer representing how good the snake is performing
"""
return (len(self.body)**2) * self.age
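    # Worked example (illustration only): a snake of length 6 that survived 300 steps
    # scores 6**2 * 300 = 10800, so added length is rewarded much more than mere survival.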
def render(self, window):
"""
        Renders the snake body on the window surface and calls render() of the neural net
        Very very very unoptimized, since rendering does not affect the genetic algorithm
:param window: surface window
"""
body = pygame.image.load(IMAGE_SNAKE).convert_alpha() # loading image
for block in self.body:
window.blit(body, (block[0]*SPRITE_SIZE, block[1]*SPRITE_SIZE)) # painting a beautiful snek
if self.neural_net: # calls for neural net rendering
self.neural_net.render(window, self.vision)
| 2
| 0
| 0
| 4,428
| 0
| 0
| 0
| 7
| 46
|
80cdfea7dc48867c436f0e3fb26d31ebb7279008
| 4,414
|
py
|
Python
|
tests.py
|
gwpicard/flask-kanban
|
49a13635d14723639bde896d802e8f67b1c3147e
|
[
"MIT"
] | 9
|
2019-02-01T01:17:28.000Z
|
2022-02-01T14:50:58.000Z
|
tests.py
|
gwpicard/flask-kanban
|
49a13635d14723639bde896d802e8f67b1c3147e
|
[
"MIT"
] | null | null | null |
tests.py
|
gwpicard/flask-kanban
|
49a13635d14723639bde896d802e8f67b1c3147e
|
[
"MIT"
] | 1
|
2022-02-21T11:20:49.000Z
|
2022-02-21T11:20:49.000Z
|
# project/test_basic.py
import unittest
TEST_DB = 'test.db'
if __name__ == "__main__":
unittest.main()
| 34.217054
| 104
| 0.646353
|
# project/test_basic.py
import os
import unittest
from app.app import Kanban_app
from app.models import db, User, Card
TEST_DB = 'test.db'
class BasicTests(unittest.TestCase):
# execute before each test
def setUp(self):
Kanban_app.config['TESTING'] = True # set test mode
Kanban_app.config['WTF_CSRF_ENABLED'] = False # enable app to trigger requests
Kanban_app.config['DEBUG'] = False
Kanban_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db/'+TEST_DB # configure test database
self.app = Kanban_app.test_client()
db.drop_all() # drop tables to start fresh for each test
db.create_all()
self.assertEqual(Kanban_app.debug, False)
# execute after each test
def tearDown(self):
pass
# methods to help pass data to views
def register(self, email, password):
return self.app.post(
'/signup',
data=dict(email=email, password=password),
follow_redirects=True
)
def login(self, email, password):
return self.app.post(
'/login',
data=dict(email=email, password=password),
follow_redirects=True
)
def logout(self):
return self.app.get(
'/logout',
follow_redirects=True
)
# tests to run
# check home view works
def test_home(self):
response = self.app.get('/', follow_redirects=True)
self.assertEqual(response.status_code, 200)
# test user can register
def test_registration(self):
response = self.register('[email protected]', 'password')
# check registration succeeds
self.assertEqual(response.status_code, 200)
# check user is redirected to login page
self.assertIn(b'Login', response.data)
# test invalid email during registration
def test_invalid_email(self):
response = self.register('test', 'password')
        self.assertIn(b"Form didn't validate", response.data)
# test invalid passwords during registration
def test_invalid_password_1(self):
response = self.register('test', '')
        self.assertIn(b"Form didn't validate", response.data)
# test log in works
def test_valid_login(self):
response = self.register('[email protected]', 'password')
# check registration succeeds
self.assertEqual(response.status_code, 200)
# check use can login details
response = self.login('[email protected]', 'password')
# check login succeeds
self.assertEqual(response.status_code, 200)
#print(response.data)
#self.assertIn(b'My Kanban', response.data)
# test incorrect password
def test_invalid_login_1(self):
response = self.register('[email protected]', 'password')
# check registration succeeds
self.assertEqual(response.status_code, 200)
# check use can login details
response = self.login('[email protected]', 'pssword')
# check login fails due to password
self.assertIn(b'Wrong password', response.data)
# test bad email prevents login
def test_invalid_login_2(self):
response = self.register('[email protected]', 'password')
# check registration succeeds
self.assertEqual(response.status_code, 200)
# check use can login details
response = self.login('[email protected]', 'password')
# check login fails due to email
        self.assertIn(b"User doesn't exist. Please sign up", response.data)
# test app prevents duplicate emails
def test_duplicate_email(self):
response = self.register('[email protected]', 'password')
self.assertEqual(response.status_code, 200)
response = self.register('[email protected]', 'password')
self.assertIn(b"Email address already exists", response.data)
# test logout
def test_logout(self):
response = self.register('[email protected]', 'password')
# check registration succeeds
self.assertEqual(response.status_code, 200)
# check use can login details
response = self.login('[email protected]', 'password')
# check login succeeds
self.assertEqual(response.status_code, 200)
# check logout succeeds
self.logout()
self.assertEqual(response.status_code, 200)
if __name__ == "__main__":
unittest.main()
| 0
| 0
| 0
| 4,201
| 0
| 0
| 0
| 13
| 91
|
914dc2978f7ce70ef0733bfca7b7d211db0b3238
| 1,823
|
py
|
Python
|
src/pwned_passwords_django/api.py
|
jdufresne/pwned-passwords-django
|
664df66b54f662a26d98f34f1713281a15d0eb0b
|
[
"BSD-3-Clause"
] | 102
|
2018-03-06T11:46:40.000Z
|
2022-03-23T17:25:19.000Z
|
src/pwned_passwords_django/api.py
|
jdufresne/pwned-passwords-django
|
664df66b54f662a26d98f34f1713281a15d0eb0b
|
[
"BSD-3-Clause"
] | 24
|
2018-03-08T08:19:54.000Z
|
2020-11-05T11:09:03.000Z
|
src/pwned_passwords_django/api.py
|
jdufresne/pwned-passwords-django
|
664df66b54f662a26d98f34f1713281a15d0eb0b
|
[
"BSD-3-Clause"
] | 6
|
2018-03-07T22:19:48.000Z
|
2020-05-05T00:43:52.000Z
|
"""
Direct access to the Pwned Passwords API for checking whether a
password is compromised.
"""
import hashlib
import logging
import sys
import requests
from django.conf import settings
from . import __version__
log = logging.getLogger(__name__)
API_ENDPOINT = "https://api.pwnedpasswords.com/range/{}"
REQUEST_TIMEOUT = 1.0 # 1 second
USER_AGENT = "pwned-passwords-django/{} (Python/{} | requests/{})".format(
__version__, "{}.{}.{}".format(*sys.version_info[:3]), requests.__version__
)
def _get_pwned(prefix):
"""
Fetches a dict of all hash suffixes from Pwned Passwords for a
given SHA-1 prefix.
"""
try:
response = requests.get(
url=API_ENDPOINT.format(prefix),
headers={"User-Agent": USER_AGENT},
timeout=getattr(settings, "PWNED_PASSWORDS_API_TIMEOUT", REQUEST_TIMEOUT),
)
response.raise_for_status()
except requests.RequestException as e:
# Gracefully handle timeouts and HTTP error response codes.
log.warning("Skipped Pwned Passwords check due to error: %r", e)
return None
results = {}
for line in response.text.splitlines():
line_suffix, _, times = line.partition(":")
results[line_suffix] = int(times)
return results
def pwned_password(password):
"""
Checks a password against the Pwned Passwords database.
"""
if not isinstance(password, str):
raise TypeError("Password values to check must be Unicode strings.")
password_hash = hashlib.sha1(password.encode("utf-8")).hexdigest().upper()
prefix, suffix = password_hash[:5], password_hash[5:]
results = _get_pwned(prefix)
if results is None:
# Gracefully handle timeouts and HTTP error response codes.
return None
return results.get(suffix, 0)
| 28.046154
| 86
| 0.675261
|
"""
Direct access to the Pwned Passwords API for checking whether a
password is compromised.
"""
import hashlib
import logging
import sys
import requests
from django.conf import settings
from . import __version__
log = logging.getLogger(__name__)
API_ENDPOINT = "https://api.pwnedpasswords.com/range/{}"
REQUEST_TIMEOUT = 1.0 # 1 second
USER_AGENT = "pwned-passwords-django/{} (Python/{} | requests/{})".format(
__version__, "{}.{}.{}".format(*sys.version_info[:3]), requests.__version__
)
def _get_pwned(prefix):
"""
Fetches a dict of all hash suffixes from Pwned Passwords for a
given SHA-1 prefix.
"""
try:
response = requests.get(
url=API_ENDPOINT.format(prefix),
headers={"User-Agent": USER_AGENT},
timeout=getattr(settings, "PWNED_PASSWORDS_API_TIMEOUT", REQUEST_TIMEOUT),
)
response.raise_for_status()
except requests.RequestException as e:
# Gracefully handle timeouts and HTTP error response codes.
log.warning("Skipped Pwned Passwords check due to error: %r", e)
return None
results = {}
for line in response.text.splitlines():
line_suffix, _, times = line.partition(":")
results[line_suffix] = int(times)
return results
def pwned_password(password):
"""
Checks a password against the Pwned Passwords database.
"""
if not isinstance(password, str):
raise TypeError("Password values to check must be Unicode strings.")
password_hash = hashlib.sha1(password.encode("utf-8")).hexdigest().upper()
prefix, suffix = password_hash[:5], password_hash[5:]
results = _get_pwned(prefix)
if results is None:
# Gracefully handle timeouts and HTTP error response codes.
return None
return results.get(suffix, 0)
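# Illustrative usage sketch (assumes only the functions defined above; the returned
# count depends on the live Pwned Passwords API, the value shown is hypothetical):
#
#   times = pwned_password("correct horse battery staple")
#   if times is None:
#       ...            # API unreachable: treat as "unknown", as the code above does
#   elif times:
#       print(f"password seen {times} times in known breaches")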
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
906c9cd8624d841bd93c4dfadff12bec3fb9bb94
| 3,004
|
py
|
Python
|
tests/python/test_dataset_methods.py
|
billschereriii/SmartRedis
|
63147106d90df11765b5dd93f03df64a26937da6
|
[
"BSD-2-Clause"
] | null | null | null |
tests/python/test_dataset_methods.py
|
billschereriii/SmartRedis
|
63147106d90df11765b5dd93f03df64a26937da6
|
[
"BSD-2-Clause"
] | null | null | null |
tests/python/test_dataset_methods.py
|
billschereriii/SmartRedis
|
63147106d90df11765b5dd93f03df64a26937da6
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
from smartredis import Dataset
def test_add_get_tensor(mock_data):
"""Test adding and retrieving 1D tensors to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 1D tensors of all data types
data = mock_data.create_data(10)
add_get_arrays(dataset, data)
def test_add_get_tensor_2D(mock_data):
"""Test adding and retrieving 2D tensors to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 2D tensors of all data types
data_2D = mock_data.create_data((10, 10))
add_get_arrays(dataset, data_2D)
def test_add_get_tensor_3D(mock_data):
"""Test adding and retrieving 3D tensors to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 3D tensors of all datatypes
data_3D = mock_data.create_data((10, 10, 10))
add_get_arrays(dataset, data_3D)
def test_add_get_scalar(mock_data):
"""Test adding and retrieving scalars to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 1D tensors of all data types
data = mock_data.create_metadata_scalars(10)
add_get_scalars(dataset, data)
def test_add_get_strings(mock_data):
"""Test adding and retrieving strings to
a dataset
"""
dataset = Dataset("test-dataset")
# list of strings
data = mock_data.create_metadata_strings(10)
add_get_strings(dataset, data)
# ------- Helper Functions -----------------------------------------------
def add_get_arrays(dataset, data):
"""Helper for dataset tests"""
# add to dataset
for index, array in enumerate(data):
key = f"array_{str(index)}"
dataset.add_tensor(key, array)
# get from dataset
for index, array in enumerate(data):
key = f"array_{str(index)}"
rarray = dataset.get_tensor(key)
np.testing.assert_array_equal(
rarray,
array,
"Returned array from get_tensor not equal to tensor added to dataset",
)
def add_get_scalars(dataset, data):
"""Helper for metadata tests"""
# add to dataset
for index, scalars in enumerate(data):
key = f"meta_scalars_{index}"
for scalar in scalars:
dataset.add_meta_scalar(key, scalar)
# get from dataset
for index, scalars in enumerate(data):
key = f"meta_scalars_{index}"
rscalars = dataset.get_meta_scalars(key)
np.testing.assert_array_equal(
rscalars,
scalars,
"Returned scalars from get_meta_scalars not equal to scalars added to dataset",
)
def add_get_strings(dataset, data):
"""Helper for metadata tests"""
# add to dataset
key = "test_meta_strings"
for meta_string in data:
dataset.add_meta_string(key, meta_string)
# get from dataset
rdata = dataset.get_meta_strings(key)
assert len(data) == len(rdata)
assert all([a == b for a, b in zip(data, rdata)])
| 26.121739
| 91
| 0.649134
|
import numpy as np
from smartredis import Dataset
def test_add_get_tensor(mock_data):
"""Test adding and retrieving 1D tensors to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 1D tensors of all data types
data = mock_data.create_data(10)
add_get_arrays(dataset, data)
def test_add_get_tensor_2D(mock_data):
"""Test adding and retrieving 2D tensors to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 2D tensors of all data types
data_2D = mock_data.create_data((10, 10))
add_get_arrays(dataset, data_2D)
def test_add_get_tensor_3D(mock_data):
"""Test adding and retrieving 3D tensors to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 3D tensors of all datatypes
data_3D = mock_data.create_data((10, 10, 10))
add_get_arrays(dataset, data_3D)
def test_add_get_scalar(mock_data):
"""Test adding and retrieving scalars to
a dataset and with all datatypes
"""
dataset = Dataset("test-dataset")
# 1D tensors of all data types
data = mock_data.create_metadata_scalars(10)
add_get_scalars(dataset, data)
def test_add_get_strings(mock_data):
"""Test adding and retrieving strings to
a dataset
"""
dataset = Dataset("test-dataset")
# list of strings
data = mock_data.create_metadata_strings(10)
add_get_strings(dataset, data)
# ------- Helper Functions -----------------------------------------------
def add_get_arrays(dataset, data):
"""Helper for dataset tests"""
# add to dataset
for index, array in enumerate(data):
key = f"array_{str(index)}"
dataset.add_tensor(key, array)
# get from dataset
for index, array in enumerate(data):
key = f"array_{str(index)}"
rarray = dataset.get_tensor(key)
np.testing.assert_array_equal(
rarray,
array,
"Returned array from get_tensor not equal to tensor added to dataset",
)
def add_get_scalars(dataset, data):
"""Helper for metadata tests"""
# add to dataset
for index, scalars in enumerate(data):
key = f"meta_scalars_{index}"
for scalar in scalars:
dataset.add_meta_scalar(key, scalar)
# get from dataset
for index, scalars in enumerate(data):
key = f"meta_scalars_{index}"
rscalars = dataset.get_meta_scalars(key)
np.testing.assert_array_equal(
rscalars,
scalars,
"Returned scalars from get_meta_scalars not equal to scalars added to dataset",
)
def add_get_strings(dataset, data):
"""Helper for metadata tests"""
# add to dataset
key = "test_meta_strings"
for meta_string in data:
dataset.add_meta_string(key, meta_string)
# get from dataset
rdata = dataset.get_meta_strings(key)
assert len(data) == len(rdata)
assert all([a == b for a, b in zip(data, rdata)])
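# Illustrative sketch (hypothetical; the real `mock_data` fixture is provided elsewhere
# in the test suite and is not part of this file):
#
#   import pytest, numpy as np
#   class _MockData:
#       def create_data(self, shape):
#           return [np.random.random(shape).astype(t) for t in ("float32", "float64")]
#       def create_metadata_scalars(self, n):
#           return [np.random.random(n).astype(t) for t in ("float32", "float64")]
#       def create_metadata_strings(self, n):
#           return [f"meta_string_{i}" for i in range(n)]
#   @pytest.fixture
#   def mock_data():
#       return _MockData()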
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
137463cfc39f1173fd92189be66736d31bb70731
| 341
|
py
|
Python
|
ants/cyants/ex-setup.py
|
bwhewe-13/ants
|
6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2
|
[
"MIT"
] | null | null | null |
ants/cyants/ex-setup.py
|
bwhewe-13/ants
|
6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2
|
[
"MIT"
] | null | null | null |
ants/cyants/ex-setup.py
|
bwhewe-13/ants
|
6923cfc1603e0cd90c2ae90fa0fed6dd86edc0b2
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
from Cython.Build import cythonize
# ext = Extension(name="wrap_fib", source=["cfibc.c", "wrap_fib.pyx"])
# ext = ["hermite_splines.pyx", "source_iteration.pyx", "splines.pyx"]
ext = ["multi_group.pyx", "x_sweeps.pyx"] #, "x_sweeps.pxd"]
setup(ext_modules=cythonize(ext, language_level="3"))
| 34.1
| 71
| 0.721408
|
from distutils.core import setup, Extension
from Cython.Build import cythonize
# ext = Extension(name="wrap_fib", source=["cfibc.c", "wrap_fib.pyx"])
# ext = ["hermite_splines.pyx", "source_iteration.pyx", "splines.pyx"]
ext = ["multi_group.pyx", "x_sweeps.pyx"] #, "x_sweeps.pxd"]
setup(ext_modules=cythonize(ext, language_level="3"))
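# Note (standard distutils/Cython usage, not stated in this file): the extensions are
# typically built in place with `python ex-setup.py build_ext --inplace`.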
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0
|
9c8267f71830eb7f8fa7c49b3f712bc593dfe2dd
| 205
|
py
|
Python
|
bus_plan/wsgi.py
|
diegopmayer/bussiness_plan
|
56f7491a9b1767f60341e003648a7b9a946a877c
|
[
"MIT"
] | null | null | null |
bus_plan/wsgi.py
|
diegopmayer/bussiness_plan
|
56f7491a9b1767f60341e003648a7b9a946a877c
|
[
"MIT"
] | 2
|
2019-02-27T16:46:53.000Z
|
2019-05-07T00:32:10.000Z
|
bus_plan/wsgi.py
|
diegopmayer/bussiness_plan
|
56f7491a9b1767f60341e003648a7b9a946a877c
|
[
"MIT"
] | null | null | null |
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bus_plan.settings')
application = Cling(get_wsgi_application())
| 20.5
| 68
| 0.82439
|
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bus_plan.settings')
application = Cling(get_wsgi_application())
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e851c09d856bac197cdb3242af940148b3e9a3ea
| 210
|
py
|
Python
|
awaitawaitawait.py
|
bmintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-12T10:33:13.000Z
|
2019-02-24T05:01:40.000Z
|
awaitawaitawait.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | null | null | null |
awaitawaitawait.py
|
iomintz/python-snippets
|
982861c173bf4bcd5d908514a9e8b1914a580a5d
|
[
"CC0-1.0"
] | 2
|
2018-11-24T08:16:59.000Z
|
2019-02-24T04:41:30.000Z
|
#!/usr/bin/env python3
# encoding: utf-8
import asyncio
asyncio.run(main())
| 15
| 49
| 0.633333
|
#!/usr/bin/env python3
# encoding: utf-8
import asyncio
async def f(*, n=5):
if n == 1: return True
return f(n=n-1)
async def main():
print(await (await (await (await (await f())))))
asyncio.run(main())
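# Illustrative note (inferred from the code above): f() with n > 1 returns the
# *un-awaited* coroutine f(n=n-1), so main() needs exactly five awaits to unwrap the
# chain started by the default n=5 before the final True appears.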
| 0
| 0
| 86
| 0
| 0
| 0
| 0
| 0
| 46
|
58fe7c9a90e9776fd8838d8eb40a468fc3594ba1
| 792
|
py
|
Python
|
examples/windows/python/example.py
|
gomiero/bin2src
|
5b1e849873631fe2bef76cc77ed18026cd90c2d9
|
[
"MIT"
] | 1
|
2022-03-07T08:21:49.000Z
|
2022-03-07T08:21:49.000Z
|
examples/windows/python/example.py
|
gomiero/bin2src
|
5b1e849873631fe2bef76cc77ed18026cd90c2d9
|
[
"MIT"
] | null | null | null |
examples/windows/python/example.py
|
gomiero/bin2src
|
5b1e849873631fe2bef76cc77ed18026cd90c2d9
|
[
"MIT"
] | 1
|
2021-08-02T08:07:16.000Z
|
2021-08-02T08:07:16.000Z
|
#
# Embed a photo data inside a Tk frame
#
import tkinter as tk
AUTHOR = "Alexandre Gomiero de Oliveira"
REPO = "https://github.com/gomiero/bin2src"
# Entry point: create the root window...
root = tk.Tk()
# ...the App instance...
app = App(master = root)
# ...and run the main loop.
app.mainloop()
| 28.285714
| 82
| 0.651515
|
#
# Embed a photo data inside a Tk frame
#
import tkinter as tk
import smimgpng as smimg
AUTHOR = "Alexandre Gomiero de Oliveira"
REPO = "https://github.com/gomiero/bin2src"
class App(tk.Frame):
def __init__(self, master):
super().__init__(master)
self.config(width=427, height=640)
canvas = tk.Canvas(self, width=427, height=640, bg="black")
canvas.pack()
# --> Read image from binary data generated at smimgpng.py <--
self.photo_img = tk.PhotoImage(format = 'png', data = smimg.SMIMGPNG_DATA)
canvas.create_image(0, 0, image = self.photo_img, anchor=tk.NW)
self.pack()
# Entry point: create the root window...
root = tk.Tk()
# ...the App instance...
app = App(master = root)
# ...and run the main loop.
app.mainloop()
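# Note (assumption, not verifiable from this file alone): the `smimgpng` module imported
# above is expected to be generated beforehand by running bin2src (see REPO) on a PNG,
# producing the SMIMGPNG_DATA constant used here.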
| 0
| 0
| 0
| 443
| 0
| 0
| 0
| 3
| 45
|
7640a854eb6514e315d4382f00b04b6a3bbf1c3f
| 5,852
|
py
|
Python
|
lib/eval.py
|
yzhq97/SCKR
|
601545db60eac3845e0eeaaae6b0580d4a41d949
|
[
"MIT"
] | 7
|
2019-05-02T07:26:46.000Z
|
2020-04-06T06:59:25.000Z
|
lib/eval.py
|
yzhq97/SCKR
|
601545db60eac3845e0eeaaae6b0580d4a41d949
|
[
"MIT"
] | 1
|
2019-06-06T18:26:25.000Z
|
2020-11-07T08:39:39.000Z
|
lib/eval.py
|
yzhq97/SCKR
|
601545db60eac3845e0eeaaae6b0580d4a41d949
|
[
"MIT"
] | 3
|
2019-09-20T09:14:19.000Z
|
2021-02-13T15:17:59.000Z
|
from lib.mlnet import MLNet
from data.data_loader import DataLoader
from data.utils import split_and_pack
import tensorflow as tf
import numpy as np
import time
def get_descs_and_labels(net: MLNet, sess: tf.Session, modal,
paths_with_labels, process_fn, batch_size):
"""
This function computes description vectors for image and text samples.
"""
if net.is_training: raise Exception("should not run this in training mode")
if net.is_retrieving: raise Exception("should not run this in retrieving mode")
descriptors = []
labels = []
loader = DataLoader(paths_with_labels, batch_size, shuffle=False, process_fn=process_fn)
for batch in range(loader.n_batches):
batch_data, batch_labels = loader.get_batch_by_index(batch)
batch_data = split_and_pack(batch_data)
if modal == 1:
feed_dict = {}
for ph, data in zip(net.ph1, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_1.eval(session=sess, feed_dict=feed_dict)
elif modal == 2:
feed_dict = {}
for ph, data in zip(net.ph2, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_2.eval(session=sess, feed_dict=feed_dict)
else:
raise Exception("modal should be either 1 or 2")
descriptors.append(batch_descs)
labels.append(batch_labels)
if loader.n_remain > 0:
batch_data, batch_labels = loader.get_remaining()
batch_data = split_and_pack(batch_data)
if modal == 1:
feed_dict = {}
for ph, data in zip(net.ph1, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_1.eval(session=sess, feed_dict=feed_dict)
elif modal == 2:
feed_dict = {}
for ph, data in zip(net.ph2, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_2.eval(session=sess, feed_dict=feed_dict)
else:
raise Exception("modal should be either 1 or 2")
descriptors.append(batch_descs[:loader.n_remain])
labels.append(batch_labels[:loader.n_remain])
descriptors = np.concatenate(descriptors, axis=0)
labels = np.concatenate(labels, axis=0)
return descriptors, labels
def average_precisions(net: MLNet, sess: tf.Session,
q_descs, q_labels, r_descs, r_labels,
at=100, batch_size=128):
"""
:param net: an MLNet model
    :param sess: a tensorflow session
:param q_descs: descriptors for querying data
:param q_labels: labels for querying data
:param r_descs: descriptors for retrieved data
:param r_labels: labels for retrieved data
:param at: if mAP@100 is desired, assign 'at' with 100, if mAP@ALL is desired, assign 'at' with 0
:param batch_size: batch size
    :return: average precisions
"""
n_samples, n_entries = len(q_descs), len(r_descs)
APs = []
for query_idx in range(n_samples):
time1 = time.time()
_, average_precision = retrieve(net, sess, q_descs[query_idx], q_labels[query_idx], r_descs, r_labels, at=at, batch_size=batch_size)
APs.append(average_precision)
time2 = time.time()
ellapsed = time2 - time1
print("sample %4d/%4d, AP: %5.3f, time: %5.2fs" %
(query_idx + 1, n_samples, average_precision, ellapsed), end='\r')
return APs
| 36.805031
| 140
| 0.645762
|
from lib.mlnet import MLNet
from data.data_loader import DataLoader
from data.utils import split_and_pack
import tensorflow as tf
import numpy as np
import time
def get_descs_and_labels(net: MLNet, sess: tf.Session, modal,
paths_with_labels, process_fn, batch_size):
"""
This function computes description vectors for image and text samples.
"""
if net.is_training: raise Exception("should not run this in training mode")
if net.is_retrieving: raise Exception("should not run this in retrieving mode")
descriptors = []
labels = []
loader = DataLoader(paths_with_labels, batch_size, shuffle=False, process_fn=process_fn)
for batch in range(loader.n_batches):
batch_data, batch_labels = loader.get_batch_by_index(batch)
batch_data = split_and_pack(batch_data)
if modal == 1:
feed_dict = {}
for ph, data in zip(net.ph1, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_1.eval(session=sess, feed_dict=feed_dict)
elif modal == 2:
feed_dict = {}
for ph, data in zip(net.ph2, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_2.eval(session=sess, feed_dict=feed_dict)
else:
raise Exception("modal should be either 1 or 2")
descriptors.append(batch_descs)
labels.append(batch_labels)
if loader.n_remain > 0:
batch_data, batch_labels = loader.get_remaining()
batch_data = split_and_pack(batch_data)
if modal == 1:
feed_dict = {}
for ph, data in zip(net.ph1, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_1.eval(session=sess, feed_dict=feed_dict)
elif modal == 2:
feed_dict = {}
for ph, data in zip(net.ph2, batch_data):
feed_dict[ph] = data
batch_descs = net.descriptors_2.eval(session=sess, feed_dict=feed_dict)
else:
raise Exception("modal should be either 1 or 2")
descriptors.append(batch_descs[:loader.n_remain])
labels.append(batch_labels[:loader.n_remain])
descriptors = np.concatenate(descriptors, axis=0)
labels = np.concatenate(labels, axis=0)
return descriptors, labels
def retrieve(net: MLNet, sess: tf.Session,
q_desc, q_label, r_descs, r_labels,
at=100, batch_size=128):
if not net.is_retrieving: raise Exception("should run this in retrieving mode")
n_entries = len(r_descs)
desc_dims = len(q_desc)
n_batches = int(n_entries / batch_size)
n_remain = n_entries % batch_size
logits = []
labels = []
batch_q_descs = np.repeat(np.expand_dims(q_desc, axis=0), batch_size, axis=0)
batch_q_labels = np.array([q_label for _ in range(batch_size)], dtype='int32')
for batch in range(n_batches):
batch_r_descs = r_descs[batch * batch_size: (batch + 1) * batch_size]
batch_r_labels = r_labels[batch * batch_size:(batch + 1) * batch_size]
batch_labels = np.array(batch_q_labels == batch_r_labels, dtype='int32')
feed_dict = {net.ph_desc_1: batch_q_descs, net.ph_desc_2: batch_r_descs}
batch_logits = net.logits.eval(session=sess, feed_dict=feed_dict)
logits.append(batch_logits)
labels.append(batch_labels)
if n_remain > 0:
batch_r_descs = np.zeros([batch_size, desc_dims], dtype='float32')
batch_r_descs[:n_remain, :] = r_descs[-n_remain:]
batch_r_labels = np.zeros([batch_size], dtype='int32')
batch_r_labels[:n_remain] = r_labels[-n_remain:]
batch_labels = np.array(batch_q_labels == batch_r_labels, dtype='int32')
feed_dict = {net.ph_desc_1: batch_q_descs, net.ph_desc_2: batch_r_descs}
batch_logits = net.logits.eval(session=sess, feed_dict=feed_dict)
logits.append(batch_logits[:n_remain])
labels.append(batch_labels[:n_remain])
indices = [i for i in range(n_entries)]
logits = np.concatenate(logits, axis=0).tolist()
labels = np.concatenate(labels, axis=0).tolist()
zipped = list(zip(indices, logits, labels))
zipped = sorted(zipped, key=lambda x: x[1], reverse=True)
indices, logits, labels = zip(*zipped)
n_relavant = 0
precisions = []
piv = len(labels) if at <= 0 or at > len(labels) else at
for j in range(piv):
if labels[j] == 1:
n_relavant += 1
precisions.append(1.0 * n_relavant / (j + 1))
if n_relavant == 0: precisions = [0]
average_precision = sum(precisions) / len(precisions)
return indices[:at], average_precision
def average_precisions(net: MLNet, sess: tf.Session,
q_descs, q_labels, r_descs, r_labels,
at=100, batch_size=128):
"""
:param net: an MLNet model
    :param sess: a tensorflow session
:param q_descs: descriptors for querying data
:param q_labels: labels for querying data
:param r_descs: descriptors for retrieved data
:param r_labels: labels for retrieved data
:param at: if mAP@100 is desired, assign 'at' with 100, if mAP@ALL is desired, assign 'at' with 0
:param batch_size: batch size
    :return: average precisions
"""
n_samples, n_entries = len(q_descs), len(r_descs)
APs = []
for query_idx in range(n_samples):
time1 = time.time()
_, average_precision = retrieve(net, sess, q_descs[query_idx], q_labels[query_idx], r_descs, r_labels, at=at, batch_size=batch_size)
APs.append(average_precision)
time2 = time.time()
ellapsed = time2 - time1
print("sample %4d/%4d, AP: %5.3f, time: %5.2fs" %
(query_idx + 1, n_samples, average_precision, ellapsed), end='\r')
return APs
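# Worked example (illustration only, matching the AP computation in retrieve() above):
# for a ranked list with relevance labels [1, 0, 1, 1, 0] and at=5, precisions at the
# relevant ranks are 1/1, 2/3 and 3/4, so AP = (1.0 + 0.6667 + 0.75) / 3 ~= 0.806.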
| 0
| 0
| 0
| 0
| 0
| 2,327
| 0
| 0
| 23
|
c685fdb6b92c9e8375aa383895edecf724d650b5
| 379
|
py
|
Python
|
tests/exceptions/test_repo_not_found.py
|
geometry-labs/tackle-box
|
83424a10416955ba983f0c14ec89bd79673a4282
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T23:10:11.000Z
|
2021-04-13T23:10:11.000Z
|
tests/exceptions/test_repo_not_found.py
|
geometry-labs/tackle-box
|
83424a10416955ba983f0c14ec89bd79673a4282
|
[
"BSD-3-Clause"
] | 4
|
2021-01-27T00:06:12.000Z
|
2021-02-12T01:20:32.000Z
|
tests/exceptions/test_repo_not_found.py
|
geometry-labs/tackle-box
|
83424a10416955ba983f0c14ec89bd79673a4282
|
[
"BSD-3-Clause"
] | 1
|
2021-05-07T05:07:29.000Z
|
2021-05-07T05:07:29.000Z
|
"""Testing invalid cookiecutter template repositories."""
import pytest
from tackle import exceptions, main
def test_should_raise_error_if_repo_does_not_exist(chdir):
"""Cookiecutter invocation with non-exist repository should raise error."""
chdir('/')
with pytest.raises(exceptions.UnknownSourceException):
main.tackle('definitely-not-a-valid-repo-dir')
| 31.583333
| 79
| 0.76781
|
"""Testing invalid cookiecutter template repositories."""
import pytest
from tackle import exceptions, main
def test_should_raise_error_if_repo_does_not_exist(chdir):
"""Cookiecutter invocation with non-exist repository should raise error."""
chdir('/')
with pytest.raises(exceptions.UnknownSourceException):
main.tackle('definitely-not-a-valid-repo-dir')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8785153ec48817e3188a7b8b4bf14392a9bd7b80
| 6,260
|
py
|
Python
|
src/analyze_orig_data.py
|
MadryLab/dataset-replication-analysis
|
f06ee16f0bb1c119492c6134788e62457ad9f5bb
|
[
"MIT"
] | 25
|
2020-05-19T20:06:58.000Z
|
2022-01-19T07:41:06.000Z
|
src/analyze_orig_data.py
|
MadryLab/dataset-replication-analysis
|
f06ee16f0bb1c119492c6134788e62457ad9f5bb
|
[
"MIT"
] | null | null | null |
src/analyze_orig_data.py
|
MadryLab/dataset-replication-analysis
|
f06ee16f0bb1c119492c6134788e62457ad9f5bb
|
[
"MIT"
] | 5
|
2020-05-20T06:30:56.000Z
|
2021-03-03T00:46:24.000Z
|
import torch as ch
import pandas as pd
import numpy as np
from pathlib import Path
from pathos.multiprocessing import Pool
from argparse import ArgumentParser
import matplotlib as mpl
from matplotlib import rc
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
mpl.style.use('ggplot')
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
## Copied verbatim from Recht et al code release
# Selection frequency ops
# Bootstrap
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--trials', type=int, default=10)
parser.add_argument('--workers', type=int, default=2)
parser.add_argument('--experiment', required=True,
choices=['heldout', 'naiveest', 'ezflickr'])
parser.add_argument('--out-dir', required=True)
parser.add_argument('--df-path', required=True)
args = parser.parse_args()
print("Loading data...")
MY_PATH = Path(args.out_dir)
df = ch.load(args.df_path)
print(f"Loaded data (currently {len(df)} annotations)")
CLA_KEYS = [k for k in df.columns if k.startswith('correct_')]
p = Pool(args.workers)
if args.experiment == 'ezflickr':
FORMAT_STR = "Accs (v1, v2, v2_EZ): ({0}, {1}, {2}) | " \
"SFs (v1, v2, v2_EZ): ({3}, {4}, {5}) | " \
"v2 heldout SF: {6}"
res = p.map(flickr_ez_exp, range(args.trials))
print(FORMAT_STR.format(*list(np.array(res).mean(0))))
elif args.experiment == 'heldout':
FORMAT_STR = "SFs (v1, v2): ({0:.3f}, {1:.3f}) | " \
"v2 heldout SF: {2:.3f}"
stats = p.map(heldout_sf_exp, range(args.trials))
print(FORMAT_STR.format(*list(np.array(stats).mean(0))))
elif args.experiment == 'naiveest':
fig, ax = plt.subplots(1, 1, figsize=(6,2))
xs = [5, 6, 7, 8, 9, 10]
res = np.array(p.map(naive_est_exp, [xs] * args.trials))
res_df = pd.DataFrame(columns=xs, data=res).melt(var_name='xs', value_name='adj_acc')
ch.save(res_df, str(MY_PATH / 'orig_data_naive_est_data.pt'))
print(f"X: {xs} | Y: {res.mean(0)}")
sns.lineplot(data=res_df, x='xs', y='adj_acc',
ax=ax, palette=sns.color_palette("tab10", 1))
ax.set(xlabel='Number of annotators per image',
ylabel='ImageNet v1/v2 accuracy gap')
plt.tight_layout()
fig.savefig(str(MY_PATH / 'orig_data_naive_est.png'))
| 40.387097
| 117
| 0.620128
|
import torch as ch
import pandas as pd
import numpy as np
from pathlib import Path
from pathos.multiprocessing import Pool
from argparse import ArgumentParser
from numpy.random import seed
import matplotlib as mpl
from matplotlib import rc
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
mpl.style.use('ggplot')
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
def agg(_df):
agged = _df.groupby(['id', 'wnid']).agg(sel_freq=('selected', 'mean'))
return agged.reset_index().set_index('id')
## Copied verbatim from Recht et al code release
def round_histogram(hist, target_sum):
fractional_hist = target_sum * hist / np.sum(hist)
floor_hist = np.floor(fractional_hist)
floor_sum = int(np.round(np.sum(floor_hist)))
remainder_hist = fractional_hist - floor_hist
remainder = target_sum - floor_sum
top_buckets = list(reversed(sorted(enumerate(remainder_hist), key=lambda x:(x[1], x[0]))))
result = np.copy(floor_hist).astype(np.int64)
for ii in range(remainder):
result[top_buckets[ii][0]] += 1
return result
def split_df(_df, head_size=5, tail_size=None):
shuffled = _df.sample(frac=1.0)
first_5 = shuffled.groupby('id').head(head_size)
if tail_size is not None:
last_5 = shuffled.groupby('id').tail(tail_size)
else:
last_5 = shuffled.loc[~shuffled.index.isin(first_5.index)]
first_5, last_5 = map(agg, (first_5, last_5))
return first_5, last_5
def match_datasets(v1, cands, N):
bins = [(0, 0.2), (0.2, 0.4), (0.4, 0.6), (0.6, 0.8), (0.8, 1.001)]
bins = pd.IntervalIndex.from_tuples(bins, closed='left')
# Add a column that contains the "bin" each image belongs to
v1['bin'] = pd.cut(v1['sel_freq'], bins, include_lowest=True)
cands['bin'] = pd.cut(cands['sel_freq'], bins, include_lowest=True)
all_ims = []
total_missing = 0
for wnid in v1['wnid'].unique():
hist_v1 = v1[v1['wnid'] == wnid].groupby('bin').count()['sel_freq']
hist_v1 = round_histogram(hist_v1, N)
residual = 0 # Upwards sampling
for (b, n) in zip(bins, hist_v1):
src = cands[(cands['bin'] == b) & (cands['wnid'] == wnid)]
max_ims = src.sample(n=min(n+residual, len(src)))
residual = n + residual - len(max_ims)
all_ims.append(max_ims)
if residual > 0:
print(f"Missing {residual} images from class {wnid} ({len(cands[cands['wnid'] == wnid])} total images)")
total_missing += residual
return pd.concat(all_ims)
def acc(im_df): return df.set_index('id').loc[im_df.index][CLA_KEYS].mean().T.mean()
# Selection frequency ops
def sf(im_df): return im_df['sel_freq'].mean()
def heldout_sf(im_df, heldout): return heldout.loc[im_df.index]['sel_freq'].mean()
# Bootstrap
def bootstrap(arr):
inds = np.random.choice(np.arange(len(arr)), size=1000)
return [np.percentile(arr[inds], c, axis=0) for c in (2.5, 97.5)]
def flickr_ez_exp(_):
seed()
v1_df = agg(df[df['dataset'] == 'v1'])
cand_df, heldout = split_df(df[df['dataset'] == 'v2'])
samples, noise = split_df(df[df['dataset'] == 'v2'], tail_size=4)
cand_ez_df = samples.loc[noise[noise['sel_freq'] >= 0.5].index]
v2_ims = match_datasets(v1_df, cand_df, N=4)
v2_ez_ims = match_datasets(v1_df, cand_ez_df, N=4)
return [acc(v1_df), acc(v2_ims), acc(v2_ez_ims),
sf(v1_df), sf(v2_ims), sf(v2_ez_ims),
heldout_sf(v2_ims, heldout)]
def heldout_sf_exp(_):
seed()
v1_df = agg(df[df['dataset'] == 'v1'])
cand_df, heldout = split_df(df[df['dataset'] == 'v2'], tail_size=5)
v2_ims = match_datasets(v1_df, cand_df, N=4)
stats = [sf(v1_df), sf(v2_ims), heldout_sf(v2_ims, heldout)]
return stats
def naive_est_exp(xs):
seed()
pred_df = df.set_index('id')[CLA_KEYS]
ys = []
for nw in xs:
v1_w, v2_w = [split_df(df[df['dataset'] == x], head_size=nw)[0]['sel_freq'] for x in ('v1', 'v2')]
tot = 0.
for b in v1_w.unique():
f_given_s = pred_df.loc[v2_w[v2_w == b].index].mean()
p_1 = (v1_w == b).mean()
tot = tot + p_1 * f_given_s
ys.append(df[df['dataset'] == 'v1'][CLA_KEYS].mean().T.mean() - tot.T.mean())
return ys
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--trials', type=int, default=10)
parser.add_argument('--workers', type=int, default=2)
parser.add_argument('--experiment', required=True,
choices=['heldout', 'naiveest', 'ezflickr'])
parser.add_argument('--out-dir', required=True)
parser.add_argument('--df-path', required=True)
args = parser.parse_args()
print("Loading data...")
MY_PATH = Path(args.out_dir)
df = ch.load(args.df_path)
print(f"Loaded data (currently {len(df)} annotations)")
CLA_KEYS = [k for k in df.columns if k.startswith('correct_')]
p = Pool(args.workers)
if args.experiment == 'ezflickr':
FORMAT_STR = "Accs (v1, v2, v2_EZ): ({0}, {1}, {2}) | " \
"SFs (v1, v2, v2_EZ): ({3}, {4}, {5}) | " \
"v2 heldout SF: {6}"
res = p.map(flickr_ez_exp, range(args.trials))
print(FORMAT_STR.format(*list(np.array(res).mean(0))))
elif args.experiment == 'heldout':
FORMAT_STR = "SFs (v1, v2): ({0:.3f}, {1:.3f}) | " \
"v2 heldout SF: {2:.3f}"
stats = p.map(heldout_sf_exp, range(args.trials))
print(FORMAT_STR.format(*list(np.array(stats).mean(0))))
elif args.experiment == 'naiveest':
fig, ax = plt.subplots(1, 1, figsize=(6,2))
xs = [5, 6, 7, 8, 9, 10]
res = np.array(p.map(naive_est_exp, [xs] * args.trials))
res_df = pd.DataFrame(columns=xs, data=res).melt(var_name='xs', value_name='adj_acc')
ch.save(res_df, str(MY_PATH / 'orig_data_naive_est_data.pt'))
print(f"X: {xs} | Y: {res.mean(0)}")
sns.lineplot(data=res_df, x='xs', y='adj_acc',
ax=ax, palette=sns.color_palette("tab10", 1))
ax.set(xlabel='Number of annotators per image',
ylabel='ImageNet v1/v2 accuracy gap')
plt.tight_layout()
fig.savefig(str(MY_PATH / 'orig_data_naive_est.png'))
| 0
| 0
| 0
| 0
| 0
| 3,529
| 0
| 8
| 271
|
f78ed197c79dd4247a597e1b42e0f17b20112e58
| 1,525
|
py
|
Python
|
remote-notify/server.py
|
JOndra91/siliness
|
a0aa3af1f57ec15e9ebfa952351cb3e6d644e8f7
|
[
"Unlicense"
] | null | null | null |
remote-notify/server.py
|
JOndra91/siliness
|
a0aa3af1f57ec15e9ebfa952351cb3e6d644e8f7
|
[
"Unlicense"
] | null | null | null |
remote-notify/server.py
|
JOndra91/siliness
|
a0aa3af1f57ec15e9ebfa952351cb3e6d644e8f7
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
if __name__ == '__main__':
main()
| 27.232143
| 63
| 0.566557
|
#!/usr/bin/python3
import argparse
from http import server
import json
import subprocess
def main():
argp = argparse.ArgumentParser()
argp.add_argument('--host', default='0.0.0.0')
argp.add_argument('--port', default=6969, type=int)
# argp.add_argument('--password')
args = argp.parse_args()
server_addr = (args.host, args.port)
httpd = server.HTTPServer(server_addr, NotifyHandler)
httpd.serve_forever()
class NotifyHandler(server.BaseHTTPRequestHandler):
def do_POST(self):
binary = self.path[1:]
if binary not in ['notify-send', 'zenity']:
self.send_response_only(403)
self.end_headers()
return
try:
length = int(self.headers.get('Content-Length', 0))
content = self.rfile.read(length).decode('utf-8')
request = json.loads(content)
if type(request) is list:
app = subprocess.run(
[binary] + request, stderr=subprocess.PIPE)
if app.returncode == 0:
self.send_response_only(200)
else:
self.send_response(500)
self.end_headers()
self.wfile.write(app.stderr)
else:
self.send_response_only(400)
self.end_headers()
except Exception as e:
self.send_response(400)
self.end_headers()
self.wfile.write(str(e))
if __name__ == '__main__':
main()
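# --- Added client sketch (not part of the original script): the handler above
# --- accepts POST /notify-send or POST /zenity with a JSON list of command
# --- line arguments.  Host and port below mirror the argparse defaults; the
# --- helper name and its two-argument payload are illustrative assumptions.
def send_notification(summary, body, host='localhost', port=6969):
    """Send a notify-send request to the server defined above."""
    import json
    import urllib.request
    data = json.dumps([summary, body]).encode('utf-8')
    req = urllib.request.Request(
        'http://%s:%d/notify-send' % (host, port), data=data)
    # Non-2xx responses raise urllib.error.HTTPError; 200 means success.
    with urllib.request.urlopen(req) as resp:
        return resp.status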
| 0
| 0
| 0
| 1,019
| 0
| 328
| 0
| -18
| 135
|
c073e119a8186298ac7dffa8adb5db13b57599fc
| 212
|
py
|
Python
|
rexplore/initialize.py
|
Seraphyx/reddit_explorer
|
a0c23e995c893fb40875a9248d9527b9402a1b95
|
[
"Apache-2.0"
] | null | null | null |
rexplore/initialize.py
|
Seraphyx/reddit_explorer
|
a0c23e995c893fb40875a9248d9527b9402a1b95
|
[
"Apache-2.0"
] | null | null | null |
rexplore/initialize.py
|
Seraphyx/reddit_explorer
|
a0c23e995c893fb40875a9248d9527b9402a1b95
|
[
"Apache-2.0"
] | null | null | null |
import configparser
def initialize(config_path):
'''
Import a config .ini file.
It should have the following definition:
'''
config = configparser.ConfigParser()
config.read(config_path)
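# --- Added sketch (not in the original file): the docstring above promises a
# --- config definition that is not shown.  The section and key names below
# --- are purely illustrative assumptions about what a configparser-compatible
# --- .ini might contain; initialize() as written parses the file and discards
# --- the parser, so callers that need the values would typically have it
# --- return `config`.
# Example config.ini (hypothetical):
#   [reddit]
#   client_id = ...
#   client_secret = ...
# Example usage:
#   initialize('config.ini')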
| 14.133333
| 41
| 0.745283
|
import mysql
import configparser
def initialize(config_path):
'''
Import a config .ini file.
It should have the following definition:
'''
config = configparser.ConfigParser()
config.read(config_path)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -9
| 22
|
0c8fcd1a6114c33b2a99b9de62a42b63033f28bd
| 1,406
|
py
|
Python
|
tests/test_parsers.py
|
ggoldman1/project1
|
28a9b36a0873ee1ecb391b818611dfe119a87048
|
[
"MIT"
] | null | null | null |
tests/test_parsers.py
|
ggoldman1/project1
|
28a9b36a0873ee1ecb391b818611dfe119a87048
|
[
"MIT"
] | null | null | null |
tests/test_parsers.py
|
ggoldman1/project1
|
28a9b36a0873ee1ecb391b818611dfe119a87048
|
[
"MIT"
] | null | null | null |
# write tests for parsers
from seqparser import (FastaParser, FastqParser)
def test_freebie_parser_1():
"""
This one is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert True
def test_freebie_parser_2():
"""
This too is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert 1 != 2
def test_FastaParser():
"""
Write your unit test for your FastaParser
class here. You should generate an instance of
your FastaParser class and assert that it properly
reads in the example Fasta File.
"""
fa = FastaParser("./data/test.fa")
records = [r for r in fa]
assert len(records) == 100, "did not read in correct number of records" # 100 records in total
for r in records:
assert len(r) == 2, "the record is the wrong length" # each record consists of header and sequence
def test_FastqParser():
"""
Write your unit test for your FastqParser
class here. You should generate an instance of
your FastqParser class and assert that it properly
reads in the example Fastq File.
"""
fq = FastqParser("./data/test.fq")
records = [r for r in fq]
assert len(records) == 100, "did not read in correct number of records" # 100 records in total
for r in records:
assert len(r) == 3, "the record is the wrong length" # each record is header, sequence, and quality
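# --- Added sketch (not the real seqparser implementation): a minimal FASTA
# --- reader satisfying the contract these tests check, i.e. one
# --- (header, sequence) pair per record.  The assumption that a record may
# --- span multiple sequence lines is about FASTA in general, not data/test.fa.
def read_fasta(path):
    """Yield (header, sequence) tuples from a FASTA file."""
    with open(path) as handle:
        header, seq_lines = None, []
        for line in handle:
            line = line.strip()
            if line.startswith('>'):
                if header is not None:
                    yield header, ''.join(seq_lines)
                header, seq_lines = line[1:], []
            elif line:
                seq_lines.append(line)
        if header is not None:
            yield header, ''.join(seq_lines)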
| 26.037037
| 107
| 0.652916
|
# write tests for parsers
from seqparser import (
FastaParser,
FastqParser)
def test_freebie_parser_1():
"""
This one is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert True
def test_freebie_parser_2():
"""
This too is a freebie
DO NOT MODIFY THIS FUNCTION
"""
assert 1 != 2
def test_FastaParser():
"""
Write your unit test for your FastaParser
class here. You should generate an instance of
your FastaParser class and assert that it properly
reads in the example Fasta File.
"""
fa = FastaParser("./data/test.fa")
records = [r for r in fa]
assert len(records) == 100, "did not read in correct number of records" # 100 records in total
for r in records:
assert len(r) == 2, "the record is the wrong length" # each record consists of header and sequence
def test_FastqParser():
"""
Write your unit test for your FastqParser
class here. You should generate an instance of
your FastqParser class and assert that it properly
reads in the example Fastq File.
"""
fq = FastqParser("./data/test.fq")
records = [r for r in fq]
assert len(records) == 100, "did not read in correct number of records" # 100 records in total
for r in records:
assert len(r) == 3, "the record is the wrong length" # each record is header, sequence, and quality
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0
|
4c6a9382b347ed5d441f6449ab4c5d19324704dd
| 1,692
|
py
|
Python
|
home/migrations/0003_auto_20220326_0711.py
|
SeanCodeMedia/codeMedia-django
|
734284859e35f24bc4a0131154f175614804d4fa
|
[
"MIT"
] | null | null | null |
home/migrations/0003_auto_20220326_0711.py
|
SeanCodeMedia/codeMedia-django
|
734284859e35f24bc4a0131154f175614804d4fa
|
[
"MIT"
] | null | null | null |
home/migrations/0003_auto_20220326_0711.py
|
SeanCodeMedia/codeMedia-django
|
734284859e35f24bc4a0131154f175614804d4fa
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2022-03-26 11:11
| 30.214286
| 106
| 0.550827
|
# Generated by Django 3.1.2 on 2022-03-26 11:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0002_home_main_photo'),
]
operations = [
migrations.RemoveField(
model_name='home',
name='icon1',
),
migrations.RemoveField(
model_name='home',
name='main_description',
),
migrations.RemoveField(
model_name='home',
name='title',
),
migrations.AddField(
model_name='home',
name='email',
field=models.CharField(default='[email protected]', max_length=100),
),
migrations.AddField(
model_name='home',
name='facebook',
field=models.CharField(default='https://www.facebook.com/', max_length=100),
),
migrations.AddField(
model_name='home',
name='instagram',
field=models.CharField(default='https://www.instagram.com/?hl=en', max_length=100),
),
migrations.AddField(
model_name='home',
name='main_photo_2',
field=models.ImageField(default='', upload_to='uploads/home/homephotos'),
),
migrations.AddField(
model_name='home',
name='main_photo_3',
field=models.ImageField(default='', upload_to='uploads/home/homephotos'),
),
migrations.AddField(
model_name='home',
name='youtube',
field=models.CharField(default='https://www.youtube.com/watch?v=KohwrjUIpuw', max_length=100),
),
]
| 0
| 0
| 0
| 1,578
| 0
| 0
| 0
| 19
| 46
|
08975034e5eeea126f26be92ee6ee1566c77c249
| 8,678
|
py
|
Python
|
src/everythingAboutTheMetalAPI/chapter09/__main__.py
|
pome-ta/pystaMetalStudy
|
530248ad8621ec951fcbaf450ebd26ac2752e540
|
[
"MIT"
] | 1
|
2021-08-05T04:31:02.000Z
|
2021-08-05T04:31:02.000Z
|
src/everythingAboutTheMetalAPI/chapter09/__main__.py
|
pome-ta/pystaMetalStudy
|
530248ad8621ec951fcbaf450ebd26ac2752e540
|
[
"MIT"
] | 2
|
2021-08-14T03:33:12.000Z
|
2021-11-11T06:25:01.000Z
|
src/everythingAboutTheMetalAPI/chapter09/__main__.py
|
pome-ta/pystaMetalStudy
|
530248ad8621ec951fcbaf450ebd26ac2752e540
|
[
"MIT"
] | null | null | null |
import pathlib
import ctypes
import numpy as np
from objc_util import c, create_objc_class, ObjCClass
#import pdbg
shader_path = pathlib.Path('./Shaders.metal')
# --- load objc classes
MTKView = ObjCClass('MTKView')
MTLCompileOptions = ObjCClass('MTLCompileOptions')
MTLRenderPipelineDescriptor = ObjCClass('MTLRenderPipelineDescriptor')
# --- initialize MetalDevice
MTLCreateSystemDefaultDevice = c.MTLCreateSystemDefaultDevice
MTLCreateSystemDefaultDevice.argtypes = []
MTLCreateSystemDefaultDevice.restype = ctypes.c_void_p
memcpy = c.memcpy
memcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
memcpy.restype = ctypes.c_void_p
err_ptr = ctypes.c_void_p()
nd_type = np.float32
# --- set Vertex
vertex_array = [
[[-1.0, -1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]],
[[ 1.0, -1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0]],
[[ 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[-1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
[[-1.0, -1.0, -1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[ 1.0, -1.0, -1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
[[ 1.0, 1.0, -1.0, 1.0], [1.0, 0.0, 0.0, 1.0]],
[[-1.0, 1.0, -1.0, 1.0], [0.0, 1.0, 0.0, 1.0]],
]
Vertex = (((ctypes.c_float * 4) * 2) * 8)
np_vertex = np.array(vertex_array, dtype=nd_type)
index_array = [
0, 1, 2, 2, 3, 0, # front
1, 5, 6, 6, 2, 1, # right
3, 2, 6, 6, 7, 3, # top
4, 5, 1, 1, 0, 4, # bottom
4, 0, 3, 3, 7, 4, # left
7, 6, 5, 5, 4, 7, # back
]
Index = (ctypes.c_uint16 * 36)
np_index = np.array(index_array, dtype=np.uint16)
#MatrixFloat4x4 = ((ctypes.c_float * 4) *4)
MatrixFloat4x4 = (ctypes.c_float *16)
# --- Matrix func
# todo: these repeated ctypes casts are wasteful and should be cleaned up
__vertexData = np_vertex.ctypes.data_as(ctypes.POINTER(Vertex)).contents
_vertexData = np.ctypeslib.as_array(__vertexData)
vertexData = _vertexData.ctypes.data_as(ctypes.POINTER(Vertex)).contents
indexData = np_index.ctypes.data_as(ctypes.POINTER(Index)).contents
# --- MTKViewDelegate
PyRenderer = create_objc_class(
name='PyRenderer',
methods=[drawInMTKView_, mtkView_drawableSizeWillChange_],
protocols=['MTKViewDelegate'])
if __name__ == '__main__':
view = MetalView()
view.present(style='fullscreen', orientations=['portrait'])
| 31.442029
| 151
| 0.671583
|
import pathlib
import ctypes
import numpy as np
from objc_util import c, create_objc_class, ObjCClass, ObjCInstance
import ui
#import pdbg
shader_path = pathlib.Path('./Shaders.metal')
# --- load objc classes
MTKView = ObjCClass('MTKView')
MTLCompileOptions = ObjCClass('MTLCompileOptions')
MTLRenderPipelineDescriptor = ObjCClass('MTLRenderPipelineDescriptor')
# --- initialize MetalDevice
MTLCreateSystemDefaultDevice = c.MTLCreateSystemDefaultDevice
MTLCreateSystemDefaultDevice.argtypes = []
MTLCreateSystemDefaultDevice.restype = ctypes.c_void_p
memcpy = c.memcpy
memcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
memcpy.restype = ctypes.c_void_p
err_ptr = ctypes.c_void_p()
nd_type = np.float32
# --- set Vertex
vertex_array = [
[[-1.0, -1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0]],
[[ 1.0, -1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0]],
[[ 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[-1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
[[-1.0, -1.0, -1.0, 1.0], [0.0, 0.0, 1.0, 1.0]],
[[ 1.0, -1.0, -1.0, 1.0], [1.0, 1.0, 1.0, 1.0]],
[[ 1.0, 1.0, -1.0, 1.0], [1.0, 0.0, 0.0, 1.0]],
[[-1.0, 1.0, -1.0, 1.0], [0.0, 1.0, 0.0, 1.0]],
]
Vertex = (((ctypes.c_float * 4) * 2) * 8)
np_vertex = np.array(vertex_array, dtype=nd_type)
index_array = [
0, 1, 2, 2, 3, 0, # front
1, 5, 6, 6, 2, 1, # right
3, 2, 6, 6, 7, 3, # top
4, 5, 1, 1, 0, 4, # bottom
4, 0, 3, 3, 7, 4, # left
7, 6, 5, 5, 4, 7, # back
]
Index = (ctypes.c_uint16 * 36)
np_index = np.array(index_array, dtype=np.uint16)
#MatrixFloat4x4 = ((ctypes.c_float * 4) *4)
MatrixFloat4x4 = (ctypes.c_float *16)
class Uniforms(ctypes.Structure):
_fields_ = [('modelViewProjectionMatrix', MatrixFloat4x4)]
# --- Matrix func
def translationMatrix(position):
_matrix4x4 = np.identity(4, dtype=nd_type)
_matrix4x4[3] = [position[0], position[1], position[2], 1.0]
return _matrix4x4
def scalingMatrix(scale):
_matrix4x4 = np.identity(4, dtype=nd_type)
_matrix4x4[0, 0] = scale
_matrix4x4[1, 1] = scale
_matrix4x4[2, 2] = scale
_matrix4x4[3, 3] = 1.0
return _matrix4x4
def rotationMatrix(angle, axis):
X = np.zeros(4, dtype=nd_type)
X[0] = axis[0] * axis[0] + (1.0 - axis[0] * axis[0]) * np.cos(angle)
X[1] = axis[0] * axis[1] * (1.0 - np.cos(angle)) - axis[2] * np.sin(angle)
X[2] = axis[0] * axis[2] * (1.0 - np.cos(angle)) + axis[1] * np.sin(angle)
X[3] = 0.0
Y = np.zeros(4, dtype=nd_type)
Y[0] = axis[0] * axis[1] * (1.0 - np.cos(angle)) + axis[2] * np.sin(angle)
Y[1] = axis[1] * axis[1] + (1.0 - axis[1] * axis[1]) * np.cos(angle)
Y[2] = axis[1] * axis[2] * (1.0 - np.cos(angle)) - axis[0] * np.sin(angle)
Y[3] = 0.0
Z = np.zeros(4, dtype=nd_type)
Z[0] = axis[0] * axis[2] * (1.0 - np.cos(angle)) - axis[1] * np.sin(angle)
Z[1] = axis[1] * axis[2] * (1.0 - np.cos(angle)) + axis[0] * np.sin(angle)
Z[2] = axis[2] * axis[2] + (1.0 - axis[2] * axis[2]) * np.cos(angle)
Z[3] = 0.0
W = np.zeros(4, dtype=nd_type)
W[3] = 1.0
_matrix4x4 = np.vstack((X, Y, Z, W))
return _matrix4x4
def projectionMatrix(near, far, aspect, fovy):
scaleY = 1.0 / np.tan(fovy * 0.5)
scaleX = scaleY / aspect
scaleZ = -(far + near) / (far - near)
scaleW = -2.0 * far * near / (far - near)
X = np.array([scaleX, 0.0, 0.0, 0.0], dtype=np.float32)
Y = np.array([0.0, scaleY, 0.0, 0.0], dtype=np.float32)
Z = np.array([0.0, 0.0, scaleZ, -1.0], dtype=np.float32)
W = np.array([0.0, 0.0, scaleW, 0.0], dtype=np.float32)
_matrix4x4 = np.vstack((X, Y, Z, W))
return _matrix4x4
# todo: ็ก้งใซใญใฃในใใใใในใ
__vertexData = np_vertex.ctypes.data_as(ctypes.POINTER(Vertex)).contents
_vertexData = np.ctypeslib.as_array(__vertexData)
vertexData = _vertexData.ctypes.data_as(ctypes.POINTER(Vertex)).contents
indexData = np_index.ctypes.data_as(ctypes.POINTER(Index)).contents
class MetalView(ui.View):
def __init__(self, *args, **kwargs):
ui.View.__init__(self, *args, **kwargs)
self.bg_color = 'maroon'
self.view_did_load()
def view_did_load(self):
mtkView = MTKView.alloc()
_device = MTLCreateSystemDefaultDevice()
# todo: ็ซฏๆซใตใคใบใซใฆ่ฆ่ชฟๆด
_uw, _uh = ui.get_window_size()
_w = min(_uw, _uh) * 0.96
_x = (_uw - _w) / 2
_y = _uh / 4
#_frame = ((32.0, 32.0), (300.0, 300.0))
#_frame = ((0.0, 0.0), (300.0, 300.0))
_frame = ((_x, _y), (_w, _w))
devices = ObjCInstance(_device)
mtkView.initWithFrame_device_(_frame, devices)
#mtkView.setAutoresizingMask_((1 << 1) | (1 << 4))
renderer = self.renderer_init(PyRenderer, mtkView)
mtkView.delegate = renderer
mtkView.framebufferOnly = False
self.objc_instance.addSubview_(mtkView)
def renderer_init(self, delegate, _mtkView):
renderer = delegate.alloc().init()
# --- createBuffer
renderer.device = _mtkView.device()
renderer.commandQueue = renderer.device.newCommandQueue()
# xxx: length
# vertexData.count: 256
renderer.vertexBuffer = renderer.device.newBufferWithBytes_length_options_(vertexData, np_vertex.nbytes, 0)
print('vertexData.count: ', np_vertex.nbytes)
# indexData.count: 72
renderer.indexBuffer = renderer.device.newBufferWithBytes_length_options_(indexData, np_index.nbytes, 0)
print('indexData.count: ', np_index.nbytes)
# size: 64
renderer.uniformBuffer = renderer.device.newBufferWithLength_options_(ctypes.sizeof(Uniforms), 0)
print('Uniforms.size: ', ctypes.sizeof(Uniforms))
renderer.bufferPointer = renderer.uniformBuffer.contents()
renderer.rotation = 0.0
# --- registerShaders
source = shader_path.read_text('utf-8')
library = renderer.device.newLibraryWithSource_options_error_(source, MTLCompileOptions.new(), err_ptr)
vertex_func = library.newFunctionWithName_('vertex_func')
frag_func = library.newFunctionWithName_('fragment_func')
rpld = MTLRenderPipelineDescriptor.new()
rpld.vertexFunction = vertex_func
rpld.fragmentFunction = frag_func
rpld.colorAttachments().objectAtIndexedSubscript(0).pixelFormat = 80 # .bgra8Unorm
renderer.rps = renderer.device.newRenderPipelineStateWithDescriptor_error_(rpld, err_ptr)
return renderer
# --- MTKViewDelegate
def drawInMTKView_(_self, _cmd, _view):
self = ObjCInstance(_self)
view = ObjCInstance(_view)
# --- update
scaled = scalingMatrix(0.5)
self.rotation += 1 / 100 * np.pi / 4.0
rotatedY = rotationMatrix(self.rotation, [0.0, 1.0, 0.0])
rotatedX = rotationMatrix(np.pi / 4.0, [1.0, 0.0, 0.0])
modelMatrix = np.dot(np.dot(rotatedX, rotatedY), scaled)
cameraPosition = [0.0, 0.0, -3.0]
viewMatrix = translationMatrix(cameraPosition)
projMatrix = projectionMatrix(0.0, 10.0, 1.0, 1.0)
_modelViewProjectionMatrix = np.dot(projMatrix, np.dot(viewMatrix, modelMatrix))
# todo: ใใใงใ`ctypes` ใธใญใฃในใ
modelViewProjectionMatrix = _modelViewProjectionMatrix.ctypes.data_as(ctypes.POINTER(MatrixFloat4x4)).contents
self.bufferPointer = self.uniformBuffer.contents()
uniforms = Uniforms(modelViewProjectionMatrix)
# size: 64
ctypes.memmove(self.bufferPointer, ctypes.byref(uniforms), ctypes.sizeof(uniforms))
drawable = view.currentDrawable()
rpd = view.currentRenderPassDescriptor()
rpd.colorAttachments().objectAtIndexedSubscript(0).clearColor = (0.0, 0.5, 0.5, 1.0)
commandBuffer = self.commandQueue.commandBuffer()
commandEncoder = commandBuffer.renderCommandEncoderWithDescriptor_(rpd)
commandEncoder.setRenderPipelineState_(self.rps)
# MTLWinding
# clockwise = 0
# counterClockwise = 1
# MTLCullMode
# none = 0
# front = 1
# back = 2
commandEncoder.setFrontFacingWinding_(1) # .counterClockwise
commandEncoder.setCullMode_(2) # .back
commandEncoder.setVertexBuffer_offset_atIndex_(self.vertexBuffer, 0, 0)
commandEncoder.setVertexBuffer_offset_atIndex_(self.uniformBuffer, 0, 1)
# indexCount: 36
# --- indexBuffer.length: 72
# --- MemoryLayout<UInt16>.size: 2
commandEncoder.drawIndexedPrimitives_indexCount_indexType_indexBuffer_indexBufferOffset_(3, (self.indexBuffer.length() // 2), 0, self.indexBuffer, 0)
commandEncoder.endEncoding()
commandBuffer.presentDrawable_(drawable)
commandBuffer.commit()
commandBuffer.waitUntilCompleted()
def mtkView_drawableSizeWillChange_(_self, _cmd, _view, _size):
self = ObjCInstance(_self)
view = ObjCInstance(_view)
PyRenderer = create_objc_class(
name='PyRenderer',
methods=[drawInMTKView_, mtkView_drawableSizeWillChange_],
protocols=['MTKViewDelegate'])
if __name__ == '__main__':
view = MetalView()
view.present(style='fullscreen', orientations=['portrait'])
| 93
| 0
| 0
| 2,396
| 0
| 3,854
| 0
| 2
| 206
|
8cd547ed24dfc46665b2b6848260fc45380cd132
| 8,521
|
py
|
Python
|
ncmapi.py
|
NKID00/NeteaseCloudMusicApiPy
|
731e8c405928d38be693739cff6449e3426d22c7
|
[
"MIT"
] | null | null | null |
ncmapi.py
|
NKID00/NeteaseCloudMusicApiPy
|
731e8c405928d38be693739cff6449e3426d22c7
|
[
"MIT"
] | null | null | null |
ncmapi.py
|
NKID00/NeteaseCloudMusicApiPy
|
731e8c405928d38be693739cff6449e3426d22c7
|
[
"MIT"
] | null | null | null |
'''NeteaseCloudMusicApiPy
Python bindings for NeteaseCloudMusicApi
https://github.com/NKID00/NeteaseCloudMusicApiPy
MIT License
Copyright (c) 2020 NKID00
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from typing import Iterable
from subprocess import Popen, DEVNULL
from os import environ, kill
from signal import SIGTERM
__all__ = ['VERSION', 'start_ncmapi_server', 'stop_ncmapi_server',
'ncmapi', 'NeteaseCloudMusicApi']
VERSION = 'NeteaseCloudMusicApiPy 0.1.0'
def start_ncmapi_server(ncmapi_server_command: Iterable[str],
port: int = 3000, host: str = 'localhost') -> int:
    '''Start the specified NeteaseCloudMusicApi server command and return the process pid'''
env = dict(environ)
env['HOST'] = str(host)
env['PORT'] = str(port)
p = Popen(tuple(ncmapi_server_command), stdin=DEVNULL,
stdout=DEVNULL, stderr=DEVNULL, env=env)
return p.pid
def stop_ncmapi_server(ncmapi_server_pid: int) -> None:
    '''Stop the NeteaseCloudMusicApi server process with the given pid'''
kill(ncmapi_server_pid, SIGTERM)
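# --- Added usage sketch (not part of the original module): start the server,
# --- give it a moment to come up, then stop it.  The node command below is an
# --- assumption about how the NeteaseCloudMusicApi server is launched locally.
#   from time import sleep
#   pid = start_ncmapi_server(['node', 'NeteaseCloudMusicApi/app.js'], port=3000)
#   sleep(2)                      # wait for the HTTP server to start listening
#   ...                           # talk to http://localhost:3000 here
#   stop_ncmapi_server(pid)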
| 35.065844
| 78
| 0.593006
|
'''NeteaseCloudMusicApiPy
NeteaseCloudMusicApi ็ Python ็ปๅฎ
https://github.com/NKID00/NeteaseCloudMusicApiPy
MIT License
Copyright (c) 2020 NKID00
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from typing import Iterable, Dict, Union, Optional
from subprocess import Popen, DEVNULL
from os import environ, kill
from signal import SIGTERM
from contextlib import contextmanager
from time import time
from requests import Session
from hashlib import md5
from base64 import b64decode
from io import BytesIO
__all__ = ['VERSION', 'start_ncmapi_server', 'stop_ncmapi_server',
'ncmapi', 'NeteaseCloudMusicApi']
VERSION = 'NeteaseCloudMusicApiPy 0.1.0'
def start_ncmapi_server(ncmapi_server_command: Iterable[str],
port: int = 3000, host: str = 'localhost') -> int:
'''ๅฏๅจๆๅฎ็ NeteaseCloudMusicApi ๆๅก่ฟ็จๅนถ่ฟๅ่ฟ็จ pid'''
env = dict(environ)
env['HOST'] = str(host)
env['PORT'] = str(port)
p = Popen(tuple(ncmapi_server_command), stdin=DEVNULL,
stdout=DEVNULL, stderr=DEVNULL, env=env)
return p.pid
def stop_ncmapi_server(ncmapi_server_pid: int) -> None:
'''ๅๆญขๆๅฎ pid ็ NeteaseCloudMusicApi ๆๅก่ฟ็จ'''
kill(ncmapi_server_pid, SIGTERM)
@contextmanager
def ncmapi(ncmapi_server_command: Iterable[str],
port: int = 3000, host: str = 'localhost'):
'''ๅฏๅจๆๅฎ็ NeteaseCloudMusicApi ๆๅก่ฟ็จ
ๅนถ่ฟๅ NeteaseCloudMusicApi ๅฏน่ฑก
้ๅบ่ฟ่กๆถไธไธๆๆถ่ชๅจ้ๅบ็ปๅฝๅนถๅๆญข NeteaseCloudMusicApi ๆๅก่ฟ็จ'''
pid = None
try:
pid = start_ncmapi_server(ncmapi_server_command, port, host)
with NeteaseCloudMusicApi(port, host) as api:
yield api
finally:
if pid is not None:
try:
stop_ncmapi_server(pid)
except OSError:
pass
class NeteaseCloudMusicApi:
    '''NeteaseCloudMusicApi client object that keeps the API base URL,
    related settings and the login state.
    Automatically logs out when the runtime context is exited.'''
def __init__(self, port: int = 3000, host: str = 'localhost',
raise_for_status: bool = True, add_timestamp: bool = False):
self.api_url_base = f'http://{host}:{port}'
self.api_session = Session()
self.raise_for_status = raise_for_status
self.add_timestamp = add_timestamp
def __enter__(self):
return self
def __exit__(self, *exc_info):
try:
self.logout()
except Exception:
pass
def call_api(self, api: str, args: Dict[str, Union[int, bool, str]],
add_timestamp: bool = False) -> dict:
'''่ฐ็จ API'''
if self.add_timestamp or add_timestamp: # ๆทปๅ ๆถ้ดๆณ
args['timestamp'] = int(time() * 1000)
r = self.api_session.get(self.api_url_base + api, params=args)
if self.raise_for_status: # ๅฆๆ่ฟๅ้่ฏฏไปฃ็ ๅๆๅบๅผๅธธ
r.raise_for_status()
return r.json()
def login(self, email: str, password: str = '',
md5_password: Optional[str] = None,
**args: Union[int, bool, str]) -> dict:
        '''/login
        Log in with an email address
        email: email address
        password: password
        md5_password: md5-hashed password; if given, `password` is ignored'''
if md5_password is None:
h = md5()
h.update(password.encode('utf8'))
md5_password = h.hexdigest()
args['email'] = email
args['md5_password'] = md5_password
return self.call_api('/login', args, add_timestamp=True)
def login_cellphone(self, phone: int, password: str = '',
countrycode: Optional[int] = None,
md5_password: Optional[str] = None,
**args: Union[int, bool, str]) -> dict:
        '''/login/cellphone
        Log in with a mobile phone number
        phone: phone number
        password: password
        countrycode: country code for foreign numbers, e.g. 1 for the US
        md5_password: md5-hashed password; if given, `password` is ignored'''
if md5_password is None:
h = md5()
h.update(password.encode('utf8'))
md5_password = h.hexdigest()
args['phone'] = phone
if countrycode is not None:
args['countrycode'] = countrycode
args['md5_password'] = md5_password
return self.call_api('/login/cellphone', args, add_timestamp=True)
def login_qr_check(self, key: str, **args: Union[int, bool, str]) -> dict:
'''/login/qr/check
้ช่ฏไบ็ปด็ ็ปๅฝ
key: ไบ็ปด็ ๆ ่ฏ็ฌฆ'''
args['key'] = key
return self.call_api('/login/qr/check', args, add_timestamp=True)
def login_qr_create(self, key: str, qrimg: bool = True,
qrimg_str: bool = True,
**args: Union[int, bool, str]) -> str:
'''/login/qr/create
่ทๅไบ็ปด็ ้พๆฅ
key: ไบ็ปด็ ๆ ่ฏ็ฌฆ
qrimg: ่ทๅไบ็ปด็ ๅพ็
qrimg_str: ่ทๅไบ็ปด็ ๅพ็ๅญ็ฌฆ็ป'''
args['key'] = key
args['qrimg'] = qrimg or qrimg_str
data = self.call_api('/login/qr/create', args, add_timestamp=True)
if qrimg_str:
from PIL import Image
img_base64 = data['data']['qrimg'].split(',')[1]
img = Image.open(BytesIO(b64decode(img_base64)))
img = img.resize((40, 40), Image.NEAREST).crop((1, 1, 39, 39))
img_str = ''
for y in range(38): # ้ๅ่ก
for x in range(38): # ้ๅๅ
black = sum(img.getpixel((x, y))[:3]) < 384
img_str += 'โโ' if black else ' '
img_str += '\n'
return img_str
if qrimg:
return data['data']['qrimg']
else:
return data['data']['qrurl']
def login_qr_key(self, **args: Union[int, bool, str]) -> str:
'''/login/qr/key
่ทๅไบ็ปด็ ๆ ่ฏ็ฌฆ'''
data = self.call_api('/login/qr/check', args,
add_timestamp=True)
return data['data']['unikey']
def login_refresh(self, **args: Union[int, bool, str]) -> dict:
'''/login/refresh
ๅทๆฐ็ปๅฝ'''
return self.call_api('/login/refresh', args, add_timestamp=True)
def login_status(self, **args: Union[int, bool, str]) -> dict:
'''/login/status
่ทๅ็ปๅฝ็ถๆ
ๆณจๆ: ้่ฆ็ปๅฝ'''
return self.call_api('/login/status', args, add_timestamp=True)
def logout(self, **args: Union[int, bool, str]) -> dict:
'''/logout
้ๅบ็ปๅฝ
ๆณจๆ: ้่ฆ็ปๅฝ'''
return self.call_api('/logout', args, add_timestamp=True)
def playlist_detail(self, id: int, s: Optional[int] = None,
**args: Union[int, bool, str]) -> dict:
        '''/playlist/detail
        Get playlist details
        id: playlist id
        s: number of the playlist's most recent collectors to return [default 8]
        Note: login required'''
args['id'] = id
if s:
args['s'] = s
return self.call_api('/playlist/detail', args)
def song_detail(self, ids: Union[int, Iterable[int]],
**args: Union[int, bool, str]) -> dict:
        '''/song/detail
        Get song details
        ids: song id(s)'''
if isinstance(ids, int):
args['ids'] = ids
else:
args['ids'] = ','.join(map(str, ids))
return self.call_api('/song/detail', args)
def user_playlist(self, uid: int, limit: Optional[int] = None,
offset: Optional[int] = None,
**args: Union[int, bool, str]) -> dict:
'''/user/playlist
่ทๅ็จๆทๆญๅ
uid: ็จๆท id
limit: ่ฟๅๆฐ้
offset: ๅ็งปๆฐ้[้ป่ฎค0]
ๆณจๆ: ้่ฆ็ปๅฝ'''
args['uid'] = uid
if limit is not None:
args['limit'] = limit
if offset is not None:
args['offset'] = offset
return self.call_api('/user/playlist', args)
| 963
| 501
| 0
| 5,492
| 0
| 0
| 0
| 56
| 178
|
821f7729b184207b23c910240f3e1ceacd2e28df
| 12,862
|
py
|
Python
|
tests/test_datasets.py
|
platiagro/projects
|
00da234b35003bb0ecc2d22a997e08737ceda044
|
[
"Apache-2.0"
] | 6
|
2019-09-16T13:07:20.000Z
|
2021-06-02T19:02:05.000Z
|
tests/test_datasets.py
|
platiagro/projects
|
00da234b35003bb0ecc2d22a997e08737ceda044
|
[
"Apache-2.0"
] | 325
|
2019-09-20T20:06:00.000Z
|
2022-03-30T15:05:49.000Z
|
tests/test_datasets.py
|
platiagro/projects
|
00da234b35003bb0ecc2d22a997e08737ceda044
|
[
"Apache-2.0"
] | 17
|
2019-08-02T16:55:47.000Z
|
2021-06-26T19:13:35.000Z
|
# -*- coding: utf-8 -*-
import unittest.mock as mock
from fastapi.testclient import TestClient
from projects.api.main import app
from projects.database import session_scope
import tests.util as util
app.dependency_overrides[session_scope] = util.override_session_scope
TEST_CLIENT = TestClient(app)
| 33.235142
| 132
| 0.594698
|
# -*- coding: utf-8 -*-
import unittest
import unittest.mock as mock
from fastapi.testclient import TestClient
from projects.api.main import app
from projects.database import session_scope
import tests.util as util
app.dependency_overrides[session_scope] = util.override_session_scope
TEST_CLIENT = TestClient(app)
class TestDatasets(unittest.TestCase):
maxDiff = None
def setUp(self):
"""
Sets up the test before running it.
"""
util.create_mocks()
def tearDown(self):
"""
Deconstructs the test after running it.
"""
util.delete_mocks()
def test_list_datasets_project_not_found(self):
"""
Should return an http status 404 and a message 'specified project does not exist'.
"""
project_id = "unk"
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_1
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets"
)
result = rv.json()
expected = {
"message": "The specified project does not exist",
"code": "ProjectNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
def test_list_datasets_experiment_not_found(self):
"""
Should return an http status 404 and a message 'specified experiment does not exist'.
"""
project_id = util.MOCK_UUID_1
experiment_id = "unk"
run_id = "latest"
operator_id = util.MOCK_UUID_1
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets"
)
result = rv.json()
expected = {
"message": "The specified experiment does not exist",
"code": "ExperimentNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
def test_list_datasets_operator_not_found(self):
"""
Should return an http status 404 and a message 'specified operator does not exist'.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = "unk"
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets"
)
result = rv.json()
expected = {
"message": "The specified operator does not exist",
"code": "OperatorNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.stat_dataset",
side_effect=util.FILE_NOT_FOUND_ERROR,
)
def test_list_datasets_dataset_not_found(self, mock_stat_dataset, mock_kfp_client):
"""
Should return an http status 404 and a message 'specified run does not contain dataset'.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "unk"
operator_id = util.MOCK_UUID_1
name = util.IRIS_DATASET_NAME
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets"
)
result = rv.json()
expected = {
"message": "The specified run does not contain dataset",
"code": "DatasetNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_stat_dataset.assert_any_call(
name=name, operator_id=operator_id, run_id=run_id
)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.stat_dataset",
return_value={
"columns": util.IRIS_COLUMNS,
"featuretypes": util.IRIS_FEATURETYPES,
"original-filename": util.IRIS_DATASET_NAME,
"total": len(util.IRIS_DATA_ARRAY),
},
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.load_dataset",
return_value=util.IRIS_DATAFRAME,
)
def test_list_datasets_success(
self, mock_load_dataset, mock_stat_dataset, mock_kfp_client
):
"""
Should return a experiment successfully.
"""
name = util.IRIS_DATASET_NAME
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_2
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets"
)
result = rv.json()
expected = {
"columns": [
"SepalLengthCm",
"SepalWidthCm",
"PetalLengthCm",
"PetalWidthCm",
"Species",
],
"data": [
[5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
[4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
[4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
[4.6, 3.1, 1.5, 0.2, "Iris-setosa"],
],
"total": 4,
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 200)
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_stat_dataset.assert_any_call(
name=name, operator_id=operator_id, run_id="4546465"
)
mock_load_dataset.assert_any_call(
name=name, run_id="4546465", operator_id=operator_id, page=1, page_size=10
)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.stat_dataset",
side_effect=util.FILE_NOT_FOUND_ERROR,
)
def test_list_datasets_no_dataset_assigned_to_run(
self, mock_stat_dataset, mock_kfp_client
):
"""
Should return an http status 404 and a message 'No dataset assigned to the run'.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_1
name = util.IRIS_DATASET_NAME
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets"
)
result = rv.json()
expected = {
"message": "The specified run does not contain dataset",
"code": "DatasetNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_stat_dataset.assert_any_call(
name=name, operator_id=operator_id, run_id="4546465"
)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.stat_dataset",
return_value={
"columns": util.IRIS_HEADERLESS_COLUMNS,
"featuretypes": util.IRIS_FEATURETYPES,
"original-filename": util.IRIS_DATASET_NAME,
"total": len(util.IRIS_DATA_ARRAY),
},
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.load_dataset",
side_effect=util.mock_load_dataset,
)
def test_list_datasets_page_size_1(
self, mock_load_dataset, mock_stat_dataset, mock_kfp_client
):
"""
Should return a list of data and columns with one element.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_2
name = util.IRIS_DATASET_NAME
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets?page=1&page_size=1"
)
result = rv.json()
expected = {
"columns": [
"col0",
"col1",
"col2",
"col3",
"col4",
],
"data": [
[5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
],
"total": 4,
}
self.assertDictEqual(expected, result)
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_stat_dataset.assert_any_call(
name=name, operator_id=operator_id, run_id="4546465"
)
mock_load_dataset.assert_any_call(
name=name, run_id="4546465", operator_id=operator_id, page=1, page_size=1
)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.stat_dataset",
return_value={
"columns": util.IRIS_HEADERLESS_COLUMNS,
"featuretypes": util.IRIS_FEATURETYPES,
"original-filename": util.IRIS_DATASET_NAME,
},
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.load_dataset",
side_effect=util.mock_load_dataset,
)
def test_list_datasets_page_size_minus_1(
self, mock_load_dataset, mock_stat_dataset, mock_kfp_client
):
"""
Should return the dataset formatted as a .CSV file with one less page.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_2
name = util.IRIS_DATASET_NAME
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets?page=1&page_size=-1"
)
result = rv.json()
expected = {
"columns": ["col0", "col1", "col2", "col3", "col4"],
"data": [
[5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
[4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
[4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
],
"total": 3,
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 200)
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_stat_dataset.assert_any_call(
name=name, operator_id=operator_id, run_id="4546465"
)
mock_load_dataset.assert_any_call(
name=name, run_id="4546465", operator_id=operator_id, page=1, page_size=-1
)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.stat_dataset",
return_value={
"columns": util.IRIS_HEADERLESS_COLUMNS,
"featuretypes": util.IRIS_FEATURETYPES,
"original-filename": util.IRIS_DATASET_NAME,
},
)
@mock.patch(
"projects.controllers.experiments.runs.datasets.load_dataset",
side_effect=util.mock_load_dataset,
)
def test_list_datasets_page_not_exist(
self, mock_load_dataset, mock_stat_dataset, mock_kfp_client
):
"""
Should return the dataset formatted as a .CSV file with three pages.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_1
name = util.IRIS_DATASET_NAME
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/datasets?page=2&page_size=3"
)
result = rv.json()
expected = {
"columns": ["col0", "col1", "col2", "col3", "col4"],
"data": [
[5.1, 3.5, 1.4, 0.2, "Iris-setosa"],
[4.9, 3.0, 1.4, 0.2, "Iris-setosa"],
[4.7, 3.2, 1.3, 0.2, "Iris-setosa"],
],
"total": 3,
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 200)
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_stat_dataset.assert_any_call(
name=name, operator_id=operator_id, run_id="4546465"
)
mock_load_dataset.assert_any_call(
name=name, run_id="4546465", operator_id=operator_id, page=2, page_size=3
)
| 0
| 9,935
| 0
| 2,584
| 0
| 0
| 0
| -6
| 45
|
ae6c947746f3d9976489ea081db5ec36cf12f7d9
| 117
|
py
|
Python
|
tests/test_django2_2_fixers.py
|
pakal/django-compat-patcher
|
62c1a766807f2be11b03ea481fbb4c9f9e6529ba
|
[
"MIT"
] | 12
|
2017-05-21T10:52:45.000Z
|
2022-03-04T09:52:58.000Z
|
tests/test_django2_2_fixers.py
|
pakal/django-compat-patcher
|
62c1a766807f2be11b03ea481fbb4c9f9e6529ba
|
[
"MIT"
] | 18
|
2019-04-18T12:42:18.000Z
|
2022-02-23T09:38:45.000Z
|
tests/test_django2_2_fixers.py
|
pakal/django-compat-patcher
|
62c1a766807f2be11b03ea481fbb4c9f9e6529ba
|
[
"MIT"
] | 2
|
2019-05-07T20:28:25.000Z
|
2022-03-03T22:13:15.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
# NOTHING FOR NOW
| 16.714286
| 72
| 0.846154
|
from __future__ import absolute_import, print_function, unicode_literals
import _test_utilities
# NOTHING FOR NOW
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 23
|
71479f55708352b7b69778fa052c2356f1afdd1e
| 4,738
|
py
|
Python
|
main.py
|
Amirmoradi94/SmartCar
|
4c0f17a6a98e6db46769787dc95d11e48b335488
|
[
"MIT"
] | 3
|
2021-01-15T04:33:43.000Z
|
2021-02-15T18:20:15.000Z
|
main.py
|
Amirmoradi94/SmartCar
|
4c0f17a6a98e6db46769787dc95d11e48b335488
|
[
"MIT"
] | null | null | null |
main.py
|
Amirmoradi94/SmartCar
|
4c0f17a6a98e6db46769787dc95d11e48b335488
|
[
"MIT"
] | 1
|
2021-04-07T15:38:47.000Z
|
2021-04-07T15:38:47.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 21:40:05 2020
@author: Amir Moradi
"""
import cv2
from Utils.undistortion import undistortion
from Utils.angle_calculation import angle_calculation
import numpy as np
import serial
video_StreamL = cv2.VideoCapture(2) # index of left camera
video_StreamR = cv2.VideoCapture(1) # index of right camera
face_cascade = cv2.CascadeClassifier('SmartCar/Cascades/haarcascade_frontalface_alt.xml')
eye_cascade = cv2.CascadeClassifier('SmartCar/Cascades/haarcascade_eye_tree_eyeglasses.xml')
cen_eyesL = []
cen_eyesR = []
Proj_R = np.load("SmartCar/Calibration/matrices/Proj_R.npy")
Proj_L = np.load("SmartCar/Calibration/matrices/Proj_L.npy")
ser = serial.Serial("COM5", 9600)
# Set this value according to your project.
mirror_pt = [-10, 10, 150]
while(True):
    retL, imgL = video_StreamL.read()
    retR, imgR = video_StreamR.read()
imgL, imgR = undistortion(imgL, imgR)
grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
try:
facesL = face_cascade.detectMultiScale(grayL, 1.3, 5)
facesR = face_cascade.detectMultiScale(grayR, 1.3, 5)
for (x_l, y_l, w_l, h_l), (x_r, y_r, w_r, h_r) in zip(facesL, facesR):
roi_grayL = grayL[y_l:y_l+h_l, x_l:x_l+w_l]
roi_grayR = grayR[y_r:y_r + h_r, x_r:x_r + w_r]
eyesL = eye_cascade.detectMultiScale(roi_grayL)
eyesR = eye_cascade.detectMultiScale(roi_grayR)
inter_l = []
inter_r = []
for (ex_l,ey_l,ew_l,eh_l), (ex_r,ey_r,ew_r,eh_r) in zip(eyesL, eyesR):
cv2.rectangle(imgL, (ex_l + x_l, ey_l + y_l), (ex_l + ew_l + x_l, ey_l + eh_l + y_l), (0,255,0), 2)
cv2.rectangle(imgR, (ex_r + x_r, ey_r + y_r), (ex_r + ew_r + x_r, ey_r + eh_r + y_r), (0,255,0), 2)
inter_l.append(((2 * ex_l + ew_l)/2, (2 * ey_l + eh_l)/2))
inter_r.append(((2 * ex_r + ew_r)/2, (2 * ey_r + eh_r)/2))
            eyeL_l, eyeR_l = inter_l[0], inter_l[1]
eyeLx_l = eyeL_l[0]
eyeLy_l = eyeL_l[1]
eyeRx_l = eyeR_l[0]
eyeRy_l = eyeR_l[1]
eyeL_r = inter_r[0]
eyeR_r = inter_r[1]
eyeLx_r = eyeL_r[0]
eyeLy_r = eyeL_r[1]
eyeRx_r = eyeR_r[0]
eyeRy_r = eyeR_r[1]
cen_pos_l = (int((eyeLx_l + eyeRx_l)/2 + x_l), int((eyeLy_l + eyeRy_l)/2 + y_l))
cen_pos_r = (int((eyeLx_r + eyeRx_r)/2 + x_r), int((eyeLy_r + eyeRy_r)/2 + y_r))
cen_eyesL.append(cen_pos_l)
cen_eyesR.append(cen_pos_r)
ptL = np.array([[cen_pos_l[0]], [cen_pos_l[1]]], dtype=np.float)
ptR = np.array([[cen_pos_r[0]], [cen_pos_r[1]]], dtype=np.float)
cv2.circle(imgL, cen_pos_l, radius=1, color=(0, 0, 255), thickness=10)
cv2.circle(imgR, cen_pos_r, radius=1, color=(0, 0, 255), thickness=10)
xyz_points = cv2.triangulatePoints(Proj_L, Proj_R, ptL, ptR)
xyz_points /= xyz_points[3]
driver_pt = [int(xyz_points[0][0]), int(xyz_points[1][0]), int(xyz_points[2][0])]
yaw, pitch = angle_calculation(driver_pt, mirror_pt)
pitch_angle = f"S2={pitch}"
yaw_angle = f"S1={yaw}"
ser.write(pitch_angle.encode())
ser.write(yaw_angle.encode())
"""
text_z = "Z is: {} cm".format(int(xyz_points[2][0]))
text_y = "Y is: {} cm".format(int(xyz_points[1][0]))
text_x = "X is: {} cm".format(int(xyz_points[0][0]))
cv2.putText(imgL, text_z, (int(w_l/2) + 20, int(h_l/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
cv2.putText(imgL, text_y, (int(w_l/2) + 20, int(h_l/2)+35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
cv2.putText(imgL, text_x, (int(w_l/2) + 20, int(h_l/2)+70), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
"""
origin_R = np.dot(Proj_R[:3], xyz_points)
origin_L = np.dot(Proj_L[:3], xyz_points)
# Again, put in homogeneous form before using them
origin_R /= origin_R[2]
origin_L /= origin_L[2]
# Press "q" to break the loop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imshow('imgL', imgL)
cv2.imshow('imgR', imgR)
except:
pass
ser.close()
cv2.destroyAllWindows()
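# --- Added sketch (Utils.angle_calculation is not shown in this dump): one
# --- plausible way to turn the driver and mirror points into servo angles is
# --- to take the yaw/pitch of the driver->mirror direction vector.  This is
# --- only an illustration of the geometry, not the project's implementation.
#   def angle_calculation_sketch(driver_pt, mirror_pt):
#       dx, dy, dz = (m - d for m, d in zip(mirror_pt, driver_pt))
#       yaw = int(np.degrees(np.arctan2(dx, dz)))                   # left/right
#       pitch = int(np.degrees(np.arctan2(dy, np.hypot(dx, dz))))   # up/down
#       return yaw, pitch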
| 36.728682
| 120
| 0.548122
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 21:40:05 2020
@author: Amir Moradi
"""
import cv2
from Utils.undistortion import undistortion
from Utils.angle_calculation import angle_calculation
import numpy as np
import serial
video_StreamL = cv2.VideoCapture(2) # index of left camera
video_StreamR = cv2.VideoCapture(1) # index of right camera
face_cascade = cv2.CascadeClassifier('SmartCar/Cascades/haarcascade_frontalface_alt.xml')
eye_cascade = cv2.CascadeClassifier('SmartCar/Cascades/haarcascade_eye_tree_eyeglasses.xml')
cen_eyesL = []
cen_eyesR = []
Proj_R = np.load("SmartCar/Calibration/matrices/Proj_R.npy")
Proj_L = np.load("SmartCar/Calibration/matrices/Proj_L.npy")
ser = serial.Serial("COM5", 9600)
# Set this value according to your project.
mirror_pt = [-10, 10, 150]
while(True):
retL, imgL = vidStreamL.read()
retR, imgR = vidStreamR.read()
imgL, imgR = undistortion(imgL, imgR)
grayL = cv2.cvtColor(imgL, cv2.COLOR_BGR2GRAY)
grayR = cv2.cvtColor(imgR, cv2.COLOR_BGR2GRAY)
try:
facesL = face_cascade.detectMultiScale(grayL, 1.3, 5)
facesR = face_cascade.detectMultiScale(grayR, 1.3, 5)
for (x_l, y_l, w_l, h_l), (x_r, y_r, w_r, h_r) in zip(facesL, facesR):
roi_grayL = grayL[y_l:y_l+h_l, x_l:x_l+w_l]
roi_grayR = grayR[y_r:y_r + h_r, x_r:x_r + w_r]
eyesL = eye_cascade.detectMultiScale(roi_grayL)
eyesR = eye_cascade.detectMultiScale(roi_grayR)
inter_l = []
inter_r = []
for (ex_l,ey_l,ew_l,eh_l), (ex_r,ey_r,ew_r,eh_r) in zip(eyesL, eyesR):
cv2.rectangle(imgL, (ex_l + x_l, ey_l + y_l), (ex_l + ew_l + x_l, ey_l + eh_l + y_l), (0,255,0), 2)
cv2.rectangle(imgR, (ex_r + x_r, ey_r + y_r), (ex_r + ew_r + x_r, ey_r + eh_r + y_r), (0,255,0), 2)
inter_l.append(((2 * ex_l + ew_l)/2, (2 * ey_l + eh_l)/2))
inter_r.append(((2 * ex_r + ew_r)/2, (2 * ey_r + eh_r)/2))
            eyeL_l, eyeR_l = inter_l[0], inter_l[1]
eyeLx_l = eyeL_l[0]
eyeLy_l = eyeL_l[1]
eyeRx_l = eyeR_l[0]
eyeRy_l = eyeR_l[1]
eyeL_r = inter_r[0]
eyeR_r = inter_r[1]
eyeLx_r = eyeL_r[0]
eyeLy_r = eyeL_r[1]
eyeRx_r = eyeR_r[0]
eyeRy_r = eyeR_r[1]
cen_pos_l = (int((eyeLx_l + eyeRx_l)/2 + x_l), int((eyeLy_l + eyeRy_l)/2 + y_l))
cen_pos_r = (int((eyeLx_r + eyeRx_r)/2 + x_r), int((eyeLy_r + eyeRy_r)/2 + y_r))
cen_eyesL.append(cen_pos_l)
cen_eyesR.append(cen_pos_r)
ptL = np.array([[cen_pos_l[0]], [cen_pos_l[1]]], dtype=np.float)
ptR = np.array([[cen_pos_r[0]], [cen_pos_r[1]]], dtype=np.float)
cv2.circle(imgL, cen_pos_l, radius=1, color=(0, 0, 255), thickness=10)
cv2.circle(imgR, cen_pos_r, radius=1, color=(0, 0, 255), thickness=10)
xyz_points = cv2.triangulatePoints(Proj_L, Proj_R, ptL, ptR)
xyz_points /= xyz_points[3]
driver_pt = [int(xyz_points[0][0]), int(xyz_points[1][0]), int(xyz_points[2][0])]
yaw, pitch = angle_calculation(driver_pt, mirror_pt)
pitch_angle = f"S2={pitch}"
yaw_angle = f"S1={yaw}"
ser.write(pitch_angle.encode())
ser.write(yaw_angle.encode())
"""
text_z = "Z is: {} cm".format(int(xyz_points[2][0]))
text_y = "Y is: {} cm".format(int(xyz_points[1][0]))
text_x = "X is: {} cm".format(int(xyz_points[0][0]))
cv2.putText(imgL, text_z, (int(w_l/2) + 20, int(h_l/2)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
cv2.putText(imgL, text_y, (int(w_l/2) + 20, int(h_l/2)+35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
cv2.putText(imgL, text_x, (int(w_l/2) + 20, int(h_l/2)+70), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
"""
origin_R = np.dot(Proj_R[:3], xyz_points)
origin_L = np.dot(Proj_L[:3], xyz_points)
# Again, put in homogeneous form before using them
origin_R /= origin_R[2]
origin_L /= origin_L[2]
# Press "q" to break the loop
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imshow('imgL', imgL)
cv2.imshow('imgR', imgR)
except:
pass
ser.close()
cv2.destroyAllWindows()
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
96636c79f61d2c52c4c27582d3d3210f08ece747
| 3,547
|
py
|
Python
|
bot/exts/cricket.py
|
ShakyaMajumdar/ShaqqueBot
|
f618ae21e4bf700d86674399670634e8d1cc1dc9
|
[
"MIT"
] | null | null | null |
bot/exts/cricket.py
|
ShakyaMajumdar/ShaqqueBot
|
f618ae21e4bf700d86674399670634e8d1cc1dc9
|
[
"MIT"
] | null | null | null |
bot/exts/cricket.py
|
ShakyaMajumdar/ShaqqueBot
|
f618ae21e4bf700d86674399670634e8d1cc1dc9
|
[
"MIT"
] | null | null | null |
# from pprint import pprint
from discord.ext import commands
from bot import constants
API_URL = "https://livescore6.p.rapidapi.com/matches/v2/"
LIVE_MATCHES_URL = API_URL + "list-live"
HEADERS = {
"x-rapidapi-key": constants.RAPIDAPI_KEY,
"x-rapidapi-host": constants.RAPIDAPI_LIVESCORE6_HOST,
}
def setup(bot: commands.Bot):
"""Add Cricket Cog."""
bot.add_cog(Cricket(bot))
| 32.842593
| 120
| 0.480124
|
from dataclasses import dataclass
# from pprint import pprint
import aiohttp
import discord
from discord.ext import commands
from bot import constants
API_URL = "https://livescore6.p.rapidapi.com/matches/v2/"
LIVE_MATCHES_URL = API_URL + "list-live"
HEADERS = {
"x-rapidapi-key": constants.RAPIDAPI_KEY,
"x-rapidapi-host": constants.RAPIDAPI_LIVESCORE6_HOST,
}
@dataclass
class CricketMatch:
format: str
match_no: str
teams: tuple[str, str]
summary: str
scores: dict
status: str
_eid: str
class Cricket(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@staticmethod
def get_live_matches_list_embed(matches: list[CricketMatch]) -> discord.Embed:
embed = discord.Embed(title="Current Live Matches:", colour=discord.Colour.random())
for match in matches:
match_info = f"""\
{match.teams[0]}: {match.scores['T1I1']}
{match.teams[1]}: {match.scores['T2I1']}
"""
if "test" in match.format.lower():
match_info += f"""\
{match.teams[0]}: {match.scores['T1I2']}
{match.teams[1]}: {match.scores['T2I2']}
"""
match_info += f"""\
{match.summary}
{match.status}
"""
embed.add_field(
name="{} vs {}: {}".format(*match.teams, match.match_no or match.format), value=match_info, inline=False
)
return embed
@commands.command()
async def live_scores(self, ctx: commands.Context) -> None:
"""Sends information about ongoing cricket matches."""
querystring = {"Category": "cricket"}
async with aiohttp.ClientSession() as session:
async with session.get(
LIVE_MATCHES_URL, headers=HEADERS, params=querystring
) as response:
response = await response.json()
# pprint(response)
if not response:
await ctx.send("No matches in progress currently!")
return
matches = [
CricketMatch(
format=match["EtTx"],
teams=(
match["T1"][0]["Nm"],
match["T2"][0]["Nm"],
),
summary=match["ECo"],
_eid=match["Eid"],
status=match["EpsL"],
scores={
"T1I1": f"{match.get('Tr1C1', '-')}/"
f"{match.get('Tr1CW1', '-')} "
f"({match.get('Tr1CO1', '-')})",
"T2I1": f"{match.get('Tr2C1', '-')}/"
f"{match.get('Tr2CW1', '-')} "
f"({match.get('Tr2CO1', '-')})",
"T1I2": f"{match.get('Tr1C2', '-')}/"
f"{match.get('Tr1CW2', '-')} "
f"({match.get('Tr1CO2', '-')})",
"T2I2": f"{match.get('Tr2C2', '-')}/"
f"{match.get('Tr2CW2', '-')} "
f"({match.get('Tr2CO2', '-')})",
},
match_no=match.get("ErnInf", ""),
)
for match in map(lambda m: m["Events"][0], response["Stages"])
]
await ctx.send(embed=self.get_live_matches_list_embed(matches))
def setup(bot: commands.Bot):
"""Add Cricket Cog."""
bot.add_cog(Cricket(bot))
| 0
| 2,910
| 0
| 127
| 0
| 0
| 0
| -2
| 113
|
fb2b3853f43ad28f2ba2ee5903c79c030ef5c9c5
| 1,662
|
py
|
Python
|
families/ubuntutw_family.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
families/ubuntutw_family.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
families/ubuntutw_family.py
|
Botomatik/JackBot
|
58651d8b5a5bcead2a2eb79849019cb4f972b7cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 19.325581
| 96
| 0.427798
|
# -*- coding: utf-8 -*-
import family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'ubuntutw' #Set the family name; this should be the same as in the filename.
self.langs = {
'zh': None,
}
        self.namespaces[-2] = {
            '_default': u'媒體',
        }
        self.namespaces[-1] = {
            '_default': u'特殊',
        }
        self.namespaces[1] = {
            '_default': u'討論',
            'zh': u'使用者討論',
        }
        self.namespaces[2] = {
            '_default': u'使用者',
        }
        self.namespaces[3] = {
            '_default': u'討論',
            'zh': u'使用者討論',
        }
        self.namespaces[4] = {
            '_default': u'Ubuntu 正體中文 Wiki',
        }
        self.namespaces[5] = {
            '_default': u'Ubuntu 正體中文 Wiki對話',
        }
        self.namespaces[6] = {
            '_default': u'圖片',
        }
        self.namespaces[7] = {
            '_default': u'圖片討論',
        }
        self.namespaces[10] = {
            '_default': u'模板',
        }
        self.namespaces[11] = {
            '_default': u'模板討論',
        }
        self.namespaces[12] = {
            '_default': u'使用說明',
        }
        self.namespaces[13] = {
            '_default': u'使用說明討論',
        }
        self.namespaces[14] = {
            '_default': u'分類',
        }
        self.namespaces[15] = {
            '_default': u'分類討論',
        }
def hostname(self, code):
return 'wiki.ubuntu-tw.org'
def version(self, code):
return "1.12.0"
def scriptpath(self, code):
return ''
| 177
| 0
| 0
| 1,540
| 0
| 0
| 0
| -8
| 46
|
10b3905d95f1693576c1d6105f16c89c21dc74cc
| 422
|
py
|
Python
|
keggretrieve.py
|
dewuem/python-bioinf
|
9dc45a467fc884644157ef75c4e3c34f5fd8ebcf
|
[
"MIT"
] | 1
|
2019-06-26T23:27:05.000Z
|
2019-06-26T23:27:05.000Z
|
keggretrieve.py
|
dewuem/python-bioinf
|
9dc45a467fc884644157ef75c4e3c34f5fd8ebcf
|
[
"MIT"
] | null | null | null |
keggretrieve.py
|
dewuem/python-bioinf
|
9dc45a467fc884644157ef75c4e3c34f5fd8ebcf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2
# coding: utf-8
# Daniel Elsner
# Get the amino acid sequence from the correct url for a kegg entry...
# Use best with GNU parallel (Tange 2011a) and an input list containing all the gene IDs from a kegg pathway.
import sys
from bs4 import BeautifulSoup
import requests
url = sys.argv[1]
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, 'html.parser')
print soup.pre.get_text()
| 17.583333
| 109
| 0.729858
|
#!/usr/bin/python2
# coding: utf-8
# Daniel Elsner
# Get the amino acid sequence from the correct url for a kegg entry...
# Use best with GNU parallel (Tange 2011a) and an input list containing all the gene IDs from a kegg pathway.
import sys
from bs4 import BeautifulSoup
import requests
url = sys.argv[1]
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, 'html.parser')
print soup.pre.get_text()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a5d94fadc5c483bc4f0c130583259b8d58126dd1
| 3,209
|
py
|
Python
|
kogia/core/models.py
|
pascalpepe/kogia
|
af41f857729144f3c747a812345892e21d561e89
|
[
"Apache-2.0"
] | null | null | null |
kogia/core/models.py
|
pascalpepe/kogia
|
af41f857729144f3c747a812345892e21d561e89
|
[
"Apache-2.0"
] | null | null | null |
kogia/core/models.py
|
pascalpepe/kogia
|
af41f857729144f3c747a812345892e21d561e89
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2017-2020 Pascal Pepe <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core models."""
| 23.423358
| 74
| 0.63727
|
# Copyright (C) 2017-2020 Pascal Pepe <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core models."""
import uuid
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
class ArchivableModel(models.Model):
"""Abstract model that can be archived."""
is_archived = models.BooleanField(
default=False,
verbose_name=_('archived?'),
)
class Meta:
abstract = True
class OrderableModel(models.Model):
"""Abstract model that can be ordered."""
order = models.PositiveSmallIntegerField(
blank=True,
null=True,
verbose_name=_('order'),
)
class Meta:
abstract = True
class OwnableModel(models.Model):
"""Abstract model with an optional owner."""
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
blank=True,
null=True,
verbose_name=_('owner'),
)
class Meta:
abstract = True
class PublishableModel(models.Model):
"""Abstract model with publication features."""
PUB_STATUS_CHOICES = [
('DRAFT', _('draft')),
('PENDING', _('pending')),
('PUBLISHED', _('published')),
]
pub_date = models.DateTimeField(
blank=True,
null=True,
verbose_name=_('publication date'),
)
pub_status = models.CharField(
max_length=32,
choices=PUB_STATUS_CHOICES,
default='DRAFT',
verbose_name=_('publication status'),
)
class Meta:
abstract = True
class SEOModel(models.Model):
"""Abstract model with SEO-specific fields."""
search_title = models.CharField(
max_length=255,
blank=True,
verbose_name=_('search title'),
)
search_description = models.CharField(
max_length=255,
blank=True,
verbose_name=_('search description'),
)
class Meta:
abstract = True
class UUIDModel(models.Model):
"""Abstract model with a UUID as primary key."""
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
verbose_name=_('ID'),
)
class Meta:
abstract = True
class VisibilityStatusModel(models.Model):
"""Abstract model with a visibility status."""
VISIBILITY_STATUS_CHOICES = [
('PRIVATE', _('private')),
('PUBLIC', _('public')),
]
visibility_status = models.CharField(
max_length=32,
choices=VISIBILITY_STATUS_CHOICES,
default='PUBLIC',
verbose_name=_('visibility status'),
)
class Meta:
abstract = True
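# --- Editor's note: illustrative sketch only, not part of kogia.core.models ---
# The abstract models above are meant to be mixed into concrete models of an
# installed app. A hypothetical model combining a few of them could look like
# the commented example below (the "Article" model and its "title" field are
# assumptions made for the example, not names from this project):
#
#   class Article(UUIDModel, PublishableModel, OwnableModel, SEOModel):
#       """Gets a UUID pk, publication workflow, optional owner, and SEO fields."""
#       title = models.CharField(max_length=255, verbose_name=_('title'))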
| 0
| 0
| 0
| 2,282
| 0
| 0
| 0
| 41
| 251
|
65145260a9407e4c4c5f11fa168a6c7e6ff28eaf
| 985
|
py
|
Python
|
example/sql.py
|
kmuehlbauer/wetterdienst
|
85e72ccdbd00f0e8285e1ba24800dfafb81ccd63
|
[
"MIT"
] | 1
|
2021-01-23T22:52:52.000Z
|
2021-01-23T22:52:52.000Z
|
example/sql.py
|
kmuehlbauer/wetterdienst
|
85e72ccdbd00f0e8285e1ba24800dfafb81ccd63
|
[
"MIT"
] | null | null | null |
example/sql.py
|
kmuehlbauer/wetterdienst
|
85e72ccdbd00f0e8285e1ba24800dfafb81ccd63
|
[
"MIT"
] | null | null | null |
"""
=====
About
=====
Acquire measurement information from DWD and filter using SQL.
=====
Setup
=====
::
pip install wetterdienst[sql]
"""
import logging
log = logging.getLogger()
if __name__ == "__main__":
main()
| 18.240741
| 84
| 0.652792
|
"""
=====
About
=====
Acquire measurement information from DWD and filter using SQL.
=====
Setup
=====
::
pip install wetterdienst[sql]
"""
import logging
from wetterdienst import DWDStationRequest
from wetterdienst import TimeResolution, Parameter, PeriodType
log = logging.getLogger()
def sql_example():
request = DWDStationRequest(
station_ids=[1048],
parameter=[Parameter.TEMPERATURE_AIR],
time_resolution=TimeResolution.HOURLY,
start_date="2019-01-01",
end_date="2020-01-01",
tidy_data=True,
humanize_column_names=True,
prefer_local=True,
write_file=True,
)
sql = "SELECT * FROM data WHERE element='temperature_air_200' AND value < -7.0;"
log.info(f"Invoking SQL query '{sql}'")
df = request.collect_safe()
df = df.wd.lower().io.sql(sql)
print(df)
def main():
logging.basicConfig(level=logging.INFO)
sql_example()
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 600
| 0
| 62
| 91
|
717ef021b534903c9c8167a5c7531193230a9ab0
| 2,400
|
py
|
Python
|
click_game/MainWindow.py
|
HSU-S21-CS232/gui-examples
|
5f7d011dd13cee0648a3d69f1774edef7424e422
|
[
"Apache-2.0"
] | null | null | null |
click_game/MainWindow.py
|
HSU-S21-CS232/gui-examples
|
5f7d011dd13cee0648a3d69f1774edef7424e422
|
[
"Apache-2.0"
] | null | null | null |
click_game/MainWindow.py
|
HSU-S21-CS232/gui-examples
|
5f7d011dd13cee0648a3d69f1774edef7424e422
|
[
"Apache-2.0"
] | null | null | null |
import sys
#be sure to import any widget that you want to manipulate
from PySide2.QtWidgets import QApplication
if __name__ == '__main__':
app = QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
| 30.379747
| 92
| 0.63375
|
import sys
from enum import Enum
from PySide2.QtUiTools import QUiLoader #allows us to load .ui files
#be sure to import any widget that you want to manipulate
from PySide2.QtWidgets import QApplication, QPushButton, QGridLayout, QSizePolicy
from PySide2.QtCore import QFile, QObject
import random
class MainWindow(QObject):
#class constructor
def __init__(self, ui_file = 'MainWindow.ui', parent=None):
self._num_buttons = 15
self._num_rows = 4
self._num_cols = 4
#call class parent (QObject) constructor
super(MainWindow, self).__init__(parent)
#load the UI file into Python
#ui_file was a string, now it's a proper QT object
ui_file = QFile(ui_file)
ui_file.open(QFile.ReadOnly)
loader = QUiLoader()
self.window = loader.load(ui_file)
#always remember to close files
ui_file.close()
#add event listeners for UI events
self.addEventListeners()
#randomize button placement
self.initializeGame()
#show window to the user
self.window.show()
def addEventListeners(self):
pass
def initializeGame(self):
#create buttons
self._buttons = {}
for i in range(1, self._num_buttons + 1):
self._buttons[i] = QPushButton(str(i))
self._buttons[i].setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
#initialize spaces
available_rows = []
available_cols = []
for i in range(self._num_rows):
available_rows.append(i)
for i in range(self._num_cols):
available_cols.append(i)
random.shuffle(available_rows)
random.shuffle(available_cols)
layout_grid = self.window.findChild(QGridLayout, 'mainWindowGridLayout')
#place buttons in random spaces
current_button = 1
for i in range(len(available_rows)):
for j in range(len(available_cols)):
next_row = available_rows[i]
next_col = available_cols[j]
if current_button in self._buttons:
layout_grid.addWidget(self._buttons[current_button], next_row, next_col)
current_button += 1
if __name__ == '__main__':
app = QApplication(sys.argv)
main_window = MainWindow()
sys.exit(app.exec_())
| 0
| 0
| 0
| 1,959
| 0
| 0
| 0
| 69
| 140
|
2de795b5bf05bc4e60da1705586f10f407d4268b
| 1,573
|
py
|
Python
|
list-quotas-regional.py
|
thiago-a-azevedo/gcp-resource-list
|
df7fe500f86b745e1815c8d510f8dca6e8bc355c
|
[
"Apache-2.0"
] | 2
|
2022-02-05T21:05:43.000Z
|
2022-02-06T01:55:50.000Z
|
list-quotas-regional.py
|
thiago-a-azevedo/gcp-resource-list
|
df7fe500f86b745e1815c8d510f8dca6e8bc355c
|
[
"Apache-2.0"
] | null | null | null |
list-quotas-regional.py
|
thiago-a-azevedo/gcp-resource-list
|
df7fe500f86b745e1815c8d510f8dca6e8bc355c
|
[
"Apache-2.0"
] | null | null | null |
# List GCP Regional project quotas
# Official GCP SDK (Python) Documentation: https://googleapis.github.io/google-api-python-client/docs/dyn/
import argparse
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.cloud import resource_manager
client = resource_manager.Client()
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
# Filter of Projects that will be scanned
parser_args = argparse.ArgumentParser(description='Define the projetc_id filter.'
'if empity will looking for all the active project_id that the credential have access')
parser_args.add_argument('--project')
project_Filter = parser_args.parse_args()
if project_Filter.project is None:
env_filter = {'lifecycleState': 'ACTIVE' }
else:
env_filter = {'projectId': project_Filter.project ,'lifecycleState': 'ACTIVE' }
# print csv header
print ('project_id;project_name;region;metric;limit;usage')
for project in client.list_projects(env_filter):
region_request = compute.regions().list(project=project.project_id)
regions = region_request.execute()
for region in regions['items']:
for quota in region['quotas']:
print(
project.project_id, ';',
project.name, ';',
region.get('name'),';',
quota.get('metric'),';',
quota.get('limit'),';',
quota.get('usage'),';'
)
| 32.770833
| 106
| 0.689129
|
# List GCP Regional project quotas
# Official GCP SDK (Python) Documentation: https://googleapis.github.io/google-api-python-client/docs/dyn/
import json
import ipcalc
import sys
import argparse
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.cloud import resource_manager
client = resource_manager.Client()
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
# Filter of Projects that will be scanned
parser_args = argparse.ArgumentParser(description='Define the projetc_id filter.'
'if empity will looking for all the active project_id that the credential have access')
parser_args.add_argument('--project')
project_Filter = parser_args.parse_args()
if project_Filter.project is None:
env_filter = {'lifecycleState': 'ACTIVE' }
else:
env_filter = {'projectId': project_Filter.project ,'lifecycleState': 'ACTIVE' }
# print csv header
print ('project_id;project_name;region;metric;limit;usage')
for project in client.list_projects(env_filter):
region_request = compute.regions().list(project=project.project_id)
regions = region_request.execute()
for region in regions['items']:
for quota in region['quotas']:
print(
project.project_id, ';',
project.name, ';',
region.get('name'),';',
quota.get('metric'),';',
quota.get('limit'),';',
quota.get('usage'),';'
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -29
| 67
|
df462c1c222636422e075517ef3000fe1439adb5
| 411
|
py
|
Python
|
osmaxx/profile/admin.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | null | null | null |
osmaxx/profile/admin.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | null | null | null |
osmaxx/profile/admin.py
|
tyrasd/osmaxx
|
da4454083d17b2ef8b0623cad62e39992b6bd52a
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from osmaxx.profile.models import Profile
admin.site.register(Profile, ProfileAdmin)
| 24.176471
| 58
| 0.751825
|
from django import forms
from django.contrib import admin
from osmaxx.profile.models import Profile
class ProfileAdminForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['associated_user', 'unverified_email']
class ProfileAdmin(admin.ModelAdmin):
list_display = ['associated_user', 'unverified_email']
form = ProfileAdminForm
admin.site.register(Profile, ProfileAdmin)
| 0
| 0
| 0
| 219
| 0
| 0
| 0
| 3
| 68
|
1abc0cd17b3be692c4ae6a95012e1e744129a64f
| 6,798
|
py
|
Python
|
rdkit/ML/ModelPackage/UnitTestPackage.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 1,609
|
2015-01-05T02:41:13.000Z
|
2022-03-30T21:57:24.000Z
|
rdkit/ML/ModelPackage/UnitTestPackage.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 3,412
|
2015-01-06T12:13:33.000Z
|
2022-03-31T17:25:41.000Z
|
rdkit/ML/ModelPackage/UnitTestPackage.py
|
bp-kelley/rdkit
|
e0de7c9622ce73894b1e7d9568532f6d5638058a
|
[
"BSD-3-Clause"
] | 811
|
2015-01-11T03:33:48.000Z
|
2022-03-28T11:57:49.000Z
|
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" unit tests for the model and descriptor packager """
import unittest
if __name__ == '__main__': # pragma: nocover
unittest.main()
| 38.191011
| 104
| 0.597823
|
#
# Copyright (C) 2002-2008 greg Landrum and Rational Discovery LLC
#
""" unit tests for the model and descriptor packager """
import os
import random
import unittest
from xml.dom import minidom
from xml.etree import ElementTree as ET
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import Descriptors
from rdkit.ML.Composite import Composite
from rdkit.ML.Data import DataUtils
from rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator
from rdkit.ML.ModelPackage import Packager, PackageUtils
from rdkit.ML.ModelPackage.Packager import ModelPackage
from io import BytesIO
import pickle
def feq(a, b, tol=1e-4):
return abs(a - b) <= tol
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDCodeDir, 'ML/ModelPackage/test_data')
self.testD = [
# NOTE: the confidences here can be twitchy due to changes in descriptors:
('Fc1ccc(NC(=O)c2cccnc2Oc3cccc(c3)C(F)(F)F)c(F)c1', 0, 0.8),
# (r'CN/1(=C\C=C(/C=C1)\C\2=C\C=N(C)(Cl)\C=C2)Cl',0,0.70),
(r'NS(=O)(=O)c1cc(ccc1Cl)C2(O)NC(=O)c3ccccc32', 1, 0.70),
]
def _loadPackage(self):
with open(os.path.join(self.dataDir, 'Jan9_build3_pkg.pkl'), 'r') as pkgTF:
buf = pkgTF.read().replace('\r\n', '\n').encode('utf-8')
pkgTF.close()
io = BytesIO(buf)
pkg = pickle.load(io)
return pkg
def _verify(self, pkg, testD):
for smi, pred, conf in testD:
m = Chem.MolFromSmiles(smi)
self.assertTrue(m is not None, 'SMILES: %s failed\n' % (smi))
p, c = pkg.Classify(m)
assert p == pred, 'bad prediction (%d) for smiles %s' % (p, smi)
assert feq(c, conf), 'bad confidence (%f) for smiles %s' % (c, smi)
def _verify2(self, pkg, testD):
for smi, pred, conf in testD:
m = Chem.MolFromSmiles(smi)
self.assertTrue(m is not None, 'SMILES: %s failed\n' % (smi))
p, c = pkg.Classify(m)
assert p == pred, 'bad prediction (%d) for smiles %s' % (p, smi)
assert feq(c, conf), 'bad confidence (%f) for smiles %s' % (c, smi)
p, c = pkg.Classify(m)
assert p == pred, 'bad prediction (%d) for smiles %s' % (p, smi)
assert feq(c, conf), 'bad confidence (%f) for smiles %s' % (c, smi)
def testBuild(self):
# """ tests building and screening a packager """
with open(os.path.join(self.dataDir, 'Jan9_build3_calc.dsc'), 'r') as calcTF:
buf = calcTF.read().replace('\r\n', '\n').encode('utf-8')
calcTF.close()
calc = pickle.load(BytesIO(buf))
with open(os.path.join(self.dataDir, 'Jan9_build3_model.pkl'), 'rb') as modelF:
model = pickle.load(modelF)
pkg = Packager.ModelPackage(descCalc=calc, model=model)
self._verify(pkg, self.testD)
def testLoad(self):
# """ tests loading and screening a packager """
pkg = self._loadPackage()
self._verify(pkg, self.testD)
def testLoad2(self):
# """ tests loading and screening a packager 2 """
pkg = self._loadPackage()
self._verify2(pkg, self.testD)
def testPerm1(self):
# """ tests the descriptor remapping stuff in a packager """
pkg = self._loadPackage()
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
ref = {}
DataUtils.InitRandomNumbers((23, 42))
for smi, _, _ in self.testD:
for desc in names:
fn = getattr(Descriptors, desc, lambda x: 777)
m = Chem.MolFromSmiles(smi)
ref[desc] = fn(m)
for _ in range(5):
perm = list(names)
random.shuffle(perm, random=random.random)
m = Chem.MolFromSmiles(smi)
for desc in perm:
fn = getattr(Descriptors, desc, lambda x: 777)
val = fn(m)
assert feq(val, ref[desc], 1e-4), '%s: %s(%s): %f!=%f' % (str(perm), smi, desc, val,
ref[desc])
def testPerm2(self):
# """ tests the descriptor remapping stuff in a packager """
pkg = self._loadPackage()
calc = pkg.GetCalculator()
names = calc.GetDescriptorNames()
DataUtils.InitRandomNumbers((23, 42))
perm = list(names)
random.shuffle(perm, random=random.random)
calc.simpleList = perm
calc.descriptorNames = perm
pkg.Init()
self._verify(pkg, self.testD)
def test_ModelPackage(self):
pkg = self._loadPackage()
self.assertTrue(isinstance(pkg.GetCalculator(), MolecularDescriptorCalculator))
pkg.SetCalculator('calculator')
self.assertEqual(pkg.GetCalculator(), 'calculator')
self.assertTrue(isinstance(pkg.GetModel(), Composite.Composite))
pkg.SetModel('model')
self.assertEqual(pkg.GetModel(), 'model')
self.assertEqual(pkg.GetDataset(), None)
pkg.SetDataset('dataset')
self.assertEqual(pkg.GetDataset(), 'dataset')
self.assertEqual(pkg.GetNotes(), 'General purpose model built from PhysProp data')
pkg.SetNotes('notes')
self.assertEqual(pkg.GetNotes(), 'notes')
# Here seems to be a difference between Python 2 and 3. The next assert works in Python 3,
# but fails in Python 2
# self.assertFalse(hasattr(pkg, '_supplementalData'))
self.assertEqual(pkg.GetSupplementalData(), [])
self.assertTrue(hasattr(pkg, '_supplementalData'))
delattr(pkg, '_supplementalData')
pkg.AddSupplementalData('supp1')
self.assertTrue(hasattr(pkg, '_supplementalData'))
self.assertEqual(pkg.GetSupplementalData(), ['supp1'])
pkg.AddSupplementalData('supp2')
self.assertEqual(pkg.GetSupplementalData(), ['supp1', 'supp2'])
pkg = ModelPackage()
self.assertFalse(pkg._initialized)
pkg.Init()
self.assertFalse(pkg._initialized)
def test_PackageUtils(self):
pkg = self._loadPackage()
xml = PackageUtils.PackageToXml(
pkg, dataPerformance=[('label', ['accuracy', 'avgCorrect', 'avgIncorrect']), ],
recommendedThreshold=0.2, classDescriptions=[('a', 'texta'), ('b', 'textb')],
modelType='model type', modelOrganism='model organism')
s = prettyXML(xml.getroot())
self.assertIn('<RDModelInfo>', s)
def prettyXML(xml):
s = ET.tostring(xml, encoding='utf-8')
tree = minidom.parseString(s)
return tree.toprettyxml(indent=' ')
if __name__ == '__main__': # pragma: nocover
unittest.main()
| 0
| 0
| 0
| 5,878
| 0
| 147
| 0
| 179
| 378
|
0700d43171f01aecee586622fd901c92050a6f9c
| 2,501
|
py
|
Python
|
tests/test_metrics_list.py
|
amitsagtani97/prometheus-api-client-python
|
49d0fdfc9a1fcfd5f51c53972cd2fcd223b1ddcf
|
[
"MIT"
] | 3
|
2020-05-06T06:39:00.000Z
|
2020-06-05T06:23:05.000Z
|
tests/test_metrics_list.py
|
amitsagtani97/prometheus-api-client-python
|
49d0fdfc9a1fcfd5f51c53972cd2fcd223b1ddcf
|
[
"MIT"
] | 2
|
2020-07-14T14:50:39.000Z
|
2020-08-10T02:27:44.000Z
|
tests/test_metrics_list.py
|
amitsagtani97/prometheus-api-client-python
|
49d0fdfc9a1fcfd5f51c53972cd2fcd223b1ddcf
|
[
"MIT"
] | null | null | null |
import unittest
if __name__ == "__main__":
unittest.main()
| 33.346667
| 100
| 0.608956
|
import unittest
import json
import os
import datetime
from prometheus_api_client import MetricsList
class TestMetricsList(unittest.TestCase):
def setUp(self):
"""
read metrics stored as jsons in './tests/metrics'
"""
self.raw_metrics_list = list()
for (dir_path, _, file_names) in os.walk("./tests/metrics"):
self.raw_metrics_list.extend(
[json.load(open(os.path.join(dir_path, file))) for file in file_names]
)
def test_setup(self):
"""
Check if setup was done correctly
"""
self.assertEqual(
8, len(self.raw_metrics_list), "incorrect number json files read (incorrect test setup)"
)
def test_init(self):
"""
Test if metrics initialized in the list are correct
"""
self.assertEqual(
9, # manually check the number of unique metric time-series
len(MetricsList(self.raw_metrics_list)),
"incorrect number of unique metric timeseries",
)
def test_init_single_metric(self):
self.assertEqual(
1,
len(MetricsList(self.raw_metrics_list[0][0])),
"incorrect number of Metric objects initialized for a raw metric not in a list",
)
self.assertEqual(
1,
len(MetricsList([self.raw_metrics_list[0][0]])),
"incorrect number of Metric objects initialized for a single metric list",
)
def test_unique_metric_combination(self):
start_time = datetime.datetime(2019, 7, 28, 10, 0)
start_time_plus_1m = datetime.datetime(2019, 7, 28, 10, 1)
end_time = datetime.datetime(2019, 7, 30, 10, 0)
end_time_minus_1m = datetime.datetime(2019, 7, 30, 9, 59)
self.assertTrue(
MetricsList(self.raw_metrics_list)[0].start_time > start_time,
"Combined metric start time incorrect",
)
self.assertTrue(
MetricsList(self.raw_metrics_list)[0].start_time < start_time_plus_1m,
"Combined metric start time incorrect",
)
self.assertTrue(
MetricsList(self.raw_metrics_list)[0].end_time < end_time,
"Combined metric end time incorrect",
)
self.assertTrue(
MetricsList(self.raw_metrics_list)[0].end_time > end_time_minus_1m,
"Combined metric end time incorrect",
)
if __name__ == "__main__":
unittest.main()
| 0
| 0
| 0
| 2,328
| 0
| 0
| 0
| -4
| 111
|
a757eb7b2184767f8ea2351b30cce6601a45be78
| 1,076
|
py
|
Python
|
captioning/utils/div_utils.py
|
HongkuanZhang/self-critical.pytorch
|
deccb8bf624ad6771193dfdfbe71bec958c7f715
|
[
"MIT"
] | 1,030
|
2017-11-18T09:15:26.000Z
|
2022-03-29T05:35:24.000Z
|
misc/div_utils.py
|
sgondala/GoogleConceptualCaptioning
|
b7aef355bcf893d9f1e2250efd3a9b0e30646331
|
[
"MIT"
] | 261
|
2017-06-09T03:45:54.000Z
|
2022-03-30T05:19:20.000Z
|
misc/div_utils.py
|
sgondala/GoogleConceptualCaptioning
|
b7aef355bcf893d9f1e2250efd3a9b0e30646331
|
[
"MIT"
] | 332
|
2017-05-10T02:28:48.000Z
|
2022-03-30T08:26:33.000Z
|
# -----------------------------------------------
| 28.315789
| 67
| 0.596654
|
from random import uniform
import numpy as np
from collections import OrderedDict, defaultdict
from itertools import tee
import time
# -----------------------------------------------
def find_ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def compute_div_n(caps,n=1):
aggr_div = []
for k in caps:
all_ngrams = set()
lenT = 0.
for c in caps[k]:
tkns = c.split()
lenT += len(tkns)
ng = find_ngrams(tkns, n)
all_ngrams.update(ng)
aggr_div.append(float(len(all_ngrams))/ (1e-6 + float(lenT)))
return np.array(aggr_div).mean(), np.array(aggr_div)
def compute_global_div_n(caps,n=1):
aggr_div = []
all_ngrams = set()
lenT = 0.
for k in caps:
for c in caps[k]:
tkns = c.split()
lenT += len(tkns)
ng = find_ngrams(tkns, n)
all_ngrams.update(ng)
if n == 1:
aggr_div.append(float(len(all_ngrams)))
else:
aggr_div.append(float(len(all_ngrams))/ (1e-6 + float(lenT)))
return aggr_div[0], np.repeat(np.array(aggr_div),len(caps))
| 0
| 0
| 0
| 0
| 0
| 825
| 0
| 23
| 178
|
fbcedc37d1242b6a75437cb537ee7a1051dfc8d8
| 358
|
py
|
Python
|
tests/test_bonddata.py
|
andrew-block/jamesbond
|
9820526df12cc7b62b93638788ca8bbef2081c9b
|
[
"MIT"
] | 3
|
2021-10-18T18:51:40.000Z
|
2021-12-20T15:45:26.000Z
|
tests/test_bonddata.py
|
andrew-block/jamesbond
|
9820526df12cc7b62b93638788ca8bbef2081c9b
|
[
"MIT"
] | null | null | null |
tests/test_bonddata.py
|
andrew-block/jamesbond
|
9820526df12cc7b62b93638788ca8bbef2081c9b
|
[
"MIT"
] | 3
|
2020-08-27T11:06:02.000Z
|
2021-08-10T10:13:24.000Z
|
from jamesbond import bonddata
def test_load_data():
"""
Test the row & column count (shape)
Test the first column from the last row of the dataset 'Spectre'.
"""
df = bonddata.load_data()
shape = df.shape
last_row_first_col = df.iloc[-1, 1]
assert shape == (24, 27)
assert last_row_first_col == 'Spectre'
| 23.866667
| 69
| 0.659218
|
import pytest
from jamesbond import bonddata
def test_load_data():
"""
Test the row & column count (shape)
Test the first column from the last row of the dataset 'Spectre'.
"""
df = bonddata.load_data()
shape = df.shape
last_row_first_col = df.iloc[-1, 1]
assert shape == (24, 27)
assert last_row_first_col == 'Spectre'
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -8
| 22
|
7c28549faf904dad9ced65ad4b3cab1ce627221f
| 40
|
py
|
Python
|
tests/__init__.py
|
bgailleton/helplotlib
|
1c517e997cbb7dca021b589d8237637f09040c42
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
bgailleton/helplotlib
|
1c517e997cbb7dca021b589d8237637f09040c42
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
bgailleton/helplotlib
|
1c517e997cbb7dca021b589d8237637f09040c42
|
[
"MIT"
] | null | null | null |
"""Unit test package for helplotlib."""
| 20
| 39
| 0.7
|
"""Unit test package for helplotlib."""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
43cd4c86ae09c8f5727dc536abc8a702deb94a79
| 52
|
py
|
Python
|
testcase/test.py
|
songhuijuantianxiezuo/songhuijuan
|
a80f4add92035914c61ccf5d873e4bc2063ef147
|
[
"MIT"
] | null | null | null |
testcase/test.py
|
songhuijuantianxiezuo/songhuijuan
|
a80f4add92035914c61ccf5d873e4bc2063ef147
|
[
"MIT"
] | null | null | null |
testcase/test.py
|
songhuijuantianxiezuo/songhuijuan
|
a80f4add92035914c61ccf5d873e4bc2063ef147
|
[
"MIT"
] | null | null | null |
a=1,b=2
print(assertEqual(a,b)) #
| 7.428571
| 32
| 0.538462
|
a=1,b=2
print(assertEqual(a,b)) # verify whether a and b are equal
| 18
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c4c753f45db71b0be93714242ffc0238d4d7156f
| 10,257
|
py
|
Python
|
matlab_test_files/invfreqs_test.py
|
vishalbelsare/parametric_modeling
|
9bfe5df35671930043215c8f6c855af8f49e28bf
|
[
"BSD-3-Clause"
] | 37
|
2015-02-01T12:03:48.000Z
|
2021-12-23T14:38:38.000Z
|
matlab_test_files/invfreqs_test.py
|
vishalbelsare/parametric_modeling
|
9bfe5df35671930043215c8f6c855af8f49e28bf
|
[
"BSD-3-Clause"
] | 2
|
2015-07-27T11:34:24.000Z
|
2019-12-11T13:39:18.000Z
|
matlab_test_files/invfreqs_test.py
|
vishalbelsare/parametric_modeling
|
9bfe5df35671930043215c8f6c855af8f49e28bf
|
[
"BSD-3-Clause"
] | 19
|
2016-09-06T20:23:19.000Z
|
2021-11-07T16:07:40.000Z
|
# if available import pylab (from matlibplot)
try:
except ImportError:
pass
| 35.989474
| 253
| 0.538267
|
import numpy as np
import scipy
import matcompat
# if available import pylab (from matlibplot)
try:
import matplotlib.pylab as plt
except ImportError:
pass
def invfreqs(g, w, varargin):
# Local Variables: realFlag, cg, realStr, D31, gndir, t1, cw, nk, kom, nm, na, nb, Vcap, V1, rg, pf, rw, tol, maxiter, varargin, cwf, wf, ll, D, rwf, D32, Dva, Dvb, gaussFlag, verb, GC, e, th, a, OM, b, Vd, g, k, l, st, indg, R, inda, t, w, indb, D3
# Function calls: disp, all, invfreqs, deal, ischar, int2str, warning, apolystab, home, message, size, getString, sqrt, clc, zeros, norm, real, nargchk, max, nargin, ones, isempty, lower, length, num2str, error, strcmp
#%INVFREQS Analog filter least squares fit to frequency response data.
#% [B,A] = INVFREQS(H,W,nb,na) gives real numerator and denominator
#% coefficients B and A of orders nb and na respectively, where
#% H is the desired complex frequency response of the system at frequency
#% points W, and W contains the frequency values in radians/s.
#% INVFREQS yields a filter with real coefficients. This means that it is
#% sufficient to specify positive frequencies only; the filter fits the data
#% conj(H) at -W, ensuring the proper frequency domain symmetry for a real
#% filter.
#%
#% [B,A]=INVFREQS(H,W,nb,na,Wt) allows the fit-errors to the weighted
#% versus frequency. LENGTH(Wt)=LENGTH(W)=LENGTH(H).
#% Determined by minimization of sum |B-H*A|^2*Wt over the freqs in W.
#%
#% [B,A] = INVFREQS(H,W,nb,na,Wt,ITER) does another type of fit:
#% Sum |B/A-H|^2*Wt is minimized with respect to the coefficients in B and
#% A by numerical search in at most ITER iterations. The A-polynomial is
#% then constrained to be stable. [B,A]=INVFREQS(H,W,nb,na,Wt,ITER,TOL)
#% stops the iterations when the norm of the gradient is less than TOL.
#% The default value of TOL is 0.01. The default value of Wt is all ones.
#% This default value is also obtained by Wt=[].
#%
#% [B,A]=INVFREQS(H,W,nb,na,Wt,ITER,TOL,'trace') provides a textual
#% progress report of the iteration.
#%
#% [B,A] = INVFREQS(H,W,'complex',NB,NA,...) creates a complex filter. In
#% this case, no symmetry is enforced.
#%
#% % Example:
#% % Convert a simple transfer function to frequency response data and
#% % then back to the original filter coefficients. If the system is
#% % unstable, use invfreqs's iterative algorithm to find a stable
#% % approximation to the system.
#%
#% b = [1 2 3 2 3]; % Numerator coefficients
#% a = [1 2 3 2 1 4]; % Denominator coefficients
#% [h,w] = freqs(b,a,64);
#% [bb,aa] = invfreqs(h,w,4,5) % aa has poles in the right half-plane.
#% fprintf('Stable Approximation to the system:')
#% [bbb,aaa] = invfreqs(h,w,4,5,[],30) % stable approximation to system
#%
#% See also FREQZ, FREQS, INVFREQZ.
#% Author(s): J.O. Smith and J.N. Little, 4-23-86
#% J.N. Little, 4-27-88, revised
#% Lennart Ljung, 9-21-92, rewritten
#% T. Krauss, 10-22-92, trace mode made optional
#% Copyright 1988-2011 The MathWorks, Inc.
#%
#% calling sequence is
#%function [b,a]=invfreqs(g,w,nb,na,wf,maxiter,tol,pf)
#% OR
#%function [b,a]=invfreqs(g,w,'complex',nb,na,wf,maxiter,tol,pf)
matcompat.error(nargchk(4., 9., nargin, 'struct'))
if ischar(varargin.cell[0]):
realStr = lower(varargin.cell[0])
varargin[0] = np.array([])
else:
realStr = 'real'
gaussFlag = length(varargin) > 3.
#% run Gauss-Newton algorithm or not?
if length(varargin)<6.:
varargin.cell[5] = np.array([])
#% pad varargin with []'s
[nb, na, wf, maxiter, tol, pf] = deal(varargin.cell[:])
_switch_val=realStr
if False: # switch
pass
elif _switch_val == 'real':
realFlag = 1.
elif _switch_val == 'complex':
realFlag = 0.
else:
matcompat.warning(message('signal:invfreqs:InvalidParam', realStr))
realFlag = 0.
nk = 0.
#% The code is prepared for constraining the numerator to
#% begin with nk zeros.
nb = nb+nk+1.
if isempty(pf):
verb = 0.
elif strcmp(pf, 'trace'):
verb = 1.
else:
matcompat.error(message('signal:invfreqs:NotSupported', pf))
if isempty(wf):
wf = np.ones(length(w), 1.)
wf = np.sqrt(wf)
if length(g) != length(w):
matcompat.error(message('signal:invfreqs:UnmatchedLengths', 'H', 'W'))
if length(wf) != length(w):
matcompat.error(message('signal:invfreqs:UnmatchedLengths', 'Wt', 'W'))
#% if any( w(:)<0 ) && realFlag
#% warning(message('signal:invfreqs:InvalidWParam', 'W', 'INVFREQS', 'complex'))
#% end
[rw, cw] = matcompat.size(w)
if rw > cw:
w = w.conj().T
[rg, cg] = matcompat.size(g)
if cg > rg:
g = g.T
[rwf, cwf] = matcompat.size(wf)
if cwf > rwf:
wf = wf.conj().T
nm = matcompat.max((na+1.), (nb+nk))
indb = np.arange(nb, (1.)+(-1.), -1.)
indg = np.arange(na+1., (1.)+(-1.), -1.)
inda = np.arange(na, (1.)+(-1.), -1.)
OM = np.ones(1., length(w))
for kom in np.arange(1., (nm-1.)+1):
        OM = np.array(np.vstack((np.hstack((OM)), np.hstack(((1j*w)**kom)))))
#%
#% Estimation in the least squares case:
#%
Dva = OM[int(inda)-1,:].T*np.dot(g, np.ones(1., na))
Dvb = -OM[int(indb)-1,:].T
D = np.array(np.hstack((Dva, Dvb)))*np.dot(wf, np.ones(1., (na+nb)))
R = np.dot(D.conj().T, D)
Vd = np.dot(D.conj().T, -g*OM[int((na+1.))-1,:].T*wf)
if realFlag:
R = np.real(R)
Vd = np.real(Vd)
th = linalg.solve(R, Vd)
a = np.array(np.hstack((1., th[0:na].T)))
b = np.array(np.hstack((np.zeros(1., nk), th[int(na+1.)-1:na+nb].T)))
if not gaussFlag:
return []
#% Now for the iterative minimization
if isempty(maxiter):
maxiter = 30.
if isempty(tol):
tol = 0.01
#% Stabilizing the denominator:
a = apolystab(a, realFlag)
#% The initial estimate:
GC = (np.dot(b, OM[int(indb)-1,:])/np.dot(a, OM[int(indg)-1,:])).T
e = (GC-g)*wf
Vcap = np.dot(e.conj().T, e)
t = np.array(np.hstack((a[1:na+1.], b[int(nk+1.)-1:nk+nb]))).T
if verb:
#% invfreqz using same messages
clc
np.disp(np.array(np.hstack((' ', getString(message('signal:invfreqs:INITIALESTIMATE'))))))
np.disp(np.array(np.hstack((getString(message('signal:invfreqs:CurrentFit')), num2str(Vcap)))))
np.disp(getString(message('signal:invfreqs:Parvector')))
np.disp(t)
#% %
#% ** the minimization loop **
#%
gndir = 2.*tol+1.
l = 0.
st = 0.
while np.all(np.array(np.hstack((linalg.norm(gndir) > tol, l<maxiter, st != 1.)))):
l = l+1.
#% * compute gradient *
D31 = OM[int(inda)-1,:].T*np.dot(-GC/np.dot(a, OM[int(indg)-1,:]).T, np.ones(1., na))
D32 = OM[int(indb)-1,:].T/np.dot(np.dot(a, OM[int(indg)-1,:]).T, np.ones(1., nb))
D3 = np.array(np.hstack((D31, D32)))*np.dot(wf, np.ones(1., (na+nb)))
#% * compute Gauss-Newton search direction
e = (GC-g)*wf
R = np.dot(D3.conj().T, D3)
Vd = np.dot(D3.conj().T, e)
if realFlag:
R = np.real(R)
Vd = np.real(Vd)
gndir = linalg.solve(R, Vd)
#% * search along the gndir-direction *
ll = 0.
k = 1.
V1 = Vcap+1.
t1 = t
while np.all(np.array(np.hstack((V1, ll<20.)))):
t1 = t-np.dot(k, gndir)
if ll == 19.:
t1 = t
a = np.array(np.hstack((1., t1[0:na].T)))
b = np.array(np.hstack((np.zeros(1., nk), t1[int(na+1.)-1:na+nb].T)))
a = apolystab(a, realFlag)
#% Stabilizing the denominator
t1[0:na] = a[1:na+1.].T
GC = (np.dot(b, OM[int(indb)-1,:])/np.dot(a, OM[int(indg)-1,:])).T
V1 = np.dot(((GC-g)*wf).conj().T, (GC-g)*wf)
if verb:
home
np.disp(int2str(ll))
k = k/2.
ll = ll+1.
if ll == 10.:
gndir = np.dot(matdiv(Vd, linalg.norm(R)), length(R))
k = 1.
if ll == 20.:
st = 1.
if verb:
home
np.disp(np.array(np.hstack((' ', getString(message('signal:invfreqs:ITERATION')), int2str(l)))))
np.disp(np.array(np.hstack((getString(message('signal:invfreqs:CurrentFit')), num2str(V1), getString(message('signal:invfreqs:PreviousFit')), num2str(Vcap)))))
np.disp(getString(message('signal:invfreqs:CurrentParPrevparGNdir')))
np.disp(np.array(np.hstack((t1, t, gndir))))
np.disp(np.array(np.hstack((getString(message('signal:invfreqs:NormOfGNvector')), num2str(linalg.norm(gndir))))))
if st == 1.:
np.disp(getString(message('signal:invfreqs:NoImprovement')))
np.disp(getString(message('signal:invfreqs:IterationsThereforeTerminated')))
t = t1
Vcap = V1
return [b, a]
def apolystab(a, realFlag):
# Local Variables: a, realFlag, vind, v
# Function calls: real, poly, length, apolystab, find, roots
#%APOLYSTAB Stabilize filter, analog
#% inputs: a - denominator polynomial
#% realFlag - 1 for real, 0 for complex
#% returns stabilized denoninator polynomial
if length(a) > 0.:
v = np.roots(a)
vind = nonzero((np.real(v) > 0.))
v[int(vind)-1] = -v[int(vind)-1]
a = np.poly(v)
if realFlag:
a = np.real(a)
return [a]
| 0
| 0
| 0
| 0
| 0
| 10,047
| 0
| -8
| 138
|
000b1fd18754a4d6247223a1d655166753c79f23
| 22,046
|
py
|
Python
|
corehq/apps/api/es.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 471
|
2015-01-10T02:55:01.000Z
|
2022-03-29T18:07:18.000Z
|
corehq/apps/api/es.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 14,354
|
2015-01-01T07:38:23.000Z
|
2022-03-31T20:55:14.000Z
|
corehq/apps/api/es.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 175
|
2015-01-06T07:16:47.000Z
|
2022-03-29T13:27:01.000Z
|
import logging
from dimagi.utils.parsing import ISO_DATE_FORMAT
from corehq.apps.api.resources.v0_1 import TASTYPIE_RESERVED_GET_PARAMS
from corehq.pillows.base import VALUE_TAG
logger = logging.getLogger('es')
def report_term_filter(terms, mapping):
"""convert terms to correct #value term queries based upon the mapping
does it match up with pre-defined stuff in the mapping?
"""
ret_terms = []
for orig_term in terms:
curr_mapping = mapping.get('properties')
split_term = orig_term.split('.')
for ix, sub_term in enumerate(split_term, start=1):
is_property = sub_term in curr_mapping
if ix == len(split_term):
#it's the last one, and if it's still not in it, then append a value
if is_property:
ret_term = orig_term
else:
ret_term = '%s.%s' % (orig_term, VALUE_TAG)
ret_terms.append(ret_term)
if is_property and 'properties' in curr_mapping[sub_term]:
curr_mapping = curr_mapping[sub_term]['properties']
return ret_terms
SUPPORTED_DATE_FORMATS = [
ISO_DATE_FORMAT,
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%MZ', # legacy Case API date format
]
RESERVED_QUERY_PARAMS = set(['limit', 'offset', 'order_by', 'q', '_search'] + TASTYPIE_RESERVED_GET_PARAMS)
query_param_consumers = [
TermParam('xmlns', 'xmlns.exact'),
TermParam('xmlns.exact'),
TermParam('case_name', 'name', analyzed=True),
TermParam('case_type', 'type', analyzed=True),
# terms listed here to prevent conversion of their values to lower case since
# since they are indexed as `not_analyzed` in ES
TermParam('type.exact'),
TermParam('name.exact'),
TermParam('external_id.exact'),
TermParam('contact_phone_number'),
DateRangeParams('received_on'),
DateRangeParams('server_modified_on'),
DateRangeParams('date_modified', 'modified_on'),
DateRangeParams('server_date_modified', 'server_modified_on'),
DateRangeParams('indexed_on', 'inserted_at'),
]
| 37.177066
| 144
| 0.606459
|
import copy
import datetime
import json
import logging
from django.http import HttpResponse
from django.utils.decorators import classonlymethod, method_decorator
from django.views.generic import View
from corehq.util.es.elasticsearch import ElasticsearchException, NotFoundError
from casexml.apps.case.models import CommCareCase
from corehq.util.es.interface import ElasticsearchInterface
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import ISO_DATE_FORMAT
from corehq.apps.api.models import ESCase, ESXFormInstance
from corehq.apps.api.resources.v0_1 import TASTYPIE_RESERVED_GET_PARAMS
from corehq.apps.api.util import object_does_not_exist
from corehq.apps.domain.decorators import login_and_domain_required
from corehq.apps.es import filters
from corehq.apps.es.forms import FormES
from corehq.apps.es.cases import CaseES
from corehq.apps.es.utils import flatten_field_dict
from corehq.apps.reports.filters.forms import FormsByApplicationFilter
from corehq.elastic import (
ESError,
get_es_new,
report_and_fail_on_shard_failures,
)
from corehq.pillows.base import VALUE_TAG, restore_property_dict
from corehq.pillows.mappings.case_mapping import CASE_ES_ALIAS
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_ES_ALIAS
from corehq.pillows.mappings.reportxform_mapping import REPORT_XFORM_ALIAS
from corehq.pillows.mappings.xform_mapping import XFORM_ALIAS
from no_exceptions.exceptions import Http400
logger = logging.getLogger('es')
class ESUserError(Http400):
pass
class DateTimeError(ValueError):
pass
class ESView(View):
"""
Generic CBV for interfacing with the Elasticsearch REST api.
This is necessary because tastypie's built in REST assumptions don't like
ES's POST for querying, which we can set explicitly here.
For security purposes, queries ought to be domain'ed by the requesting user, so a base_query
is encouraged to be added.
Access to the APIs can be done via url endpoints which are attached to the corehq.api.urls
or programmatically via the self.run_query() method.
This current iteration of the ESView must require a domain for its usage for security purposes.
"""
#note - for security purposes, csrf protection is ENABLED
#search POST queries must take the following format:
#query={query_json}
#csrfmiddlewaretoken=token
#in curl, this is:
#curl -b "csrftoken=<csrftoken>;sessionid=<session_id>" -H "Content-Type: application/json" -XPOST http://server/a/domain/api/v0.1/xform_es/
# -d"[email protected]&csrfmiddlewaretoken=<csrftoken>"
#or, call this programmatically to avoid CSRF issues.
es_alias = ""
domain = ""
es = None
doc_type = None
model = None
http_method_names = ['get', 'post', 'head', ]
def __init__(self, domain):
super(ESView, self).__init__()
self.domain = domain.lower()
self.es = get_es_new()
self.es_interface = ElasticsearchInterface(self.es)
def head(self, *args, **kwargs):
raise NotImplementedError("Not implemented")
@method_decorator(login_and_domain_required)
#@method_decorator(csrf_protect)
# todo: csrf_protect temporarily removed and left to implementor's prerogative
# getting ajax'ed csrf token method needs revisit.
def dispatch(self, *args, **kwargs):
req = args[0]
self.pretty = req.GET.get('pretty', False)
if self.pretty:
self.indent = 4
else:
self.indent = None
ret = super(ESView, self).dispatch(*args, **kwargs)
return ret
@classonlymethod
def as_view(cls, **initkwargs):
"""
Django as_view cannot be used since the constructor requires information only present in the request.
"""
raise Exception('as_view not supported for domain-specific ESView')
@classonlymethod
def as_domain_specific_view(cls, **initkwargs):
"""
Creates a simple domain-specific class-based view for passing through ES requests.
"""
def view(request, domain, *args, **kwargs):
self = cls(domain)
return self.dispatch(request, domain, *args, **kwargs)
return view
def get_document(self, doc_id):
try:
doc = self.es_interface.get_doc(self.es_alias, '_all', doc_id)
except NotFoundError:
raise object_does_not_exist(self.doc_type, doc_id)
if doc.get('domain') != self.domain:
raise object_does_not_exist(self.doc_type, doc_id)
return self.model(doc) if self.model else doc
def run_query(self, es_query, es_type=None):
"""
Run a more advanced POST based ES query
Returns the raw query json back, or None if there's an error
"""
logger.info("ESlog: [%s.%s] ESquery: %s" % (self.__class__.__name__, self.domain, json.dumps(es_query)))
if 'fields' in es_query or 'script_fields' in es_query:
#nasty hack to add domain field to query that does specific fields.
#do nothing if there's no field query because we get everything
fields = es_query.get('fields', [])
fields.append('domain')
es_query['fields'] = fields
try:
es_results = self.es_interface.search(self.es_alias, es_type, body=es_query)
report_and_fail_on_shard_failures(es_results)
except ElasticsearchException as e:
if 'query_string' in es_query.get('query', {}).get('filtered', {}).get('query', {}):
# the error may have been caused by a bad query string
# re-run with no query string to check
querystring = es_query['query']['filtered']['query']['query_string']['query']
new_query = es_query
new_query['query']['filtered']['query'] = {"match_all": {}}
new_results = self.run_query(new_query)
if new_results:
# the request succeeded without that query string
# an error with a blank query will return None
raise ESUserError("Error with elasticsearch query: %s" %
querystring)
msg = "Error in elasticsearch query [%s]: %s\nquery: %s" % (self.es_alias, str(e), es_query)
raise ESError(msg)
hits = []
for res in es_results['hits']['hits']:
if '_source' in res:
res_domain = res['_source'].get('domain', None)
elif 'fields' in res:
res['fields'] = flatten_field_dict(res)
res_domain = res['fields'].get('domain', None)
# security check
if res_domain == self.domain:
hits.append(res)
else:
logger.info("Requester domain %s does not match result domain %s" % (
self.domain, res_domain))
es_results['hits']['hits'] = hits
return es_results
def count_query(self, es_query):
return self.es_interface.count(self.es_alias, None, es_query)
class CaseESView(ESView):
"""
Expressive CaseES interface. Yes, this is redundant with pieces of the v0_1.py CaseAPI - todo to merge these applications
Which this should be the final say on ES access for Casedocs
"""
es_alias = CASE_ES_ALIAS
doc_type = "CommCareCase"
model = ESCase
class ReportCaseESView(ESView):
es_alias = REPORT_CASE_ES_ALIAS
doc_type = "CommCareCase"
model = ESCase
class FormESView(ESView):
es_alias = XFORM_ALIAS
doc_type = "XFormInstance"
model = ESXFormInstance
def run_query(self, es_query, **kwargs):
es_results = super(FormESView, self).run_query(es_query)
# hack, walk the results again, and if we have xmlns, populate human readable names
# Note that `get_unknown_form_name` does not require the request, which is also
# not necessarily available here. So `None` is passed here.
form_filter = FormsByApplicationFilter(None, domain=self.domain)
for res in es_results.get('hits', {}).get('hits', []):
if '_source' in res:
xmlns = res['_source'].get('xmlns', None)
name = None
if xmlns:
name = form_filter.get_unknown_form_name(xmlns,
app_id=res['_source'].get('app_id',
None),
none_if_not_found=True)
if not name:
name = 'unknown' # try to fix it below but this will be the default
# fall back
try:
if res['_source']['form'].get('@name', None):
name = res['_source']['form']['@name']
else:
backup = res['_source']['form'].get('#type', 'data')
if backup != 'data':
name = backup
except (TypeError, KeyError):
pass
res['_source']['es_readable_name'] = name
return es_results
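# --- Editor's note: illustrative usage sketch, not part of the original module ---
# As the ESView docstring notes, these views can also be used programmatically via
# run_query() rather than through the URL endpoints. A minimal, hypothetical
# example (the domain name "demo-domain" and the query body are assumptions):
#
#   form_es = FormESView('demo-domain')
#   results = form_es.run_query({
#       'query': {'match_all': {}},
#       'size': 10,
#   })
#   hits = results['hits']['hits']  # already restricted to the requesting domain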
def report_term_filter(terms, mapping):
"""convert terms to correct #value term queries based upon the mapping
does it match up with pre-defined stuff in the mapping?
"""
ret_terms = []
for orig_term in terms:
curr_mapping = mapping.get('properties')
split_term = orig_term.split('.')
for ix, sub_term in enumerate(split_term, start=1):
is_property = sub_term in curr_mapping
if ix == len(split_term):
#it's the last one, and if it's still not in it, then append a value
if is_property:
ret_term = orig_term
else:
ret_term = '%s.%s' % (orig_term, VALUE_TAG)
ret_terms.append(ret_term)
if is_property and 'properties' in curr_mapping[sub_term]:
curr_mapping = curr_mapping[sub_term]['properties']
return ret_terms
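# --- Editor's note: illustrative example, not part of the original module ---
# Given a hypothetical report mapping where "name" is a mapped property but
# "age" is a dynamic property stored under a VALUE_TAG sub-field, the helper
# above rewrites only the dynamic term:
#
#   mapping = {'properties': {'name': {'type': 'string'}}}
#   report_term_filter(['name', 'age'], mapping)
#   # -> ['name', 'age.' + VALUE_TAG]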
class ReportFormESView(FormESView):
es_alias = REPORT_XFORM_ALIAS
doc_type = "XFormInstance"
model = ESXFormInstance
def run_query(self, es_query):
es_results = super(FormESView, self).run_query(es_query)
#hack, walk the results again, and if we have xmlns, populate human readable names
# Note that `get_unknown_form_name` does not require the request, which is also
# not necessarily available here. So `None` is passed here.
form_filter = FormsByApplicationFilter(None, domain=self.domain)
for res in es_results.get('hits', {}).get('hits', []):
if '_source' in res:
res_source = restore_property_dict(res['_source'])
res['_source'] = res_source
xmlns = res['_source'].get('xmlns', None)
name = None
if xmlns:
name = form_filter.get_unknown_form_name(xmlns,
app_id=res['_source'].get('app_id',
None),
none_if_not_found=True)
if not name:
name = 'unknown' # try to fix it below but this will be the default
# fall back
try:
if res['_source']['form'].get('@name', None):
name = res['_source']['form']['@name']
else:
backup = res['_source']['form'].get('#type', 'data')
if backup != 'data':
name = backup
except (TypeError, KeyError):
pass
res['_source']['es_readable_name'] = name
return es_results
class ElasticAPIQuerySet(object):
"""
An abstract representation of an elastic search query,
modeled somewhat after Django's QuerySet but with
the only important goal being compatibility
with Tastypie's classes. Key capabilities, by piece of
Tastypie:
Pagination:
- `__getitem__([start:stop])` which should efficiently pass the bounds on to ES
- `count()` which should efficiently ask ES for the total matching (regardless of slice)
Sorting:
- order_by('field') or order_by('-field') both become ES service-side sort directives
Serialization:
- `__iter__()`
"""
# Also note https://github.com/llonchj/django-tastypie-elasticsearch/ which is
# not very mature, plus this code below may involve Dimagic-specific assumptions
def __init__(self, es_client, payload=None, model=None):
"""
Instantiate with an entire ElasticSearch payload,
since "query", "filter", etc, all exist alongside
each other.
"""
self.es_client = es_client
self.payload = payload
self.model = model
self.__results = None
def with_fields(self, es_client=None, payload=None, model=None):
"Clones this queryset, optionally changing some fields"
return ElasticAPIQuerySet(es_client=es_client or self.es_client,
payload=payload or self.payload,
model=model or self.model)
@property
def results(self):
if self.__results is None:
self.__results = self.es_client.run_query(self.payload)
return self.__results
def count(self):
return self.es_client.count_query(self.payload)
def order_by(self, *fields):
new_payload = copy.deepcopy(self.payload)
new_payload['sort'] = []
for field in fields:
if not field:
continue
direction = 'asc'
missing_dir = '_first'
if field[0] == '-':
direction = 'desc'
missing_dir = '_last'
field = field[1:]
new_payload['sort'].append({field: {
'order': direction,
"missing": missing_dir
}})
return self.with_fields(payload=new_payload)
def __len__(self):
# Note that this differs from `count` in that it actually performs the query and measures
# only those objects returned
return len(self.results['hits']['hits'])
def __iter__(self):
for jvalue in self.results['hits']['hits']:
if self.model:
# HACK: Sometimes the model is a class w/ a wrap method, sometimes just a function
if hasattr(self.model, 'wrap'):
if self.model == CommCareCase:
jvalue['_source'].pop('modified_by', None)
yield self.model.wrap(jvalue['_source'])
else:
yield self.model(jvalue['_source'])
else:
yield jvalue['_source']
def __getitem__(self, idx):
if isinstance(idx, slice):
if idx.start < 0 or idx.stop < 0:
# This actually could be supported with varying degrees of efficiency
raise NotImplementedError('Negative index in slice not supported.')
new_payload = copy.deepcopy(self.payload)
new_payload['from'] = new_payload.get('from', 0) + (idx.start or 0)
if idx.stop is not None:
new_payload['size'] = max(0, idx.stop - (idx.start or 0))
return self.with_fields(payload=new_payload)
elif isinstance(idx, int):
if idx >= 0:
# Leverage efficicent backend slicing
return list(self[idx:idx+1])[0]
else:
# This actually could be supported with varying degrees of efficiency
raise NotImplementedError('Negative index not supported.')
else:
raise TypeError('Unsupported type: %s', type(idx))
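# --- Editor's note: illustrative usage sketch, not part of the original module ---
# ElasticAPIQuerySet wraps a raw ES payload so Tastypie can paginate, sort, and
# iterate it. A minimal, hypothetical example (the domain name and payload are
# assumptions made for the example):
#
#   qs = ElasticAPIQuerySet(
#       es_client=CaseESView('demo-domain'),
#       payload={'query': {'match_all': {}}},
#       model=ESCase,
#   )
#   page = qs.order_by('-server_modified_on')[0:20]  # becomes ES sort/from/size
#   total = qs.count()
#   cases = list(page)                               # hits wrapped by the model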
SUPPORTED_DATE_FORMATS = [
ISO_DATE_FORMAT,
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%MZ', # legacy Case API date format
]
def validate_date(date):
for pattern in SUPPORTED_DATE_FORMATS:
try:
return datetime.datetime.strptime(date, pattern)
except ValueError:
pass
# No match
raise DateTimeError("Unknown date format: {}".format(date))
RESERVED_QUERY_PARAMS = set(['limit', 'offset', 'order_by', 'q', '_search'] + TASTYPIE_RESERVED_GET_PARAMS)
class DateRangeParams(object):
def __init__(self, param, term=None):
self.term = term or param
self.start_param = '{}_start'.format(param)
self.end_param = '{}_end'.format(param)
def consume_params(self, raw_params):
start = raw_params.pop(self.start_param, None)
end = raw_params.pop(self.end_param, None)
if start:
start = validate_date(start)
if end:
end = validate_date(end)
if start or end:
# Note that dates are already in a string format when they arrive as query params
return filters.date_range(self.term, gte=start, lte=end)
class TermParam(object):
def __init__(self, param, term=None, analyzed=False):
self.param = param
self.term = term or param
self.analyzed = analyzed
def consume_params(self, raw_params):
value = raw_params.pop(self.param, None)
if value:
            # convert analyzed values to lower case; not-analyzed (exact) values keep their case
value = value.lower() if self.analyzed else value
return filters.term(self.term, value)
class XFormServerModifiedParams:
param = 'server_modified_on'
def consume_params(self, raw_params):
value = raw_params.pop(self.param, None)
if value:
return filters.OR(
filters.AND(
filters.NOT(filters.missing(self.param)), filters.range_filter(self.param, **value)
),
filters.AND(
filters.missing(self.param), filters.range_filter("received_on", **value)
)
)
query_param_consumers = [
TermParam('xmlns', 'xmlns.exact'),
TermParam('xmlns.exact'),
TermParam('case_name', 'name', analyzed=True),
TermParam('case_type', 'type', analyzed=True),
    # terms listed here to prevent conversion of their values to lower case,
    # since they are indexed as `not_analyzed` in ES
TermParam('type.exact'),
TermParam('name.exact'),
TermParam('external_id.exact'),
TermParam('contact_phone_number'),
DateRangeParams('received_on'),
DateRangeParams('server_modified_on'),
DateRangeParams('date_modified', 'modified_on'),
DateRangeParams('server_date_modified', 'server_modified_on'),
DateRangeParams('indexed_on', 'inserted_at'),
]
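# Illustrative mapping sketch (the example values are assumptions, not from the source):
# a GET parameter such as ?case_type=pregnancy is consumed by
# TermParam('case_type', 'type', analyzed=True) above, lowercased, and becomes
# filters.term('type', 'pregnancy'), while
# ?received_on_start=2020-01-01&received_on_end=2020-02-01 is consumed by
# DateRangeParams('received_on') and becomes
# filters.date_range('received_on', gte=<start>, lte=<end>).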
def _validate_and_get_es_filter(search_param):
_filter = search_param.pop('filter', None)
if not _filter:
# not a supported query
raise Http400
try:
        # custom use case by the 'enveritas' project for the Form API
date_range = _filter['range']['inserted_at']
return {
'range': {'inserted_at': date_range}
}
except KeyError:
pass
try:
# custom filter from Data export tool
_range = None
try:
_range = _filter['or'][0]['and'][0]['range']['server_modified_on']
except KeyError:
try:
_range = _filter['or'][0]['and'][1]['range']['server_modified_on']
except KeyError:
pass
if _range:
return XFormServerModifiedParams().consume_params({'server_modified_on': _range})
else:
raise Http400
except (KeyError, AssertionError):
raise Http400
def es_query_from_get_params(search_params, domain, reserved_query_params=None, doc_type='form'):
# doc_type can be form or case
assert doc_type in ['form', 'case']
es = FormES() if doc_type == 'form' else CaseES()
query = es.remove_default_filters().domain(domain)
if doc_type == 'form':
if 'include_archived' in search_params:
query = query.filter(
filters.OR(filters.term('doc_type', 'xforminstance'), filters.term('doc_type', 'xformarchived')))
else:
query = query.filter(filters.term('doc_type', 'xforminstance'))
if '_search' in search_params:
        # This is an undocumented use case relied on by the Data export tool and one custom project.
        # Validate that the passed-in param is one of the two expected filters.
_filter = _validate_and_get_es_filter(json.loads(search_params['_search']))
query = query.filter(_filter)
# filters are actually going to be a more common case
reserved_query_params = RESERVED_QUERY_PARAMS | set(reserved_query_params or [])
query_params = {
param: value
for param, value in search_params.items()
if param not in reserved_query_params and not param.endswith('__full')
}
for consumer in query_param_consumers:
try:
payload_filter = consumer.consume_params(query_params)
except DateTimeError as e:
raise Http400("Bad query parameter: {}".format(str(e)))
if payload_filter:
query = query.filter(payload_filter)
# add unconsumed filters
for param, value in query_params.items():
# assume these fields are analyzed in ES so convert to lowercase
# Any fields that are not analyzed in ES should be in the ``query_param_consumers`` above
value = value.lower()
query = query.filter(filters.term(param, value))
return query.raw_query
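# Minimal illustrative call (assumed domain and params, not taken from the source):
#   raw = es_query_from_get_params({'xmlns': 'http://example.org/my-form'},
#                                  'demo-domain', doc_type='form')
# yields a raw ES query dict filtered on the domain, on doc_type 'xforminstance'
# and on the exact xmlns term.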
| 0
| 1,206
| 0
| 14,035
| 0
| 3,059
| 0
| 759
| 854
|
0079cd9a1c9858deccf8054f86b028bf23ff6bd4
| 489
|
py
|
Python
|
Day 5 Assign 2.py
|
dishabhavsar/Letsupgrade-python-b7
|
3c78bfe8aaa113a003efcccdb3d1787c95aef78e
|
[
"Apache-2.0"
] | null | null | null |
Day 5 Assign 2.py
|
dishabhavsar/Letsupgrade-python-b7
|
3c78bfe8aaa113a003efcccdb3d1787c95aef78e
|
[
"Apache-2.0"
] | null | null | null |
Day 5 Assign 2.py
|
dishabhavsar/Letsupgrade-python-b7
|
3c78bfe8aaa113a003efcccdb3d1787c95aef78e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[69]:
# In[70]:
isPrime(7)
# In[71]:
isPrime(8)
# In[72]:
pno = []
# In[73]:
lst = list(range(0,2500))
# In[74]:
for item in lst:
if isPrime(item):
pno.append(item)
# In[75]:
print(pno)
# In[76]:
lst_prime_no = filter(isPrime,lst)
# In[77]:
print(list(lst_prime_no))
# In[ ]:
| 6.985714
| 34
| 0.507157
|
#!/usr/bin/env python
# coding: utf-8
# In[69]:
def isPrime(n):
    if n < 2:
        return False
    for i in range(2, n):
        if n % i == 0:
            return False
    return True
# In[70]:
isPrime(7)
# In[71]:
isPrime(8)
# In[72]:
pno = []
# In[73]:
lst = list(range(0,2500))
# In[74]:
for item in lst:
if isPrime(item):
pno.append(item)
# In[75]:
print(pno)
# In[76]:
lst_prime_no = filter(isPrime,lst)
# In[77]:
print(list(lst_prime_no))
# In[ ]:
| 0
| 0
| 0
| 0
| 0
| 96
| 0
| 0
| 23
|
3bb68af9b0478d9ea6afdf11b949fe5e580705ff
| 5,131
|
py
|
Python
|
deliver/tests/converter/test_converter.py
|
sirech/deliver
|
0ddb47d9b7c7a4bddfcf92e4bd683803c95efd3a
|
[
"MIT"
] | 3
|
2017-06-07T21:48:20.000Z
|
2020-06-15T16:27:43.000Z
|
deliver/tests/converter/test_converter.py
|
sirech/deliver
|
0ddb47d9b7c7a4bddfcf92e4bd683803c95efd3a
|
[
"MIT"
] | null | null | null |
deliver/tests/converter/test_converter.py
|
sirech/deliver
|
0ddb47d9b7c7a4bddfcf92e4bd683803c95efd3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 38.291045
| 99
| 0.653479
|
# -*- coding: utf-8 -*-
from deliver.tests.test_base import BaseTest, load_msg, load_all_msg
class ConverterTest(BaseTest):
'''Tests for the UnicodeMessage class'''
def setUp(self):
super(ConverterTest,self).setUp()
self.msg = load_msg('sample3')
def get_text(self, decode=False):
return self.msg.get_payload(0).get_payload(decode=decode)
def get_clean_text(self, forbidden_words):
return self.msg.get_payload(0).get_clean_payload(forbidden_words)
def set_text(self, payload):
self.msg.get_payload(0).set_payload(payload)
def test_get(self):
self.assertEqual(self.msg['To'], u'[email protected]')
def test_get_special_chars(self):
        self.assertEqual(self.msg['Subject'], u'Re: [Test] atensión: los 10 curros mejor pagados!')
def test_get_nokey(self):
self.assertEqual(self.msg['Heathen'], None)
def test_replace_header_ascii(self):
s = u'Memorias de Adriano'
self.msg.replace_header('Subject', s)
self.assertEqual(self.msg['Subject'], s)
self.assertEqual(self.msg._msg['Subject'], s.encode('ascii'))
def test_replace_header_special_chars(self):
        s = u'Un día de cólera'
self.msg.replace_header('Subject', s)
self.assertEqual(self.msg['Subject'], s)
self.assertEqual(self.msg._msg['Subject'], '=?utf-8?q?Un_d=C3=ADa_de_c=C3=B3lera?=')
def test_replace_header_no_header(self):
s = u'quoted-printable'
self.msg.replace_header('Content-Transfer-Encoding', s)
self.assertEqual(self.msg['Content-Transfer-Encoding'], s)
def _test_get(self, s, encoded):
self.assertEqual(self.get_text(decode=True), s)
self.assertEqual(self.get_text(), encoded)
def _test_set(self, s, encoded):
self.set_text(s)
self._test_get(s, encoded)
def test_set_payload(self):
s = u'El perro del hortelano'
self.msg = load_msg('sample')
self._test_set(s, s)
def test_set_payload_special_chars(self):
        s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample')
self._test_set(s, u'Con cien ca=F1ones por banda, viento en popa a toda vela')
def test_set_payload_utf8(self):
        s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample')
self.msg.get_payload(0).set_charset('utf-8')
self._test_set(s, u'Con cien ca=C3=B1ones por banda, viento en popa a toda vela')
def test_set_payload_base64(self):
        s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample4')
self._test_set(s, u'Con cien ca=F1ones por banda, viento en popa a toda vela')
def test_set_payload_base64_utf8(self):
        s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample5')
self._test_set(s, u'Con cien ca=C3=B1ones por banda, viento en popa a toda vela')
def test_set_payload_empty(self):
        s = u'Con cien cañones por banda, viento en popa a toda vela'
self.msg = load_msg('sample6')
self._test_set(s, u'Con cien ca=F1ones por banda, viento en popa a toda vela')
def test_get_payload(self):
self.msg = load_msg('sample')
s = u'La direcci=F3n ha cambiado como pod=E9is comprobar en'
self.assertTrue(s in self.get_text())
def test_get_payload_decoded(self):
self.msg = load_msg('sample')
        s = u'La dirección ha cambiado como podéis comprobar en el'
self.assertTrue(s in self.get_text(decode=True))
def test_get_payload_base64(self):
self.msg = load_msg('sample4')
        self._test_get(u'á\n', u'4Qo=')
def test_get_payload_base64_utf8(self):
self.msg = load_msg('sample5')
        self._test_get(u'á', u'w6E=')
def test_get_payload_empty(self):
self.msg = load_msg('sample6')
self._test_get(u'\n', u'\n')
def test_clean_word_no_replace(self):
self.assertEqual(self.msg._clean_word(u'panic', {}), u'panic')
def test_clean_word_replace(self):
self.assertEqual(self.msg._clean_word(u'panic', {u'panic' : u'don\'t'}), u'don\'t')
def test_clean_word_replace_case(self):
self.assertEqual(self.msg._clean_word(u'Panic', {u'panic' : u'don\'t'}), u'don\'t')
def test_clean_word_replace_special_chars(self):
        self.assertEqual(self.msg._clean_word(u'Pánico', {u'pánico' : u'don\'t'}), u'don\'t')
def test_clean_word_surrounded(self):
        self.assertEqual(self.msg._clean_word(u'*Pánico*?', {u'pánico' : u'don\'t'}), u'*don\'t*?')
def test_get_clean_payload(self):
words = self.config['forbidden_words']
payload = self.get_clean_text(words)
for word in words.keys():
self.assertFalse(word in payload, 'word %s was not removed' % word)
for replacement in words.values():
self.assertTrue(replacement in payload, 'word %s was not inserted' % word)
def test_walk(self):
for mail in load_all_msg():
list(mail.walk())
| 32
| 0
| 0
| 4,999
| 0
| 0
| 0
| 47
| 45
|
4b059cdf1fee82342ebf26912e0f23a357b0cc33
| 1,961
|
py
|
Python
|
example/crawler/pool_client.py
|
Chisanan232/multirunnable
|
7223e49750dc3d3ccf7ebcd3d292138916b582f2
|
[
"Apache-2.0"
] | 1
|
2022-03-18T15:20:53.000Z
|
2022-03-18T15:20:53.000Z
|
example/crawler/pool_client.py
|
Chisanan232/multirunnable
|
7223e49750dc3d3ccf7ebcd3d292138916b582f2
|
[
"Apache-2.0"
] | null | null | null |
example/crawler/pool_client.py
|
Chisanan232/multirunnable
|
7223e49750dc3d3ccf7ebcd3d292138916b582f2
|
[
"Apache-2.0"
] | null | null | null |
import os
DEVELOPMENT_MODE = os.getenv("DEVELOPMENT_MODE", True)
if DEVELOPMENT_MODE:
# Import package multirunnable
import pathlib
import sys
package_path = str(pathlib.Path(__file__).absolute().parent.parent.parent)
sys.path.append(package_path)
# multirunnable package
if __name__ == '__main__':
print("This is system client: ")
o_pool = ExamplePool(pool_size=3, task_size=10)
o_pool.main_run()
| 30.169231
| 153
| 0.688424
|
import requests
import random
import os
DEVELOPMENT_MODE = os.getenv("DEVELOPMENT_MODE", True)
if DEVELOPMENT_MODE:
# Import package multirunnable
import pathlib
import sys
package_path = str(pathlib.Path(__file__).absolute().parent.parent.parent)
sys.path.append(package_path)
# multirunnable package
from multirunnable import RunningMode, SimplePool, sleep, async_sleep
class ExampleTargetFunction:
def crawl_function(self, *args, **kwargs) -> int:
response = requests.get("https://www.youtube.com")
return response.status_code
class ExamplePool:
__Pool_Size = None
__Task_Size = None
__Example_Target = ExampleTargetFunction()
def __init__(self, pool_size, task_size):
self.__Pool_Size = pool_size
self.__Task_Size = task_size
def main_run(self):
# # # # Initial Pool object
__pool = SimplePool(mode=RunningMode.Parallel, pool_size=self.__Pool_Size)
# __pool = SimplePool(mode=RunningMode.Concurrent, pool_size=self.__Pool_Size)
# __pool = SimplePool(mode=RunningMode.GreenThread, pool_size=self.__Pool_Size)
__result = None
with __pool as pool:
# # # # Running Pool
# pool.apply(function=self.__Example_Target.target_function, tasks_size=self.__Pool_Size)
pool.async_apply(function=self.__Example_Target.crawl_function, kwargs={"sleep_time": random.randrange(10, 20)}, tasks_size=self.__Pool_Size)
pool.map(function=self.__Example_Target.crawl_function, args_iter=(1, 2, 3))
# pool.map_by_args(function=self.__Example_Target.target_function, args_iter=[("index_1", "index_2.2"), ("index_3",), (1, 2, 3)])
# # # # Get result
__result = pool.get_result()
print("Result: ", __result)
if __name__ == '__main__':
print("This is system client: ")
o_pool = ExamplePool(pool_size=3, task_size=10)
o_pool.main_run()
| 0
| 0
| 0
| 1,373
| 0
| 0
| 0
| 34
| 112
|
853a07f29158122ed01614d80889727843fe1daf
| 3,733
|
py
|
Python
|
jetson/train.py
|
team7561/2022RapidReact
|
8b6e0d2a24411100689774c6c2e3b76c1c69deab
|
[
"BSD-3-Clause"
] | null | null | null |
jetson/train.py
|
team7561/2022RapidReact
|
8b6e0d2a24411100689774c6c2e3b76c1c69deab
|
[
"BSD-3-Clause"
] | null | null | null |
jetson/train.py
|
team7561/2022RapidReact
|
8b6e0d2a24411100689774c6c2e3b76c1c69deab
|
[
"BSD-3-Clause"
] | null | null | null |
# %%
import torch
import torchvision
import torchvision.transforms as transforms
device = torch.device('cuda')
TASK = 'balls'
CATEGORIES = ['red_ball',' blue_ball']
DATASETS = ['A']
TRANSFORMS = transforms.Compose([
transforms.ColorJitter(0.2, 0.2, 0.2, 0.2),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
datasets = {}
dataset = datasets[DATASETS[0]]
path = ""
# ALEXNET
# model = torchvision.models.alexnet(pretrained=True)
# model.classifier[-1] = torch.nn.Linear(4096, len(dataset.categories))
# SQUEEZENET
# model = torchvision.models.squeezenet1_1(pretrained=True)
# model.classifier[1] = torch.nn.Conv2d(512, len(dataset.categories), kernel_size=1)
# model.num_classes = len(dataset.categories)
# RESNET 18
model = torchvision.models.resnet18(pretrained=True)
model.fc = torch.nn.Linear(512, len(dataset.categories))
# RESNET 34
# model = torchvision.models.resnet34(pretrained=True)
# model.fc = torch.nn.Linear(512, len(dataset.categories))
model = model.to(device)
# display(model_widget)
print("model configured and model_widget created")
BATCH_SIZE = 8
optimizer = torch.optim.Adam(model.parameters())
# optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
epochs_widget = 1
# display(train_eval_widget)
print("trainer configured and train_eval_widget created")
| 28.715385
| 162
| 0.583981
|
# %%
import torch
import torchvision
import threading
import time
from utils import preprocess
import torch.nn.functional as F
import traitlets
import torchvision.transforms as transforms
from dataset import ImageClassificationDataset
device = torch.device('cuda')
TASK = 'balls'
CATEGORIES = ['red_ball',' blue_ball']
DATASETS = ['A']
TRANSFORMS = transforms.Compose([
transforms.ColorJitter(0.2, 0.2, 0.2, 0.2),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
datasets = {}
dataset = datasets[DATASETS[0]]
path = ""
# ALEXNET
# model = torchvision.models.alexnet(pretrained=True)
# model.classifier[-1] = torch.nn.Linear(4096, len(dataset.categories))
# SQUEEZENET
# model = torchvision.models.squeezenet1_1(pretrained=True)
# model.classifier[1] = torch.nn.Conv2d(512, len(dataset.categories), kernel_size=1)
# model.num_classes = len(dataset.categories)
# RESNET 18
model = torchvision.models.resnet18(pretrained=True)
model.fc = torch.nn.Linear(512, len(dataset.categories))
# RESNET 34
# model = torchvision.models.resnet34(pretrained=True)
# model.fc = torch.nn.Linear(512, len(dataset.categories))
model = model.to(device)
def load_model(c):
model.load_state_dict(torch.load(path))
def save_model(c):
torch.save(model.state_dict(), path)
# display(model_widget)
print("model configured and model_widget created")
BATCH_SIZE = 8
optimizer = torch.optim.Adam(model.parameters())
# optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
epochs_widget = 1
def train_eval(is_training):
global BATCH_SIZE, LEARNING_RATE, MOMENTUM, model, dataset, optimizer, eval_button, train_button, accuracy_widget, loss_widget, progress_widget, state_widget
try:
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=True
)
progress = 0
loss = 0
accuracy = 0
epochs = 10
time.sleep(1)
if is_training:
model = model.train()
else:
model = model.eval()
        while epochs > 0:
i = 0
sum_loss = 0.0
error_count = 0.0
for images, labels in iter(train_loader):
# send data to device
images = images.to(device)
labels = labels.to(device)
if is_training:
# zero gradients of parameters
optimizer.zero_grad()
# execute model to get outputs
outputs = model(images)
# compute loss
loss = F.cross_entropy(outputs, labels)
if is_training:
                    # run backpropagation to accumulate gradients
loss.backward()
# step optimizer to adjust parameters
optimizer.step()
# increment progress
error_count += len(torch.nonzero(outputs.argmax(1) - labels).flatten())
count = len(labels.flatten())
i += count
sum_loss += float(loss)
progress = i / len(dataset)
loss = sum_loss / i
accuracy = 1.0 - error_count / i
if is_training:
epochs -= 1
else:
break
    except Exception:
pass
model = model.eval()
# display(train_eval_widget)
print("trainer configured and train_eval_widget created")
| 0
| 0
| 0
| 0
| 0
| 2,034
| 0
| 22
| 217
|
16ecd44268339807a3ab50f1f6e9552e1c4a7e7f
| 2,500
|
py
|
Python
|
tests/functional/push/test_remove_channels_from_push.py
|
Versature/pubnub-python
|
a558d212a44ada6fbf2793a32e93685c959b8b22
|
[
"MIT"
] | null | null | null |
tests/functional/push/test_remove_channels_from_push.py
|
Versature/pubnub-python
|
a558d212a44ada6fbf2793a32e93685c959b8b22
|
[
"MIT"
] | null | null | null |
tests/functional/push/test_remove_channels_from_push.py
|
Versature/pubnub-python
|
a558d212a44ada6fbf2793a32e93685c959b8b22
|
[
"MIT"
] | null | null | null |
try:
from mock import MagicMock
except ImportError:
| 34.246575
| 117
| 0.656
|
import unittest
try:
from mock import MagicMock
except ImportError:
from unittest.mock import MagicMock
from pubnub.pubnub import PubNub
import pubnub.enums
from pubnub.endpoints.push.remove_channels_from_push import RemoveChannelsFromPush
from tests.helper import pnconf, sdk_name
class TestRemoveChannelsFromPush(unittest.TestCase):
def setUp(self):
self.pubnub = MagicMock(
spec=PubNub,
config=pnconf,
sdk_name=sdk_name,
uuid=None
)
self.pubnub.uuid = "UUID_RemoveChannelsTest"
self.remove_channels = RemoveChannelsFromPush(self.pubnub)
def test_push_remove_single_channel(self):
self.remove_channels.channels(['ch']).push_type(pubnub.enums.PNPushType.APNS).device_id("coolDevice")
params = (pnconf.subscribe_key, "coolDevice")
self.assertEquals(self.remove_channels.build_path(), RemoveChannelsFromPush.REMOVE_PATH % params)
self.assertEqual(self.remove_channels.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
'type': 'apns',
'remove': 'ch'
})
self.assertEqual(self.remove_channels._channels, ['ch'])
def test_push_remove_multiple_channels(self):
self.remove_channels.channels(['ch1', 'ch2']).push_type(pubnub.enums.PNPushType.MPNS).device_id("coolDevice")
params = (pnconf.subscribe_key, "coolDevice")
self.assertEquals(self.remove_channels.build_path(), RemoveChannelsFromPush.REMOVE_PATH % params)
self.assertEqual(self.remove_channels.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
'type': 'mpns',
'remove': 'ch1,ch2'
})
self.assertEqual(self.remove_channels._channels, ['ch1', 'ch2'])
def test_push_remove_google(self):
self.remove_channels.channels(['ch1', 'ch2', 'ch3']).push_type(pubnub.enums.PNPushType.GCM)\
.device_id("coolDevice")
params = (pnconf.subscribe_key, "coolDevice")
self.assertEquals(self.remove_channels.build_path(), RemoveChannelsFromPush.REMOVE_PATH % params)
self.assertEqual(self.remove_channels.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
'type': 'gcm',
'remove': 'ch1,ch2,ch3'
})
self.assertEqual(self.remove_channels._channels, ['ch1', 'ch2', 'ch3'])
| 0
| 0
| 0
| 2,182
| 0
| 0
| 0
| 98
| 162
|
70d0c88563906e11580cfe5c56c6f22438a75531
| 280
|
py
|
Python
|
Python/find the Rhombus Area.py
|
Bijitakc/Hacktoberfest2021-2
|
167e7c81dc9c5cc83fd604ea1d4bce52aa882605
|
[
"MIT"
] | 20
|
2021-10-06T13:51:46.000Z
|
2021-11-11T16:12:17.000Z
|
Python/find the Rhombus Area.py
|
Bijitakc/Hacktoberfest2021-2
|
167e7c81dc9c5cc83fd604ea1d4bce52aa882605
|
[
"MIT"
] | 42
|
2021-10-08T09:49:17.000Z
|
2021-10-21T23:18:39.000Z
|
Python/find the Rhombus Area.py
|
Bijitakc/Hacktoberfest2021-2
|
167e7c81dc9c5cc83fd604ea1d4bce52aa882605
|
[
"MIT"
] | 97
|
2021-10-06T13:04:34.000Z
|
2021-11-11T16:12:21.000Z
|
rhombusD1 = float(input("Enter Rhombus First Diagonal = "))
rhombusD2 = float(input("Enter Rhombus Second Diagonal = "))
rhombusArea = calRhombusArea(rhombusD1, rhombusD2)
print("The Area of a Rhombus = %.3f" %rhombusArea)
| 25.454545
| 60
| 0.710714
|
def calRhombusArea(d1, d2):
return (d1 * d2)/2
rhombusD1 = float(input("Enter Rhombus First Diagonal = "))
rhombusD2 = float(input("Enter Rhombus Second Diagonal = "))
rhombusArea = calRhombusArea(rhombusD1, rhombusD2)
print("The Area of a Rhombus = %.3f" %rhombusArea)
| 0
| 0
| 0
| 0
| 0
| 29
| 0
| 0
| 22
|
02ae1dd4f0b18eabd2110e77fdbaa8e28cc8fbf7
| 5,796
|
py
|
Python
|
Testing/elx_compare_overlap.py
|
eliseemond/elastix
|
0e8572f4a315e0a8f08b07d5947b4f3ac160b575
|
[
"Apache-2.0"
] | 318
|
2017-05-22T11:39:46.000Z
|
2022-03-27T04:40:13.000Z
|
Testing/elx_compare_overlap.py
|
eliseemond/elastix
|
0e8572f4a315e0a8f08b07d5947b4f3ac160b575
|
[
"Apache-2.0"
] | 358
|
2017-05-22T11:36:05.000Z
|
2022-03-18T15:49:10.000Z
|
Testing/elx_compare_overlap.py
|
eliseemond/elastix
|
0e8572f4a315e0a8f08b07d5947b4f3ac160b575
|
[
"Apache-2.0"
] | 102
|
2017-05-22T11:38:44.000Z
|
2021-12-23T20:27:51.000Z
|
import sys
import os
#-------------------------------------------------------------------------------
# the main function
# Below we deform the moving image segmentation by the current result as well as
# by a previous stored result. This makes this test a regression test.
#
# We could instead compare with a fixed image segmentation, but that would require
# the tested registrations to be relatively good, which they are not to save time.
#-------------------------------------------------------------------------------
if __name__ == '__main__':
sys.exit(main())
| 45.637795
| 149
| 0.675811
|
import sys, subprocess
import os
import os.path
import shutil
import re
import glob
from optparse import OptionParser
#-------------------------------------------------------------------------------
# the main function
# Below we deform the moving image segmentation by the current result as well as
# by a previous stored result. This makes this test a regression test.
#
# We could instead compare with a fixed image segmentation, but that would require
# the tested registrations to be relatively good, which they are not to save time.
def main():
# usage, parse parameters
usage = "usage: %prog [options] arg";
parser = OptionParser( usage );
# option to debug and verbose
parser.add_option( "-v", "--verbose", action="store_true", dest="verbose" );
# options to control files
parser.add_option( "-d", "--directory", dest="directory", help="elastix output directory" );
parser.add_option( "-m", "--movingsegmentation", dest="mseg", help="moving image segmentation" );
parser.add_option( "-b", "--baselinetp", dest="btp", help="baseline transform parameter file" );
parser.add_option( "-p", "--path", dest="path", help="path where executables can be found" );
(options, args) = parser.parse_args();
# Check if option -d and -m and -b are given
if options.directory == None :
parser.error( "The option directory (-d) should be given" );
if options.mseg == None :
parser.error( "The option directory (-m) should be given" );
if options.btp == None :
parser.error( "The option directory (-b) should be given" );
# Get the transform parameters files
tpFileName_in = os.path.join( options.directory, "TransformParameters.0.txt" );
tpFileName = os.path.join( options.directory, "TransformParameters.seg.txt" );
tpFileName_b_in = options.btp;
tpFileName_b = os.path.join( options.directory, "TransformParameters.baseline.seg.txt" );
# Sanity checks
if not os.path.exists( tpFileName_in ) :
print( "ERROR: the file " + tpFileName_in + " does not exist" );
return 1;
# Below we use programs that are compiled with elastix, and are thus available
# in the binary directory. The user of this script has to supply the path
# to the binary directory via the command line.
# In order to make sure that python is able to find these programs we add
# the paths to the local environment.
_path = os.path.dirname( options.path );
_path += os.pathsep + os.getenv('PATH');
os.environ['PATH'] = _path;
#
# Deform the moving image segmentation by the current result
#
print( "Deforming moving image segmentation using " + tpFileName_in );
# Make the transform parameters file suitable for binary images
f1 = open( tpFileName_in, 'r' ); f2 = open( tpFileName, 'w' );
for line in f1 :
lineout = line.replace( '(FinalBSplineInterpolationOrder 3)', '(FinalBSplineInterpolationOrder 0)' );
lineout = re.sub( "(ResultImageFormat \"mhd\")", "ResultImageFormat \"mha\"", lineout );
lineout = re.sub( "(ResultImagePixelType \"short\")", "ResultImagePixelType \"unsigned char\"", lineout );
lineout = re.sub( "(CompressResultImage \"false\")", "CompressResultImage \"true\"", lineout );
f2.write( lineout );
f1.close(); f2.close();
    # Transform the moving image segmentation to mimic the baseline result
seg = os.path.join( options.directory, "result.mha" );
seg_defm = os.path.join( options.directory, "segmentation_deformed.mha" );
subprocess.call( [ "transformix", "-in", options.mseg, "-out", options.directory, "-tp", tpFileName ],
stdout=subprocess.PIPE );
if( os.path.exists( seg_defm ) ) : os.remove( seg_defm );
shutil.move( seg, seg_defm );
#
# Deform the moving image segmentation by the baseline result
#
print( "Deforming moving image segmentation using " + tpFileName_b_in );
# Make the transform parameters file suitable for binary images
f1 = open( tpFileName_b_in, 'r' ); f2 = open( tpFileName_b, 'w' );
for line in f1 :
lineout = line.replace( '(FinalBSplineInterpolationOrder 3)', '(FinalBSplineInterpolationOrder 0)' );
lineout = re.sub( "(ResultImageFormat \"mhd\")", "ResultImageFormat \"mha\"", lineout );
lineout = re.sub( "(ResultImagePixelType \"short\")", "ResultImagePixelType \"unsigned char\"", lineout );
lineout = re.sub( "(CompressResultImage \"false\")", "CompressResultImage \"true\"", lineout );
f2.write( lineout );
f1.close(); f2.close();
    # Transform the moving image segmentation to mimic the fixed image segmentation
seg_defb = os.path.join( options.directory, "segmentation_baseline.mha" );
subprocess.call( [ "transformix", "-in", options.mseg, "-out", options.directory, "-tp", tpFileName_b ],
stdout=subprocess.PIPE );
if( os.path.exists( seg_defb ) ) : os.remove( seg_defb );
shutil.move( seg, seg_defb );
# Compute the overlap between baseline segmentation and deformed moving segmentation
try :
# This will work from python 2.7 on
outputAsString = subprocess.check_output( [ "elxComputeOverlap", "-in", seg_defm, seg_defb ] ).decode("utf-8");
except :
# Workaround for python 2.6 and lower. For MacMini specifically.
outputAsString = subprocess.Popen( [ "elxComputeOverlap", "-in", seg_defm, seg_defb ], stdout=subprocess.PIPE ).communicate()[0].decode("utf-8");
overlap = outputAsString[ outputAsString.find( "Overlap" ) : ].strip( "Overlap: " );
# Report
print( "The segmentation overlap between current and baseline is " + overlap );
if float( overlap ) > 0.99 :
print( "SUCCESS: overlap is higher than 0.99" );
return 0;
else :
print( "FAILURE: overlap is lower than 0.99" );
return 1;
#-------------------------------------------------------------------------------
if __name__ == '__main__':
sys.exit(main())
| 0
| 0
| 0
| 0
| 0
| 5,103
| 0
| -13
| 133
|
2b52a93d3cf4e00721091ea445ca5eee4afc169e
| 1,771
|
py
|
Python
|
build-tools/code_generator/function_generator/generate_src_nbla_function_cpp.py
|
PAC-P2P/nnabla
|
bb7e7d52555a5bc145ec3c9a2e152fa5b11574de
|
[
"Apache-2.0"
] | 1
|
2021-04-08T00:33:23.000Z
|
2021-04-08T00:33:23.000Z
|
build-tools/code_generator/function_generator/generate_src_nbla_function_cpp.py
|
enomotom/nnabla
|
1947fe16a0a41d19d76cd916f151aa1991ea1b44
|
[
"Apache-2.0"
] | null | null | null |
build-tools/code_generator/function_generator/generate_src_nbla_function_cpp.py
|
enomotom/nnabla
|
1947fe16a0a41d19d76cd916f151aa1991ea1b44
|
[
"Apache-2.0"
] | 1
|
2020-08-19T08:32:51.000Z
|
2020-08-19T08:32:51.000Z
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import generator_common.common as common
| 45.410256
| 99
| 0.645963
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import generator_common.common as common
import utils.type_conv
def generate(info, func_name, func_name_snakecase, template):
arg_info = common.function_arguments(info)
func_arg_variable_types = ', '.join(
[func_name] + [utils.type_conv.type_from_proto[t]['cpp'] for t in arg_info['types']])
func_args = ', '.join(['const Context &ctx'] + ['{} {}'.format(utils.type_conv.type_from_proto[
t]['cpp'], n) for t, n in zip(arg_info['types'], arg_info['names'])])
io_info = common.function_io(info)
ctypes = ', '.join(io_info['template_types'])
templates = ', '.join(io_info['templates'])
template_defines = ', '.join(['typename {}'.format(t)
for t in io_info['templates']])
return template.format(func_name=func_name,
func_name_snakecase=func_name_snakecase,
func_arg_variable_types=func_arg_variable_types,
func_args=func_args,
template_defines=template_defines,
templates=templates,
ctypes=ctypes)
| 0
| 0
| 0
| 0
| 0
| 1,073
| 0
| 1
| 45
|
7106131880699d6f4381bcf92970ea93379347b3
| 4,583
|
py
|
Python
|
low_shot_learning/architectures/tools.py
|
ZhenLiuBuaa/wDAE_GNN_FewShot
|
6db1e4b1fe99821ffa116be009b5765f47932400
|
[
"MIT"
] | 150
|
2019-04-06T15:27:15.000Z
|
2022-03-23T07:52:20.000Z
|
low_shot_learning/architectures/tools.py
|
ZhenLiuBuaa/wDAE_GNN_FewShot
|
6db1e4b1fe99821ffa116be009b5765f47932400
|
[
"MIT"
] | 17
|
2019-05-14T06:55:04.000Z
|
2021-03-12T15:45:54.000Z
|
low_shot_learning/architectures/tools.py
|
gidariss/wDAE_GNN_FewShot
|
6db1e4b1fe99821ffa116be009b5765f47932400
|
[
"MIT"
] | 21
|
2019-06-22T02:26:35.000Z
|
2022-01-14T15:37:44.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def batch_cosine_fully_connected_layer(x_in, weight, scale=None, bias=None):
"""
Args:
x_in: a 3D tensor with shape
[meta_batch_size x num_examples x num_features_in]
weight: a 3D tensor with shape
[meta_batch_size x num_features_in x num_features_out]
scale: (optional) a scalar value
bias: (optional) a 1D tensor with shape [num_features_out]
Returns:
x_out: a 3D tensor with shape
[meta_batch_size x num_examples x num_features_out]
"""
assert(x_in.dim() == 3)
assert(weight.dim() == 3)
assert(x_in.size(0) == weight.size(0))
assert(x_in.size(2) == weight.size(1))
x_in = F.normalize(x_in, p=2, dim=2, eps=1e-12)
weight = F.normalize(weight, p=2, dim=1, eps=1e-12)
x_out = torch.bmm(x_in, weight)
if scale is not None:
x_out = x_out * scale
if bias is not None:
x_out = x_out + bias
return x_out
| 29.567742
| 76
| 0.602444
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class LinearDiag(nn.Module):
def __init__(self, num_features, bias=False):
super(LinearDiag, self).__init__()
# initialize to the identity transform
weight = torch.FloatTensor(num_features).fill_(1)
self.weight = nn.Parameter(weight, requires_grad=True)
if bias:
bias = torch.FloatTensor(num_features).fill_(0)
self.bias = nn.Parameter(bias, requires_grad=True)
else:
self.register_parameter('bias', None)
def forward(self, X):
assert(X.dim()==2 and X.size(1)==self.weight.size(0))
out = X * self.weight.expand_as(X)
if self.bias is not None:
out = out + self.bias.expand_as(out)
return out
def cosine_fully_connected_layer(x_in, weight, scale=None, bias=None):
assert(x_in.dim() == 2)
assert(weight.dim() == 2)
assert(x_in.size(1) == weight.size(0))
x_in = F.normalize(x_in, p=2, dim=1, eps=1e-12)
weight = F.normalize(weight, p=2, dim=0, eps=1e-12)
x_out = torch.mm(x_in, weight)
if scale is not None:
x_out = x_out * scale.view(1, -1)
if bias is not None:
x_out = x_out + bias.view(1, -1)
return x_out
def batch_cosine_fully_connected_layer(x_in, weight, scale=None, bias=None):
"""
Args:
x_in: a 3D tensor with shape
[meta_batch_size x num_examples x num_features_in]
weight: a 3D tensor with shape
[meta_batch_size x num_features_in x num_features_out]
scale: (optional) a scalar value
bias: (optional) a 1D tensor with shape [num_features_out]
Returns:
x_out: a 3D tensor with shape
[meta_batch_size x num_examples x num_features_out]
"""
assert(x_in.dim() == 3)
assert(weight.dim() == 3)
assert(x_in.size(0) == weight.size(0))
assert(x_in.size(2) == weight.size(1))
x_in = F.normalize(x_in, p=2, dim=2, eps=1e-12)
weight = F.normalize(weight, p=2, dim=1, eps=1e-12)
x_out = torch.bmm(x_in, weight)
if scale is not None:
x_out = x_out * scale
if bias is not None:
x_out = x_out + bias
return x_out
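# Shape sketch (illustrative, with assumed sizes, not part of the original module):
# for a meta-batch of 4 tasks, 25 examples with 128 input features and 10 outputs,
#   x_in = torch.randn(4, 25, 128)
#   weight = torch.randn(4, 128, 10)
#   out = batch_cosine_fully_connected_layer(x_in, weight, scale=10.0)
# returns `out` with shape [4, 25, 10]; feature rows of x_in and feature columns of
# weight are L2-normalized before the batched matrix multiply.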
class CosineFullyConnectedLayer(nn.Module):
def __init__(
self,
num_inputs,
num_outputs,
scale=20.0,
per_plane=False,
learn_scale=True,
bias=False):
super(CosineFullyConnectedLayer, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.learn_scale = learn_scale
self.per_plane = per_plane
weight = torch.FloatTensor(num_inputs, num_outputs).normal_(
0.0, np.sqrt(2.0/num_inputs))
self.weight = nn.Parameter(weight, requires_grad=True)
if bias:
bias = torch.FloatTensor(num_outputs).fill_(0.0)
self.bias = nn.Parameter(bias, requires_grad=True)
else:
self.bias = None
if scale:
num_scale_values = num_outputs if per_plane else 1
scale = torch.FloatTensor(num_scale_values).fill_(scale)
self.scale = nn.Parameter(scale, requires_grad=learn_scale)
else:
self.scale = None
def forward(self, x_in):
assert(x_in.dim() == 2)
return cosine_fully_connected_layer(
x_in, self.weight, scale=self.scale, bias=self.bias)
def extra_repr(self):
s = 'num_inputs={0}, num_classes={1}'.format(
self.num_inputs, self.num_outputs)
if self.scale is not None:
if self.per_plane:
s += 'num_scales={0} (learnable={1})'.format(
self.num_outputs, self.learn_scale)
else:
s += 'num_scales={0} (value={1} learnable={2})'.format(
1, self.scale.item(), self.learn_scale)
if self.bias is None:
s += ', bias=False'
return s
def global_pooling(x, pool_type):
assert(x.dim() == 4)
if pool_type == 'max':
return F.max_pool2d(x, (x.size(2), x.size(3)))
elif pool_type == 'avg':
return F.avg_pool2d(x, (x.size(2), x.size(3)))
else:
raise ValueError('Unknown pooling type.')
class GlobalPooling(nn.Module):
def __init__(self, pool_type):
super(GlobalPooling, self).__init__()
assert(pool_type == 'avg' or pool_type == 'max')
self.pool_type = pool_type
def forward(self, x):
return global_pooling(x, pool_type=self.pool_type)
| 0
| 0
| 0
| 2,715
| 0
| 712
| 0
| 0
| 115
|
27da8b8c9f15b6143439568cacc7459e3df67bd2
| 1,527
|
py
|
Python
|
atlasutil/dvpp_process/dvpp_process.py
|
Atlas200DKTest/sample-facedetection-python
|
b1266604c853ab04efac6ed6656b192f72d0778c
|
[
"Apache-2.0"
] | 1
|
2020-04-10T08:48:05.000Z
|
2020-04-10T08:48:05.000Z
|
atlasutil/dvpp_process/dvpp_process.py
|
Atlas200DKTest/sample-facedetection-python
|
b1266604c853ab04efac6ed6656b192f72d0778c
|
[
"Apache-2.0"
] | 1
|
2020-01-23T11:41:25.000Z
|
2020-02-25T08:54:54.000Z
|
atlasutil/dvpp_process/dvpp_process.py
|
Atlas200DKTest/sample-facedetection-python
|
b1266604c853ab04efac6ed6656b192f72d0778c
|
[
"Apache-2.0"
] | 1
|
2020-04-10T08:47:53.000Z
|
2020-04-10T08:47:53.000Z
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
JPGENC_FORMAT_NV12 = 0x10
| 31.8125
| 113
| 0.638507
|
# !/usr/bin/env python
# -*- coding:utf-8 -*-
import ctypes
from ctypes import *
import os
import numpy as np
import time
JPGENC_FORMAT_NV12 = 0x10
class CameraImageBuf(Structure):
_fields_ = [
('size', c_uint),
('data', POINTER(c_ubyte))
]
class DvppImageBuffer(Structure):
_fields_ = [
('format', c_uint),
('buf_size', c_uint),
('width', c_uint),
('height', c_uint),
('image_size', c_uint),
('data', POINTER(c_ubyte)),
]
class DvppProcess():
lib = ctypes.CDLL(os.path.dirname(os.path.abspath(__file__)) + '/libdvppprocess.so')
def __init__(self, width, height):
self.width = width
self.height = height
self.size = int(width * height * 3 / 2)
self.yuv_buf = (c_ubyte * self.size)()
self.jpeg_buf = CameraImageBuf()
self.jpeg_buf.size = width * height * 3
self.jpeg_buf.data = (c_ubyte * self.jpeg_buf.size)()
DvppProcess.lib.InitDvpp(self.width, self.height)
def Yuv2Jpeg(self, in_yuv_data):
if not in_yuv_data.flags['C_CONTIGUOUS']:
            in_yuv_data = np.ascontiguousarray(in_yuv_data, dtype=np.uint8)
DvppProcess.lib.CvtYuv2Jpeg(byref(self.jpeg_buf), in_yuv_data.ctypes.data_as(c_char_p))
array = (ctypes.c_ubyte * self.jpeg_buf.size).from_address(ctypes.addressof(self.jpeg_buf.data.contents))
image_array = np.ndarray(buffer=array, dtype=np.uint8, shape=(self.jpeg_buf.size))
return image_array
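# Illustrative usage sketch (frame size is an assumption, not from the original code):
#   dvpp = DvppProcess(1280, 720)
#   # yuv_frame: an NV12 numpy uint8 array of length 1280 * 720 * 3 // 2
#   jpeg_bytes = dvpp.Yuv2Jpeg(yuv_frame)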
| 0
| 0
| 0
| 1,306
| 0
| 0
| 0
| -34
| 180
|
0b832f5ceea1a3fbf7ea9c67e5673b38acad18d4
| 850
|
py
|
Python
|
programmers/lv3/shopping.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
programmers/lv3/shopping.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
programmers/lv3/shopping.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
#
if __name__ == "__main__":
gems = ["DIA", "RUBY", "RUBY", "DIA", "DIA", "EMERALD", "SAPPHIRE", "DIA"]
print(solution(gems))
| 26.5625
| 78
| 0.451765
|
# Gem shopping (보석 쇼핑)
def solution(gems):
answer = []
counts = {}
kinds = len(set(gems))
minimum = 987654321
left, right = 0, 0
while right < len(gems):
cur_right = gems[right]
counts[cur_right] = counts.get(cur_right, 0) + 1
right += 1
if kinds == len(counts):
while left < right:
cur_left = gems[left]
if counts[cur_left] > 1:
counts[cur_left] -= 1
left += 1
elif minimum > right - left:
minimum = right - left
answer = [left + 1, right]
break
else:
break
return answer
if __name__ == "__main__":
gems = ["DIA", "RUBY", "RUBY", "DIA", "DIA", "EMERALD", "SAPPHIRE", "DIA"]
print(solution(gems))
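# For this example the sliding window settles on shelves 3..7 (1-based), which cover
# all four gem kinds, so the script prints [3, 7].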
| 12
| 0
| 0
| 0
| 0
| 684
| 0
| 0
| 23
|
022d20ed03d831aaada101ae2cab7e00cafb2ea8
| 30,512
|
py
|
Python
|
head_tracker.py
|
kalleknast/head-tracker
|
df06cc71e1f36d7c752d82f94a010b5258fd1fb9
|
[
"Apache-2.0"
] | 2
|
2019-06-03T17:21:46.000Z
|
2021-05-27T04:48:24.000Z
|
head_tracker.py
|
kalleknast/head-tracker
|
df06cc71e1f36d7c752d82f94a010b5258fd1fb9
|
[
"Apache-2.0"
] | null | null | null |
head_tracker.py
|
kalleknast/head-tracker
|
df06cc71e1f36d7c752d82f94a010b5258fd1fb9
|
[
"Apache-2.0"
] | 1
|
2017-03-30T08:23:11.000Z
|
2017-03-30T08:23:11.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 30 15:20:09 2016
@author: hjalmar
"""
import matplotlib
matplotlib.use('Agg')
| 41.740082
| 181
| 0.470897
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 30 15:20:09 2016
@author: hjalmar
"""
import tensorflow as tf
from ht_helper import HeSD, angle2class, FrameStepper, class2angle, whiten
from ht_helper import anglediff, get_max_gaze_line, CountdownPrinter
from ht_helper import angles2complex, complex2angles, softmax, get_error
from data_preparation import read_log_data
import numpy as np
import re
import os
from scipy.misc import imresize
from glob import glob
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
class TrainModel:
"""
"""
def __init__(self, Nclass=12, data_dir=None, model_dir=None):
if data_dir is None:
data_dir = '/home/hjalmar/head_tracker/data/CAM/BIG'
self.data_dir = data_dir.rstrip('/')
if not os.path.isdir(data_dir):
raise FileNotFoundError('data_dir %s\nis not a directory.' %
self.data_dir)
if model_dir is None:
model_dir = '/home/hjalmar/head_tracker/model/CAM/BIG'
self.model_dir = model_dir.rstrip('/')
if not os.path.isdir(model_dir):
raise FileNotFoundError('model_dir %s\nis not a directory.' %
self.model_dir)
self.Nclass = Nclass
self.im_h = 120
self.im_w = 160
self.batch_sz = 64
def get_inputs(self, fname, Nepoch, Nex_per_epoch, train=False, batch_sz=None):
"""
Nex_per_epoch - Ntrain or Nvalid: number_of_examples_per_epoch
"""
if not os.path.isfile(fname):
raise FileNotFoundError('Failed to find file: %s' % fname)
if batch_sz is None:
batch_sz = self.batch_sz
with tf.name_scope('input'):
fname_queue = tf.train.string_input_producer(
[fname], num_epochs=Nepoch)
# Even when reading in multiple threads, share the filename
# queue.
im, angle, angle_ok, pos_x, pos_y = self._read_and_decode(fname_queue)
if train:
# Distort im
im = self._distort_inputs(im)
n_threads = 8
else:
n_threads = 4
# Subtract off the mean and divide by the variance of the pixels.
im = tf.image.per_image_whitening(im)
# Shuffle the examples and collect them into batch_sz batches.
# (Internally uses a RandomShuffleQueue.)
# We run this in two threads to avoid being a bottleneck.
# Ensures a minimum amount of shuffling of examples.
min_queue_examples = int(Nex_per_epoch * 0.4)
capacity = min_queue_examples + 3 * batch_sz
im, angle, angle_ok, pos_x, pos_y = tf.train.shuffle_batch([im,
angle,
angle_ok,
pos_x,
pos_y],
batch_size=batch_sz,
num_threads=n_threads,
capacity=capacity,
min_after_dequeue=min_queue_examples)
return im, angle, angle_ok, pos_x, pos_y
def _read_and_decode(self, fname_queue):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(fname_queue)
features = tf.parse_single_example(
serialized_example,
features={'image_raw': tf.FixedLenFeature([], tf.string),
'angle': tf.FixedLenFeature([], tf.int64),
'angle_ok': tf.FixedLenFeature([], tf.int64),
'position_x': tf.FixedLenFeature([], tf.int64),
'position_y': tf.FixedLenFeature([], tf.int64)})
im = tf.decode_raw(features['image_raw'], tf.uint8)
im.set_shape([self.im_h * self.im_w])
im = tf.reshape(im, [self.im_h, self.im_w, 1])
# Convert from [0, 255] -> [-0.5, 0.5] floats.
im = tf.cast(im, tf.float32) * (1. / 255) - 0.5
# Convert label from a scalar uint8 tensor to an int32 scalar.
angle = tf.cast(features['angle'], tf.int32)
angle_ok = tf.cast(features['angle_ok'], tf.int32)
position_x = tf.cast(features['position_x'], tf.int32)
position_y = tf.cast(features['position_y'], tf.int32)
return im, angle, angle_ok, position_x, position_y
def _distort_inputs(self, im):
"""
Don't flip orientation images
"""
im = tf.image.random_brightness(im, max_delta=63)
im = tf.image.random_contrast(im, lower=0.2, upper=1.8)
return im
def train(self, Nepoch, lmbda=5e-4):
"""
"""
model_fname = os.path.join(self.model_dir, 'CAM')
train_fname = os.path.join(self.data_dir, 'train_CAM_N*.tfrecords')
valid_fname = os.path.join(self.data_dir, 'dev_CAM_N*.tfrecords')
train_fname = glob(train_fname)
if not len(train_fname) == 1:
raise ValueError('Something wrong with the file name of the training data.')
else:
train_fname = train_fname[0]
valid_fname = glob(valid_fname)
if not len(valid_fname) == 1:
raise ValueError('Something wrong with the file name of the validation data.')
else:
valid_fname = valid_fname[0]
batch_sz = self.batch_sz
Nvalid = int(re.search(r'[\d]{4,6}', valid_fname.split('/')[-1]).group())
Ntrain = int(re.search(r'[\d]{4,6}', train_fname.split('/')[-1]).group())
Nbatch_per_epoch = Ntrain // batch_sz
#Nbatch = Nbatch_per_epoch * Nepoch
valid_batch_sz = 50
learning_rate = 1e-4
valid_X, valid_y = [], []
model = Model(Nclass=self.Nclass, im_w=self.im_w, im_h=self.im_h, lmbda=lmbda)
print('Starting training for %d epochs.' % Nepoch)
with model.graph.as_default():
# Input images and labels.
images, angles, angles_ok, _, _ = self.get_inputs(train_fname,
Nepoch,
Ntrain,
train=True)
valid_images, valid_angles, valid_angles_ok, _, _ = self.get_inputs(valid_fname, 1, Nvalid, train=False, batch_sz=valid_batch_sz)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(model.loss)
with tf.Session(graph=model.graph) as session:
session.run(tf.initialize_all_variables())
session.run(tf.initialize_local_variables())
# Start input enqueue threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=session, coord=coord)
validation_accuracy = []
train_accuracy = []
print('%s\n step | loss | acc | epoch \n%s' % ('='*30, '='*30))
step, epoch = 0, 0
while (epoch < Nepoch) and not coord.should_stop():
step += 1
# Train
X, theta, theta_ok = session.run([images, angles,
angles_ok])
y = angle2class(theta, self.Nclass,
angles_ok=theta_ok, units='deg')
optimizer.run(feed_dict={model.X: X, model.y_: y})
if (step % Nbatch_per_epoch == 0):
l, acc = session.run([model.loss, model.accuracy],
feed_dict={model.X: X,
model.y_: y})
epoch += 1
print(' %-5d| %-6.3f| %-6.2f| %-5d' % (step, l,
acc, epoch))
if (epoch % 10 == 0) or (epoch == Nepoch):
v_acc, i = 0.0, 0
if len(valid_y) < 1:
load_valid = True
else:
load_valid = False
while i < (Nvalid // valid_batch_sz):
if load_valid:
X, theta, theta_ok = session.run([valid_images,
valid_angles,
valid_angles_ok])
y = angle2class(theta, self.Nclass,
angles_ok=theta_ok,
units='deg')
valid_X.append(X)
valid_y.append(y)
feed_dict = {model.X: valid_X[i],
model.y_: valid_y[i]}
v_acc += model.accuracy.eval(feed_dict=feed_dict)
i += 1
validation_accuracy.append(v_acc/i)
train_accuracy.append(np.mean(acc))
model.saver.save(session, ('%s_Nclass%d_acc%1.1f_%d.ckpt' %
(model_fname,
self.Nclass,
validation_accuracy[-1],
epoch)))
print('Done training for %d epochs, %d steps.' % (epoch, step-1))
# Ask threads to stop
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
session.close()
print('Training accuracy:', train_accuracy)
print('Validation accuracy:', validation_accuracy)
return validation_accuracy, train_accuracy
class Model:
def __init__(self, Nclass, im_w, im_h, lmbda=5e-4):
self.Nclass = Nclass
self.im_w = im_w
self.im_h = im_h
self.graph = tf.Graph()
# Define ops and tensors in `g`.
with self.graph.as_default():
# Input data.
self.X = tf.placeholder(tf.float32, shape=(None, im_h, im_w, 1))
self.y_ = tf.placeholder(tf.float32, shape=(None))
c1 = tf.nn.relu(self._conv_layer(self.X, (11, 11, 1, 32), "conv1"))
c2 = tf.nn.relu(self._conv_layer(c1, (5, 5, 32, 64), "conv2"))
p1 = tf.nn.max_pool(c2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
c3 = tf.nn.relu(self._conv_layer(p1, (3, 3, 64, 128), "conv3"))
c4 = tf.nn.relu(self._conv_layer(c3, (3, 3, 128, 256), "conv4"))
p2 = tf.nn.max_pool(c4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool2')
c5 = tf.nn.relu(self._conv_layer(p2, (3, 3, 256, 256), "conv5"))
self.top_conv = self._conv_layer(c5, (3, 3, 256, 1024), "conv6")
gap = tf.reduce_mean(self.top_conv, [1,2]) # Global Average Pooling
with tf.variable_scope("GAP"):
shape = (1024, Nclass)
w_init = tf.truncated_normal_initializer(mean=0.0, stddev=HeSD(shape))
gap_w = tf.get_variable("W", shape=shape, initializer=w_init)
self.logits = tf.matmul(gap, gap_w)
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
self.logits, tf.to_int64(self.y_), name='xentropy')
self.loss = tf.reduce_mean(xentropy, name='xentropy_mean')
weights = filter(lambda x: x.name.endswith('W:0'), tf.trainable_variables())
regularizer = tf.reduce_sum(tf.pack([tf.nn.l2_loss(x) for x in weights]))
#self.loss += (regularizer * 5e-4)
self.loss += (regularizer * lmbda)
correct = tf.equal(tf.argmax(self.logits, 1), tf.cast(self.y_, tf.int64))
self.accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) * 100.
# CAM
top_conv_resz = tf.image.resize_bilinear(self.top_conv,
[self.im_h, self.im_w])
label_w = tf.gather(tf.transpose(gap_w), tf.cast(self.y_, tf.int32))
label_w = tf.reshape(label_w, [-1, 1024, 1])
top_conv_resz = tf.reshape(top_conv_resz,
[-1, self.im_h * self.im_w, 1024])
cam = tf.batch_matmul(top_conv_resz, label_w)
self.cam = tf.reshape(cam, [-1, self.im_h, self.im_w])
self.saver = tf.train.Saver()
def _conv_layer(self, z, shape, name):
with tf.variable_scope(name):
w_init = tf.truncated_normal_initializer(mean=0.0,
stddev=HeSD(shape))
w = tf.get_variable("W", shape=shape, initializer=w_init)
b = tf.get_variable("b", shape=shape[-1],
initializer=tf.constant_initializer(0.1))
conv = tf.nn.conv2d(z, w, [1, 1, 1, 1], padding='SAME')
return tf.nn.bias_add(conv, b)
class HeadTracker:
"""
"""
def __init__(self, Nclass=13, model_dir=None, im_w=160, im_h=120):
if model_dir is None:
model_dir = '/home/hjalmar/head_tracker/model/CAM'
self.model_dir = model_dir.rstrip('/')
self.Nclass = Nclass
self.im_h = im_h
self.im_w = im_w
self.im_scale = self.im_w / 640. # frame.shape is (480, 640)
self.frame = None
def track2video(self, in_fname, out_fname, log_fname=None,
t_start=0.0, t_end=-1, dur=None, verbose=True):
"""
t_start : only used if no log_fname is provided
t_end : only used if no log_fname is provided
dur : only used if no log_fname is provided
"""
if not tf.gfile.Exists(in_fname):
raise ValueError('Failed to find file: %s' % in_fname)
fst = FrameStepper(in_fname)
fps = int(round(1/fst.dt))
FFMpegWriter = manimation.writers['ffmpeg']
ttl = 'Head position tracking from video %s.' % in_fname.split('/')[-1]
metadata = dict(title=ttl, artist='Matplotlib',
comment='more info...') # TODO!
writer = FFMpegWriter(fps=fps, metadata=metadata, bitrate=20000, codec=None) # TODO: set a good codec
dpi = 96
figsize = (fst.frame.shape[1]/dpi, fst.frame.shape[0]/dpi)
fig = plt.figure(figsize=figsize, dpi=dpi) # TODO dpi depends on the monitor used, remove this dependence
# see: http://stackoverflow.com/questions/13714454/specifying-and-saving-a-figure-with-exact-size-in-pixels
if t_start < 0:
raise ValueError('t_start cannot be less than 0.0 (beginning of the video).')
if t_end < 0:
t_end = fst.duration
if not dur is None:
t_end = min(t_end, t_start + dur)
if t_end > fst.duration:
raise ValueError('t_end cannot be later %1.3f (time of the last frame)' %
fst.duration)
if not log_fname is None:
if not tf.gfile.Exists(log_fname):
raise ValueError('Failed to find file: %s' % log_fname)
else:
log_data, log_header = read_log_data(log_fname)
Nframe = len(log_data)
if verbose:
# Counter printed on command line
cdp = CountdownPrinter(Nframe)
with writer.saving(fig, out_fname, dpi):
for i, dat in enumerate(log_data):
if verbose:
cdp.print(i)
fst.read_t(dat['frame_time'])
true_pos = {'x': dat['center_x'], 'y': dat['center_y']}
if dat['angle_ok']:
true_angle = (180 * (dat['angle'] / np.pi)).round()
else:
true_angle = None
self.plot(fst.frame, true_pos=true_pos,
true_angle=true_angle, fig=fig, verbose=False)
writer.grab_frame()
fig.clf()
else:
Nframe = int(np.ceil((t_end - t_start) / fst.dt))
if verbose:
# Counter printed on command line
cdp = CountdownPrinter(Nframe)
with writer.saving(fig, out_fname, dpi):
ok = fst.read_t(t_start)
i = 0
while (fst.t < t_end) and ok:
if verbose:
cdp.print(i)
self.plot(fst.frame, true_pos=None, fig=fig, verbose=False)
writer.grab_frame()
fig.clf()
ok = fst.next()
i += ok
fst.close()
def track2fig(self, in_fname, out_fname, log_data, verbose=True):
"""
"""
if not tf.gfile.Exists(in_fname):
raise ValueError('Failed to find file: %s' % in_fname)
fst = FrameStepper(in_fname)
#figsize=figsize, dpi=dpi
fig = plt.figure()
Nframe = len(log_data)
if verbose:
# Counter printed on command line
cdp = CountdownPrinter(Nframe)
for i, dat in enumerate(log_data):
if verbose:
cdp.print(i)
print(i, dat['frame_time'])
fst.read_t(dat['frame_time'])
true_pos = {'x': dat['center_x'], 'y': dat['center_y']}
if dat['angle_ok']:
true_angle = (180 * (dat['angle'] / np.pi)).round()
else:
true_angle = None
self.plot(fst.frame, true_pos=true_pos,
true_angle=true_angle, fig=fig, verbose=False)
fig.savefig('%s_%03d.svg' % (out_fname, i))
fig.savefig('%s_%03d.png' % (out_fname, i))
fig.clf()
fst.close()
def track(self, video_fname, t_start=0.0, t_end=-1, dur=None, verbose=True):
"""
"""
if not tf.gfile.Exists(video_fname):
raise ValueError('Failed to find file: %s' % video_fname)
fst = FrameStepper(video_fname)
if t_start < 0:
raise ValueError('t_start cannot be less than 0.0 (beginning of the video).')
if t_end < 0:
t_end = fst.duration
if not dur is None:
t_end = min(t_end, t_start + dur)
if t_end > fst.duration:
raise ValueError('t_end cannot be later %1.3f (time of the last frame)' %
fst.duration)
Nframe = int(np.ceil((t_end - t_start) / fst.dt))
if verbose:
cdp = CountdownPrinter(Nframe)
est_track = np.recarray(shape=Nframe+1,
dtype=[('t', float), ('x', float),
('y', float), ('angle', float),
('angle_w', float)])
i = 0
ok = fst.read_t(t_start)
while (fst.t < t_end) and ok:
if verbose:
cdp.print(i)
x, y, angle, angle_w, _ = self.predict(fst.frame, verbose=False)
est_track[i].x = x
est_track[i].y = y
est_track[i].angle = angle
est_track[i].angle_w = angle_w
est_track[i].t = fst.t
ok = fst.next()
i += ok
est_track = est_track[:i]
fst.close()
return est_track
def test_track(self, log_fname, video_dir, Nframe=None):
"""
Nframe : number of frames to predict.
Default all frames in the log file.
"""
verbose=False
log_data, log_header = read_log_data(log_fname)
if Nframe is None:
Nframe = len(log_data) - 1
if Nframe >= len(log_data):
raise ValueError('Nframes cannot be greater than the number of frames in the log file.')
#video_fname = '%s/%s' % (video_dir.rstrip('/'), log_header['video_fname'])
video_fname = os.path.join(video_dir.rstrip('/'),
log_header['video_fname'])
video_fname = glob(video_fname)[0]
fst = FrameStepper(video_fname)
est_track = np.recarray(shape=Nframe,
dtype=[('t', float), ('x', float),
('y', float), ('angle', float),
('angle_w', float)])
true_track = np.recarray(shape=Nframe,
dtype=[('t', float), ('x', float),
('y', float), ('angle', float)])
if verbose:
cdp = CountdownPrinter(Nframe)
for i, dat in enumerate(log_data[:Nframe]):
if verbose:
cdp.print(i)
# Read the frame
fst.read_t(dat['frame_time'])
# Time of frame
true_track[i].t = fst.t
est_track[i].t = fst.t
# True head position
true_track[i].x = dat['center_x']
true_track[i].y = dat['center_y']
# True head orientation
if not dat['angle_ok']:
true_track[i].angle = np.nan
else:
true_track[i].angle = 180. * (dat['angle'] / np.pi)
# Estimated head position and orientation
x, y, angle, angle_w, _ = self.predict(fst.frame, verbose=verbose)
est_track[i].x = x
est_track[i].y = y
est_track[i].angle = angle
est_track[i].angle_w = angle_w
fst.close()
error, error_desrc = get_error(est_track, true_track)
return est_track, true_track, error, error_desrc
def predict(self, frame, verbose=True):
"""
Frame by frame
x, y -- in frame coordinates
"""
self.restore_model(verbose=verbose)
if frame.ndim == 3:
frame = frame.mean(axis=2)
rescale = False
if frame.shape[0] == 480 and frame.shape[1] == 640:
im = imresize(frame, self.im_scale)
rescale = True
elif frame.shape[0] == self.im_h and frame.shape[1] == self.im_w:
im = frame
else:
raise ValueError('Some odd differences btw frame.shape and'
' self.im_w/im_w. FIX this.')
# Reshape and whiten the image
im = whiten(im.astype(float)).reshape((1, self.im_h, self.im_w, 1))
p = softmax(self.model.logits.eval(session=self.model.session,
feed_dict={self.model.X: im}))
label = p.argmax()
angles = class2angle(np.arange(self.Nclass-1), self.Nclass-1)
# Use the Softmax output, p, as weights for a weighted average.
p = (p[0, :-1] / p[0, :-1].sum()).flatten()
z_w = (angles2complex(angles) * p).sum()
angle_w = complex2angles(z_w)
        if (label == (self.Nclass - 1)): # head orientation in the horizontal plane is not visible
angle = np.nan
angle_w = np.nan
else:
angle = angles[label]
cam = self.model.cam.eval(session=self.model.session,
feed_dict={self.model.X: im,
self.model.y_: label})
# rescale cam to the same size as frame
if rescale:
cam = imresize(cam.reshape((self.im_h, self.im_w)), 1/self.im_scale)
else:
cam = cam.reshape((self.im_h, self.im_w))
y, x = np.unravel_index(cam.argmax(), cam.shape)
return x, y, angle, angle_w, cam
def restore_model(self, verbose=True):
"""
"""
if hasattr(self, 'model'):
msg = ('Model %s already restored.' %
self.model.fname.split('/')[-1])
else:
model = Model(Nclass=self.Nclass, im_w=self.im_w, im_h=self.im_h)
model_fn = os.path.join(self.model_dir,
'CAM_Nclass%d_acc*.ckpt' % self.Nclass)
#model_fn = '%s/CAM_Nclass%d_acc*.ckpt' % (self.model_dir, self.Nclass)
model_fn = glob(model_fn)
model_fn.sort()
if model_fn[-1].endswith('meta'):
model.fname = model_fn[-1].rstrip('.meta')
else:
model.fname = model_fn[-1]
# Following rlrs's comment on:
# https://github.com/tensorflow/tensorflow/issues/1325
            # seems to be necessary for getting access to the GAP weights
model_fn_meta = glob('%s.meta' % model.fname)[0]
saved = tf.train.import_meta_graph(model_fn_meta)
model.session = tf.Session(graph=model.graph)
saved.restore(model.session, model.fname)
# Restore variables from disk.
#model.saver.restore(model.session, model.fname)
self.model = model
msg = ('Model %s restored.' % model.fname.split('/')[-1])
if verbose:
print(msg)
def plot(self, frame, true_pos=None, true_angle=None,
fname=None, fig=None, verbose=False):
"""
"""
x, y, angle, angle_w, cam = self.predict(frame, verbose=verbose)
if fig is None:
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.imshow(frame)
im_h, im_w = frame.shape[:2]
plt.hold(True)
ax.imshow(cam, cmap=plt.cm.jet, alpha=0.3, interpolation='bilinear')
if not np.isnan(angle):
ax.plot(x, y, 'o', ms=5, mec=[1, 0.6, 0.3], mfc='none', mew=1)
ax.plot(x, y, 'o', ms=20, mec=[1, 0.6, 0.3], mfc='none', mew=1)
x1, y1 = get_max_gaze_line(angle, x, y, im_w, im_h, units='deg')
ax.plot([x, x1], [y, y1], '-', color=[1, 0.6, 0.2], lw=2, label='argmax')
x1, y1 = get_max_gaze_line(angle_w, x, y, im_w, im_h, units='deg')
ax.plot([x, x1], [y, y1], '-', color=[1, 0.3, 0.0], lw=2, label='weighted')
else:
ax.plot(x, y, 'o', ms=20, mfc='w', mec='w', lw=2)
if not true_pos is None:
# Maximum possible error given x, y
max_xerr, max_yerr = max(x, im_w-x), max(y, im_h-y)
max_err = np.sqrt(max_xerr**2 + max_yerr**2)
error = im_h * np.sqrt((x - true_pos['x'])**2 + (y - true_pos['y'])**2) / max_err
# Note that x,y gets replaced so that true_angle will be drawn
# starting at true_pos instead of predicted pos.
x, y = true_pos['x'], true_pos['y']
ax.plot(x, y, 'o', ms=5, mec='g', mfc='none', mew=1)
ax.plot(x, y, 'o', ms=20, mec='g', mfc='none', mew=1)
# draw position error as a bar to the right
ax.plot([im_w-4, im_w-4], [0, error], '-', c='r', lw=4)
if not true_angle is None:
x1, y1 = get_max_gaze_line(true_angle, x, y, im_w, im_h, units='deg')
ax.plot([x, x1], [y, y1], '-', color=[.3, 1., 0.], lw=2, label='True')
error_w = im_h * np.abs(anglediff(true_angle, angle_w, 'deg')) / 180
error = im_h * np.abs(anglediff(true_angle, angle, 'deg')) / 180
# Draw orientation error as a bar to the left
ax.plot([4, 4], [0, error], '-', c=[1, .6, .2], lw=4)
ax.plot([11, 11], [0, error_w], '-', c=[1, .3, 0.], lw=4)
ax.set_xlim([0, im_w])
ax.set_ylim([0, im_h])
ax.set_xticks([])
ax.set_yticks([])
#ax.legend()
if not fname is None:
fig.savefig(fname)
plt.close(fig)
def close(self):
"""
"""
if hasattr(self, 'model'):
if hasattr(self.model, 'session'):
self.model.session.close()
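
The weighted orientation estimate in predict() is a circular (vector) average: each orientation class becomes a unit vector on the circle and the softmax output supplies the weights. A minimal, self-contained sketch of that idea in plain numpy follows; the helpers angles2complex/complex2angles from the original are not available here, so their assumed behaviour is inlined.

import numpy as np

def circular_weighted_mean(angles_deg, weights):
    # Assumed behaviour of angles2complex/complex2angles, inlined:
    # map each angle to a unit vector in the complex plane, average with
    # the (renormalised) weights, and read the resulting angle back off.
    w = np.asarray(weights, dtype=float)
    w = w / w.sum()                      # mirrors p[0, :-1] / p[0, :-1].sum()
    z = (np.exp(1j * np.radians(angles_deg)) * w).sum()
    return np.degrees(np.angle(z))

# Example: four classes at 0/90/180/270 degrees, most probability near 90.
print(circular_weighted_mean([0.0, 90.0, 180.0, 270.0], [0.1, 0.7, 0.1, 0.1]))  # ~90.0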
| 0
| 0
| 0
| 29,807
| 0
| 0
| 0
| 187
| 343
|
138027569280d43b0a88e865f59f229b1878cf3b
| 14,614
|
py
|
Python
|
Py2ExeDecompiler/resources/pyc2.py
|
kuteminh11/Py2ExeDecompiler
|
e871e045334074314ab5fc377cfd8955d768c809
|
[
"Apache-2.0"
] | 175
|
2017-04-25T21:58:42.000Z
|
2022-03-28T19:19:46.000Z
|
Py2ExeDecompiler/resources/pyc2.py
|
nsxz/Py2ExeDecompiler
|
e871e045334074314ab5fc377cfd8955d768c809
|
[
"Apache-2.0"
] | 2
|
2017-04-27T11:31:43.000Z
|
2018-04-02T05:54:01.000Z
|
Py2ExeDecompiler/resources/pyc2.py
|
nsxz/Py2ExeDecompiler
|
e871e045334074314ab5fc377cfd8955d768c809
|
[
"Apache-2.0"
] | 36
|
2017-04-26T17:22:00.000Z
|
2021-08-08T09:02:42.000Z
|
import sys
from dis import opmap
def clean_ROT_TWO(bcg, skip_xrefs=True):
'''
Replace two sequential ROT_TWO sequences with NOPS
'''
count = 0
for current in bcg.nodes():
if current.next is None:
break
if current.opcode == opmap['ROT_TWO'] and \
current.next.opcode == opmap['ROT_TWO']:
if current.next.xrefs != [] and skip_xrefs:
continue
else:
current.opcode = opmap['NOP']
current.next.opcode = opmap['NOP']
count += 1
return count
def clean_ROT_THREE(bcg, skip_xrefs=True):
'''
Replace three sequential ROT_THREE sequences with NOPS
'''
count = 0
for current in bcg.nodes():
if current.next is None or current.next.next is None:
break
if current.opcode == opmap['ROT_THREE'] and \
current.next.opcode == opmap['ROT_THREE'] and \
current.next.next.opcode == opmap['ROT_THREE']:
if (current.next.xrefs != [] or current.next.next.xrefs != []) \
and skip_xrefs:
continue
else:
current.opcode = opmap['NOP']
current.next.opcode = opmap['NOP']
current.next.next.opcode = opmap['NOP']
count += 1
return count
def clean_LOAD_POP(bcg, skip_xrefs=True):
'''
Replace LOAD_CONST/POP_TOP sequences with NOPS
'''
count = 0
for current in bcg.nodes():
if current.next is None:
break
if current.opcode == opmap['LOAD_CONST'] and \
current.next.opcode == opmap['POP_TOP']:
if current.next.xrefs != [] and skip_xrefs:
continue
else:
current.opcode = opmap['NOP']
current.next.opcode = opmap['NOP']
count += 1
return count
def clean_NOPS(bcg):
'''
    Remove NOP instructions from bytecode
'''
count = 0
for current in bcg.nodes():
if current.opcode == opmap['NOP']:
bcg.delete_node(current)
count += 1
return count
if __name__ == "__main__":
main(sys.argv)
| 29.053678
| 115
| 0.529561
|
import marshal
import imp
import struct
import os
import sys
import base64
import new
import dis
from dis import opmap, opname
class Bytecode():
'''
Class to store individual instruction as a node in the graph
'''
def __init__(self, addr, buffer, prev=None, next=None, xrefs=[]):
self.opcode = ord(buffer[0])
self.addr = addr
if self.opcode >= dis.HAVE_ARGUMENT:
self.oparg = ord(buffer[1]) | (ord(buffer[2]) << 8)
else:
self.oparg = None
self.prev = prev
self.next = next
self.xrefs = []
self.target = None
self.co_lnotab = None
def len(self):
'''
Returns the length of the bytecode
1 for no argument
3 for argument
'''
if self.opcode < dis.HAVE_ARGUMENT:
return 1
else:
return 3
def disassemble(self):
'''
Return disassembly of bytecode
'''
rvalue = opname[self.opcode].ljust(20)
if self.opcode >= dis.HAVE_ARGUMENT:
rvalue += " %04x" % (self.oparg)
return rvalue
def hex(self):
'''
Return ASCII hex representation of bytecode
'''
rvalue = "%02x" % self.opcode
if self.opcode >= dis.HAVE_ARGUMENT:
rvalue += "%02x%02x" % \
(self.oparg & 0xff, (self.oparg >> 8) & 0xff)
return rvalue
def bin(self):
'''
Return bytecode string
'''
if self.opcode >= dis.HAVE_ARGUMENT:
return struct.pack("<BH", self.opcode, self.oparg)
else:
return struct.pack("<B", self.opcode)
def get_target_addr(self):
'''
Returns the target address for the current instruction based on the
current address.
'''
rvalue = None
if self.opcode in dis.hasjrel:
rvalue = self.addr + self.oparg + self.len()
if self.opcode in dis.hasjabs:
rvalue = self.oparg
return rvalue
class BytecodeGraph():
def __init__(self, code, base=0):
self.base = base
self.code = code
self.head = None
self.parse_bytecode()
self.apply_lineno()
def add_node(self, parent, bc, lnotab=None):
'''
Adds an instruction node to the graph
'''
# setup pointers for new node
bc.next = parent.next
bc.prev = parent
if lnotab is None:
bc.co_lnotab = parent.co_lnotab
else:
bc.co_lnotab = lnotab
if parent.next is not None:
parent.next.prev = bc
parent.next = bc
def apply_labels(self, start=None):
'''
Find all JMP REL and ABS bytecode sequences and update the target
within branch instruction and add xref to the destination.
'''
for current in self.nodes(start):
current.xrefs = []
current.target = None
for current in self.nodes(start):
label = -1
if current.opcode >= dis.HAVE_ARGUMENT:
if current.opcode in dis.hasjrel:
label = current.addr+current.oparg+current.len()
elif current.opcode in dis.hasjabs:
label = current.oparg
if label >= 0:
if current not in self.bytecodes[label].xrefs:
self.bytecodes[label].xrefs.append(current)
current.target = self.bytecodes[label]
current = current.next
return
def apply_lineno(self):
'''
Parses the code object co_lnotab list and applies line numbers to
bytecode. This is used to create a new co_lnotab list after modifying
bytecode.
'''
byte_increments = [ord(c) for c in self.code.co_lnotab[0::2]]
line_increments = [ord(c) for c in self.code.co_lnotab[1::2]]
lineno = self.code.co_firstlineno
addr = self.base
linenos = []
for byte_incr, line_incr in zip(byte_increments, line_increments):
addr += byte_incr
lineno += line_incr
linenos.append((addr, lineno))
if linenos == []:
return
current_addr, current_lineno = linenos.pop(0)
if linenos == []:
return
current_addr, next_lineno = linenos.pop(0)
for x in self.nodes():
if x.addr >= current_addr:
current_lineno = next_lineno
if len(linenos) != 0:
current_addr, next_lineno = linenos.pop(0)
x.co_lnotab = current_lineno
def calc_lnotab(self):
'''
Creates a new co_lineno after modifying bytecode
'''
rvalue = ""
prev_lineno = self.code.co_firstlineno
prev_offset = self.head.addr
for current in self.nodes():
if current.co_lnotab == prev_lineno:
continue
new_offset = current.co_lnotab - prev_lineno
new_offset = 0xff if new_offset > 0xff else new_offset
rvalue += struct.pack("BB", current.addr - prev_offset,
(current.co_lnotab - prev_lineno) & 0xff)
prev_lineno = current.co_lnotab
prev_offset = current.addr
return rvalue
def delete_node(self, node):
'''
Deletes a node from the graph, removing the instruction from the
produced bytecode stream
'''
# For each instruction pointing to instruction to be delete,
# move the pointer to the next instruction
for x in node.xrefs:
x.target = node.next
if node.next is not None:
node.next.xrefs.append(x)
# Clean up the doubly linked list
if node.prev is not None:
node.prev.next = node.next
if node.next is not None:
node.next.prev = node.prev
if node == self.head:
self.head = node.next
del self.bytecodes[node.addr]
def disassemble(self, start=None, count=None):
'''
Simple disassembly routine for analyzing nodes in the graph
'''
rvalue = ""
for x in self.nodes(start):
rvalue += "[%04d] %04x %-6s %s\n" % \
(x.co_lnotab, x.addr, x.hex(), x.disassemble())
return rvalue
def get_code(self, start=None):
'''
Produce a new code object based on the graph
'''
self.refactor()
# generate a new co_lineno
new_co_lineno = self.calc_lnotab()
# generate new bytecode stream
new_co_code = ""
for x in self.nodes(start):
new_co_code += x.bin()
# create a new code object with modified bytecode and updated line numbers
# a new code object is necessary because co_code is readonly
rvalue = new.code(self.code.co_argcount,
self.code.co_nlocals,
self.code.co_stacksize,
self.code.co_flags,
new_co_code,
self.code.co_consts,
self.code.co_names,
self.code.co_varnames,
self.code.co_filename,
self.code.co_name,
self.code.co_firstlineno,
new_co_lineno)
return rvalue
def nodes(self, start=None):
'''
Iterator for stepping through bytecodes in order
'''
if start is None:
current = self.head
else:
current = start
while current is not None:
yield current
current = current.next
raise StopIteration
def parse_bytecode(self):
'''
Parses the bytecode stream and creates an instruction graph
'''
self.bytecodes = {}
prev = None
offset = 0
targets = []
while offset < len(self.code.co_code):
next = Bytecode(self.base + offset,
self.code.co_code[offset:offset+3],
prev)
self.bytecodes[self.base + offset] = next
            offset += next.len()
if prev is not None:
prev.next = next
prev = next
if next.get_target_addr() is not None:
targets.append(next.get_target_addr())
for x in targets:
if x not in self.bytecodes:
print "Nonlinear issue at offset: %08x" % x
self.head = self.bytecodes[self.base]
self.apply_labels()
return
def patch_opargs(self, start=None):
'''
Updates branch instructions to correct offsets after adding or
deleting bytecode
'''
for current in self.nodes(start):
# No argument, skip to next
if current.opcode < dis.HAVE_ARGUMENT:
continue
# Patch relative offsets
if current.opcode in dis.hasjrel:
current.oparg = current.target.addr - \
(current.addr+current.len())
# Patch absolute offsets
elif current.opcode in dis.hasjabs:
current.oparg = current.target.addr
def refactor(self):
'''
iterates through all bytecodes and determines correct offset
position in code sequence after adding or removing bytecode
'''
offset = self.base
new_bytecodes = {}
for current in self.nodes():
new_bytecodes[offset] = current
current.addr = offset
offset += current.len()
current = current.next
self.bytecodes = new_bytecodes
self.patch_opargs()
self.apply_labels()
def remove_obf(code):
code = bytearray(code)
i = 0
while i < len(code):
op = code[i]
if code[i] == opmap['ROT_TWO'] and code[i+1] == opmap['ROT_TWO']:
code[i] = opmap['NOP']
code[i+1] = opmap['NOP']
elif code[i] == opmap['ROT_THREE'] and code[i+1] == opmap['ROT_THREE'] and code[i+2] == opmap['ROT_THREE']:
code[i] = opmap['NOP']
code[i+1] = opmap['NOP']
code[i+2] = opmap['NOP']
elif code[i] == opmap['LOAD_CONST'] and code[i+3] == opmap['POP_TOP']:
code[i] = opmap['NOP']
code[i+1] = opmap['NOP']
code[i+2] = opmap['NOP']
code[i+3] = opmap['NOP']
i += 1
if op >= dis.HAVE_ARGUMENT:
i += 2
return "".join(chr(c) for c in code)
def clean_ROT_TWO(bcg, skip_xrefs=True):
'''
Replace two sequential ROT_TWO sequences with NOPS
'''
count = 0
for current in bcg.nodes():
if current.next is None:
break
if current.opcode == opmap['ROT_TWO'] and \
current.next.opcode == opmap['ROT_TWO']:
if current.next.xrefs != [] and skip_xrefs:
continue
else:
current.opcode = opmap['NOP']
current.next.opcode = opmap['NOP']
count += 1
return count
def clean_ROT_THREE(bcg, skip_xrefs=True):
'''
Replace three sequential ROT_THREE sequences with NOPS
'''
count = 0
for current in bcg.nodes():
if current.next is None or current.next.next is None:
break
if current.opcode == opmap['ROT_THREE'] and \
current.next.opcode == opmap['ROT_THREE'] and \
current.next.next.opcode == opmap['ROT_THREE']:
if (current.next.xrefs != [] or current.next.next.xrefs != []) \
and skip_xrefs:
continue
else:
current.opcode = opmap['NOP']
current.next.opcode = opmap['NOP']
current.next.next.opcode = opmap['NOP']
count += 1
return count
def clean_LOAD_POP(bcg, skip_xrefs=True):
'''
Replace LOAD_CONST/POP_TOP sequences with NOPS
'''
count = 0
for current in bcg.nodes():
if current.next is None:
break
if current.opcode == opmap['LOAD_CONST'] and \
current.next.opcode == opmap['POP_TOP']:
if current.next.xrefs != [] and skip_xrefs:
continue
else:
current.opcode = opmap['NOP']
current.next.opcode = opmap['NOP']
count += 1
return count
def clean_NOPS(bcg):
'''
    Remove NOP instructions from bytecode
'''
count = 0
for current in bcg.nodes():
if current.opcode == opmap['NOP']:
bcg.delete_node(current)
count += 1
return count
def clean(code, skip_xrefs=True):
bcg = BytecodeGraph(code)
rot_two = clean_ROT_TWO(bcg, skip_xrefs)
rot_three = clean_ROT_THREE(bcg, skip_xrefs)
load_pop = clean_LOAD_POP(bcg, skip_xrefs)
nops = clean_NOPS(bcg)
# return new code object if modifications were made
if rot_two > 0 or rot_three > 0 or load_pop > 0 or nops > 0:
return bcg.get_code()
return None
def main(argv):
pycodeobject = argv[1]
deobfuscate = "False"
if len(argv) > 2:
deobfuscate = argv[2]
if pycodeobject is None:
sys.exit(1)
bytesdecoded = bytes(base64.b64decode(pycodeobject))
ob = marshal.loads(bytesdecoded)
for i in range(0, len(ob)):
with open(str(i)+'.pyc', 'wb') as fc:
fc.write(imp.get_magic())
fc.close()
with open(str(i)+'.pyc', 'a') as fc:
x = int(os.stat(str(i)+'.pyc').st_mtime)
fc.write(chr(x & 0xff))
fc.write(chr((x >> 8) & 0xff))
fc.write(chr((x >> 16) & 0xff))
fc.write(chr((x >> 24) & 0xff))
fc.close()
with open(str(i)+'.pyc', 'ab') as fc:
code = clean(ob[i])
if "False" in deobfuscate:
marshal.dump(ob[i], fc)
elif code is None:
marshal.dump(ob[i], fc)
else:
marshal.dump(code, fc)
fc.close()
with open(str(i)+'.pyc', 'rb') as fc:
print str(i)+'.pyc;'+base64.b64encode(fc.read())
fc.close()
os.remove(str(i)+'.pyc')
return
if __name__ == "__main__":
main(sys.argv)
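
To see what the clean_* passes are undoing without touching dis or real code objects, here is a toy, illustrative restatement of the ROT_TWO peephole over a list of mnemonics; the names and input sequence are made up, and the xref bookkeeping of the real pass is omitted.

def nop_out_rot_two(mnemonics):
    # Toy version of clean_ROT_TWO: two consecutive ROT_TWO instructions
    # cancel each other out, so both are rewritten to NOP. clean_NOPS would
    # then drop them from the stream entirely.
    out = list(mnemonics)
    i = 0
    while i < len(out) - 1:
        if out[i] == 'ROT_TWO' and out[i + 1] == 'ROT_TWO':
            out[i] = out[i + 1] = 'NOP'
            i += 2
        else:
            i += 1
    return out

print(nop_out_rot_two(['LOAD_NAME', 'ROT_TWO', 'ROT_TWO', 'RETURN_VALUE']))
# ['LOAD_NAME', 'NOP', 'NOP', 'RETURN_VALUE']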
| 0
| 0
| 0
| 9,818
| 0
| 2,324
| 0
| -60
| 269
|
be8eb67926a73287d11d270f9ad6d308dbf977a1
| 23,118
|
py
|
Python
|
rotkehlchen/chain/ethereum/modules/liquity/trove.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/chain/ethereum/modules/liquity/trove.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/chain/ethereum/modules/liquity/trove.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
import logging
from typing import TYPE_CHECKING
from rotkehlchen.logging import RotkehlchenLogsAdapter
if TYPE_CHECKING:
MIN_COLL_RATE = '1.1'
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
| 40.700704
| 128
| 0.541483
|
import logging
from collections import defaultdict
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Literal, NamedTuple, Optional
from eth_utils import to_checksum_address
from gevent.lock import Semaphore
from rotkehlchen.accounting.structures.balance import AssetBalance, Balance
from rotkehlchen.chain.ethereum.contracts import EthereumContract
from rotkehlchen.chain.ethereum.defi.defisaver_proxy import HasDSProxy
from rotkehlchen.chain.ethereum.graph import (
SUBGRAPH_REMOTE_ERROR_MSG,
Graph,
format_query_indentation,
)
from rotkehlchen.chain.ethereum.utils import multicall_2, token_normalized_value_decimals
from rotkehlchen.constants.assets import A_ETH, A_LQTY, A_LUSD, A_USD
from rotkehlchen.constants.ethereum import LIQUITY_TROVE_MANAGER
from rotkehlchen.errors.misc import ModuleInitializationFailure, RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.fval import FVal
from rotkehlchen.history.price import PriceHistorian
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.premium.premium import Premium
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_optional_to_fval,
)
from rotkehlchen.types import ChecksumEthAddress, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
from rotkehlchen.utils.mixins.serializableenum import SerializableEnumMixin
from .graph import QUERY_STAKE, QUERY_TROVE
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.manager import EthereumManager
from rotkehlchen.db.dbhandler import DBHandler
MIN_COLL_RATE = '1.1'
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
class TroveOperation(SerializableEnumMixin):
OPENTROVE = 1
CLOSETROVE = 2
ADJUSTTROVE = 3
ACCRUEREWARDS = 4
LIQUIDATEINNORMALMODE = 5
LIQUIDATEINRECOVERYMODE = 6
REDEEMCOLLATERAL = 7
def __str__(self) -> str:
if self == TroveOperation.OPENTROVE:
return 'Open Trove'
if self == TroveOperation.CLOSETROVE:
return 'Close Trove'
if self == TroveOperation.ADJUSTTROVE:
return 'Adjust Trove'
if self == TroveOperation.ACCRUEREWARDS:
return 'Accrue Rewards'
if self == TroveOperation.LIQUIDATEINNORMALMODE:
return 'Liquidation In Normal Mode'
if self == TroveOperation.LIQUIDATEINRECOVERYMODE:
return 'Liquidation In Recovery Mode'
if self == TroveOperation.REDEEMCOLLATERAL:
return 'Redeem Collateral'
# else
raise AssertionError(f'Invalid value {self} for TroveOperation')
class LiquityStakeEventType(SerializableEnumMixin):
STAKE_CREATED = 1
STAKE_INCREASED = 2
STAKE_DECREASED = 3
STAKE_REMOVED = 4
STAKE_WITHDRAWN = 5
@staticmethod
def deserialize(value: str) -> 'LiquityStakeEventType':
if value == 'stakeCreated':
return LiquityStakeEventType.STAKE_CREATED
if value == 'stakeIncreased':
return LiquityStakeEventType.STAKE_INCREASED
if value == 'stakeDecreased':
return LiquityStakeEventType.STAKE_DECREASED
if value == 'stakeRemoved':
return LiquityStakeEventType.STAKE_REMOVED
if value == 'gainsWithdrawn':
return LiquityStakeEventType.STAKE_WITHDRAWN
# else
raise DeserializationError(f'Encountered unknown LiquityStakeEventType value {value}')
@dataclass(frozen=True)
class LiquityEvent:
kind: Literal['stake', 'trove']
tx: str
address: str
timestamp: Timestamp
sequence_number: str
def serialize(self) -> Dict[str, Any]:
return {
'kind': self.kind,
'tx': self.tx,
'sequence_number': self.sequence_number,
'address': self.address,
'timestamp': self.timestamp,
}
@dataclass(frozen=True)
class LiquityTroveEvent(LiquityEvent):
debt_after: AssetBalance
collateral_after: AssetBalance
debt_delta: AssetBalance
collateral_delta: AssetBalance
trove_operation: TroveOperation
def serialize(self) -> Dict[str, Any]:
result = super().serialize()
result['debt_after'] = self.debt_after.serialize()
result['debt_delta'] = self.debt_delta.serialize()
result['collateral_after'] = self.collateral_after.serialize()
result['collateral_delta'] = self.collateral_delta.serialize()
result['trove_operation'] = str(self.trove_operation)
return result
@dataclass(frozen=True)
class LiquityStakeEvent(LiquityEvent):
stake_after: AssetBalance
stake_change: AssetBalance
issuance_gain: AssetBalance
redemption_gain: AssetBalance
stake_operation: LiquityStakeEventType
def serialize(self) -> Dict[str, Any]:
result = super().serialize()
result['stake_after'] = self.stake_after.serialize()
result['stake_change'] = self.stake_change.serialize()
result['issuance_gain'] = self.issuance_gain.serialize()
result['redemption_gain'] = self.redemption_gain.serialize()
result['stake_operation'] = str(self.stake_operation)
return result
class Trove(NamedTuple):
collateral: AssetBalance
debt: AssetBalance
collateralization_ratio: Optional[FVal]
liquidation_price: Optional[FVal]
active: bool
trove_id: int
def serialize(self) -> Dict[str, Any]:
result: Dict[str, Any] = {}
result['collateral'] = self.collateral.serialize()
result['debt'] = self.debt.serialize()
result['collateralization_ratio'] = self.collateralization_ratio
result['liquidation_price'] = self.liquidation_price
result['active'] = self.active
result['trove_id'] = self.trove_id
return result
class StakePosition(NamedTuple):
staked: AssetBalance
def serialize(self) -> Dict[str, Any]:
return self.staked.serialize()
class Liquity(HasDSProxy):
def __init__(
self,
ethereum_manager: 'EthereumManager',
database: 'DBHandler',
premium: Optional[Premium],
msg_aggregator: MessagesAggregator,
) -> None:
super().__init__(
ethereum_manager=ethereum_manager,
database=database,
premium=premium,
msg_aggregator=msg_aggregator,
)
self.history_lock = Semaphore()
try:
self.graph = Graph(
'https://api.thegraph.com/subgraphs/name/liquity/liquity',
)
except RemoteError as e:
self.msg_aggregator.add_error(
SUBGRAPH_REMOTE_ERROR_MSG.format(protocol='Liquity', error_msg=str(e)),
)
raise ModuleInitializationFailure('Liquity Subgraph remote error') from e
def get_positions(
self,
addresses_list: List[ChecksumEthAddress],
) -> Dict[ChecksumEthAddress, Trove]:
contract = EthereumContract(
address=LIQUITY_TROVE_MANAGER.address,
abi=LIQUITY_TROVE_MANAGER.abi,
deployed_block=LIQUITY_TROVE_MANAGER.deployed_block,
)
# make a copy of the list to avoid modifications in the list that is passed as argument
addresses = list(addresses_list)
proxied_addresses = self._get_accounts_having_proxy()
proxies_to_address = {v: k for k, v in proxied_addresses.items()}
addresses += proxied_addresses.values()
calls = [
(LIQUITY_TROVE_MANAGER.address, contract.encode(method_name='Troves', arguments=[x]))
for x in addresses
]
outputs = multicall_2(
ethereum=self.ethereum,
require_success=False,
calls=calls,
)
data: Dict[ChecksumEthAddress, Trove] = {}
eth_price = Inquirer().find_usd_price(A_ETH)
lusd_price = Inquirer().find_usd_price(A_LUSD)
for idx, output in enumerate(outputs):
status, result = output
if status is True:
try:
trove_info = contract.decode(result, 'Troves', arguments=[addresses[idx]])
trove_is_active = bool(trove_info[3]) # pylint: disable=unsubscriptable-object
if not trove_is_active:
continue
collateral = deserialize_asset_amount(
token_normalized_value_decimals(trove_info[1], 18), # noqa: E501 pylint: disable=unsubscriptable-object
)
debt = deserialize_asset_amount(
token_normalized_value_decimals(trove_info[0], 18), # noqa: E501 pylint: disable=unsubscriptable-object
)
collateral_balance = AssetBalance(
asset=A_ETH,
balance=Balance(
amount=collateral,
usd_value=eth_price * collateral,
),
)
debt_balance = AssetBalance(
asset=A_LUSD,
balance=Balance(
amount=debt,
usd_value=lusd_price * debt,
),
)
# Avoid division errors
collateralization_ratio: Optional[FVal]
liquidation_price: Optional[FVal]
if debt > 0:
collateralization_ratio = eth_price * collateral / debt * 100
else:
collateralization_ratio = None
if collateral > 0:
liquidation_price = debt * lusd_price * FVal(MIN_COLL_RATE) / collateral
else:
liquidation_price = None
account_address = addresses[idx]
if account_address in proxies_to_address:
account_address = proxies_to_address[account_address]
data[account_address] = Trove(
collateral=collateral_balance,
debt=debt_balance,
collateralization_ratio=collateralization_ratio,
liquidation_price=liquidation_price,
active=trove_is_active,
trove_id=trove_info[4], # pylint: disable=unsubscriptable-object
)
except DeserializationError as e:
self.msg_aggregator.add_warning(
f'Ignoring Liquity trove information. '
f'Failed to decode contract information. {str(e)}.',
)
return data
def liquity_staking_balances(
self,
addresses: List[ChecksumEthAddress],
) -> Dict[ChecksumEthAddress, StakePosition]:
staked = self._get_raw_history(addresses, 'stake')
lqty_price = Inquirer().find_usd_price(A_LQTY)
data = {}
for stake in staked['lqtyStakes']:
try:
owner = to_checksum_address(stake['id'])
amount = deserialize_optional_to_fval(
value=stake['amount'],
name='amount',
location='liquity',
)
position = AssetBalance(
asset=A_LQTY,
balance=Balance(
amount=amount,
usd_value=lqty_price * amount,
),
)
data[owner] = StakePosition(position)
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Ignoring Liquity staking information. '
f'Failed to decode remote response. {msg}.',
)
continue
return data
def _get_raw_history(
self,
addresses: List[ChecksumEthAddress],
query_for: Literal['stake', 'trove'],
) -> Dict[str, Any]:
param_types = {
'$addresses': '[Bytes!]',
}
param_values = {
'addresses': [addr.lower() for addr in addresses],
}
if query_for == 'trove':
querystr = format_query_indentation(QUERY_TROVE)
else:
querystr = format_query_indentation(QUERY_STAKE)
return self.graph.query(
querystr=querystr,
param_types=param_types,
param_values=param_values,
)
def get_trove_history(
self,
addresses: List[ChecksumEthAddress],
from_timestamp: Timestamp,
to_timestamp: Timestamp,
) -> Dict[ChecksumEthAddress, List[LiquityEvent]]:
addresses_to_query = list(addresses)
proxied_addresses = self._get_accounts_having_proxy()
proxies_to_address = {v: k for k, v in proxied_addresses.items()}
addresses_to_query += proxied_addresses.values()
try:
query = self._get_raw_history(addresses_to_query, 'trove')
except RemoteError as e:
log.error(f'Failed to query trove graph events for liquity. {str(e)}')
query = {}
result: Dict[ChecksumEthAddress, List[LiquityEvent]] = defaultdict(list)
for trove in query.get('troves', []):
owner = to_checksum_address(trove['owner']['id'])
if owner in proxies_to_address:
owner = proxies_to_address[owner]
for change in trove['changes']:
try:
timestamp = change['transaction']['timestamp']
if timestamp < from_timestamp:
continue
if timestamp > to_timestamp:
break
operation = TroveOperation.deserialize(change['troveOperation'])
collateral_change = deserialize_optional_to_fval(
value=change['collateralChange'],
name='collateralChange',
location='liquity',
)
debt_change = deserialize_optional_to_fval(
value=change['debtChange'],
name='debtChange',
location='liquity',
)
lusd_price = PriceHistorian().query_historical_price(
from_asset=A_LUSD,
to_asset=A_USD,
timestamp=timestamp,
)
eth_price = PriceHistorian().query_historical_price(
from_asset=A_ETH,
to_asset=A_USD,
timestamp=timestamp,
)
debt_after_amount = deserialize_optional_to_fval(
value=change['debtAfter'],
name='debtAfter',
location='liquity',
)
collateral_after_amount = deserialize_optional_to_fval(
value=change['collateralAfter'],
name='collateralAfter',
location='liquity',
)
event = LiquityTroveEvent(
kind='trove',
tx=change['transaction']['id'],
address=owner,
timestamp=timestamp,
debt_after=AssetBalance(
asset=A_LUSD,
balance=Balance(
amount=debt_after_amount,
usd_value=lusd_price * debt_after_amount,
),
),
collateral_after=AssetBalance(
asset=A_ETH,
balance=Balance(
amount=collateral_after_amount,
usd_value=eth_price * collateral_after_amount,
),
),
debt_delta=AssetBalance(
asset=A_LUSD,
balance=Balance(
amount=debt_change,
usd_value=lusd_price * debt_change,
),
),
collateral_delta=AssetBalance(
asset=A_ETH,
balance=Balance(
amount=collateral_change,
usd_value=eth_price * collateral_change,
),
),
trove_operation=operation,
sequence_number=str(change['sequenceNumber']),
)
result[owner].append(event)
except (DeserializationError, KeyError) as e:
log.debug(f'Failed to deserialize Liquity trove event: {change}')
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Ignoring Liquity Trove event in Liquity. '
f'Failed to decode remote information. {msg}.',
)
continue
return result
def get_staking_history(
self,
addresses: List[ChecksumEthAddress],
from_timestamp: Timestamp,
to_timestamp: Timestamp,
) -> Dict[ChecksumEthAddress, List[LiquityEvent]]:
try:
staked = self._get_raw_history(addresses, 'stake')
except RemoteError as e:
log.error(f'Failed to query stake graph events for liquity. {str(e)}')
staked = {}
result: Dict[ChecksumEthAddress, List[LiquityEvent]] = defaultdict(list)
for stake in staked.get('lqtyStakes', []):
owner = to_checksum_address(stake['id'])
for change in stake['changes']:
try:
timestamp = change['transaction']['timestamp']
if timestamp < from_timestamp:
continue
if timestamp > to_timestamp:
break
operation_stake = LiquityStakeEventType.deserialize(change['stakeOperation'])
lqty_price = PriceHistorian().query_historical_price(
from_asset=A_LQTY,
to_asset=A_USD,
timestamp=timestamp,
)
lusd_price = PriceHistorian().query_historical_price(
from_asset=A_LUSD,
to_asset=A_USD,
timestamp=timestamp,
)
stake_after = deserialize_optional_to_fval(
value=change['stakedAmountAfter'],
name='stakedAmountAfter',
location='liquity',
)
stake_change = deserialize_optional_to_fval(
value=change['stakedAmountChange'],
name='stakedAmountChange',
location='liquity',
)
issuance_gain = deserialize_optional_to_fval(
value=change['issuanceGain'],
name='issuanceGain',
location='liquity',
)
redemption_gain = deserialize_optional_to_fval(
value=change['redemptionGain'],
name='redemptionGain',
location='liquity',
)
stake_event = LiquityStakeEvent(
kind='stake',
tx=change['transaction']['id'],
address=owner,
timestamp=timestamp,
stake_after=AssetBalance(
asset=A_LQTY,
balance=Balance(
amount=stake_after,
usd_value=lqty_price * stake_after,
),
),
stake_change=AssetBalance(
asset=A_LQTY,
balance=Balance(
amount=stake_change,
usd_value=lqty_price * stake_change,
),
),
issuance_gain=AssetBalance(
asset=A_LUSD,
balance=Balance(
amount=issuance_gain,
usd_value=lusd_price * issuance_gain,
),
),
redemption_gain=AssetBalance(
asset=A_LUSD,
balance=Balance(
amount=redemption_gain,
usd_value=lusd_price * redemption_gain,
),
),
stake_operation=operation_stake,
sequence_number=str(change['transaction']['sequenceNumber']),
)
result[owner].append(stake_event)
except (DeserializationError, KeyError) as e:
msg = str(e)
log.debug(f'Failed to deserialize Liquity entry: {change}')
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Ignoring Liquity Stake event in Liquity. '
f'Failed to decode remote information. {msg}.',
)
continue
return result
# -- Methods following the EthereumModule interface -- #
def on_account_addition(self, address: ChecksumEthAddress) -> Optional[List['AssetBalance']]:
super().on_account_addition(address)
trove_info = self.get_positions([address])
result = []
if address in trove_info:
result.append(trove_info[address].collateral)
stake_info = self.liquity_staking_balances([address])
if address in stake_info:
result.append(stake_info[address].staked)
return result
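
As a worked example of the trove arithmetic in get_positions(): the numbers below are invented for illustration, and Decimal stands in for the module's FVal.

from decimal import Decimal

# Hypothetical figures, not real market data.
eth_price = Decimal('2000')     # USD per ETH
lusd_price = Decimal('1')       # USD per LUSD
collateral = Decimal('10')      # ETH locked in the trove
debt = Decimal('8000')          # LUSD minted against it
MIN_COLL_RATE = Decimal('1.1')  # same constant as the module

# Mirrors: eth_price * collateral / debt * 100
collateralization_ratio = eth_price * collateral / debt * 100        # 250 (%)
# Mirrors: debt * lusd_price * FVal(MIN_COLL_RATE) / collateral
liquidation_price = debt * lusd_price * MIN_COLL_RATE / collateral   # 880 USD/ETH

print(collateralization_ratio, liquidation_price)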
| 0
| 2,290
| 0
| 18,867
| 0
| 0
| 0
| 1,010
| 723
|
09649fc65519ebd7fff4291f0c134bd3591e2f27
| 792
|
py
|
Python
|
accounts/api/views.py
|
mcastellin/anem-per-feina
|
5c7072c560e8e34355f7bbf7db12e36403766e68
|
[
"MIT"
] | null | null | null |
accounts/api/views.py
|
mcastellin/anem-per-feina
|
5c7072c560e8e34355f7bbf7db12e36403766e68
|
[
"MIT"
] | null | null | null |
accounts/api/views.py
|
mcastellin/anem-per-feina
|
5c7072c560e8e34355f7bbf7db12e36403766e68
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
User = get_user_model()
| 33
| 80
| 0.768939
|
from rest_framework import decorators, permissions, response, status
from rest_framework.request import Request
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext as _
from .serializers import UserCreateSerializer
User = get_user_model()
@decorators.api_view(["POST"])
@decorators.permission_classes([permissions.AllowAny])
def registration(request: Request) -> response.Response:
serializer = UserCreateSerializer(data=request.data)
if not serializer.is_valid(raise_exception=True):
return response.Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
serializer.save()
res = {
"status": True,
"message": _("Successfully registered"),
}
return response.Response(res, status.HTTP_201_CREATED)
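
A hedged sketch of exercising the endpoint from a DRF test, assuming the view is importable as accounts.api.views.registration and that UserCreateSerializer accepts a username/email/password payload; neither the URL nor the serializer fields are defined in this file, so both are assumptions.

# Illustrative only; requires a configured Django test environment.
from rest_framework.test import APIRequestFactory
from accounts.api.views import registration  # assumed import path

factory = APIRequestFactory()
request = factory.post(
    "/api/register/",  # assumed URL
    {"username": "jane", "email": "jane@example.com", "password": "s3cret-pass"},
    format="json",
)
response = registration(request)
print(response.status_code)  # 201 when the serializer accepts the payload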
| 0
| 485
| 0
| 0
| 0
| 0
| 0
| 121
| 112
|
3863645c8e2604383c3376d4ce87e6f9580e9466
| 1,526
|
py
|
Python
|
Blog/migrations/0002_multimedia_news_posttags.py
|
softrebel/djangoBlog
|
1c93d15788b37cf3fd53479419064dcaa234ecad
|
[
"MIT"
] | null | null | null |
Blog/migrations/0002_multimedia_news_posttags.py
|
softrebel/djangoBlog
|
1c93d15788b37cf3fd53479419064dcaa234ecad
|
[
"MIT"
] | null | null | null |
Blog/migrations/0002_multimedia_news_posttags.py
|
softrebel/djangoBlog
|
1c93d15788b37cf3fd53479419064dcaa234ecad
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-03-31 16:43
| 38.15
| 114
| 0.577326
|
# Generated by Django 2.1.7 on 2019-03-31 16:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MultiMedia',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('multiMedia', models.FileField(blank=True, null=True, upload_to='')),
('post', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='Blog.Post')),
],
),
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lead', models.CharField(max_length=200)),
('post', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='Blog.Post')),
],
),
migrations.CreateModel(
name='PostTags',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('createdDate', models.DateTimeField(auto_now=True)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Blog.Post')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Blog.Tag')),
],
),
]
| 0
| 0
| 0
| 1,379
| 0
| 0
| 0
| 30
| 68
|
183886fe1e13ac8ce545eb2db5d498e15f3c866e
| 8,435
|
py
|
Python
|
qiskit/transpiler/passes/optimization/consolidate_blocks.py
|
HuangJunye/qiskit-terra
|
0c8bb3dbf8d688590431ca79a83ba8aede84ed20
|
[
"Apache-2.0"
] | null | null | null |
qiskit/transpiler/passes/optimization/consolidate_blocks.py
|
HuangJunye/qiskit-terra
|
0c8bb3dbf8d688590431ca79a83ba8aede84ed20
|
[
"Apache-2.0"
] | 2
|
2022-03-30T10:09:44.000Z
|
2022-03-30T10:09:45.000Z
|
qiskit/transpiler/passes/optimization/consolidate_blocks.py
|
HuangJunye/qiskit-terra
|
0c8bb3dbf8d688590431ca79a83ba8aede84ed20
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=cell-var-from-loop
"""Replace each block of consecutive gates by a single Unitary node."""
| 44.867021
| 90
| 0.583521
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=cell-var-from-loop
"""Replace each block of consecutive gates by a single Unitary node."""
from qiskit.circuit import QuantumRegister, ClassicalRegister, QuantumCircuit, Gate
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info.synthesis import TwoQubitBasisDecomposer
from qiskit.extensions import UnitaryGate
from qiskit.circuit.library.standard_gates import CXGate
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
from qiskit.transpiler.passes.synthesis import unitary_synthesis
class ConsolidateBlocks(TransformationPass):
"""Replace each block of consecutive gates by a single Unitary node.
Pass to consolidate sequences of uninterrupted gates acting on
the same qubits into a Unitary node, to be resynthesized later,
to a potentially more optimal subcircuit.
Notes:
This pass assumes that the 'blocks_list' property that it reads is
given such that blocks are in topological order. The blocks are
collected by a previous pass, such as `Collect2qBlocks`.
"""
def __init__(self,
kak_basis_gate=None,
force_consolidate=False,
basis_gates=None):
"""ConsolidateBlocks initializer.
Args:
kak_basis_gate (Gate): Basis gate for KAK decomposition.
force_consolidate (bool): Force block consolidation
basis_gates (List(str)): Basis gates from which to choose a KAK gate.
"""
super().__init__()
self.basis_gates = basis_gates
self.force_consolidate = force_consolidate
if kak_basis_gate is not None:
self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate)
elif basis_gates is not None:
kak_basis_gate = unitary_synthesis._choose_kak_gate(basis_gates)
if kak_basis_gate is not None:
self.decomposer = TwoQubitBasisDecomposer(kak_basis_gate)
else:
self.decomposer = None
else:
self.decomposer = TwoQubitBasisDecomposer(CXGate())
def run(self, dag):
"""Run the ConsolidateBlocks pass on `dag`.
Iterate over each block and replace it with an equivalent Unitary
on the same wires.
"""
if self.decomposer is None:
return dag
new_dag = dag._copy_circuit_metadata()
# compute ordered indices for the global circuit wires
global_index_map = {wire: idx for idx, wire in enumerate(dag.qubits)}
blocks = self.property_set['block_list']
# just to make checking if a node is in any block easier
all_block_nodes = {nd for bl in blocks for nd in bl}
for node in dag.topological_op_nodes():
if node not in all_block_nodes:
# need to add this node to find out where in the list it goes
preds = [nd for nd in dag.predecessors(node) if nd.type == 'op']
block_count = 0
while preds:
if block_count < len(blocks):
block = blocks[block_count]
# if any of the predecessors are in the block, remove them
preds = [p for p in preds if p not in block]
else:
# should never occur as this would mean not all
# nodes before this one topologically had been added
# so not all predecessors were removed
raise TranspilerError("Not all predecessors removed due to error"
" in topological order")
block_count += 1
# we have now seen all predecessors
# so update the blocks list to include this block
blocks = blocks[:block_count] + [[node]] + blocks[block_count:]
# create the dag from the updated list of blocks
basis_gate_name = self.decomposer.gate.name
for block in blocks:
if len(block) == 1 and block[0].name != basis_gate_name:
# pylint: disable=too-many-boolean-expressions
if block[0].type == 'op' \
and self.basis_gates \
and block[0].name not in self.basis_gates \
and len(block[0].cargs) == 0 and block[0].condition is None \
and isinstance(block[0].op, Gate) \
and hasattr(block[0].op, '__array__') \
and not block[0].op.is_parameterized():
new_dag.apply_operation_back(UnitaryGate(block[0].op.to_matrix()),
block[0].qargs, block[0].cargs)
else:
# an intermediate node that was added into the overall list
new_dag.apply_operation_back(block[0].op, block[0].qargs,
block[0].cargs)
else:
# find the qubits involved in this block
block_qargs = set()
block_cargs = set()
for nd in block:
block_qargs |= set(nd.qargs)
if nd.condition:
block_cargs |= set(nd.condition[0])
# convert block to a sub-circuit, then simulate unitary and add
q = QuantumRegister(len(block_qargs))
# if condition in node, add clbits to circuit
if len(block_cargs) > 0:
c = ClassicalRegister(len(block_cargs))
subcirc = QuantumCircuit(q, c)
else:
subcirc = QuantumCircuit(q)
block_index_map = self._block_qargs_to_indices(block_qargs,
global_index_map)
basis_count = 0
for nd in block:
if nd.op.name == basis_gate_name:
basis_count += 1
subcirc.append(nd.op, [q[block_index_map[i]] for i in nd.qargs])
unitary = UnitaryGate(Operator(subcirc)) # simulates the circuit
max_2q_depth = 20 # If depth > 20, there will be 1q gates to consolidate.
if ( # pylint: disable=too-many-boolean-expressions
self.force_consolidate
or unitary.num_qubits > 2
or self.decomposer.num_basis_gates(unitary) < basis_count
or len(subcirc) > max_2q_depth
or (self.basis_gates is not None
and not set(subcirc.count_ops()).issubset(self.basis_gates))
):
new_dag.apply_operation_back(
UnitaryGate(unitary),
sorted(block_qargs, key=lambda x: block_index_map[x]))
else:
for nd in block:
new_dag.apply_operation_back(nd.op, nd.qargs, nd.cargs)
return new_dag
def _block_qargs_to_indices(self, block_qargs, global_index_map):
"""Map each qubit in block_qargs to its wire position among the block's wires.
Args:
block_qargs (list): list of qubits that a block acts on
global_index_map (dict): mapping from each qubit in the
circuit to its wire position within that circuit
Returns:
dict: mapping from qarg to position in block
"""
block_indices = [global_index_map[q] for q in block_qargs]
ordered_block_indices = sorted(block_indices)
block_positions = {q: ordered_block_indices.index(global_index_map[q])
for q in block_qargs}
return block_positions
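
In practice the pass runs right after a block-collection pass such as Collect2qBlocks, which populates the 'block_list' property it reads; a minimal usage sketch with an arbitrary two-qubit circuit:

# Collect 2-qubit runs, then consolidate each run into a single unitary node.
from qiskit import QuantumCircuit
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Collect2qBlocks, ConsolidateBlocks

qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.rz(0.3, 1)
qc.cx(0, 1)

pm = PassManager([Collect2qBlocks(), ConsolidateBlocks(force_consolidate=True)])
consolidated = pm.run(qc)
print(consolidated)  # the two-qubit run shows up as one 'unitary' instruction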
| 0
| 0
| 0
| 7,333
| 0
| 0
| 0
| 306
| 200
|
a41c6af363966da210561bb7fd8f3b3ce7b0c46e
| 891
|
py
|
Python
|
apps/auth/migrations/0002_websettings.py
|
realnoobs/wagtail_simple_blog
|
01b35153f6dd90e9c12234a5aaae8eebe3940f37
|
[
"MIT"
] | null | null | null |
apps/auth/migrations/0002_websettings.py
|
realnoobs/wagtail_simple_blog
|
01b35153f6dd90e9c12234a5aaae8eebe3940f37
|
[
"MIT"
] | null | null | null |
apps/auth/migrations/0002_websettings.py
|
realnoobs/wagtail_simple_blog
|
01b35153f6dd90e9c12234a5aaae8eebe3940f37
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.10 on 2021-12-24 07:16
| 33
| 131
| 0.606061
|
# Generated by Django 3.2.10 on 2021-12-24 07:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0066_collection_management_permissions'),
('authentication', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='WebSettings',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('logo', models.ImageField(help_text='Logo image for header, footer etc.', upload_to='', verbose_name='logo')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.site')),
],
options={
'abstract': False,
},
),
]
| 0
| 0
| 0
| 743
| 0
| 0
| 0
| 30
| 68
|
b442fb3307b5147128f913d2434d01da33a6bf32
| 12,664
|
py
|
Python
|
feichangzun/allflight.py
|
Octoberr/feivhangzunpac
|
af080c9fac777b80c053ce187b8eec6e4b29b2e5
|
[
"Apache-2.0"
] | null | null | null |
feichangzun/allflight.py
|
Octoberr/feivhangzunpac
|
af080c9fac777b80c053ce187b8eec6e4b29b2e5
|
[
"Apache-2.0"
] | null | null | null |
feichangzun/allflight.py
|
Octoberr/feivhangzunpac
|
af080c9fac777b80c053ce187b8eec6e4b29b2e5
|
[
"Apache-2.0"
] | null | null | null |
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}
feichangzun = 'http://www.variflight.com'
allUrl = "http://www.variflight.com/sitemap.html?AE71649A58c77="
pausetime = 1000
if __name__ == '__main__':
fp = FCZPAC()
fp.start()
# flightdata = fp.getchuanghanglist()
# flightlink = flightdata.flightlink
# fp.getListData(flightlink)
# fp.getaflightinfo(['/schedule/SZX-CTU-3U3033.html?AE71649A58c77='])
| 44.591549
| 108
| 0.52669
|
import requests
from bs4 import BeautifulSoup
from time import sleep
from retrying import retry
import json
import re
import pymongo
import datetime
from feichangzun import config
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}
feichangzun = 'http://www.variflight.com'
allUrl = "http://www.variflight.com/sitemap.html?AE71649A58c77="
pausetime = 1000
class HANDL:
def __init__(self, flight, flightlink):
self.flight = flight
self.flightlink = flightlink
class FCZPAC:
@retry(wait_fixed=pausetime)
def getoneipaddress(self):
try:
r = requests.get('http://127.0.0.1:5010/get/')
proxy = BeautifulSoup(r.text, "lxml").get_text()
ip = 'http://' + proxy
proxies = {
"http": ip
}
print(proxies)
except:
print("no more ip address pleasewaite {} seconds".format(30))
raise IOError("no more ip address.")
try:
startHtml = requests.get('http://icanhazip.com ', headers=headers, proxies=proxies)
except:
            deleteurl = 'http://127.0.0.1:5010/delete/?proxy='+proxy
con = requests.get(deleteurl)
print("cant connect, waite {} seconds".format(pausetime/1000))
raise IOError("cant connect.")
return proxies
def getquerydate(self, aircarfNo):
client = pymongo.MongoClient(host=config.mongo_config['host'], port=config.mongo_config['port'])
db = client.swmdb
eagleyedates = db.runtest
cursor = eagleyedates.find({"Info.fno": aircarfNo}, {"Info.Date": 1}).sort("Info.Date", -1).limit(1)
for el in cursor:
havedate = datetime.datetime.strptime(el["Info"]['Date'], "%Y-%m-%dT%H:%M:%S").date()
return havedate
def insertintomongo(self, flightdata):
client = pymongo.MongoClient(host=config.mongo_config['host'], port=config.mongo_config['port'])
db = client.swmdb
eagleyedates = db.runtest
eagleyedates.insert(flightdata)
print(datetime.datetime.now(), 'insert mongodb success')
@retry
def getchuanghanglist(self):
# ips = self.getoneipaddress()
        # send the request
startHtml = requests.get(allUrl, headers=headers)
sleep(1)
Soup = BeautifulSoup(startHtml.text, 'lxml')
allA = Soup.find('div', class_='f_content').find_all('a')
flight = []
flightlink = []
for i in range(1, len(allA)):
if '3U' in allA[i].get_text():
flight.append(allA[i].get_text())
flightlink.append(allA[i].get('href'))
return HANDL(flight, flightlink)
def jangeListHtml(self, url, listHtml, ips):
text = listHtml.find('p').get_text()
jsonstr = json.loads(text)['msg']
if jsonstr == 'IP blocked':
            proxy = ips['http'].split('http://')[1]  # remove the invalid IP from the pool
delurl = 'http://127.0.0.1:5010/delete/?proxy='+proxy
invaild = requests.get(delurl)
newip = self.getoneipaddress()
print('get a new ip')
newlistHtml = requests.get(url, headers=headers, proxies=newip)
listSoup = BeautifulSoup(newlistHtml.text, 'lxml')
return listSoup, newip
else:
return listHtml, ips
@retry
def getListData(self, flightlink, flightstr):
ips = self.getoneipaddress()
today = datetime.datetime.now().date()
allflightLink = []
for i in range(len(flightlink)):
flightlist = []
alreadydate = self.getquerydate(flightstr[i])
print("ๆฅ่ฏข็ปๆ", alreadydate)
if alreadydate is not None:
looptimes = (today + datetime.timedelta(days=7) - alreadydate).days
tmpurl = (feichangzun + flightlink[i]).split('=')[0]
for n in range(1, looptimes+1):
querydate = alreadydate + datetime.timedelta(days=n)
url = tmpurl + '&fdate={}'.format(querydate.strftime("%Y%m%d"))
print("ๅ้่ฏทๆฑ")
# ๅ้่ฏทๆฑ
listHtml = requests.get(url, headers=headers, proxies=ips)
sleep(1)
testlistSoup = BeautifulSoup(listHtml.text, 'lxml')
print("่ทๅพ็ปๆ", testlistSoup)
jangedata = self.jangeListHtml(url, testlistSoup, ips)
listSoup = jangedata[0]
ips = jangedata[1]
listUrl = listSoup.find('div', class_='fly_list')
if listUrl is not None:
listhref = listUrl.find('div', class_='li_box').find_all('a')
for link in listhref:
if '/schedule' in link.get('href'):
print('find a schedule link')
flightlist.append(link.get('href'))
else:
print("no data:", n)
continue
allflightLink.append(flightlist)
elif alreadydate is None:
# print("ๅฝๆฅ่ฏข็ปๆไธบ็ฉบ็ๆถๅ")
tmpurl2 = (feichangzun + flightlink[i]).split('=')[0]
# print("็ฉบlink", tmpurl2)
for n in range(1, 7):
querydate2 = today + datetime.timedelta(days=n)
url2 = tmpurl2 + '&fdate={}'.format(querydate2.strftime("%Y%m%d"))
# print("็ฉบๆฅ่ฏขlink", url2)
# ๅ้่ฏทๆฑ
listHtml2 = requests.get(url2, headers=headers, proxies=ips)
sleep(1)
testlistSoup2 = BeautifulSoup(listHtml2.text, 'lxml')
jangedata = self.jangeListHtml(url2, testlistSoup2, ips)
listSoup2 = jangedata[0]
ips = jangedata[1]
listUrl2 = listSoup2.find('div', class_='fly_list')
if listUrl2 is not None:
listhref2 = listUrl2.find('div', class_='li_box').find_all('a')
for link2 in listhref2:
if '/schedule' in link2.get('href'):
                                print('found a schedule link (no previously stored date)')
flightlist.append(link2.get('href'))
else:
break
allflightLink.append(flightlist)
        return allflightLink  # [[links for one flight], [...]]
@retry
    def getaflightinfo(self, aflight, ips):  # takes one flight's list of [link]s and collects that flight's info
flightinfolist = []
newips = ips
for el in aflight:
flightinfo = {}
url = feichangzun + el
            # send the request
listHtml = requests.get(url, headers=headers, proxies=newips)
sleep(1)
testlistSoup = BeautifulSoup(listHtml.text, 'lxml')
jangedata = self.jangeListHtml(url, testlistSoup, newips)
listSoup = jangedata[0]
newips = jangedata[1]
qfcity = listSoup.find('div', class_='cir_l curr').get_text().strip()
ddcity = listSoup.find('div', class_='cir_r').get_text().strip()
code = el.split('/')[2].split('-')
qfcitycode = code[0]
ddcitycode = code[1]
fno = code[2].split('.')[0]
city = listSoup.find_all('div', class_='fly_mian')
qfsimple = city[0].find('h2').get('title').split(qfcity)[1]
if 'T' in qfsimple:
qfTerminal = 'T' + qfsimple.split('T')[1]
else:
qfTerminal = ""
qf = qfcity + " " + qfsimple
ddsimple = city[len(city)-1].find('h2').get('title').split(ddcity)[1]
if 'T' in ddsimple:
ddTerminal = 'T' + ddsimple.split('T')[1]
else:
ddTerminal = ""
dd = ddcity + " " + ddsimple
qftimestr = city[0].find('span', class_='date').get_text().strip()
qfdate = re.compile('\d{4}[-/]\d{2}[-/]\d{2}').findall(qftimestr)
qftime = qfdate[0] + "T" + re.compile('\d{2}[:/]\d{2}').findall(qftimestr)[0]
ddtimestr = city[len(city)-1].find('span', class_='date').get_text().strip()
dddate = re.compile('\d{4}[-/]\d{2}[-/]\d{2}').findall(ddtimestr)
ddtime = dddate[0] + "T" + re.compile('\d{2}[:/]\d{2}').findall(ddtimestr)[0]
state = listSoup.find('div', class_='reg').get_text()
if state == '่ฎกๅ':
stateid = 1
else:
stateid = 0
flightinfo['qf'] = qf
flightinfo['qf_city'] = qfcity
flightinfo['qf_citycode'] = qfcitycode
flightinfo['qf_simple'] = qfsimple
flightinfo['dd'] = dd
flightinfo['dd_simple'] = ddsimple
flightinfo['dd_city'] = ddcity
flightinfo['dd_citycode'] = ddcitycode
flightinfo['qfTerminal'] = qfTerminal
flightinfo['ddTerminal'] = ddTerminal
flightinfo['jhqftime_full'] = qftime
flightinfo['sjqftime_full'] = None
flightinfo['jhddtime_full'] = ddtime
flightinfo['sjddtime_full'] = None
flightinfo['State'] = state
flightinfo['stateid'] = stateid
flightinfo['djk'] = '--'
flightinfo['zjgt'] = '--'
flightinfo['xlzp'] = '--'
flightinfo['date'] = qfdate[0]
flightinfo['fno'] = fno
print('get a schedule from a schedule list')
flightinfolist.append(flightinfo)
return flightinfolist, newips
def start(self):
flightdata = self.getchuanghanglist()
flightlink = flightdata.flightlink
flightstr = flightdata.flight
listLink = self.getListData(flightlink, flightstr)
ips = self.getoneipaddress()
for flight in listLink:
flightdic = {}
info = {}
flightinfodata = self.getaflightinfo(flight, ips)
flightinfo = flightinfodata[0]
ips = flightinfodata[1]
if len(flightinfo) == 1:
init = 0
info['from'] = flightinfo[init]['qf']
info['to'] = flightinfo[init]['dd']
info['from_simple'] = flightinfo[init]['qf_simple']
info['to_simple'] = flightinfo[init]['dd_simple']
info['FromTerminal'] = flightinfo[init]['qfTerminal']
info['ToTerminal'] = flightinfo[init]['ddTerminal']
info['from_city'] = flightinfo[init]['qf_city']
info['to_city'] = flightinfo[init]['dd_city']
info['from_code'] = flightinfo[init]['qf_citycode']
info['to_code'] = flightinfo[init]['dd_citycode']
info['fno'] = flightinfo[init]['fno']
info['Company'] = '3U'
info['Date'] = flightinfo[init]['date']+"T00:00:00"
info['zql'] = ""
else:
init = 1
info['from'] = flightinfo[init]['qf']
info['to'] = flightinfo[init]['dd']
info['from_simple'] = flightinfo[init]['qf_simple']
info['to_simple'] = flightinfo[init]['dd_simple']
info['FromTerminal'] = flightinfo[init]['qfTerminal']
info['ToTerminal'] = flightinfo[init]['ddTerminal']
info['from_city'] = flightinfo[init]['qf_city']
info['to_city'] = flightinfo[init]['dd_city']
info['from_code'] = flightinfo[init]['qf_citycode']
info['to_code'] = flightinfo[init]['dd_citycode']
info['fno'] = flightinfo[init]['fno']
info['Company'] = '3U'
info['Date'] = flightinfo[init]['date']+"T00:00:00"
info['zql'] = ""
flightdic['Info'] = info
flightdic['List'] = flightinfo
# jsondatar = json.dumps(flightdic, ensure_ascii=False, separators=(',', ':')).encode('utf-8')
# with open('flight.json', 'w') as outfile:
# json.dump(flightdic, outfile)
self.insertintomongo(flightdic)
if __name__ == '__main__':
fp = FCZPAC()
fp.start()
# flightdata = fp.getchuanghanglist()
# flightlink = flightdata.flightlink
# fp.getListData(flightlink)
# fp.getaflightinfo(['/schedule/SZX-CTU-3U3033.html?AE71649A58c77='])
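
The scraper's resilience comes from the local proxy pool (the 127.0.0.1:5010 get/delete endpoints above) plus retrying. Below is a stripped-down sketch of that fetch-with-rotating-proxy loop; the pool endpoints and headers are taken from the file, while the function name, attempt count and timeouts are illustrative.

import requests

PROXY_POOL_GET = 'http://127.0.0.1:5010/get/'
PROXY_POOL_DELETE = 'http://127.0.0.1:5010/delete/?proxy={}'

def fetch_with_rotating_proxy(url, attempts=3, timeout=10):
    # Pull a proxy from the pool, try the request through it, and tell the
    # pool to drop the proxy if the request fails. Assumes the pool returns
    # a bare "host:port" string, as the original code expects.
    for _ in range(attempts):
        proxy = requests.get(PROXY_POOL_GET, timeout=timeout).text.strip()
        proxies = {'http': 'http://' + proxy}
        try:
            return requests.get(url, headers=headers, proxies=proxies, timeout=timeout)
        except requests.RequestException:
            requests.get(PROXY_POOL_DELETE.format(proxy), timeout=timeout)
    raise IOError('no working proxy found')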
| 231
| 7,619
| 0
| 4,178
| 0
| 0
| 0
| -18
| 245
|
3c6e114d1d7026284730233e91143fd1decf8300
| 1,103
|
py
|
Python
|
Gauss_elimination.py
|
subhamkd/Numerical-methods-Grad
|
0d89e56de3d8db6b0b16b79711ac5559e111f5bf
|
[
"MIT"
] | null | null | null |
Gauss_elimination.py
|
subhamkd/Numerical-methods-Grad
|
0d89e56de3d8db6b0b16b79711ac5559e111f5bf
|
[
"MIT"
] | null | null | null |
Gauss_elimination.py
|
subhamkd/Numerical-methods-Grad
|
0d89e56de3d8db6b0b16b79711ac5559e111f5bf
|
[
"MIT"
] | null | null | null |
from numpy import zeros
n=6 # number of equations
A=[[10.0, -1.0, 4.0, 0.0, 2.0, 9.0, 19.0], [0.0, 25.0, -2.0, 7.0, 8.0, 4.0, 2.0], [1.0, 0.0, 15.0, 7.0, 3.0, -2.0, 13.0], [6.0, -1.0, 2.0, 23.0, 0.0, 8.0, -7.0], [-4.0, 2.0, 0.0, 5.0, -25.0, 3.0, -9.0], [0.0, 7.0, -1.0, 5.0, 4.0, -22.0, 2.0]] #the augmented matrix
x = zeros(n) # solution matrix
x=GE(A)
print(x)
| 30.638889
| 264
| 0.468722
|
from numpy import zeros
import time
def GE(A):
# function for determining solution using Gaussian Elimination
# Forward Elimination step
start=time.time()
for i in range(n):
if A[i][i] == 0.0:
t=A[i]
A[i]=A[i+1]
A[i+1]=t
for j in range(i+1, n):
c = A[j][i]/A[i][i]
for k in range(n+1):
A[j][k] = A[j][k] - c * A[i][k]
# Backward Substitution step
x[n-1] = A[n-1][n]/A[n-1][n-1]
for i in range(n-2,-1,-1):
x[i] = A[i][n]
for j in range(i+1,n):
x[i] = x[i] - A[i][j]*x[j]
x[i] = x[i]/A[i][i]
end=time.time()
runtime=end - start
print("Calculation time taken is : " + str(runtime))
return x
n=6 # number of equations
A=[[10.0, -1.0, 4.0, 0.0, 2.0, 9.0, 19.0], [0.0, 25.0, -2.0, 7.0, 8.0, 4.0, 2.0], [1.0, 0.0, 15.0, 7.0, 3.0, -2.0, 13.0], [6.0, -1.0, 2.0, 23.0, 0.0, 8.0, -7.0], [-4.0, 2.0, 0.0, 5.0, -25.0, 3.0, -9.0], [0.0, 7.0, -1.0, 5.0, 4.0, -22.0, 2.0]] #the augmented matrix
x = zeros(n) # solution matrix
x=GE(A)
print(x)
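A quick way to sanity-check the elimination routine above is to solve the same augmented system with NumPy's built-in solver. This is a hedged sketch, not part of the original file; the names aug, coeffs, rhs and reference are illustrative only.

import numpy as np

# Same augmented matrix as in the listing above: 6 equations, 7 columns (A | b)
aug = np.array([[10.0, -1.0, 4.0, 0.0, 2.0, 9.0, 19.0],
                [0.0, 25.0, -2.0, 7.0, 8.0, 4.0, 2.0],
                [1.0, 0.0, 15.0, 7.0, 3.0, -2.0, 13.0],
                [6.0, -1.0, 2.0, 23.0, 0.0, 8.0, -7.0],
                [-4.0, 2.0, 0.0, 5.0, -25.0, 3.0, -9.0],
                [0.0, 7.0, -1.0, 5.0, 4.0, -22.0, 2.0]])
coeffs, rhs = aug[:, :-1], aug[:, -1]      # split into coefficient matrix and right-hand side
reference = np.linalg.solve(coeffs, rhs)   # LAPACK-based direct solve
print(reference)                           # should agree with GE(A) up to rounding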
| 0
| 0
| 0
| 0
| 0
| 704
| 0
| -10
| 45
|
39d574e23e5afc9e16a82e90c23924da5242a6a9
| 3,290
|
py
|
Python
|
ML_prep-master/ML_prep-master/Practical_Coding_Challenges/Solutions/Tic_Tac_Toe_EX3.py
|
anushka-DS/DS-Interview-Prep
|
c331769f8f2d11e167d782b3b2e48ca4709d9b54
|
[
"CC-BY-4.0"
] | 1
|
2022-01-01T07:18:42.000Z
|
2022-01-01T07:18:42.000Z
|
ML_prep-master/ML_prep-master/Practical_Coding_Challenges/Solutions/Tic_Tac_Toe_EX3.py
|
anushka-DS/DS-Interview-Prep
|
c331769f8f2d11e167d782b3b2e48ca4709d9b54
|
[
"CC-BY-4.0"
] | null | null | null |
ML_prep-master/ML_prep-master/Practical_Coding_Challenges/Solutions/Tic_Tac_Toe_EX3.py
|
anushka-DS/DS-Interview-Prep
|
c331769f8f2d11e167d782b3b2e48ca4709d9b54
|
[
"CC-BY-4.0"
] | null | null | null |
"""
Code up the game tic tac toe
1 class solution
"""
if __name__ == "__main__":
t = TicTacToe()
t.print_board()
t.play_game()
| 26.967213
| 120
| 0.518541
|
"""
Code up the game tic tac toe
1 class solution
"""
class TicTacToe():
def __init__(self):
self.board = self.create_board()
self.current = 'X'
def create_board(self):
"Create the board for Tic Tac Toe"
board = [['', '', ''],
['', '', ''],
['', '', '']]
return board
def print_board(self):
"""
Print the board just to check
"""
for row in self.board:
print(row)
def check_rows(self):
"""
Check all the rows for the current player
"""
for row in self.board:
if all([True if cell == self.current else False for cell in row]):
return True
return False
def check_diag(self):
"""
Check if the current player has completed either of the diagonals
"""
if self.board[0][0] == self.current and self.board[1][1] == self.current and self.board[2][2] == self.current:
return True
elif self.board[0][2] == self.current and self.board[1][1] == self.current and self.board[2][0] == self.current:
return True
else:
return False
def check_cols(self):
"""
Check if any of the columns have been completed.
"""
for i in range(3):
if all([True if self.board[j][i] == self.current else False for j in range(3)]):
return True
return False
def make_move(self, move):
curr_player = self.current
# Make a move
row, col = move
if self.board[row][col] == '':
self.board[row][col] = curr_player
return True
else:
print("Position is already filled with {self.board[row][col]}")
return False
def change_player(self):
"""
        Change the player whose turn it is
"""
if self.current == 'X':
self.current = 'O'
else:
self.current = 'X'
def check_board(self):
"""
        Check if the current player has won the game
"""
won_game = False
# Check if any of the rows have been completed
if self.check_rows() or self.check_cols() or self.check_diag():
self.print_board()
won_game = True
return won_game
def play_game(self):
# Print message
print(f"Player 1 is X and Player2 is O. Player 1 goes first")
for i in range(9):
# Current Player makes a move
while True:
print(f"Player {self.current} needs to enter a move")
move = tuple(int(x.strip()) for x in input().split(','))
# Make the move and check if it is valid
if self.make_move(move):
break
# Check if the game has been won
if self.check_board():
print(f"Player {self.current} has already won.Congratulations")
return
# Change the turn of the player
self.change_player()
self.print_board()
print("Nobody won the game. Well played guys")
if __name__ == "__main__":
t = TicTacToe()
t.print_board()
t.play_game()
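As a minimal, non-interactive exercise of the win-detection methods above (check_rows, check_cols, check_diag), the board state can be set directly instead of going through play_game(). This is a hedged sketch that assumes the TicTacToe class defined above is in scope.

# Set the board directly and check the win-detection logic without input()
t = TicTacToe()
t.board = [['X', 'X', 'X'],
           ['O', 'O', ''],
           ['', '', '']]
t.current = 'X'
assert t.check_rows() and t.check_board()   # top row is completed by X
t.current = 'O'
assert not t.check_board()                  # O has no completed row, column, or diagonal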
| 0
| 0
| 0
| 3,126
| 0
| 0
| 0
| 0
| 23
|
afc3f86a3c27c3e0823794ed1f7c2e0e76c6fd50
| 372
|
py
|
Python
|
data/contacts.py
|
anreysolovyev/python_training
|
ab109a4a64997ea4a668ec7004c2169d962c8d61
|
[
"Apache-2.0"
] | null | null | null |
data/contacts.py
|
anreysolovyev/python_training
|
ab109a4a64997ea4a668ec7004c2169d962c8d61
|
[
"Apache-2.0"
] | null | null | null |
data/contacts.py
|
anreysolovyev/python_training
|
ab109a4a64997ea4a668ec7004c2169d962c8d61
|
[
"Apache-2.0"
] | null | null | null |
from model.contact import Contact
testdata = [
Contact(firstname="name1", lastname="lastname1", middlename="middlename1",
nickname="nickname1", title="title1", company="company1",
address="address1", homephone="homephone1", mobilephone="mobphone1",
workphone="workphone1", fax="fax1", email="email1", secondaryphone="secphone1")
]
| 41.333333
| 91
| 0.680108
|
from model.contact import Contact
testdata = [
Contact(firstname="name1", lastname="lastname1", middlename="middlename1",
nickname="nickname1", title="title1", company="company1",
address="address1", homephone="homephone1", mobilephone="mobphone1",
workphone="workphone1", fax="fax1", email="email1", secondaryphone="secphone1")
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d9220984cf73f571b9944f46ad7558d5aec58bfd
| 20,880
|
py
|
Python
|
ukpsummarizer-be/cplex/python/docplex/docplex/cp/config.py
|
avineshpvs/vldb2018-sherlock
|
5e116f42f44c50bcb289be3c4b4b76e29b238c18
|
[
"Apache-2.0"
] | 2
|
2019-01-13T08:41:00.000Z
|
2021-03-27T22:55:10.000Z
|
ukpsummarizer-be/cplex/python/docplex/docplex/cp/config.py
|
AIPHES/vldb2018-sherlock
|
3746efa35c4c1769cc4aaeb15aeb9453564e1226
|
[
"Apache-2.0"
] | null | null | null |
ukpsummarizer-be/cplex/python/docplex/docplex/cp/config.py
|
AIPHES/vldb2018-sherlock
|
3746efa35c4c1769cc4aaeb15aeb9453564e1226
|
[
"Apache-2.0"
] | 4
|
2018-11-06T16:12:55.000Z
|
2019-08-21T13:22:32.000Z
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
Configuration of the CP Optimizer Python API
This module is the top-level handler of the configuration parameters for
the CP Optimizer Python API. It contains the default values of the different
configuration parameters.
It should NOT be changed directly.
The preferable way is to add at least one of the following files that contain the changes
to be performed:
* *cpo_config.py*, a local set of changes on these parameters,
* *cpo_config_<hostname>.py*, a hostname dependent set of changes.
* *docloud_config.py* (for DOcloud url and key, file shared with docplex.mp package).
Final set of parameters is obtained by reading first this module, and then those
listed above.
These modules should be visible from the *PYTHONPATH* and are loaded in
this order to overwrite default values.
This module also defines two global variables:
* *DOCLOUD_CONTEXT*, that contains the configuration necessary to solve a model on DOcloud.
This context is the context by default, referenced by the global variable 'context'.
* *LOCAL_CONTEXT*, that contains the configuration appropriate to solve a model with a local
installation of the CPO solver.
This configuration is available for solver with version number greater or equal to 12.7.0.
The method :meth:`set_default` allows to set the default configuration to one that is predefined,
or another that has been totally customized.
If called as main, this module prints the actual configuration on standard output, including
all customizations made using the mechanism described above.
Following sections describe the most important parameters that can be easily modified to customize
the behavior of the Python API.
All available parameters are available by consulting the source code of this module.
General parameters
------------------
*context.log_output = sys.stdout*
This parameter contains the default log stream.
By default it is set to the standard output.
A value of *None* can be used to disable all logs.
*context.verbose = 0*
This parameter controls the verbosity level of the log, between 0 and 9, if *log_output* is not None.
The default value of 0 means no log.
*context.model.add_source_location = True*
This parameter indicates that when the model is transformed into CPO format, additional information is added
to correlate expressions with the Python file and line where it has been generated.
If any error is raised by the solver during the solve, this information is provided in the
error description, which allows for easier debugging.
*context.model.length_for_alias = 15*
This parameter allows to associate a shorter alias to variables whose name is longer than the given length.
In the CPO representation of the model, variable is declared with its original name and an alias is created
to use it with a shorter name in model expressions, allowing to reduce the size of the generated CPO format.
    In the returned solution, variables can still be retrieved by their original names.
By default, the value is 15. A value of None would indicate to always keep original variable names.
*context.model.length_for_rename = None*
This parameter allows to replace the names of the variables when it is longer than the given length.
A shorter name is generated and is used everywhere in the generated model CPO format in place of the original name.
This allows to drastically reduce the size of the model generated in the CPO format.
In the returned solution, the value of such variables can be retrieved thanks to a mapping between previous and
new names, that is maintained in the client Python program.
By default, the value is None, indicating to keep original variable names.
*context.model.name_all_constraints = False*
This parameter enables the naming of all constraints when the model is generated in CPO format.
It is mandatory only if the *refine conflict* function is called.
Anyway, if the *refine conflict* function is called, and if the CPO format of the model has already been generated,
it is generated again with this option set in order to allow proper completion of the request.
Setting it to *True* is preferable only if *refine conflict* function is called on a big model.
*context.model.dump_directory = None*
This parameter gives the name of a directory where the CPO files that are generated for solving models are stored
for logging purpose.
If not None, the directory is created and generated models are stored in files named `<model_name>.cpo`.
*context.model.cache.size = 10000*
This parameter gives the maximum capacity of the internal cache used to speed-up conversion of Python expressions
into CPO expressions.
*context.model.cache.active = True*
This parameter allows to enable or disable the expression cache mechanism.
    Value is a boolean (True or False). Default value is True.
*context.params.\**
The parameter `context.params` is an instance of the class
:class:`~docplex.cp.parameters.CpoParameters` (in :doc:`parameters.py</docplex.cp.parameters.py>`)
which describes all of the public solver parameters as properties.
The default configuration limits the solving time to 100 seconds by using following settings:
::
context.params.TimeMode = "ElapsedTime"
context.params.TimeLimit = 100
These parameters may have a different default setting if the solver is not *DOcplexcloud*.
Configuration of the model solving
----------------------------------
*context.solver.trace_cpo = False*
This parameter indicates to trace the CPO model that is generated before submitting it for solving.
The model is printed on the `context.log_output stream`, if given.
*context.solver.trace_log = False*
This parameter indicates to trace the log generated by the solver when solving the CPO model.
The log is printed on the `context.log_output stream`, if given.
The default value of this parameter is False for a solve on the cloud, and True for a local solve.
*context.solver.enable_undocumented_params = False*
This parameter allows to enable the possibility to set solving parameters that are not in the public parameters
detailed in the class
:class:`~docplex.cp.parameters.CpoParameters` (in :doc:`parameters.py</docplex.cp.parameters.py>`).
*context.solver.add_log_to_solution = True*
This parameter indicates to add the solver log content to the solution object.
By default, this parameter is True but it can be set to False if the log is very big or of no interest.
*context.solver.agent = 'docloud'*
This parameter specifies the name of the solver agent that is used to solve the model.
The value of this parameter is the name of a child context of `context.solver`, which contains necessary attributes
that allow to create and run the required agent.
There are two different agents described in the default configuration file:
* `docloud`, the default agent, for solving a CPO model using the DOcplexcloud service.
* `local`, the agent allowing to solve models locally using the CP Optimizer Interactive coming with
versions of COS greater or equal to 12.7.0.
If the CP Optimizer Interactive program *cpoptimizer(.exe)* is detected in the system path, the default solver
agent is automatically set to *local* instead of *docloud*.
*context.solver.log_prefix = "[Solver] "*
Prefix that is added to every message that is logged by the solver component.
Configuration of the `docloud` solving agent
--------------------------------------------
*context.solver.docloud.url = "https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/"*
This parameter is used to specify the URL of the *DOcplexcloud* service.
*context.solver.docloud.key = "'Set your key in docloud_config.py'"*
This parameter contains the personal key for authorizing access to the *DOcplexcloud* service.
Access credentials (base URL and access key) can be retrieved after registration from `<http://developer.ibm.com/docloud/docs/api-key/>`_.
*context.solver.docloud.verify_ssl = True*
This parameter allows to enable/disable the verification of SSL certificates.
*context.solver.docloud.proxies = None*
This parameter allows to optionally define proxies to be used in the connection with *DOcplexcloud*.
It is a Python dictionary protocol_name / endpoint, as described in http://docs.python-requests.org/en/master/user/advanced/#proxies.
*context.solver.docloud.request_timeout = 30*
This parameter contains the maximum time, in seconds, that a response is waited for after a unitary request to *DOcplexcloud* server.
*context.solver.docloud.result_wait_extra_time = 60*
This parameter is a time in seconds added to the expected solve time to compute the total result waiting timeout.
*context.solver.docloud.clean_job_after_solve = True*
This parameter indicates whether the job is automatically cleaned after the model is solved.
If not set to True, the model stays on the *DOcplexcloud* server and is visible from its *DropSolve* interface.
Note that the server may block future solving requests if there are too many jobs waiting.
*context.solver.docloud.polling = Context(min=1, max=3, incr=0.2)*
This parameter describes how the Python client polls the result of the solve on *DOcplexcloud*.
Polling delay is inside an interval [min, max], starting by min, growing to max with the given increment.
Configuration of the `local` solving agent
------------------------------------------
*context.solver.local.execfile*
Name or full path of the CP Optimizer Interactive executable file.
By default, it is set to *cpoptimizer(.exe)*, which supposes that the program is visible from the system path.
Configuration for best performances
-----------------------------------
To configure the CP Python API for best performances, the following configuration settings may be used.
Obviously, this performance is won at the cost of the loss of some features that may be useful in other cases.
::
context.verbose = 0
context.model.add_source_location = False
context.model.length_for_rename = 10
context.model.name_all_constraints = False
context.model.dump_directory = None
context.solver.trace_cpo = False
context.solver.trace_log = False
context.solver.add_log_to_solution = False
Detailed description
--------------------
"""
from docplex.cp.utils import Context, CpoException, search_file_in_path, IS_IN_NOTEBOOK, is_string
from docplex.cp.parameters import CpoParameters, ALL_PARAMETER_NAMES
import sys, socket, os, platform, traceback
try:
import docplex.util.environment as runenv
ENVIRONMENT_PRESENT = True
except:
ENVIRONMENT_PRESENT = False
EXE_EXTENSION = ".exe" if platform.system() == 'Windows' else ""
##############################################################################
## Define default context for DOcloud solving
##############################################################################
#-----------------------------------------------------------------------------
# Global context
# Create default context infrastructure
DOCLOUD_CONTEXT = Context(model=Context(),
params=CpoParameters(),
solver=Context())
context = DOCLOUD_CONTEXT
# Default log output
context.log_output = sys.stdout
# Default log verbosity
context.verbose = 0
# Visu enable indicator (internal, can be disabled for testing purpose)
context.visu_enabled = True
#-----------------------------------------------------------------------------
# Modeling context
# Indicate to add source location in model
context.model.add_source_location = True
# Minimal variable name length that triggers the use of a shorter alias. None for no alias.
context.model.length_for_alias = 15
# Minimal variable name length that triggers renaming the variable with a shorter name. None for no rename.
context.model.length_for_rename = None
# Automatically add a name to every top-level constraint
context.model.name_all_constraints = False
# Name of the directory where a copy of the generated CPO files is stored. None for no dump.
context.model.dump_directory = None
# Expression cache
context.model.cache = Context()
context.model.cache.size = 10000
context.model.cache.active = True
#-----------------------------------------------------------------------------
# Solving parameters
# Default time limit
context.params.TimeLimit = 100
# Workers count
context.params.Workers = 4
#-----------------------------------------------------------------------------
# Solving context
# Indicate to trace CPO model before solving
context.solver.trace_cpo = False
# Indicate to trace solver log on log_output.
context.solver.trace_log = False
# Enable undocumented parameters
context.solver.enable_undocumented_params = False
# Max number of threads allowed for model solving
context.solver.max_threads = None
if ENVIRONMENT_PRESENT:
context.solver.max_threads = runenv.get_environment().get_available_core_count()
# Indicate to add solver log to the solution
context.solver.add_log_to_solution = True
# Indicate to auto-publish solve details and results in environment
context.solver.auto_publish = True
# Indicate to replace simple solve by a start/next loop
context.solver.solve_with_start_next = False
# Log prefix
context.solver.log_prefix = "[Solver] "
# Name of the agent to be used for solving. Value is the name of one of this context's child contexts (e.g. 'docloud').
context.solver.agent = 'docloud'
#-----------------------------------------------------------------------------
# DoCloud solving agent context
context.solver.docloud = Context()
# Agent class name
context.solver.docloud.class_name = "docplex.cp.solver.solver_docloud.CpoSolverDocloud"
# Url of the DOCloud service
context.solver.docloud.url = "https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/"
# Authentication key.
context.solver.docloud.key = "'Set your key in docloud_config.py''"
# Secret key.
context.solver.docloud.secret = None
# Indicate to verify SSL certificates
context.solver.docloud.verify_ssl = True
# Proxies (map protocol_name/endpoint, as described in http://docs.python-requests.org/en/master/user/advanced/#proxies)
context.solver.docloud.proxies = None
# Default unitary request timeout in seconds
context.solver.docloud.request_timeout = 30
# Time added to expected solve time to compute the total result waiting timeout
context.solver.docloud.result_wait_extra_time = 60
# Clean job after solve indicator
context.solver.docloud.clean_job_after_solve = True
# Add 'Connection close' in all headers
context.solver.docloud.always_close_connection = False
# Log prefix
context.solver.docloud.log_prefix = "[DOcloud] "
# Polling delay (min, max and increment)
context.solver.docloud.polling = Context(min=1, max=3, incr=0.2)
#-----------------------------------------------------------------------------
# Local solving agent context
context.solver.local = Context(class_name = "docplex.cp.solver.solver_local.CpoSolverLocal",
execfile = "cpoptimizer" + EXE_EXTENSION,
parameters = ['-angel'],
log_prefix = "[Local] ")
LOCAL_CONTEXT = context.clone()
LOCAL_CONTEXT.params.pop('TimeLimit')
LOCAL_CONTEXT.params.pop('Workers')
LOCAL_CONTEXT.solver.trace_log = not IS_IN_NOTEBOOK
LOCAL_CONTEXT.solver.agent = 'local'
LOCAL_CONTEXT.solver.max_threads = None
# Select local context if exec file is visible in the path
cpfile = search_file_in_path(LOCAL_CONTEXT.solver.local.execfile)
if cpfile:
LOCAL_CONTEXT.solver.local.execpath = cpfile
context = LOCAL_CONTEXT
##############################################################################
## Public functions
##############################################################################
def get_default():
""" Get the default context
Default context is also accessible with the global variable 'context' in this module.
Returns:
Current default context
"""
return context
def set_default(ctx):
""" Set the default context.
Default context becomes accessible in the global variable 'context' in this module.
Args:
ctx: New default context
"""
if ctx is None:
ctx = Context()
else:
assert isinstance(ctx, Context), "Context object must be of class Context"
sys.modules[__name__].context = ctx
# Attribute values denoting a default value
DEFAULT_VALUES = ("ENTER YOUR KEY HERE", "ENTER YOUR URL HERE", "default")
def _get_effective_context(**kwargs):
""" Build a effective context from a variable list of arguments that may specify changes to default.
Args:
context (optional): Source context, if not default.
params (optional): Solving parameters (CpoParameters) that overwrite those in the solving context
(others) (optional): All other context parameters that can be changed.
Returns:
Updated (cloned) context
"""
# If 'url' and 'key' are defined, force agent to be docloud
if ('agent' not in kwargs) and not ENVIRONMENT_PRESENT:
url = kwargs.get('url')
key = kwargs.get('key')
if url and key and is_string(url) and is_string(key) and url.startswith('http'):
kwargs['agent'] = 'docloud'
# Determine source context
ctx = kwargs.get('context')
if (ctx is None) or (ctx in DEFAULT_VALUES):
ctx = context
ctx = ctx.clone()
# print("\n*** Source context");
# ctx.print_context()
# First set parameters if given
prms = kwargs.get('params')
if prms is not None:
ctx.params.add(prms)
# Process other changes
rplist = [] # List of replacements to be done in solving parameters
for k, v in kwargs.items():
if (k != 'context') and (k != 'params') and (v not in DEFAULT_VALUES):
rp = ctx.search_and_replace_attribute(k, v)
# If not found, set in solving parameters
if (rp is None):
rplist.append((k, v))
# Replace or set remaining fields in parameters
if rplist:
params = ctx.params
chkparams = not ctx.solver.enable_undocumented_params
if isinstance(params, CpoParameters):
for k, v in rplist:
if chkparams and not k in ALL_PARAMETER_NAMES:
raise CpoException("CPO solver does not accept a parameter named '{}'".format(k))
setattr(params, k, v)
# Return
# print("\n*** Result context");
# ctx.print_context()
return ctx
##############################################################################
## Overload this configuration with other customized configuration python files
##############################################################################
def _eval_file(file):
""" If exists, evaluate the content of a python module in this module.
Args:
file: Python file to evaluate
"""
for f in filter(os.path.isfile, [dir + "/" + file for dir in sys.path]):
try:
exec(open(f).read())
except Exception as e:
traceback.print_exc()
raise Exception("Error while loading config file {}: {}".format(f, str(e)))
# Initialize default list of files to load
FILE_LIST = ("cpo_config.py",
"cpo_config_" + socket.gethostname() + ".py",
"docloud_config.py")
# Load all config changes
for f in FILE_LIST:
_eval_file(f)
##############################################################################
## Print configuration when called as main
##############################################################################
if __name__ == "__main__":
context.print_context()
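Because the files listed in FILE_LIST are exec'd inside this module, a cpo_config.py found on the PYTHONPATH can simply assign to the module-level context. The following is a hedged sketch of such a file; every value shown is illustrative, and the key string is a placeholder, not a real credential.

# Hypothetical cpo_config.py, picked up from the PYTHONPATH at import time.
# The statements run inside docplex.cp.config, so 'context' is already defined.
context.verbose = 2                      # enable moderate logging
context.params.TimeLimit = 30            # shorter solve limit than the default 100s
context.params.Workers = 2               # fewer parallel workers
context.solver.trace_log = True          # echo the solver log
context.solver.docloud.key = "YOUR-DOCLOUD-API-KEY"   # placeholder value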
| 39.028037
| 143
| 0.669109
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
Configuration of the CP Optimizer Python API
This module is the top-level handler of the configuration parameters for
the CP Optimizer Python API. It contains the default values of the different
configuration parameters.
It should NOT be changed directly.
The preferable way is to add at least one of the following files that contain the changes
to be performed:
* *cpo_config.py*, a local set of changes on these parameters,
* *cpo_config_<hostname>.py*, a hostname dependent set of changes.
* *docloud_config.py* (for DOcloud url and key, file shared with docplex.mp package).
Final set of parameters is obtained by reading first this module, and then those
listed above.
These modules should be visible from the *PYTHONPATH* and are loaded in
this order to overwrite default values.
This module also defines two global variables:
* *DOCLOUD_CONTEXT*, that contains the configuration necessary to solve a model on DOcloud.
This context is the context by default, referenced by the global variable 'context'.
* *LOCAL_CONTEXT*, that contains the configuration appropriate to solve a model with a local
installation of the CPO solver.
This configuration is available for solver with version number greater or equal to 12.7.0.
The method :meth:`set_default` allows to set the default configuration to one that is predefined,
or another that has been totally customized.
If called as main, this module prints the actual configuration on standard output, including
all customizations made using the mechanism described above.
Following sections describe the most important parameters that can be easily modified to customize
the behavior of the Python API.
All available parameters are available by consulting the source code of this module.
General parameters
------------------
*context.log_output = sys.stdout*
This parameter contains the default log stream.
By default it is set to the standard output.
A value of *None* can be used to disable all logs.
*context.verbose = 0*
This parameter controls the verbosity level of the log, between 0 and 9, if *log_output* is not None.
The default value of 0 means no log.
*context.model.add_source_location = True*
This parameter indicates that when the model is transformed into CPO format, additional information is added
to correlate expressions with the Python file and line where it has been generated.
If any error is raised by the solver during the solve, this information is provided in the
error description, which allows for easier debugging.
*context.model.length_for_alias = 15*
This parameter allows to associate a shorter alias to variables whose name is longer than the given length.
In the CPO representation of the model, variable is declared with its original name and an alias is created
to use it with a shorter name in model expressions, allowing to reduce the size of the generated CPO format.
    In the returned solution, variables can still be retrieved by their original names.
By default, the value is 15. A value of None would indicate to always keep original variable names.
*context.model.length_for_rename = None*
This parameter allows to replace the names of the variables when it is longer than the given length.
A shorter name is generated and is used everywhere in the generated model CPO format in place of the original name.
This allows to drastically reduce the size of the model generated in the CPO format.
In the returned solution, the value of such variables can be retrieved thanks to a mapping between previous and
new names, that is maintained in the client Python program.
By default, the value is None, indicating to keep original variable names.
*context.model.name_all_constraints = False*
This parameter enables the naming of all constraints when the model is generated in CPO format.
It is mandatory only if the *refine conflict* function is called.
Anyway, if the *refine conflict* function is called, and if the CPO format of the model has already been generated,
it is generated again with this option set in order to allow proper completion of the request.
Setting it to *True* is preferable only if *refine conflict* function is called on a big model.
*context.model.dump_directory = None*
This parameter gives the name of a directory where the CPO files that are generated for solving models are stored
for logging purpose.
If not None, the directory is created and generated models are stored in files named `<model_name>.cpo`.
*context.model.cache.size = 10000*
This parameter gives the maximum capacity of the internal cache used to speed-up conversion of Python expressions
into CPO expressions.
*context.model.cache.active = True*
This parameter allows to enable or disable the expression cache mechanism.
    Value is a boolean (True or False). Default value is True.
*context.params.\**
The parameter `context.params` is an instance of the class
:class:`~docplex.cp.parameters.CpoParameters` (in :doc:`parameters.py</docplex.cp.parameters.py>`)
which describes all of the public solver parameters as properties.
The default configuration limits the solving time to 100 seconds by using following settings:
::
context.params.TimeMode = "ElapsedTime"
context.params.TimeLimit = 100
These parameters may have a different default setting if the solver is not *DOcplexcloud*.
Configuration of the model solving
----------------------------------
*context.solver.trace_cpo = False*
This parameter indicates to trace the CPO model that is generated before submitting it for solving.
The model is printed on the `context.log_output stream`, if given.
*context.solver.trace_log = False*
This parameter indicates to trace the log generated by the solver when solving the CPO model.
The log is printed on the `context.log_output stream`, if given.
The default value of this parameter is False for a solve on the cloud, and True for a local solve.
*context.solver.enable_undocumented_params = False*
This parameter allows to enable the possibility to set solving parameters that are not in the public parameters
detailed in the class
:class:`~docplex.cp.parameters.CpoParameters` (in :doc:`parameters.py</docplex.cp.parameters.py>`).
*context.solver.add_log_to_solution = True*
This parameter indicates to add the solver log content to the solution object.
By default, this parameter is True but it can be set to False if the log is very big or of no interest.
*context.solver.agent = 'docloud'*
This parameter specifies the name of the solver agent that is used to solve the model.
The value of this parameter is the name of a child context of `context.solver`, which contains necessary attributes
that allow to create and run the required agent.
There are two different agents described in the default configuration file:
* `docloud`, the default agent, for solving a CPO model using the DOcplexcloud service.
* `local`, the agent allowing to solve models locally using the CP Optimizer Interactive coming with
versions of COS greater or equal to 12.7.0.
If the CP Optimizer Interactive program *cpoptimizer(.exe)* is detected in the system path, the default solver
agent is automatically set to *local* instead of *docloud*.
*context.solver.log_prefix = "[Solver] "*
Prefix that is added to every message that is logged by the solver component.
Configuration of the `docloud` solving agent
--------------------------------------------
*context.solver.docloud.url = "https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/"*
This parameter is used to specify the URL of the *DOcplexcloud* service.
*context.solver.docloud.key = "'Set your key in docloud_config.py'"*
This parameter contains the personal key for authorizing access to the *DOcplexcloud* service.
Access credentials (base URL and access key) can be retrieved after registration from `<http://developer.ibm.com/docloud/docs/api-key/>`_.
*context.solver.docloud.verify_ssl = True*
This parameter allows to enable/disable the verification of SSL certificates.
*context.solver.docloud.proxies = None*
This parameter allows to optionally define proxies to be used in the connection with *DOcplexcloud*.
It is a Python dictionary protocol_name / endpoint, as described in http://docs.python-requests.org/en/master/user/advanced/#proxies.
*context.solver.docloud.request_timeout = 30*
This parameter contains the maximum time, in seconds, that a response is waited for after a unitary request to *DOcplexcloud* server.
*context.solver.docloud.result_wait_extra_time = 60*
This parameter is a time in seconds added to the expected solve time to compute the total result waiting timeout.
*context.solver.docloud.clean_job_after_solve = True*
This parameter indicates whether the job is automatically cleaned after the model is solved.
If not set to True, the model stays on the *DOcplexcloud* server and is visible from its *DropSolve* interface.
Note that the server may block future solving requests if there are too many jobs waiting.
*context.solver.docloud.polling = Context(min=1, max=3, incr=0.2)*
This parameter describes how the Python client polls the result of the solve on *DOcplexcloud*.
Polling delay is inside an interval [min, max], starting by min, growing to max with the given increment.
Configuration of the `local` solving agent
------------------------------------------
*context.solver.local.execfile*
Name or full path of the CP Optimizer Interactive executable file.
By default, it is set to *cpoptimizer(.exe)*, which supposes that the program is visible from the system path.
Configuration for best performances
-----------------------------------
To configure the CP Python API for best performances, the following configuration settings may be used.
Obviously, this performance is won at the cost of the loss of some features that may be useful in other cases.
::
context.verbose = 0
context.model.add_source_location = False
context.model.length_for_rename = 10
context.model.name_all_constraints = False
context.model.dump_directory = None
context.solver.trace_cpo = False
context.solver.trace_log = False
context.solver.add_log_to_solution = False
Detailed description
--------------------
"""
from docplex.cp.utils import Context, CpoException, search_file_in_path, IS_IN_NOTEBOOK, is_string
from docplex.cp.parameters import CpoParameters, ALL_PARAMETER_NAMES
import sys, socket, os, platform, traceback
try:
import docplex.util.environment as runenv
ENVIRONMENT_PRESENT = True
except:
ENVIRONMENT_PRESENT = False
EXE_EXTENSION = ".exe" if platform.system() == 'Windows' else ""
##############################################################################
## Define default context for DOcloud solving
##############################################################################
#-----------------------------------------------------------------------------
# Global context
# Create default context infrastructure
DOCLOUD_CONTEXT = Context(model=Context(),
params=CpoParameters(),
solver=Context())
context = DOCLOUD_CONTEXT
# Default log output
context.log_output = sys.stdout
# Default log verbosity
context.verbose = 0
# Visu enable indicator (internal, can be disabled for testing purpose)
context.visu_enabled = True
#-----------------------------------------------------------------------------
# Modeling context
# Indicate to add source location in model
context.model.add_source_location = True
# Minimal variable name length that triggers the use of a shorter alias. None for no alias.
context.model.length_for_alias = 15
# Minimal variable name length that triggers renaming the variable with a shorter name. None for no rename.
context.model.length_for_rename = None
# Automatically add a name to every top-level constraint
context.model.name_all_constraints = False
# Name of the directory where a copy of the generated CPO files is stored. None for no dump.
context.model.dump_directory = None
# Expression cache
context.model.cache = Context()
context.model.cache.size = 10000
context.model.cache.active = True
#-----------------------------------------------------------------------------
# Solving parameters
# Default time limit
context.params.TimeLimit = 100
# Workers count
context.params.Workers = 4
#-----------------------------------------------------------------------------
# Solving context
# Indicate to trace CPO model before solving
context.solver.trace_cpo = False
# Indicate to trace solver log on log_output.
context.solver.trace_log = False
# Enable undocumented parameters
context.solver.enable_undocumented_params = False
# Max number of threads allowed for model solving
context.solver.max_threads = None
if ENVIRONMENT_PRESENT:
context.solver.max_threads = runenv.get_environment().get_available_core_count()
# Indicate to add solver log to the solution
context.solver.add_log_to_solution = True
# Indicate to auto-publish solve details and results in environment
context.solver.auto_publish = True
# Indicate to replace simple solve by a start/next loop
context.solver.solve_with_start_next = False
# Log prefix
context.solver.log_prefix = "[Solver] "
# Name of the agent to be used for solving. Value is the name of one of this context's child contexts (e.g. 'docloud').
context.solver.agent = 'docloud'
#-----------------------------------------------------------------------------
# DoCloud solving agent context
context.solver.docloud = Context()
# Agent class name
context.solver.docloud.class_name = "docplex.cp.solver.solver_docloud.CpoSolverDocloud"
# Url of the DOCloud service
context.solver.docloud.url = "https://api-oaas.docloud.ibmcloud.com/job_manager/rest/v1/"
# Authentication key.
context.solver.docloud.key = "'Set your key in docloud_config.py''"
# Secret key.
context.solver.docloud.secret = None
# Indicate to verify SSL certificates
context.solver.docloud.verify_ssl = True
# Proxies (map protocol_name/endpoint, as described in http://docs.python-requests.org/en/master/user/advanced/#proxies)
context.solver.docloud.proxies = None
# Default unitary request timeout in seconds
context.solver.docloud.request_timeout = 30
# Time added to expected solve time to compute the total result waiting timeout
context.solver.docloud.result_wait_extra_time = 60
# Clean job after solve indicator
context.solver.docloud.clean_job_after_solve = True
# Add 'Connection close' in all headers
context.solver.docloud.always_close_connection = False
# Log prefix
context.solver.docloud.log_prefix = "[DOcloud] "
# Polling delay (min, max and increment)
context.solver.docloud.polling = Context(min=1, max=3, incr=0.2)
#-----------------------------------------------------------------------------
# Local solving agent context
context.solver.local = Context(class_name = "docplex.cp.solver.solver_local.CpoSolverLocal",
execfile = "cpoptimizer" + EXE_EXTENSION,
parameters = ['-angel'],
log_prefix = "[Local] ")
LOCAL_CONTEXT = context.clone()
LOCAL_CONTEXT.params.pop('TimeLimit')
LOCAL_CONTEXT.params.pop('Workers')
LOCAL_CONTEXT.solver.trace_log = not IS_IN_NOTEBOOK
LOCAL_CONTEXT.solver.agent = 'local'
LOCAL_CONTEXT.solver.max_threads = None
# Select local context if exec file is visible in the path
cpfile = search_file_in_path(LOCAL_CONTEXT.solver.local.execfile)
if cpfile:
LOCAL_CONTEXT.solver.local.execpath = cpfile
context = LOCAL_CONTEXT
##############################################################################
## Public functions
##############################################################################
def get_default():
""" Get the default context
Default context is also accessible with the global variable 'context' in this module.
Returns:
Current default context
"""
return context
def set_default(ctx):
""" Set the default context.
Default context becomes accessible in the global variable 'context' in this module.
Args:
ctx: New default context
"""
if ctx is None:
ctx = Context()
else:
assert isinstance(ctx, Context), "Context object must be of class Context"
sys.modules[__name__].context = ctx
# Attribute values denoting a default value
DEFAULT_VALUES = ("ENTER YOUR KEY HERE", "ENTER YOUR URL HERE", "default")
def _is_defined(arg, kwargs):
return (arg in kwargs) and kwargs[arg] and (kwargs[arg] not in DEFAULT_VALUES)
def _get_effective_context(**kwargs):
""" Build a effective context from a variable list of arguments that may specify changes to default.
Args:
context (optional): Source context, if not default.
params (optional): Solving parameters (CpoParameters) that overwrite those in the solving context
(others) (optional): All other context parameters that can be changed.
Returns:
Updated (cloned) context
"""
# If 'url' and 'key' are defined, force agent to be docloud
if ('agent' not in kwargs) and not ENVIRONMENT_PRESENT:
url = kwargs.get('url')
key = kwargs.get('key')
if url and key and is_string(url) and is_string(key) and url.startswith('http'):
kwargs['agent'] = 'docloud'
# Determine source context
ctx = kwargs.get('context')
if (ctx is None) or (ctx in DEFAULT_VALUES):
ctx = context
ctx = ctx.clone()
# print("\n*** Source context");
# ctx.print_context()
# First set parameters if given
prms = kwargs.get('params')
if prms is not None:
ctx.params.add(prms)
# Process other changes
rplist = [] # List of replacements to be done in solving parameters
for k, v in kwargs.items():
if (k != 'context') and (k != 'params') and (v not in DEFAULT_VALUES):
rp = ctx.search_and_replace_attribute(k, v)
# If not found, set in solving parameters
if (rp is None):
rplist.append((k, v))
# Replace or set remaining fields in parameters
if rplist:
params = ctx.params
chkparams = not ctx.solver.enable_undocumented_params
if isinstance(params, CpoParameters):
for k, v in rplist:
if chkparams and not k in ALL_PARAMETER_NAMES:
raise CpoException("CPO solver does not accept a parameter named '{}'".format(k))
setattr(params, k, v)
# Return
# print("\n*** Result context");
# ctx.print_context()
return ctx
##############################################################################
## Overload this configuration with other customized configuration python files
##############################################################################
def _eval_file(file):
""" If exists, evaluate the content of a python module in this module.
Args:
file: Python file to evaluate
"""
for f in filter(os.path.isfile, [dir + "/" + file for dir in sys.path]):
try:
exec(open(f).read())
except Exception as e:
traceback.print_exc()
raise Exception("Error while loading config file {}: {}".format(f, str(e)))
# Initialize default list of files to load
FILE_LIST = ("cpo_config.py",
"cpo_config_" + socket.gethostname() + ".py",
"docloud_config.py")
# Load all config changes
for f in FILE_LIST:
_eval_file(f)
##############################################################################
## Print configuration when called as main
##############################################################################
if __name__ == "__main__":
context.print_context()
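The same defaults can also be adjusted programmatically through get_default/set_default rather than via configuration files. A hedged sketch, assuming docplex is installed and that the parameter names used here remain documented public ones.

from docplex.cp import config

ctx = config.get_default().clone()   # work on a copy of the current default context
ctx.params.TimeLimit = 10            # documented public parameter
ctx.solver.trace_log = False         # silence the solver log
config.set_default(ctx)              # promote the copy to the new module default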
| 0
| 0
| 0
| 0
| 0
| 92
| 0
| 0
| 25
|
fe805a61b2b33eac84f19857fd056154b9478764
| 2,175
|
py
|
Python
|
pysol_cards/random_base.py
|
thesamesam/pysol_cards
|
55aeb94601a9f652b0e6001bf8c24179f5eccd27
|
[
"MIT"
] | 4
|
2019-06-20T17:00:46.000Z
|
2021-09-01T22:34:37.000Z
|
pysol_cards/random_base.py
|
thesamesam/pysol_cards
|
55aeb94601a9f652b0e6001bf8c24179f5eccd27
|
[
"MIT"
] | 4
|
2020-03-08T07:09:51.000Z
|
2021-11-25T07:09:25.000Z
|
pysol_cards/random_base.py
|
thesamesam/pysol_cards
|
55aeb94601a9f652b0e6001bf8c24179f5eccd27
|
[
"MIT"
] | 1
|
2021-08-05T20:11:42.000Z
|
2021-08-05T20:11:42.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright 2019 Shlomi Fish <[email protected]>
#
# Distributed under terms of the MIT license.
| 24.715909
| 76
| 0.584368
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Shlomi Fish <[email protected]>
#
# Distributed under terms of the MIT license.
from pysol_cards.errors import SubclassResponsibility
class RandomBase(object):
DEALS_PYSOL = 0
DEALS_PYSOLFC = 1
DEALS_MS = 2
MAX_SEED = 10 ** 20
ORIGIN_UNKNOWN = 0
ORIGIN_RANDOM = 1
ORIGIN_PREVIEW = 2
ORIGIN_SELECTED = 3
ORIGIN_NEXT_GAME = 4
def __init__(self, seed=None):
"""docstring for __init__"""
self.seed_as_string = None
def shuffle(self, seq):
for n in range(len(seq) - 1, 0, -1):
j = self.randint(0, n)
seq[n], seq[j] = seq[j], seq[n]
return seq
def randint(self, a, b):
""" Get a random integer in the range [a, b] including both ends."""
return a + int(self.random() * (b + 1 - a))
def randrange(self, a, b):
""" Get a random integer in the range [a, b) excluding b."""
return self.randint(a, b - 1)
def choice(self, sequence):
""" Pick a random element of sequence """
return sequence[self.randrange(0, len(sequence))]
def setSeedAsStr(self, new_s):
self.seed_as_string = new_s
def getSeedAsStr(self):
if self.seed_as_string:
return self.seed_as_string
return str(self)
def getSeedStr(self):
return str(self.initial_seed)
def __str__(self):
return self.str(self.initial_seed)
def str(self, seed):
return '%020d' % (seed)
def increaseSeed(self, seed):
if seed < self.MAX_SEED:
return seed + 1
return 0
def copy(self):
ret = self.__class__()
ret.__dict__.update(self.__dict__)
return ret
def reset(self):
raise SubclassResponsibility
def _getRandomSeed(self):
import time
ret = int(time.time() * 256.0)
return ((ret ^ (ret >> 24)) % (self.MAX_SEED + 1))
def getstate(self):
"""getstate() for PySolFC"""
return self.seed
def setstate(self, new_state):
"""set to a new state"""
self.seed = new_state
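RandomBase defers random() and reset() to subclasses (reset() raises SubclassResponsibility), so the class is only usable through a concrete generator. The following is a hypothetical minimal subclass wrapping Python's random module; the name PyRandom and its seeding scheme are illustrative and not part of pysol_cards.

import random

class PyRandom(RandomBase):
    def __init__(self, seed=None):
        super().__init__(seed)
        # Fall back to the time-based seed helper when no seed is given
        self.initial_seed = self.seed = seed if seed is not None else self._getRandomSeed()
        self._rng = random.Random(self.seed)

    def random(self):
        return self._rng.random()

    def reset(self):
        self._rng.seed(self.seed)

r = PyRandom(42)
print(r.shuffle(list(range(10))))   # deterministic permutation for a fixed seed
print(r.randint(1, 6))              # uniform integer in [1, 6]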
| 2
| 0
| 0
| 1,932
| 0
| 0
| 0
| 32
| 46
|
49b12c74814b733e89f12d59c80b0437328d4604
| 2,767
|
py
|
Python
|
conceptnet5/builders/morphology.py
|
CollectiWise/conceptnet
|
2998df5a9d287ca72032abb1d9b082747ba97c08
|
[
"Apache-2.0"
] | 1
|
2018-11-27T17:00:57.000Z
|
2018-11-27T17:00:57.000Z
|
conceptnet5/builders/morphology.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
conceptnet5/builders/morphology.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
from collections import defaultdict
from conceptnet5.edges import make_edge
from conceptnet5.formats.msgpack_stream import MsgpackStreamWriter
from conceptnet5.languages import ATOMIC_SPACE_LANGUAGES
from conceptnet5.nodes import split_uri
from conceptnet5.uri import get_uri_language, join_uri, Licenses
def prepare_vocab_for_morphology(language, input, output):
"""
Morfessor's input is a list of terms with their counts. Here, we
read a ConceptNet vocabulary file with counts (core_concept_counts.txt),
filter it for a single language, and convert it into the input form that
Morfessor expects.
We're stripping out the word sense information here, which would cause
the same term to appear multiple times. Because of that, we build up
a new dictionary of counts, summing all occurrences of a term.
We use _ to represent all spaces. In languages where the space-separated
segments are atomic (Vietnamese), we use _ to represent the locations where
subwords are allowed to end, and thus add _ to the end of the term as well.
"""
vocab_counts = defaultdict(int)
for line in input:
countstr, uri = line.strip().split(' ', 1)
if get_uri_language(uri) == language:
term = split_uri(uri)[2]
if language in ATOMIC_SPACE_LANGUAGES:
term += '_'
vocab_counts[term] += int(countstr)
for term, count in sorted(list(vocab_counts.items())):
print(count, term, file=output)
MORPH_SOURCES = [{'process': '/s/rule/morfessor'}]
def subwords_to_edges(language, input, output):
"""
Morfessor hypothesizes ways to break words into sub-word chunks. Produce
edges from these sub-words that can be used in retrofitting.
"""
writer = MsgpackStreamWriter(output)
for line in input:
line = line.rstrip()
if not line or line.startswith('#'):
continue
# Remove the unnecessary count ("1 ") from the start of each line
line = line.split(' ', 1)[1]
chunks = line.split(' + ')
# Strip a possible trailing underscore, which would particularly show
# up in the way we segment ATOMIC_SPACE_LANGUAGES (Vietnamese)
full_text = ''.join(chunks).strip('_')
end = join_uri('c', language, full_text)
for chunk in chunks:
if chunk != '_':
start = join_uri('x', language, chunk.strip('_'))
edge = make_edge(
'/r/SubwordOf', start, end,
dataset='/d/morphology',
license=Licenses.cc_attribution,
sources=MORPH_SOURCES,
weight=0.01
)
writer.write(edge)
writer.close()
| 38.430556
| 79
| 0.648356
|
from collections import defaultdict
from conceptnet5.edges import make_edge
from conceptnet5.formats.msgpack_stream import MsgpackStreamWriter
from conceptnet5.languages import ATOMIC_SPACE_LANGUAGES
from conceptnet5.nodes import split_uri
from conceptnet5.uri import get_uri_language, join_uri, Licenses
def prepare_vocab_for_morphology(language, input, output):
"""
Morfessor's input is a list of terms with their counts. Here, we
read a ConceptNet vocabulary file with counts (core_concept_counts.txt),
filter it for a single language, and convert it into the input form that
Morfessor expects.
We're stripping out the word sense information here, which would cause
the same term to appear multiple times. Because of that, we build up
a new dictionary of counts, summing all occurrences of a term.
We use _ to represent all spaces. In languages where the space-separated
segments are atomic (Vietnamese), we use _ to represent the locations where
subwords are allowed to end, and thus add _ to the end of the term as well.
"""
vocab_counts = defaultdict(int)
for line in input:
countstr, uri = line.strip().split(' ', 1)
if get_uri_language(uri) == language:
term = split_uri(uri)[2]
if language in ATOMIC_SPACE_LANGUAGES:
term += '_'
vocab_counts[term] += int(countstr)
for term, count in sorted(list(vocab_counts.items())):
print(count, term, file=output)
MORPH_SOURCES = [{'process': '/s/rule/morfessor'}]
def subwords_to_edges(language, input, output):
"""
Morfessor hypothesizes ways to break words into sub-word chunks. Produce
edges from these sub-words that can be used in retrofitting.
"""
writer = MsgpackStreamWriter(output)
for line in input:
line = line.rstrip()
if not line or line.startswith('#'):
continue
# Remove the unnecessary count ("1 ") from the start of each line
line = line.split(' ', 1)[1]
chunks = line.split(' + ')
# Strip a possible trailing underscore, which would particularly show
# up in the way we segment ATOMIC_SPACE_LANGUAGES (Vietnamese)
full_text = ''.join(chunks).strip('_')
end = join_uri('c', language, full_text)
for chunk in chunks:
if chunk != '_':
start = join_uri('x', language, chunk.strip('_'))
edge = make_edge(
'/r/SubwordOf', start, end,
dataset='/d/morphology',
license=Licenses.cc_attribution,
sources=MORPH_SOURCES,
weight=0.01
)
writer.write(edge)
writer.close()
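To see what prepare_vocab_for_morphology produces, it can be run on in-memory streams instead of files. This is a hedged sketch with made-up URIs and counts, assuming the conceptnet5 helpers (get_uri_language, split_uri) behave as their names suggest.

import io

sample = io.StringIO(
    "12 /c/en/example\n"
    "7 /c/en/example/n\n"    # same term with a sense tag: counts are summed
    "3 /c/fr/exemple\n"      # dropped when filtering for language='en'
)
out = io.StringIO()
prepare_vocab_for_morphology('en', sample, out)
print(out.getvalue())        # expected output: "19 example"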
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
94f484cca39d83c812666097b2a837e523ddf6b0
| 1,324
|
py
|
Python
|
src/timeset/month.py
|
eeriksp/timeset
|
7ac68a3e619057571c77680fe9e12f56fe77f641
|
[
"MIT"
] | 1
|
2021-06-06T20:17:23.000Z
|
2021-06-06T20:17:23.000Z
|
src/timeset/month.py
|
eeriksp/timeset
|
7ac68a3e619057571c77680fe9e12f56fe77f641
|
[
"MIT"
] | null | null | null |
src/timeset/month.py
|
eeriksp/timeset
|
7ac68a3e619057571c77680fe9e12f56fe77f641
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
| 33.1
| 101
| 0.695619
|
from __future__ import annotations
from calendar import monthrange
from datetime import timedelta, date
from .timerange import TimeRange
from .date_range import daterange
class CalendarMonth(TimeRange):
"""
Represent a calendar month.
"""
def __init__(self, year: int, month: int):
date_range = daterange(start=date(year, month, 1), end=self._last_date_of_month(year, month))
super().__init__(start=date_range.start, end=date_range.end)
self.year = year
self.month = month
def __repr__(self):
return f'CalendarMonth(year={self.year}, month={self.month})'
@property
def next(self) -> CalendarMonth:
"""Return an instance of next month."""
first_day_in_next_month = self.end.date() + timedelta(days=1)
return CalendarMonth(first_day_in_next_month.year, first_day_in_next_month.month)
@property
def prev(self) -> CalendarMonth:
"""Return an instance of previous month."""
last_day_in_previous_month = self.start.date() - timedelta(days=1)
return CalendarMonth(last_day_in_previous_month.year, last_day_in_previous_month.month)
@staticmethod
def _last_date_of_month(year: int, month: int) -> date:
_, last_day = monthrange(year, month)
return date(year, month, last_day)
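A hedged usage sketch of the CalendarMonth helper above; the start/end semantics are assumed from the daterange call in its constructor, and repr() is used because only __repr__ is defined in the class shown.

feb = CalendarMonth(2024, 2)
print(repr(feb.next))    # CalendarMonth(year=2024, month=3)
print(repr(feb.prev))    # CalendarMonth(year=2024, month=1)
dec = CalendarMonth(2023, 12)
print(repr(dec.next))    # rolls over to CalendarMonth(year=2024, month=1)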
| 0
| 622
| 0
| 505
| 0
| 0
| 0
| 48
| 113
|
46a2f6aa0d4a527286f097b05049619ed6808a58
| 1,008
|
py
|
Python
|
no_covers/migrations/0001_initial.py
|
qiwiGremL1n/blog
|
2ed534c0c62d91603f39da6b1c7e421b1cbf4047
|
[
"MIT"
] | null | null | null |
no_covers/migrations/0001_initial.py
|
qiwiGremL1n/blog
|
2ed534c0c62d91603f39da6b1c7e421b1cbf4047
|
[
"MIT"
] | null | null | null |
no_covers/migrations/0001_initial.py
|
qiwiGremL1n/blog
|
2ed534c0c62d91603f39da6b1c7e421b1cbf4047
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.2 on 2018-02-06 22:35
| 36
| 149
| 0.595238
|
# Generated by Django 2.0.2 on 2018-02-06 22:35
from django.db import migrations, models
import markupfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
('author', models.CharField(max_length=100, verbose_name='Author')),
('excerpt', markupfield.fields.MarkupField(rendered_field=True, verbose_name='Excerpt from a book')),
('excerpt_markup_type', models.CharField(choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain')], default='html', max_length=30)),
('url_link', models.URLField()),
('_excerpt_rendered', models.TextField(editable=False)),
],
),
]
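For reference, the migration above corresponds roughly to a model definition like the following. It is reconstructed from the CreateModel fields, so treat the field options (especially the MarkupField defaults) as best-effort guesses rather than the project's actual models.py.

from django.db import models
from markupfield.fields import MarkupField

class Book(models.Model):
    title = models.CharField('Title', max_length=100)
    author = models.CharField('Author', max_length=100)
    excerpt = MarkupField('Excerpt from a book', default_markup_type='html')
    url_link = models.URLField()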
| 0
| 0
| 0
| 868
| 0
| 0
| 0
| 23
| 68
|
baab3036b38177c9996cbcb4a9f5047bed5ed26e
| 94
|
py
|
Python
|
Custom middleware for user to autometically logout after xx.xx.xx time/BlogSite/blogPosts/admin.py
|
AyemunHossain/Django
|
0b1ed21fd6bd2906a4a1a220c029a2193658320f
|
[
"MIT"
] | 2
|
2020-02-14T19:23:50.000Z
|
2020-04-19T08:26:38.000Z
|
Custom middleware for user to autometically logout after xx.xx.xx time/BlogSite/blogPosts/admin.py
|
AyemunHossain/Django
|
0b1ed21fd6bd2906a4a1a220c029a2193658320f
|
[
"MIT"
] | 42
|
2021-02-02T23:08:30.000Z
|
2022-03-12T00:54:55.000Z
|
Project _ 2 -- BlogSite/blogPosts/admin.py
|
AyemunHossain/Django
|
0b1ed21fd6bd2906a4a1a220c029a2193658320f
|
[
"MIT"
] | 1
|
2022-03-07T08:09:41.000Z
|
2022-03-07T08:09:41.000Z
|
from django.contrib import admin
from .models import blogPost
admin.site.register(blogPost)
| 15.666667
| 32
| 0.819149
|
from django.contrib import admin
from .models import blogPost
admin.site.register(blogPost)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0774a8fc8155745e2ffce489ba770491adbcd7b9
| 68
|
py
|
Python
|
resources/colours.py
|
PyBot-Development/PyBot-v4
|
7fb821940bf43ded7d6996342b83afda4174d36e
|
[
"MIT"
] | null | null | null |
resources/colours.py
|
PyBot-Development/PyBot-v4
|
7fb821940bf43ded7d6996342b83afda4174d36e
|
[
"MIT"
] | null | null | null |
resources/colours.py
|
PyBot-Development/PyBot-v4
|
7fb821940bf43ded7d6996342b83afda4174d36e
|
[
"MIT"
] | null | null | null |
red = 0xff3d3d
green = 0xb8ff3d
blue = 0x2e66ff
yellow = 0xfff94d
| 17
| 17
| 0.735294
|
red = 0xff3d3d
green = 0xb8ff3d
blue = 0x2e66ff
yellow = 0xfff94d
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
07944afff49fa24e07a81620b7148f2200f931cc
| 10,403
|
py
|
Python
|
pyscf/sgx/sgx.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 1
|
2021-11-12T11:55:25.000Z
|
2021-11-12T11:55:25.000Z
|
pyscf/sgx/sgx.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 36
|
2018-08-22T19:44:03.000Z
|
2020-05-09T10:02:36.000Z
|
pyscf/sgx/sgx.py
|
mfkasim1/pyscf
|
7be5e015b2b40181755c71d888449db936604660
|
[
"Apache-2.0"
] | 4
|
2018-02-14T16:28:28.000Z
|
2019-08-12T16:40:30.000Z
|
#!/usr/bin/env python
# Copyright 2018-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Pseudo-spectral methods (COSX, PS, SN-K)
'''
import copy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf.scf import _vhf
def sgx_fit(mf, auxbasis=None, with_df=None):
'''For the given SCF object, update the J, K matrix constructor with
corresponding SGX or density fitting integrals.
Args:
mf : an SCF object
Kwargs:
auxbasis : str or basis dict
Same format as the input attribute mol.basis. If auxbasis is
None, optimal auxiliary basis based on AO basis (if possible) or
even-tempered Gaussian basis will be used.
Returns:
An SCF object with a modified J, K matrix constructor which uses density
fitting integrals to compute J and K
Examples:
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = sgx_fit(scf.RHF(mol))
>>> mf.scf()
-100.00978770917165
>>> mol.symmetry = 1
>>> mol.build(0, 0)
>>> mf = sgx_fit(scf.UHF(mol))
>>> mf.scf()
-100.00978770951018
'''
assert(isinstance(mf, scf.hf.SCF))
if with_df is None:
with_df = SGX(mf.mol)
with_df.max_memory = mf.max_memory
with_df.stdout = mf.stdout
with_df.verbose = mf.verbose
with_df.auxbasis = auxbasis
mf_class = mf.__class__
if isinstance(mf, _SGXHF):
if mf.with_df is None:
mf = mf_class(mf, with_df, auxbasis)
elif mf.with_df.auxbasis != auxbasis:
#logger.warn(mf, 'DF might have been initialized twice.')
mf = copy.copy(mf)
mf.with_df = with_df
return mf
return SGXHF(mf, with_df, auxbasis)
# A tag to label the derived SCF class
scf.hf.SCF.COSX = sgx_fit
mcscf.casci.CASCI.COSX = sgx_fit
def _make_opt(mol):
'''Optimizer to generate 3-center 2-electron integrals'''
intor = mol._add_suffix('int3c2e')
cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, intor)
# intor 'int1e_ovlp' is used by the prescreen method
# 'SGXnr_ovlp_prescreen' only. Not used again in other places.
# It can be released early
vhfopt = _vhf.VHFOpt(mol, 'int1e_ovlp', 'SGXnr_ovlp_prescreen',
'SGXsetnr_direct_scf')
vhfopt._intor = intor
vhfopt._cintopt = cintopt
return vhfopt
if __name__ == '__main__':
from pyscf import scf
mol = gto.Mole()
mol.build(
atom = [["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
)
method = sgx_fit(scf.RHF(mol), 'weigend')
energy = method.scf()
print(energy - -76.02673747045691)
method.with_df.dfj = True
energy = method.scf()
print(energy - -76.02686422219752)
| 33.775974
| 89
| 0.604249
|
#!/usr/bin/env python
# Copyright 2018-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Pseudo-spectral methods (COSX, PS, SN-K)
'''
import copy
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf.scf import _vhf
from pyscf.lib import logger
from pyscf.sgx import sgx_jk
from pyscf.df import df_jk
from pyscf import __config__
def sgx_fit(mf, auxbasis=None, with_df=None):
'''For the given SCF object, update the J, K matrix constructor with
corresponding SGX or density fitting integrals.
Args:
mf : an SCF object
Kwargs:
auxbasis : str or basis dict
Same format as the input attribute mol.basis. If auxbasis is
None, optimal auxiliary basis based on AO basis (if possible) or
even-tempered Gaussian basis will be used.
Returns:
An SCF object with a modified J, K matrix constructor which uses density
fitting integrals to compute J and K
Examples:
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1', basis='ccpvdz', verbose=0)
>>> mf = sgx_fit(scf.RHF(mol))
>>> mf.scf()
-100.00978770917165
>>> mol.symmetry = 1
>>> mol.build(0, 0)
>>> mf = sgx_fit(scf.UHF(mol))
>>> mf.scf()
-100.00978770951018
'''
assert(isinstance(mf, scf.hf.SCF))
if with_df is None:
with_df = SGX(mf.mol)
with_df.max_memory = mf.max_memory
with_df.stdout = mf.stdout
with_df.verbose = mf.verbose
with_df.auxbasis = auxbasis
mf_class = mf.__class__
if isinstance(mf, _SGXHF):
if mf.with_df is None:
mf = mf_class(mf, with_df, auxbasis)
elif mf.with_df.auxbasis != auxbasis:
#logger.warn(mf, 'DF might have been initialized twice.')
mf = copy.copy(mf)
mf.with_df = with_df
return mf
class SGXHF(_SGXHF, mf_class):
def __init__(self, mf, df, auxbasis):
self.__dict__.update(mf.__dict__)
self._eri = None
self.auxbasis = auxbasis
self.with_df = df
# Grids/Integral quality varies during SCF. VHF cannot be
# constructed incrementally.
self.direct_scf = False
self._last_dm = 0
self._in_scf = False
self._keys = self._keys.union(['auxbasis', 'with_df'])
def build(self, mol=None, **kwargs):
if self.direct_scf:
self.with_df.build(level=self.with_df.grids_level_f)
else:
self.with_df.build(level=self.with_df.grids_level_i)
return mf_class.build(self, mol, **kwargs)
def reset(self, mol=None):
self.with_df.reset(mol)
return mf_class.reset(self, mol)
def pre_kernel(self, envs):
self._in_scf = True
def get_jk(self, mol=None, dm=None, hermi=1, with_j=True, with_k=True,
omega=None):
if dm is None: dm = self.make_rdm1()
with_df = self.with_df
if not with_df:
return mf_class.get_jk(self, mol, dm, hermi, with_j, with_k, omega)
if self._in_scf and not self.direct_scf:
if numpy.linalg.norm(dm - self._last_dm) < with_df.grids_switch_thrd:
logger.debug(self, 'Switching SGX grids')
with_df.build(level=with_df.grids_level_f)
self._in_scf = False
self._last_dm = 0
else:
self._last_dm = numpy.asarray(dm)
return with_df.get_jk(dm, hermi, with_j, with_k,
self.direct_scf_tol, omega)
def post_kernel(self, envs):
self._in_scf = False
self._last_dm = 0
def nuc_grad_method(self):
raise NotImplementedError
return SGXHF(mf, with_df, auxbasis)
# A tag to label the derived SCF class
class _SGXHF(object):
def method_not_implemented(self, *args, **kwargs):
raise NotImplementedError
nuc_grad_method = Gradients = method_not_implemented
Hessian = method_not_implemented
NMR = method_not_implemented
NSR = method_not_implemented
Polarizability = method_not_implemented
RotationalGTensor = method_not_implemented
MP2 = method_not_implemented
CISD = method_not_implemented
CCSD = method_not_implemented
CASCI = method_not_implemented
CASSCF = method_not_implemented
scf.hf.SCF.COSX = sgx_fit
mcscf.casci.CASCI.COSX = sgx_fit
def _make_opt(mol):
'''Optimizer to generate 3-center 2-electron integrals'''
intor = mol._add_suffix('int3c2e')
cintopt = gto.moleintor.make_cintopt(mol._atm, mol._bas, mol._env, intor)
# intor 'int1e_ovlp' is used by the prescreen method
# 'SGXnr_ovlp_prescreen' only. Not used again in other places.
# It can be released early
vhfopt = _vhf.VHFOpt(mol, 'int1e_ovlp', 'SGXnr_ovlp_prescreen',
'SGXsetnr_direct_scf')
vhfopt._intor = intor
vhfopt._cintopt = cintopt
return vhfopt
class SGX(lib.StreamObject):
def __init__(self, mol, auxbasis=None):
self.mol = mol
self.stdout = mol.stdout
self.verbose = mol.verbose
self.max_memory = mol.max_memory
self.grids_thrd = 1e-10
self.grids_level_i = 0 # initial grids level
self.grids_level_f = 1 # final grids level
self.grids_switch_thrd = 0.03
# compute J matrix using DF and K matrix using SGX. It's identical to
# the RIJCOSX method in ORCA
self.dfj = False
self._auxbasis = auxbasis
# debug=True generates a dense tensor of the Coulomb integrals at each
# grids. debug=False utilizes the sparsity of the integral tensor and
# contracts the sparse tensor and density matrices on the fly.
self.debug = False
self.grids = None
self.blockdim = 1200
self.auxmol = None
self._vjopt = None
self._opt = None
self._last_dm = 0
self._rsh_df = {} # Range separated Coulomb DF objects
self._keys = set(self.__dict__.keys())
@property
def auxbasis(self):
return self._auxbasis
@auxbasis.setter
def auxbasis(self, x):
if self._auxbasis != x:
self._auxbasis = x
self.auxmol = None
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('******** %s ********', self.__class__)
log.info('max_memory = %s', self.max_memory)
log.info('grids_level_i = %s', self.grids_level_i)
log.info('grids_level_f = %s', self.grids_level_f)
log.info('grids_thrd = %s', self.grids_thrd)
log.info('grids_switch_thrd = %s', self.grids_switch_thrd)
log.info('dfj = %s', self.dfj)
log.info('auxbasis = %s', self.auxbasis)
return self
# To mimic DF object, so that SGX can be used as in DF-SCF method by setting
# mf.with_df = SGX(mol)
@property
def _cderi(self):
return self.grids
def build(self, level=None):
if level is None:
level = self.grids_level_f
self.grids = sgx_jk.get_gridss(self.mol, level, self.grids_thrd)
self._opt = _make_opt(self.mol)
# In the RSH-integral temporary treatment, recursively rebuild SGX
# objects in _rsh_df.
if self._rsh_df:
for k, v in self._rsh_df.items():
v.build(level)
return self
def kernel(self, *args, **kwargs):
return self.build(*args, **kwargs)
def reset(self, mol=None):
'''Reset mol and clean up relevant attributes for scanner mode'''
if mol is not None:
self.mol = mol
self.grids = None
self.auxmol = None
self._vjopt = None
self._opt = None
self._last_dm = 0
self._rsh_df = {}
return self
def get_jk(self, dm, hermi=1, with_j=True, with_k=True,
direct_scf_tol=getattr(__config__, 'scf_hf_SCF_direct_scf_tol', 1e-13),
omega=None):
if omega is not None:
# A temporary treatment for RSH integrals
key = '%.6f' % omega
if key in self._rsh_df:
rsh_df = self._rsh_df[key]
else:
rsh_df = copy.copy(self)
rsh_df._rsh_df = None # to avoid circular reference
# Not all attributes need to be reset. Resetting _vjopt
# because it is used by get_j method of regular DF object.
rsh_df._vjopt = None
self._rsh_df[key] = rsh_df
logger.info(self, 'Create RSH-SGX object %s for omega=%s', rsh_df, omega)
with rsh_df.mol.with_range_coulomb(omega):
return rsh_df.get_jk(dm, hermi, with_j, with_k,
direct_scf_tol)
if with_j and self.dfj:
vj = df_jk.get_j(self, dm, hermi, direct_scf_tol)
if with_k:
vk = sgx_jk.get_jk(self, dm, hermi, False, with_k, direct_scf_tol)[1]
else:
vk = None
else:
vj, vk = sgx_jk.get_jk(self, dm, hermi, with_j, with_k, direct_scf_tol)
return vj, vk
if __name__ == '__main__':
from pyscf import scf
mol = gto.Mole()
mol.build(
atom = [["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'ccpvdz',
)
method = sgx_fit(scf.RHF(mol), 'weigend')
energy = method.scf()
print(energy - -76.02673747045691)
method.with_df.dfj = True
energy = method.scf()
print(energy - -76.02686422219752)
| 0
| 194
| 0
| 6,504
| 0
| 0
| 0
| 17
| 204
|
d9fe37ca4723be9704a492674ba8604c492b90eb
| 9,975
|
py
|
Python
|
gobbli/model/base.py
|
RTIInternational/gobbli
|
d9ec8132f74ce49dc4bead2fad25b661bcef6e76
|
[
"Apache-2.0"
] | 276
|
2019-09-13T08:25:51.000Z
|
2022-03-05T13:07:55.000Z
|
gobbli/model/base.py
|
RTIInternational/gobbli
|
d9ec8132f74ce49dc4bead2fad25b661bcef6e76
|
[
"Apache-2.0"
] | 15
|
2019-09-06T14:05:30.000Z
|
2022-01-01T20:15:06.000Z
|
gobbli/model/base.py
|
RTIInternational/gobbli
|
d9ec8132f74ce49dc4bead2fad25b661bcef6e76
|
[
"Apache-2.0"
] | 24
|
2019-09-18T15:11:42.000Z
|
2021-12-23T18:59:55.000Z
|
import logging
LOGGER = logging.getLogger(__name__)
_WEIGHTS_DIR_NAME = "weights"
| 34.756098
| 105
| 0.602907
|
import logging
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from timeit import default_timer as timer
from typing import Any, Dict, Optional
import docker
from gobbli.model.context import ContainerTaskContext
from gobbli.util import (
format_duration,
generate_uuid,
gobbli_version,
is_dir_empty,
model_dir,
read_metadata,
write_metadata,
)
LOGGER = logging.getLogger(__name__)
_WEIGHTS_DIR_NAME = "weights"
class BaseModel(ABC):
"""
Abstract base class for all models.
Derived classes should be careful to call super().__init__(...) with the appropriate
arguments if they override __init__() to preserve all the functionality.
Functionality to facilitate making GPU(s) available to derived classes is available.
"""
# File containing information about the model, including type of model and gobbli version
# the model was created under
_INFO_FILENAME = "gobbli-model-info.json"
# File containing model parameters (i.e. arguments to init())
_METADATA_FILENAME = "gobbli-model-meta.json"
_WEIGHTS_DIR_NAME = _WEIGHTS_DIR_NAME
_CONTAINER_WEIGHTS_PATH = Path("/model") / _WEIGHTS_DIR_NAME
def __init__(
self,
data_dir: Optional[Path] = None,
load_existing: bool = False,
use_gpu: bool = False,
nvidia_visible_devices: str = "all",
logger: Optional[logging.Logger] = None,
**kwargs,
):
"""
Create a model.
Args:
data_dir: Optional path to a directory used to store model data. If not given,
a unique directory under GOBBLI_DIR will be created and used.
load_existing: If True, ``data_dir`` should be a directory that was previously used
to create a model. Parameters will be loaded to match the original model, and
user-specified model parameters will be ignored. If False, the data_dir must
be empty if it already exists.
use_gpu: If True, use the
nvidia-docker runtime (https://github.com/NVIDIA/nvidia-docker) to expose
NVIDIA GPU(s) to the container. Will cause an error if the computer you're running
on doesn't have an NVIDIA GPU and/or doesn't have the nvidia-docker runtime installed.
nvidia_visible_devices: Which GPUs to make available to the container; ignored if
``use_gpu`` is False. If not 'all', should be a comma-separated string: ex. ``1,2``.
logger: If passed, use this logger for logging instead of the default module-level logger.
**kwargs: Additional model-specific parameters to be passed to the model's :meth:`init` method.
"""
self._logger = LOGGER
if logger is not None:
self._logger = logger
if data_dir is None:
self._data_dir = self.model_class_dir() / generate_uuid()
else:
self._data_dir = data_dir
# Ensure we have an absolute data dir so any derived paths used in metadata files, etc
# aren't ambiguous
self._data_dir = self._data_dir.resolve()
self._data_dir.mkdir(parents=True, exist_ok=True)
class_name = self.__class__.__name__
cur_gobbli_version = gobbli_version()
if self.info_path.exists():
info = read_metadata(self.info_path)
if not info["class"] == class_name:
raise ValueError(
f"Model class mismatch: the model stored in {data_dir} is of "
f"class '{info['class']}'. Expected '{class_name}'."
)
if not info["gobbli_version"] == cur_gobbli_version:
warnings.warn(
f"The model stored in {data_dir} was created with gobbli version "
f"{info['gobbli_version']}, but you're running version {cur_gobbli_version}. "
"You may encounter compatibility issues."
)
if load_existing and self.metadata_path.exists():
params = read_metadata(self.metadata_path)
if len(kwargs) > 0:
warnings.warn(
"User-passed params ignored due to existing model being "
f"loaded: {kwargs}"
)
else:
if not is_dir_empty(self._data_dir):
raise ValueError(
f"data_dir '{self._data_dir}' is non-empty;"
" it must be empty to avoid overwriting data."
)
params = kwargs
write_metadata(params, self.metadata_path)
write_metadata(
{"class": class_name, "gobbli_version": cur_gobbli_version},
self.info_path,
)
self.use_gpu = use_gpu
self.nvidia_visible_devices = nvidia_visible_devices
self.docker_client = docker.from_env()
self.init(params)
self._logger.info(
f"{class_name} initialized with data directory '{self._data_dir}'"
)
@property
def logger(self) -> logging.Logger:
"""
Returns:
A logger for derived models to use.
"""
return self._logger
@property
def info_path(self) -> Path:
"""
Returns:
The path to the model's info file, containing information about the model including
the type of model, gobbli version it was trained using, etc.
"""
return self.data_dir() / BaseModel._INFO_FILENAME
@property
def metadata_path(self) -> Path:
"""
Returns:
The path to the model's metadata file containing model-specific parameters.
"""
return self.data_dir() / BaseModel._METADATA_FILENAME
@abstractmethod
def init(self, params: Dict[str, Any]):
"""
Initialize a derived model using parameters specific to that model.
Args:
params: A dictionary where keys are parameter names and values are
parameter values.
"""
raise NotImplementedError
def _base_docker_run_kwargs(self, context: ContainerTaskContext) -> Dict[str, Any]:
"""
Establish a base set of docker run kwargs to handle GPU support, etc.
Map directories as specified by the context.
Returns:
Base kwargs for any model that will be run using Docker.
"""
kwargs = {
"environment": {
# Minimize the probability of containers exiting without dumping
# buffered output
"PYTHONUNBUFFERED": "1"
},
"detach": True,
"volumes": {
str(context.task_root_dir): {
"bind": str(context.container_root_dir),
"mode": "rw",
},
# Ideally we'd mount this as read-only, but some models (e.g. fastText)
# need to write to their weights
str(self.weights_dir): {
"bind": str(BaseModel._CONTAINER_WEIGHTS_PATH),
"mode": "rw",
},
},
} # type: Dict[str, Any]
if self.use_gpu:
kwargs["environment"][
"NVIDIA_VISIBLE_DEVICES"
] = self.nvidia_visible_devices
kwargs["runtime"] = "nvidia"
return kwargs
@property
def _base_docker_build_kwargs(self) -> Dict[str, Any]:
"""
Handle GPU support, etc via common args for any model Docker container.
Returns:
Base kwargs for any model that will be built using Docker.
"""
kwargs = {"buildargs": {}} # type: Dict[str, Any]
if self.use_gpu:
kwargs["buildargs"]["GPU"] = "1"
return kwargs
def data_dir(self) -> Path:
"""
Returns:
The main data directory unique to this instance of the model.
"""
return self._data_dir
@classmethod
def model_class_dir(cls) -> Path:
"""
Returns:
A directory shared among all classes of the model.
"""
return model_dir() / cls.__name__
@property
def class_weights_dir(self) -> Path:
"""
The root directory used to store initial model weights (before fine-tuning).
These should generally be some pretrained weights made available by model
developers. This directory will NOT be created by default; models should
download their weights and remove the weights directory if the download doesn't
finish properly.
Most models making use of this directory will have multiple sets of weights and
will need to store those in subdirectories under this directory.
Returns:
The path to the class-wide weights directory.
"""
return self.model_class_dir() / BaseModel._WEIGHTS_DIR_NAME
@property
def weights_dir(self) -> Path:
"""
The directory containing weights for a specific instance of the model.
This is the class weights directory by default, but subclasses might
define this property to return a subdirectory based on a set of pretrained
model weights.
Returns:
The instance-specific weights directory.
"""
return self.class_weights_dir
def build(self):
"""
Perform any pre-setup that needs to be done before running the model
(building Docker images, etc).
"""
self.logger.info("Starting build.")
start = timer()
self._build()
end = timer()
self.logger.info(f"Build finished in {format_duration(end - start)}.")
@abstractmethod
def _build(self):
"""
Used for derived classes to define their implementation of the build method.
"""
raise NotImplementedError
| 0
| 2,797
| 0
| 6,681
| 0
| 0
| 0
| 210
| 201
|
4bfd526d716c81fc787d1b1f9508eebc364bffcb
| 2,631
|
py
|
Python
|
aws_gate/list.py
|
gnought/aws-gate
|
20728e0926e6eb36b5f6a6c8ed91b21c010674c8
|
[
"BSD-3-Clause"
] | null | null | null |
aws_gate/list.py
|
gnought/aws-gate
|
20728e0926e6eb36b5f6a6c8ed91b21c010674c8
|
[
"BSD-3-Clause"
] | null | null | null |
aws_gate/list.py
|
gnought/aws-gate
|
20728e0926e6eb36b5f6a6c8ed91b21c010674c8
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
# pylint: disable=unused-argument
| 30.952941
| 118
| 0.733561
|
# -*- encoding: utf-8 -*-
import csv
import io
import itertools
import json
from aws_gate.constants import (
AWS_DEFAULT_PROFILE,
AWS_DEFAULT_REGION,
DEFAULT_LIST_OUTPUT_FIELDS,
DEFAULT_LIST_HUMAN_FIELDS,
DEFAULT_LIST_OUTPUT,
)
from aws_gate.query import get_multiple_instance_details
from aws_gate.utils import (
get_aws_client,
get_aws_resource,
)
# pylint: disable=unused-argument
def _serialize_json(data, fields=None):
return json.dumps(data, indent=4, sort_keys=True)
def _serialize_csv(data, delimiter=",", fields=DEFAULT_LIST_OUTPUT_FIELDS):
output = io.StringIO()
writer = csv.DictWriter(output, delimiter=delimiter, fieldnames=fields)
writer.writerows(data)
return output.getvalue()
def _serialize_tsv(data, fields=DEFAULT_LIST_OUTPUT_FIELDS):
return _serialize_csv(data, delimiter="\t", fields=fields)
def _serialize_human(data, fields=DEFAULT_LIST_HUMAN_FIELDS):
return _serialize_csv(data, delimiter=" ", fields=fields)
def serialize(
data, output_format=DEFAULT_LIST_OUTPUT, fields=DEFAULT_LIST_OUTPUT_FIELDS
):
format_dispatcher = {
"csv": _serialize_csv,
"tsv": _serialize_tsv,
"human": _serialize_human,
"json": _serialize_json,
}
filtered_data = list(map(lambda x: { k:v for (k,v) in x.items() if k in fields }, data))
return format_dispatcher[output_format](filtered_data, fields=fields)
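# Illustrative usage sketch (added comment, not part of the original aws_gate source;
# the record and field name below are hypothetical):
#   serialize([{"instance_id": "i-0abc123"}], output_format="json", fields=["instance_id"])
# would dispatch to _serialize_json and return the filtered record as a JSON string.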
def list_instances(
profile_name=AWS_DEFAULT_PROFILE,
region_name=AWS_DEFAULT_REGION,
output_format=DEFAULT_LIST_OUTPUT,
fields=DEFAULT_LIST_HUMAN_FIELDS,
):
invalid_fields = list(set(fields) - set(DEFAULT_LIST_OUTPUT_FIELDS))
if invalid_fields:
raise ValueError(
'Invalid fields provided: "{}". Valid fields: "{}"'.format(
" ".join(invalid_fields), " ".join(DEFAULT_LIST_OUTPUT_FIELDS)
)
)
ssm = get_aws_client("ssm", region_name=region_name, profile_name=profile_name)
ec2 = get_aws_resource("ec2", region_name=region_name, profile_name=profile_name)
instances_ssm_paginator = ssm.get_paginator("describe_instance_information")
instances_ssm_response_iterator = instances_ssm_paginator.paginate()
instance_ids = []
for response in instances_ssm_response_iterator:
instance_ids = itertools.chain(instance_ids, [ i["InstanceId"] for i in response["InstanceInformationList"] ])
instance_details = list(get_multiple_instance_details(instance_ids=list(instance_ids), ec2=ec2))
print(
serialize(instance_details, output_format=output_format, fields=fields).rstrip()
)
| 0
| 0
| 0
| 0
| 0
| 2,074
| 0
| 198
| 292
|
c5d1832646589e3f5dff30fb1dbb0792478ae215
| 3,630
|
py
|
Python
|
network_test/network_test/suites/node.py
|
kkkkv/tgnms
|
a3b8fd8a69b647a614f9856933f05e50a4affadf
|
[
"MIT"
] | 12
|
2021-04-06T06:27:18.000Z
|
2022-03-18T10:52:29.000Z
|
network_test/network_test/suites/node.py
|
kkkkv/tgnms
|
a3b8fd8a69b647a614f9856933f05e50a4affadf
|
[
"MIT"
] | 6
|
2022-01-04T13:32:16.000Z
|
2022-03-28T21:13:59.000Z
|
network_test/network_test/suites/node.py
|
kkkkv/tgnms
|
a3b8fd8a69b647a614f9856933f05e50a4affadf
|
[
"MIT"
] | 7
|
2021-09-27T13:14:42.000Z
|
2022-03-28T16:24:15.000Z
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
| 38.617021
| 87
| 0.599449
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import logging
import random
from typing import Any, Dict, List, Set
from terragraph_thrift.Controller.ttypes import IperfTransportProtocol
from terragraph_thrift.Topology.ttypes import NodeStatusType
from tglib.clients import APIServiceClient
from tglib.exceptions import ClientRuntimeError
from ..models import NetworkTestDirection, NetworkTestType
from .base import BaseTest, TestAsset
class NodeTest(BaseTest):
def __init__(
self,
network_name: str,
test_type: NetworkTestType,
direction: NetworkTestDirection,
iperf_options: Dict[str, Any],
allowlist: List[str],
) -> None:
# Set default test configurations
if "bitrate" not in iperf_options:
iperf_options["bitrate"] = 150000000 # 150 MB/s
if "protocol" not in iperf_options:
iperf_options["protocol"] = IperfTransportProtocol.TCP
iperf_options["omitSec"] = 2 # 2 seconds
super().__init__(network_name, test_type, direction, iperf_options, allowlist)
async def prepare(self) -> bool: # noqa: C901
"""Prepare the network test assets.
Using the allowlist provided, or after selecting one node per site (excluding
PoPs), gather the node names and MAC address information.
"""
self.session_ids.clear()
try:
client = APIServiceClient(timeout=1)
topology = await client.request(self.network_name, "getTopology")
nodes: List[str] = []
name_to_mac: Dict[str, str] = {}
seen_sites: Set[str] = set()
allowlist_set = set(self.allowlist)
# Shuffle the nodes to avoid picking the same site representative each time
random.shuffle(topology["nodes"])
for node in topology["nodes"]:
node_name = node["name"]
node_mac = node["mac_addr"]
site_name = node["site_name"]
if node["pop_node"]:
name_to_mac[node_name] = node_mac
elif node["status"] == NodeStatusType.OFFLINE:
logging.error(f"Skipping {node_name} because it is 'OFFLINE'")
elif (allowlist_set and node_name in allowlist_set) or (
not allowlist_set and site_name not in seen_sites
):
name_to_mac[node_name] = node_mac
nodes.append(node_name)
seen_sites.add(site_name)
default_routes = (
await client.request(
self.network_name, "getDefaultRoutes", params={"nodes": nodes}
)
).get("defaultRoutes")
if default_routes is None:
logging.error(f"No default routes available for {self.network_name}")
return False
self.assets = []
for node_name, routes in default_routes.items():
if not routes:
logging.error(f"{node_name} has no default routes available")
continue
# Pick a random PoP node from the default routes if ECMP
pop_name = routes[random.randint(0, len(routes) - 1)][-1]
self.assets.append(
TestAsset(node_name, name_to_mac[node_name], name_to_mac[pop_name])
)
return True
except ClientRuntimeError:
logging.exception(f"Failed to prepare test assets for {self.network_name}")
return False
| 0
| 0
| 2,485
| 650
| 0
| 0
| 0
| 191
| 224
|
31490150ab82b1e45f821526838a643a9c49233f
| 1,449
|
py
|
Python
|
Medium/228_2.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6
|
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Medium/228_2.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1
|
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Medium/228_2.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# 228. Summary Ranges
#
# Description:
#
# Version: 2.0
# 09/25/18 by Jianfa
# ------------------------------
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
#
| 28.98
| 107
| 0.461698
|
# ------------------------------
# 228. Summary Ranges
#
# Description:
#
# Version: 2.0
# 09/25/18 by Jianfa
# ------------------------------
class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
The idea is to use two pointers, left and right, to record the current range.
Go over the array: if the current number equals the previous number plus one, the range is continuous;
otherwise, store the range and reset both the left and right pointers to the current index.
"""
if not nums:
return []
res = []
left = right = 0
for i in range(1, len(nums)):
if nums[i] == nums[i-1] + 1:
right += 1
else:
if left == right: # If there is only one number in this range
res.append('%d' % nums[left])
else: # If there are more than one numbers in this range
res.append('%d->%d' % (nums[left], nums[right]))
left = right = i
if left == right: # Record the last range
res.append('%d' % nums[left])
else:
res.append('%d->%d' % (nums[left], nums[right]))
return res
# Used for testing
if __name__ == "__main__":
test = Solution()
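# Illustrative call (added example, not in the original file); for LeetCode 228,
# an input of [0, 1, 2, 4, 5, 7] should yield ['0->2', '4->5', '7']:
print(test.summaryRanges([0, 1, 2, 4, 5, 7]))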
# ------------------------------
# Summary:
#
| 0
| 0
| 0
| 1,147
| 0
| 0
| 0
| 0
| 23
|
7f91f182a8ad028e09282b1a10aa2772dc367306
| 208
|
py
|
Python
|
using_name.py
|
miaoyinnu/-
|
039251f08398ebd18847e44bf8fd07e1178b5688
|
[
"MIT"
] | null | null | null |
using_name.py
|
miaoyinnu/-
|
039251f08398ebd18847e44bf8fd07e1178b5688
|
[
"MIT"
] | null | null | null |
using_name.py
|
miaoyinnu/-
|
039251f08398ebd18847e44bf8fd07e1178b5688
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
print("This is program is being run by itself")
else:
print("i am being imported from another module")
from mymodule import sayhi, version
sayhi()
print("Verison",version)
| 20.8
| 52
| 0.721154
|
if __name__ == '__main__':
print("This is program is being run by itself")
else:
print("i am being imported from another module")
from mymodule import sayhi,version
sayhi()
print("Verison",version)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -1
| 0
|
56ab80db150e9ad8631da73911e4034c96bd9870
| 5,890
|
py
|
Python
|
q2t/thermo.py
|
cgrambow/q2t
|
ab62d8840e1f7bfa30d0dfaa234ad86db0833809
|
[
"MIT"
] | null | null | null |
q2t/thermo.py
|
cgrambow/q2t
|
ab62d8840e1f7bfa30d0dfaa234ad86db0833809
|
[
"MIT"
] | null | null | null |
q2t/thermo.py
|
cgrambow/q2t
|
ab62d8840e1f7bfa30d0dfaa234ad86db0833809
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import rmgpy.constants as constants
# Experimental heats of formation of atoms (kcal/mol)
h0expt = {'H': 51.63,
'C': 169.98,
'N': 112.53,
'O': 58.99}
h298corr = {'H': 1.01,
'C': 0.25,
'N': 1.04,
'O': 1.04}
# Spin-orbit corrections for neutral atoms
atom_socs = {'H': 0.0,
'C': 0.000135,
'N': 0.0,
'O': 0.000355}
# Atomic reference energies at 0K in Hartree
atom_energies = {
'ccsd(t)-f12a/cc-pvdz-f12': {
'H': -0.499811124128,
'N': -54.525946786123,
'O': -74.994643838203,
'C': -37.787831744881,
},
'ccsd(t)-f12b/cc-pvdz-f12': {
'H': -0.499811124128,
'N': -54.522814689877,
'O': -74.989919455883,
'C': -37.785040449664,
},
'ccsd(t)-f12a/cc-pvtz-f12': {
'H': -0.499946213253,
'N': -54.529590447091,
'O': -75.003545717458,
'C': -37.789552049511,
},
'ccsd(t)-f12b/cc-pvtz-f12': {
'H': -0.499946213253,
'N': -54.527721253368,
'O': -75.000516530163,
'C': -37.787925879006,
},
'ccsd(t)-f12a/cc-pvqz-f12': {
'H': -0.499994558326,
'N': -54.530194782830,
'O': -75.005192195863,
'C': -37.789729174726,
},
'ccsd(t)-f12b/cc-pvqz-f12': {
'H': -0.499994558326,
'N': -54.529107245074,
'O': -75.003414816890,
'C': -37.788775207449,
},
'ccsd(t)-f12a/aug-cc-pv5z': {
'H': -0.499994816870,
'N': -54.529731561126,
'O': -75.004562049197,
'C': -37.789360554007,
},
'ccsd(t)-f12b/aug-cc-pv5z': {
'H': -0.499994816870,
'N': -54.528933245046,
'O': -75.003291308092,
'C': -37.788641170961,
},
'b3lyp/6-31g(2df,p)': {
'H': -0.500273,
'N': -54.583861,
'O': -75.064579,
'C': -37.846772,
}
}
freq_scale_factors = {
'b3lyp/6-31g(2df,p)': 0.965,
'wb97x-d3/def2-tzvp': 0.975,
}
| 32.541436
| 117
| 0.605772
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import rmgpy.constants as constants
from rmgpy.statmech import Conformer, IdealGasTranslation, LinearRotor, NonlinearRotor, HarmonicOscillator
from rmgpy.qm.qmdata import QMData
from rmgpy.qm.symmetry import PointGroupCalculator
from .qchem import QChem
from .molpro import Molpro
from .mol import atomic_symbol_dict, geo_to_rmg_mol, get_bac_correction
# Experimental heats of formation of atoms (kcal/mol)
h0expt = {'H': 51.63,
'C': 169.98,
'N': 112.53,
'O': 58.99}
h298corr = {'H': 1.01,
'C': 0.25,
'N': 1.04,
'O': 1.04}
# Spin-orbit corrections for neutral atoms
atom_socs = {'H': 0.0,
'C': 0.000135,
'N': 0.0,
'O': 0.000355}
# Atomic reference energies at 0K in Hartree
atom_energies = {
'ccsd(t)-f12a/cc-pvdz-f12': {
'H': -0.499811124128,
'N': -54.525946786123,
'O': -74.994643838203,
'C': -37.787831744881,
},
'ccsd(t)-f12b/cc-pvdz-f12': {
'H': -0.499811124128,
'N': -54.522814689877,
'O': -74.989919455883,
'C': -37.785040449664,
},
'ccsd(t)-f12a/cc-pvtz-f12': {
'H': -0.499946213253,
'N': -54.529590447091,
'O': -75.003545717458,
'C': -37.789552049511,
},
'ccsd(t)-f12b/cc-pvtz-f12': {
'H': -0.499946213253,
'N': -54.527721253368,
'O': -75.000516530163,
'C': -37.787925879006,
},
'ccsd(t)-f12a/cc-pvqz-f12': {
'H': -0.499994558326,
'N': -54.530194782830,
'O': -75.005192195863,
'C': -37.789729174726,
},
'ccsd(t)-f12b/cc-pvqz-f12': {
'H': -0.499994558326,
'N': -54.529107245074,
'O': -75.003414816890,
'C': -37.788775207449,
},
'ccsd(t)-f12a/aug-cc-pv5z': {
'H': -0.499994816870,
'N': -54.529731561126,
'O': -75.004562049197,
'C': -37.789360554007,
},
'ccsd(t)-f12b/aug-cc-pv5z': {
'H': -0.499994816870,
'N': -54.528933245046,
'O': -75.003291308092,
'C': -37.788641170961,
},
'b3lyp/6-31g(2df,p)': {
'H': -0.500273,
'N': -54.583861,
'O': -75.064579,
'C': -37.846772,
}
}
freq_scale_factors = {
'b3lyp/6-31g(2df,p)': 0.965,
'wb97x-d3/def2-tzvp': 0.975,
}
def get_thermo(optfreq_log, optfreq_level, energy_level, energy_log=None,
mol=None, bacs=None, soc=False,
infer_symmetry=False, infer_chirality=False, unique_id='0', scr_dir='SCRATCH'):
q = QChem(logfile=optfreq_log)
symbols, coords = q.get_geometry()
inertia = q.get_moments_of_inertia()
freqs = q.get_frequencies()
zpe = q.get_zpe()
if energy_log is None:
e0 = q.get_energy()
multiplicity = q.get_multiplicity()
else:
m = Molpro(logfile=energy_log)
e0 = m.get_energy()
multiplicity = m.get_multiplicity()
# Infer connections only if not given explicitly
if mol is None:
mol = geo_to_rmg_mol((symbols, coords)) # Does not contain bond orders
# Try to infer point group to calculate symmetry number and chirality
symmetry = optical_isomers = 1
point_group = None
if infer_symmetry or infer_chirality:
qmdata = QMData(
groundStateDegeneracy=multiplicity, # Only needed to check if valid QMData
numberOfAtoms=len(symbols),
atomicNumbers=[atomic_symbol_dict[sym] for sym in symbols],
atomCoords=(coords, 'angstrom'),
energy=(e0 * 627.5095, 'kcal/mol') # Only needed to avoid error
)
settings = type("", (), dict(symmetryPath='symmetry', scratchDirectory=scr_dir))() # Creates anonymous class
pgc = PointGroupCalculator(settings, unique_id, qmdata)
point_group = pgc.calculate()
if point_group is not None:
if infer_symmetry:
symmetry = point_group.symmetryNumber
if infer_chirality and point_group.chiral:
optical_isomers = 2
# Translational mode
mass = mol.getMolecularWeight()
translation = IdealGasTranslation(mass=(mass, 'kg/mol'))
# Rotational mode
if isinstance(inertia, list): # Nonlinear
rotation = NonlinearRotor(inertia=(inertia, 'amu*angstrom^2'), symmetry=symmetry)
else:
rotation = LinearRotor(inertia=(inertia, 'amu*angstrom^2'), symmetry=symmetry)
# Vibrational mode
freq_scale_factor = freq_scale_factors.get(optfreq_level, 1.0)
freqs = [f * freq_scale_factor for f in freqs]
vibration = HarmonicOscillator(frequencies=(freqs, 'cm^-1'))
# Bring energy to gas phase reference state
e0 *= constants.E_h * constants.Na
zpe *= constants.E_h * constants.Na * freq_scale_factor
for sym in symbols:
if soc:
e0 -= (atom_energies[energy_level][sym] - atom_socs[sym]) * constants.E_h * constants.Na
else:
e0 -= atom_energies[energy_level][sym] * constants.E_h * constants.Na
e0 += (h0expt[sym] - h298corr[sym]) * 4184.0
if bacs is not None:
e0 -= get_bac_correction(mol, **bacs) * 4184.0
# Group modes into Conformer object
modes = [translation, rotation, vibration]
conformer = Conformer(modes=modes, spinMultiplicity=multiplicity, opticalIsomers=optical_isomers)
# Calculate heat of formation, entropy of formation, and heat capacities
conformer.E0 = (e0 + zpe, 'J/mol')
hf298 = conformer.getEnthalpy(298.0) + conformer.E0.value_si
s298 = conformer.getEntropy(298.0)
Tlist = [300.0, 400.0, 500.0, 600.0, 800.0, 1000.0, 1500.0]
cp = np.zeros(len(Tlist))
for i, T in enumerate(Tlist):
cp[i] = conformer.getHeatCapacity(T)
# Return in kcal/mol and cal/mol/K
return hf298/4184.0, s298/4.184, cp/4.184
| 0
| 0
| 0
| 0
| 0
| 3,460
| 0
| 182
| 179
|
985e48804eec16912f6e851e88c72504c2821d39
| 118
|
py
|
Python
|
kokki/cookbooks/pip/recipes/default.py
|
samuel/kokki
|
da98da55e0bba8db5bda993666a43c6fdc4cacdb
|
[
"BSD-3-Clause"
] | 11
|
2015-01-14T00:43:26.000Z
|
2020-12-29T06:12:51.000Z
|
kokki/cookbooks/pip/recipes/default.py
|
samuel/kokki
|
da98da55e0bba8db5bda993666a43c6fdc4cacdb
|
[
"BSD-3-Clause"
] | null | null | null |
kokki/cookbooks/pip/recipes/default.py
|
samuel/kokki
|
da98da55e0bba8db5bda993666a43c6fdc4cacdb
|
[
"BSD-3-Clause"
] | 3
|
2015-01-14T01:05:56.000Z
|
2019-01-26T05:09:37.000Z
|
from kokki import Package
Package("pip",
provider = "kokki.providers.package.easy_install.EasyInstallProvider"
)
| 19.666667
| 73
| 0.779661
|
from kokki import Package
Package("pip",
provider = "kokki.providers.package.easy_install.EasyInstallProvider"
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b8914ec4eb38cef59aa4da0cb839ebb5c3b4206a
| 154
|
py
|
Python
|
siteprefs/signals.py
|
jayvdb/django-siteprefs
|
9cb3026b94a98299d60ccb61baf567b3d0c64a2f
|
[
"BSD-3-Clause"
] | null | null | null |
siteprefs/signals.py
|
jayvdb/django-siteprefs
|
9cb3026b94a98299d60ccb61baf567b3d0c64a2f
|
[
"BSD-3-Clause"
] | null | null | null |
siteprefs/signals.py
|
jayvdb/django-siteprefs
|
9cb3026b94a98299d60ccb61baf567b3d0c64a2f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.dispatch import Signal
prefs_save = Signal(providing_args=['app', 'updated_prefs'])
"""Issued when dynamic preferences models are saved."""
| 25.666667
| 60
| 0.766234
|
from django.dispatch import Signal
prefs_save = Signal(providing_args=['app', 'updated_prefs'])
"""Issued when dynamic preferences models are saved."""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7b427d803546e23638833b9f5efc283b9528d9c6
| 2,779
|
py
|
Python
|
question/migrations/0001_initial.py
|
abrehman90/Student-Portal-LMS-in-Django
|
fe5f338e309deb7aeaa10d9ff5c60fcdc3844ee1
|
[
"MIT"
] | 2
|
2021-09-17T04:10:57.000Z
|
2021-12-15T03:47:21.000Z
|
question/migrations/0001_initial.py
|
abrehman90/Student-Portal-LMS-in-Django
|
fe5f338e309deb7aeaa10d9ff5c60fcdc3844ee1
|
[
"MIT"
] | null | null | null |
question/migrations/0001_initial.py
|
abrehman90/Student-Portal-LMS-in-Django
|
fe5f338e309deb7aeaa10d9ff5c60fcdc3844ee1
|
[
"MIT"
] | 1
|
2021-07-12T06:42:13.000Z
|
2021-07-12T06:42:13.000Z
|
# Generated by Django 3.2.3 on 2021-06-13 05:29
| 44.822581
| 148
| 0.611011
|
# Generated by Django 3.2.3 on 2021-06-13 05:29
import ckeditor.fields
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', ckeditor.fields.RichTextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now_add=True)),
('votes', models.IntegerField(default=0)),
('is_accepted_answer', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Votes',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote', models.CharField(choices=[('U', 'Up Vote'), ('D', 'Down Vote')], max_length=1)),
('date', models.DateTimeField(auto_now_add=True)),
('answer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answer_votes', to='question.answer')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=300)),
('body', ckeditor.fields.RichTextField()),
('created_date', models.DateTimeField(auto_now_add=True)),
('update_date', models.DateTimeField(auto_now_add=True)),
('has_accepted_answer', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='question.question'),
),
migrations.AddField(
model_name='answer',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answer_user', to=settings.AUTH_USER_MODEL),
),
]
| 0
| 0
| 0
| 2,576
| 0
| 0
| 0
| 42
| 112
|
4ca73c7ffad9ba15f8c868f25c0c291b4e13401f
| 1,322
|
py
|
Python
|
unmaintain/benchmark/benchmark_asyncio_postgres.py
|
zuzhi/rssant
|
06d985845f6af3be7097e6d718afba7eeb195ec8
|
[
"BSD-3-Clause"
] | 1,176
|
2019-12-24T01:51:22.000Z
|
2022-03-29T06:00:25.000Z
|
unmaintain/benchmark/benchmark_asyncio_postgres.py
|
zuzhi/rssant
|
06d985845f6af3be7097e6d718afba7eeb195ec8
|
[
"BSD-3-Clause"
] | 33
|
2020-03-06T03:29:46.000Z
|
2022-03-11T06:24:26.000Z
|
unmaintain/benchmark/benchmark_asyncio_postgres.py
|
zuzhi/rssant
|
06d985845f6af3be7097e6d718afba7eeb195ec8
|
[
"BSD-3-Clause"
] | 110
|
2019-12-29T05:49:24.000Z
|
2022-03-28T06:44:21.000Z
|
import asyncio
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
dsn = 'dbname=rssant user=rssant password=rssant host=127.0.0.1'
loop = asyncio.get_event_loop()
loop.run_until_complete(run_aiopg())
loop = asyncio.get_event_loop()
loop.run_until_complete(run_asyncpg())
| 26.44
| 64
| 0.596823
|
import time
import asyncio
import aiopg
import uvloop
import asyncpg
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
dsn = 'dbname=rssant user=rssant password=rssant host=127.0.0.1'
async def run_aiopg():
pool = await aiopg.create_pool(dsn, minsize=5, maxsize=5)
t0 = time.time()
for i in range(1000):
async with pool.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT 1")
ret = []
async for row in cur:
ret.append(row)
assert ret == [(1,)]
print('run_aiopg', time.time() - t0)
pool.close()
await pool.wait_closed()
async def run_asyncpg():
async with asyncpg.create_pool(
user='rssant', password='rssant',
database='rssant', host='127.0.0.1',
command_timeout=60, min_size=5, max_size=5
) as pool:
t0 = time.time()
for i in range(1000):
async with pool.acquire() as conn:
values = await conn.fetch("SELECT 1")
assert values == [(1,)]
print('run_asyncpg', time.time() - t0)
await pool.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(run_aiopg())
loop = asyncio.get_event_loop()
loop.run_until_complete(run_asyncpg())
| 0
| 0
| 937
| 0
| 0
| 0
| 0
| -26
| 112
|
2fcaa636cb43ac39470731a033ff7cb7d8b1c199
| 60,972
|
py
|
Python
|
deepviz.py
|
YilongJu/Implicit-Bias-towards-the-Kernel-RegimeCauses-Mode-Collapse-in-GANs
|
983fcfde19c17b4d61223df8d7433c286db6b3db
|
[
"MIT"
] | null | null | null |
deepviz.py
|
YilongJu/Implicit-Bias-towards-the-Kernel-RegimeCauses-Mode-Collapse-in-GANs
|
983fcfde19c17b4d61223df8d7433c286db6b3db
|
[
"MIT"
] | null | null | null |
deepviz.py
|
YilongJu/Implicit-Bias-towards-the-Kernel-RegimeCauses-Mode-Collapse-in-GANs
|
983fcfde19c17b4d61223df8d7433c286db6b3db
|
[
"MIT"
] | null | null | null |
# use("Qt5Agg")
# use('TkAgg')
import mpl_toolkits.axisartist.floating_axes as floating_axes # Not explicitly used, but necessary
from matplotlib import pyplot as plt
import os
import platform
os.environ['KMP_DUPLICATE_LIB_OK']='True'
plt.rcParams['savefig.facecolor'] = "0.8"
if platform.system() == "Darwin":
print("Using MacOS.")
plt.rcParams['animation.ffmpeg_path'] = "/usr/local/bin/ffmpeg"
elif platform.system() == "Linux":
print("Using Linux.")
plt.rcParams['animation.ffmpeg_path'] = "/usr/bin/ffmpeg"
else:
print("Using Windows.")
plt.rcParams['animation.ffmpeg_path'] = 'C:/Users/juyil/ffmpeg/bin/ffmpeg.exe'
data_folder = "Data"
figures_folder = "Figures"
""" Calculate gaussian kde estimate for a dataset """
""" Create a python generator for a pickle file """
""" Record positio of panels of viz """
fig_size = 6
grid_span = 6
span_figure_r = 3
span_figure_c = 6
#%%
if __name__ == "__main__":
pass
| 61.34004
| 384
| 0.599439
|
from matplotlib import use
# use("Qt5Agg")
# use('TkAgg')
from mpl_toolkits.mplot3d import Axes3D # Not explicitly used, but necessary
from matplotlib.transforms import Affine2D # Not explicitly used, but necessary
import mpl_toolkits.axisartist.floating_axes as floating_axes # Not explicitly used, but necessary
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
from matplotlib.pyplot import *
from matplotlib import animation
from matplotlib import cm
import matplotlib.colors as mc
from scipy.spatial.transform import Rotation
import pylab as pl
import pickle
import os
import platform
import time
import datetime
from BPs import *
from ComputationalTools import *
from utils import Get_models
os.environ['KMP_DUPLICATE_LIB_OK']='True'
plt.rcParams['savefig.facecolor'] = "0.8"
if platform.system() == "Darwin":
print("Using MacOS.")
plt.rcParams['animation.ffmpeg_path'] = "/usr/local/bin/ffmpeg"
elif platform.system() == "Linux":
print("Using Linux.")
plt.rcParams['animation.ffmpeg_path'] = "/usr/bin/ffmpeg"
else:
print("Using Windows.")
plt.rcParams['animation.ffmpeg_path'] = 'C:/Users/juyil/ffmpeg/bin/ffmpeg.exe'
data_folder = "Data"
figures_folder = "Figures"
def Torch_loss_list_val_list(loss_list):
if isinstance(loss_list[0], float):
return loss_list
else:
return [ele.item() for ele in loss_list]
""" Calculate gaussian kde estimate for a dataset """
def kde(mu, tau, bbox=[-5, 5, -5, 5], save_file="", xlabel="", ylabel="", cmap='Blues'):
values = np.vstack([mu, tau])
kernel = stats.gaussian_kde(values)
fig, ax = plt.subplots()
ax.axis(bbox) # set axis range by [xmin, xmax, ymin, ymax]
ax.set_aspect(abs(bbox[1]-bbox[0])/abs(bbox[3]-bbox[2])) # set axis value ratio manually to get equal length
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
xx, yy = np.mgrid[bbox[0]:bbox[1]:300j, bbox[2]:bbox[3]:300j]
positions = np.vstack([xx.ravel(), yy.ravel()])
f = np.reshape(kernel(positions).T, xx.shape)
if save_file != "":
plt.savefig(save_file, bbox_inches='tight')
plt.close(fig)
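# Hedged usage sketch (added comment; the sample arrays and file name are hypothetical):
#   kde(np.random.randn(500), np.random.randn(500), bbox=[-5, 5, -5, 5],
#       save_file="kde_demo.png", xlabel="mu", ylabel="tau")
# evaluates a gaussian_kde of the two samples on a 300x300 grid and saves the
# figure when save_file is non-empty.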
""" Create a python generator for a pickle file """
def Load_all_pickles(title, data_folder=data_folder):
# print("Load_all_pickles title", title)
if platform.system() == "Darwin":
print("Using MacOS.")
elif platform.system() == "Linux":
print("Using Linux.")
else:
print("Using Windows.")
filepath = os.path.join(os.getcwd(), data_folder, title + ".pickle")
if os.path.exists(filepath):
with open(filepath, "rb") as f:
while True:
try:
yield pickle.load(f)
except:
print(f"End of file: {title}")
break
else:
raise ValueError("File not found")
""" Record positio of panels of viz """
fig_size = 6
grid_span = 6
span_figure_r = 3
span_figure_c = 6
#%%
class DeepVisuals_2D():
def __init__(self, args=None, z_mesh=None, x_real=None, xx_D=None, yy_D=None, xx_z=None, yy_z=None, bbox_x=[-5, 5, -5, 5], bbox_z=[-3, 3, -3, 3], name="", z_test=None, attr_seq_name_list=None, handle=None, data_folder=data_folder, dataset=None, device=torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')):
if attr_seq_name_list is None:
attr_seq_name_list = ["iter", "loss_G", "loss_D", "loss_G_tot", "loss_D_tot", "grad_raw_norm_x", "grad_raw_norm_y", "grad_corr_norm_x", "grad_corr_norm_y",
"update_tot_norm_x", "update_tot_norm_y", "wall_time", "phase_factor", "conditioning_factor"]
""" Time-invariant members """
self.attr = {}
self.attr['z_mesh'] = z_mesh
self.attr['x_real'] = x_real
self.attr['xx_D'] = xx_D
self.attr['yy_D'] = yy_D
self.attr['xx_z'] = xx_z
self.attr['yy_z'] = yy_z
self.attr['bbox_x'] = bbox_x
self.attr['bbox_z'] = bbox_z
self.attr['timestamp'] = Now()
self.attr['name'] = name
self.attr['z_test'] = z_test
self.attr['dataset'] = dataset
self.attr['device'] = device
self.attr['args'] = args
""" Time-variant members """
self.attr_seq = {}
for item in attr_seq_name_list:
self.attr_seq[item] = []
""" Utils members """
self.data_container_dict = {}
self.last_contour_plot = None
self.legend_drawn = False
self.cmap = 'Blues'
self.cmap_D = 'Reds'
self.first_frame = True
self.num_parts = 1
self.skip_frame = 1
self.total_frame = 0
self.data_folder = data_folder
self.image_min = None # min pixel intensity for normalizing images
self.image_max = None # max pixel intensity for normalizing images
self.handle = handle
if self.attr["args"] is not None:
if not hasattr(self.attr["args"], "save_path"):
print("self.attr['args']", self.attr['args'])
self.attr['args'].save_path = ""
if not os.path.exists(self.attr['args'].save_path):
os.makedirs(self.attr['args'].save_path)
self.save_file_path = os.path.join(self.attr['args'].save_path, f"{self.attr['name']}_{self.attr['timestamp']}.pickle")
if name != "" and self.handle is None:
self.handle = open(self.save_file_path, "wb")
""" Adding items into data_container """
self.data_container_dict["attr"] = self.attr
self.data_container_dict["attr_seq"] = self.attr_seq
pickle.dump(self.data_container_dict, self.handle)
self.Calculate_max_t()
def Calculate_max_t(self):
if self.attr["args"] is not None:
self.attr["max_iter_div_5"] = self.attr["args"].iteration // 5
if self.attr["max_iter_div_5"] == 0:
self.attr["max_iter_div_5"] += 1
self.attr["max_t"] = self.attr["args"].iteration // self.attr["args"].plot_iter + 1
def Init_figure(self):
self.Calculate_max_t()
self.ims = []
self.ax_dict = {}
self.figure_nrow = span_figure_r
self.figure_ncol = span_figure_c
self.fig = pl.figure(figsize=(self.figure_ncol * fig_size, self.figure_nrow * fig_size))
""" Factors line plot """
""" conditioning factor """
self.ax_dict["conditioning_factor"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span + 4, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
self.ax_dict["conditioning_factor"].set_xlabel(r"Iteration")
self.ax_dict["conditioning_factor"].set_ylabel(r"Value")
""" Phase factor """
self.ax_dict["phase_factor"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span + 2, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
# self.ax_dict["phase_factor"].set_xlabel(r"Value")
self.ax_dict["phase_factor"].set_ylabel(r"Count")
# self.ax_dict["traj_angle"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
# self.ax_dict["traj_angle"].set_xlabel("Iteration")
# self.ax_dict["traj_angle"].set_ylabel("Trajectory Angle")
# self.ax_dict["ref_angle"] = self.ax_dict["traj_angle"].twinx()
# self.ax_dict["ref_angle"].set_ylabel("Reference Angle")
# self.ax_dict["l2_dist_GD"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span + 4, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
# self.ax_dict["l2_dist_GD"].set_xlabel("Wall time (s)")
# self.ax_dict["l2_dist_GD"].set_ylabel("L2 Distance to Params")
# self.ax_dict["grad_angle_GD"] = self.ax_dict["l2_dist_GD"].twinx()
# self.ax_dict["grad_angle_GD"].set_ylabel("Grad Angle")
""" Minimax Criterion """
self.ax_dict["eig_vals_Hyy_g"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
# self.ax_dict["eig_vals_Hyy_g"].set_xlabel(r"Value")
self.ax_dict["eig_vals_Hyy_g"].set_ylabel(r"Count")
# self.ax_dict["eig_vals_Hyy_g"].set_title(r"Histogram of $\lambda$(H_{DD}) or $\lambda$(H_{yy})")
self.ax_dict["minimax_eig_2"] = self.ax_dict["eig_vals_Hyy_g"].twinx()
""" Maximin Criterion """
self.ax_dict["eig_vals_Hxx_f"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span + 4, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
# self.ax_dict["eig_vals_Hxx_f"].set_xlabel(r"Value")
self.ax_dict["eig_vals_Hxx_f"].set_ylabel(r"Count")
# self.ax_dict["eig_vals_Hxx_f"].set_title(r"Histogram of $\lambda$(H_{GG}) or $\lambda$(H_{xx})")
self.ax_dict["minimax_eig_1"] = self.ax_dict["eig_vals_Hxx_f"].twinx()
""" Grad norm curve """
self.ax_dict["grad_norm"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span + 2, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
self.ax_dict["grad_norm"].set_xlabel(r"Iteration")
self.ax_dict["grad_norm"].set_ylabel(r"$||\nabla_x f||_2$, $||\nabla_y g||_2$")
""" Grad norm curve """
self.ax_dict["grad_corr_norm"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span + 2, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
self.ax_dict["grad_corr_norm"].set_xlabel("Wall time (s)")
self.ax_dict["grad_corr_norm"].set_ylabel(r"Grad Correction Norm")
# self.ax_dict["l2_dist"] = self.ax_dict["grad_corr_norm"].twinx()
# self.ax_dict["l2_dist"].set_ylabel(r"$||\theta_T - \theta_t||_2$")
""" Learning curve """
self.ax_dict["loss_G"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span, 0 * grid_span), rowspan=2, colspan=2 * grid_span)
self.ax_dict["loss_G"].set_xlabel(r"Iteration")
self.ax_dict["loss_G"].set_ylabel(r"Loss_G")
self.ax_dict["loss_D"] = self.ax_dict["loss_G"].twinx()
self.ax_dict["loss_D"].set_ylabel(r"Loss_D")
if self.attr['args'].divergence == "standard":
self.opt_loss_G_ref_val = np.log(2)
else:
self.opt_loss_G_ref_val = -np.log(2)
""" Eigenvalue histogram """
# self.ax_dict["eig_mod"] = plt.subplot2grid((self.figure_nrow * grid_size, self.figure_ncol * grid_size), (0 * grid_size + 3, 2 * grid_size), rowspan=3, colspan=1 * grid_size)
# # self.ax_dict["eig_mod"].set_xlabel(r"Value")
# self.ax_dict["eig_mod"].set_ylabel(r"Count")
# # self.ax_dict["eig_mod"].set_title(r"Histogram of $\lambda$")
# self.ax_dict["eig_real"] = plt.subplot2grid((self.figure_nrow * grid_size, self.figure_ncol * grid_size), (0 * grid_size + 3, 3 * grid_size), rowspan=3, colspan=1 * grid_size)
# # self.ax_dict["eig_real"].set_xlabel(r"Value")
# self.ax_dict["eig_real"].set_ylabel(r"Count")
# # self.ax_dict["eig_real"].set_title(r"Histogram of $\lambda$")
# self.ax_dict["eig_imag"] = plt.subplot2grid((self.figure_nrow * grid_size, self.figure_ncol * grid_size), (0 * grid_size + 3, 4 * grid_size), rowspan=3, colspan=1 * grid_size)
# # self.ax_dict["eig_imag"].set_xlabel(r"Value")
# self.ax_dict["eig_imag"].set_ylabel(r"Count")
# # self.ax_dict["eig_imag"].set_title(r"Histogram of $\lambda$")
""" Eigenvalue scatter """
# self.ax_dict["eig"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span, 5 * grid_span), rowspan=1 * grid_span,
# colspan=1 * grid_span)
# self.ax_dict["eig"].set_xlabel(r"$\Re(\lambda)$")
# self.ax_dict["eig"].set_ylabel(r"$\Im(\lambda)$")
# self.ax_dict["eig"].add_artist(Circle((0, 0), 1, color="#00FF00", fill=False))
# self.ax_dict["eig"].set_aspect("equal")
if self.attr['args'].data in ["mnist", "cifar"]:
self.show_num = 32
col_num = 8
for i in range(self.show_num):
row_idx = i // col_num
col_idx = i % col_num
self.ax_dict[f"out_{i}"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span + 3 * row_idx, 2 * grid_span + 3 * col_idx), rowspan=3, colspan=3)
self.ax_dict[f"out_{i}"].set_xlabel(None)
self.ax_dict[f"out_{i}"].set_ylabel(None)
self.ax_dict[f"out_{i}"].set_xticklabels([])
self.ax_dict[f"out_{i}"].set_yticklabels([])
self.ax_dict[f"out_{i}"].xaxis.set_visible(False)
self.ax_dict[f"out_{i}"].yaxis.set_visible(False)
else:
""" Input plot """
self.ax_dict["in"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span, 4 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span)
self.ax_dict["in"].axis(self.attr["bbox_z"]) # set axis range by [xmin, xmax, ymin, ymax]
self.ax_dict["in"].set_aspect(abs(self.attr["bbox_z"][1] - self.attr["bbox_z"][0]) / abs(
self.attr["bbox_z"][3] - self.attr["bbox_z"][2])) # set axis value ratio manually to get equal length
self.ax_dict["in"].set_xlabel(r"$z_1$")
self.ax_dict["in"].set_ylabel(r"$z_2$")
# plasma, winter, RdPu
self.z_color_map = "plasma"
self.z_color_map_val = np.abs(self.attr["z_mesh"][:, 0]) + np.abs(self.attr["z_mesh"][:, 1])
self.z_color_map_val = np.angle(self.attr["z_mesh"][:, 0] + 1j * self.attr["z_mesh"][:, 1]) # np.abs(self.attr["z_mesh"][:, 0]) + np.abs(self.attr["z_mesh"][:, 1])
self.ax_dict["in"].scatter(self.attr["z_mesh"][:, 0], self.attr["z_mesh"][:, 1], linewidth=4.0, alpha=0.8, cmap=self.z_color_map, c=self.z_color_map_val)
self.ax_dict["in"].scatter(self.attr["z_test"][:, 0], self.attr["z_test"][:, 1], linewidth=1.0, alpha=0.7, c="#00FFFF")
""" U plot """
self.ax_dict["U"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span, 5 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span, projection='3d')
self.ax_dict["U"].set_xlabel(r"$z_1$")
self.ax_dict["U"].set_ylabel(r"$z_2$")
self.ax_dict["U"].set_zlabel(r"$U(z)$")
self.ax_dict["U"].set_xlim(self.attr["bbox_z"][0], self.attr["bbox_z"][1])
self.ax_dict["U"].set_ylim(self.attr["bbox_z"][2], self.attr["bbox_z"][3])
self.ax_dict["U"].set_zlim(-50, 50)
""" Gz1 plot """
self.ax_dict["G_z_1"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span, 3 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span, projection='3d')
self.ax_dict["G_z_1"].set_xlabel(r"$x_1$, $G(z)_1$")
self.ax_dict["G_z_1"].set_ylabel(r"$z_1$")
self.ax_dict["G_z_1"].set_zlabel(r"$z_2$")
self.ax_dict["G_z_1"].set_xlim(self.attr["bbox_x"][0], self.attr["bbox_x"][1])
self.ax_dict["G_z_1"].set_ylim(self.attr["bbox_z"][0], self.attr["bbox_z"][1])
self.ax_dict["G_z_1"].set_zlim(self.attr["bbox_z"][2], self.attr["bbox_z"][3])
""" Gz2 plot """
self.ax_dict["G_z_2"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span, 4 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span, projection='3d')
self.ax_dict["G_z_2"].set_xlabel(r"$z_1$")
self.ax_dict["G_z_2"].set_ylabel(r"$z_2$")
self.ax_dict["G_z_2"].set_zlabel(r"$x_2$, $G(z)_2$")
self.ax_dict["G_z_2"].set_xlim(self.attr["bbox_z"][0], self.attr["bbox_z"][1])
self.ax_dict["G_z_2"].set_ylim(self.attr["bbox_z"][2], self.attr["bbox_z"][3])
self.ax_dict["G_z_2"].set_zlim(self.attr["bbox_x"][2], self.attr["bbox_x"][3])
""" Output plot with D contour"""
self.ax_dict["out"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span, 3 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span)
self.ax_dict["out"].scatter(self.attr["x_real"][:, 0], self.attr["x_real"][:, 1], color="#000000", linewidth=2.0, alpha=1)
self.ax_dict["out"].axis(self.attr["bbox_x"]) # set axis range by [xmin, xmax, ymin, ymax]
self.ax_dict["out"].set_aspect(abs(self.attr["bbox_x"][1] - self.attr["bbox_x"][0]) / abs(
self.attr["bbox_x"][3] - self.attr["bbox_x"][2])) # set axis value ratio manually to get equal length
self.ax_dict["out"].set_xlabel(r"$x_1$, $G(z)_1$")
self.ax_dict["out"].set_ylabel(r"$x_2$, $G(z)_2$")
""" Output plot with kde estimate"""
self.ax_dict["data"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span, 2 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span)
# self.ax_dict["data"].scatter(self.attr["x_real"][:, 0], self.attr["x_real"][:, 1], color="#000000", linewidth=2.0, alpha=0.7)
self.ax_dict["data"].axis(self.attr["bbox_x"]) # set axis range by [xmin, xmax, ymin, ymax]
self.ax_dict["data"].set_aspect(abs(self.attr["bbox_x"][1] - self.attr["bbox_x"][0]) / abs(self.attr["bbox_x"][3] - self.attr["bbox_x"][2])) # set axis value ratio manually to get equal length
self.ax_dict["data"].set_xlabel(r"$x_1$, $G(z)_1$")
self.ax_dict["data"].set_ylabel(r"$x_2$, $G(z)_2$")
""" BP plots """
""" Generator """
self.ax_dict["delta_slope_G_z_2"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span + 4, 5 * grid_span), rowspan=2, colspan=1 * grid_span)
self.ax_dict["delta_slope_G_z_1"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span + 2, 5 * grid_span), rowspan=2, colspan=1 * grid_span)
self.ax_dict["signed_distance_G"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span, 5 * grid_span), rowspan=2, colspan=1 * grid_span)
self.ax_dict["BP_G_z_1"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (2 * grid_span, 2 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span)
self.ax_dict["BP_G_z_1"].set_xlabel(r"$z_1$")
self.ax_dict["BP_G_z_1"].set_ylabel(r"$z_2$")
self.ax_dict["BP_G_z_1"].axis(self.attr["bbox_z"])
self.ax_dict["BP_G_z_1"].set_aspect(abs(self.attr["bbox_z"][1] - self.attr["bbox_z"][0]) / abs(self.attr["bbox_z"][3] - self.attr["bbox_z"][2]))
self.ax_dict["BP_G_z_2"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (1 * grid_span, 5 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span)
self.ax_dict["BP_G_z_2"].set_xlabel(r"$z_1$")
self.ax_dict["BP_G_z_2"].set_ylabel(r"$z_2$")
self.ax_dict["BP_G_z_2"].axis(self.attr["bbox_z"])
self.ax_dict["BP_G_z_2"].set_aspect(abs(self.attr["bbox_z"][1] - self.attr["bbox_z"][0]) / abs(self.attr["bbox_z"][3] - self.attr["bbox_z"][2]))
""" Discriminator """
self.ax_dict["delta_slope_D"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span + 3, 4 * grid_span), rowspan=3, colspan=1 * grid_span)
self.ax_dict["signed_distance_D"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span, 4 * grid_span), rowspan=3, colspan=1 * grid_span)
self.ax_dict["BP_D_x"] = plt.subplot2grid((self.figure_nrow * grid_span, self.figure_ncol * grid_span), (0 * grid_span, 3 * grid_span), rowspan=1 * grid_span, colspan=1 * grid_span)
self.ax_dict["BP_D_x"].axis(self.attr["bbox_x"])
self.ax_dict["BP_D_x"].set_xlabel(r"$x_1$")
self.ax_dict["BP_D_x"].set_ylabel(r"$x_2$")
self.ax_dict["BP_D_x"].set_aspect(abs(self.attr["bbox_x"][1] - self.attr["bbox_x"][0]) / abs(self.attr["bbox_x"][3] - self.attr["bbox_x"][2]))
self.fig.set_tight_layout(True)
        print("Figure initialized.")
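    # Plot_step either streams one iteration's data dict (idd) to an open pickle
    # handle (live-training mode, loading=False) or turns it into one frame of
    # matplotlib artists appended to self.ims (replay mode, loading=True).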
def Plot_step(self, idd, loading=False):
toc = time.time()
if not loading:
""" Adding items into data_container """
if self.handle is not None:
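                # Drop the heavyweight objects stored under "G" and "D" (presumably the
                # live model objects) before pickling; the state dicts and scalar
                # metrics kept in idd are what the replay path below uses.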
del idd["G"]
del idd["D"]
pickle.dump(idd, self.handle)
if idd["iter"] % self.attr["max_iter_div_5"] == 0:
plot_time = time.time() - toc
print(f"> Mid [iter {idd['iter']} / {self.attr['args'].iteration}], plot time taken: {plot_time:.3f}")
return
for item in self.attr_seq:
val = idd.get(item, None)
self.attr_seq[item].append(val)
line_animated = True
imgs = []
""" ====================== Optional Viz ====================== """
if self.attr['args'].data in ["mnist", "cifar"]:
for i in range(self.show_num):
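                # Image data: show a grid of generated samples; MNIST outputs are
                # min-max normalised with the global range determined in Load_data.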
if self.attr['args'].data in ["mnist"]:
image_normed = (idd['x_out'][i, 0, ...] - self.image_min) / (self.image_max - self.image_min)
image_show_i = self.ax_dict[f"out_{i}"].imshow(image_normed, cmap=cm.get_cmap("gray"))
elif self.attr['args'].data in ["cifar"]:
# print(np.transpose(idd['x_out'][i, ...], (1, 2, 0)))
image_show_i = self.ax_dict[f"out_{i}"].imshow(np.transpose(idd['x_out'][i, ...], (1, 2, 0)))
else:
raise NotImplementedError
imgs.append(image_show_i)
else:
try:
""" Compute density for G(z) """
kernel = stats.gaussian_kde(idd['x_out'].T)
xx_x, yy_x = np.mgrid[self.attr["bbox_x"][0]:self.attr["bbox_x"][1]:50j, self.attr["bbox_x"][2]:self.attr["bbox_x"][3]:50j]
positions_z = np.vstack([xx_x.ravel(), yy_x.ravel()])
G_z_density_surf = np.reshape(kernel(positions_z).T, xx_x.shape)
""" Contour for G density @ output"""
cfset = self.ax_dict["data"].contourf(xx_x, yy_x, G_z_density_surf, cmap=self.cmap, alpha=0.8)
imgs.extend(cfset.collections)
            except Exception as err:
                print(f"KDE estimate for G(z) failed: {err}")
""" Contour for D @ output """
D_prob_grid = 1 / (1 + np.exp(-idd['D_output_grid']))
cfset_D = self.ax_dict["out"].contourf(self.attr['xx_D'], self.attr['yy_D'], D_prob_grid, alpha=0.3, levels=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], colors=["#110000", "#440000", "#770000", "#AA0000", "#DD0000", "#00FF00", "#00DD00", "#00AA00", "#007700", "#004400", "#001100"])
imgs.extend(cfset_D.collections)
""" z_test scatter """
if idd.get('x_fake_mesh_vec_out', None) is not None:
""" G(z) mesh """
G_z_mesh_scatter = self.ax_dict["out"].scatter(idd['x_fake_mesh_vec_out'][:, 0], idd['x_fake_mesh_vec_out'][:, 1], linewidth=3.0, alpha=0.3, cmap=self.z_color_map, c=self.z_color_map_val)
imgs.append(G_z_mesh_scatter)
""" Scatter for G output """
x_out_scatter = self.ax_dict["data"].scatter(idd['x_out'][:, 0], idd['x_out'][:, 1], linewidth=1.5, alpha=0.5, c="#00FFFF")
imgs.append(x_out_scatter)
# G_z_1_view_elev = -60
# G_z_1_view_azim = 75
G_z_1_view_elev = None # View angle for 3D plots
G_z_1_view_azim = None
is_brenier = False
if hasattr(self.attr["args"], "brenier"):
if self.attr["args"].brenier:
is_brenier = True
if idd.get('state_dict_G', None) is not None and is_brenier:
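            # Brenier mode: rebuild G from its state dict and plot the learned scalar
            # potential U(z) over the latent grid.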
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
G, D = Get_models(self.attr["args"], None)
G = G.to(device)
G.load_state_dict(idd["state_dict_G"])
U_mesh_vec_out = G(torch.from_numpy(self.attr["z_mesh"]).to(device).float()).cpu().detach().numpy()
""" G(z)_2 scatter """
U_scatter = self.ax_dict["U"].scatter(self.attr["z_mesh"][:, 0], self.attr["z_mesh"][:, 1], U_mesh_vec_out, linewidth=4.0, alpha=0.8, cmap=self.z_color_map, c=self.z_color_map_val)
imgs.append(U_scatter)
stride = 5
U_grid = np.reshape(U_mesh_vec_out.T, self.attr['xx_z'].shape)
U_wireframe = self.ax_dict["U"].plot_wireframe(self.attr['xx_z'], self.attr['yy_z'], U_grid, rstride=stride, cstride=stride)
imgs.append(U_wireframe)
if idd.get('x_fake_mesh_vec_out', None) is not None:
# print("x_fake_mesh_vec_out")
rot_z_1 = Rotation.from_rotvec([np.pi / 2, 0, 0])
rot_z_2 = Rotation.from_rotvec([0, np.pi / 2, 0])
point_cloud = np.vstack([self.attr["z_mesh"][:, 0], self.attr["z_mesh"][:, 1], idd['x_fake_mesh_vec_out'][:, 0]]).T
point_cloud_rotated = rot_z_1.apply(rot_z_2.apply(point_cloud))
""" G(z)_1 scatter """
G_z_1_scatter = self.ax_dict["G_z_1"].scatter(point_cloud_rotated[:, 0], point_cloud_rotated[:, 1], point_cloud_rotated[:, 2], linewidth=4.0, alpha=0.8, cmap=self.z_color_map, c=self.z_color_map_val)
self.ax_dict["G_z_1"].view_init(elev=G_z_1_view_elev, azim=G_z_1_view_azim)
imgs.append(G_z_1_scatter)
""" G(z)_2 scatter """
G_z_2_scatter = self.ax_dict["G_z_2"].scatter(self.attr["z_mesh"][:, 0], self.attr["z_mesh"][:, 1], idd['x_fake_mesh_vec_out'][:, 1], linewidth=4.0, alpha=0.8, cmap=self.z_color_map, c=self.z_color_map_val)
imgs.append(G_z_2_scatter)
stride = 5
""" G(z)_1 wireframe """
xx_z_rotated = np.reshape(point_cloud_rotated[:, 0].T, self.attr['xx_z'].shape)
yy_z_rotated = np.reshape(point_cloud_rotated[:, 1].T, self.attr['yy_z'].shape)
x_fake_1_grid_rotated = np.reshape(point_cloud_rotated[:, 2].T, self.attr['xx_z'].shape)
G_z_1_wireframe = self.ax_dict["G_z_1"].plot_wireframe(xx_z_rotated, yy_z_rotated, x_fake_1_grid_rotated, rstride=stride, cstride=stride)
imgs.append(G_z_1_wireframe)
""" G(z)_2 wireframe """
G_z_2_wireframe = self.ax_dict["G_z_2"].plot_wireframe(self.attr['xx_z'], self.attr['yy_z'], idd['x_fake_2_grid'], rstride=stride, cstride=stride)
imgs.append(G_z_2_wireframe)
""" Calculate BP parameters """
if self.attr['args'].arch != "mlp" or is_brenier:
is_pure_mlp = False
else:
is_pure_mlp = True
if idd.get('state_dict_G', None) is not None and is_brenier:
BP_directions_G, BP_signed_distances_G, BP_delta_slopes_G = Get_BP_params(idd['state_dict_G']["hidden_layer.weight"], idd['state_dict_G']["hidden_layer.bias"], idd['state_dict_G']["output_layer.weight"])
BP_delta_slopes_G = BP_delta_slopes_G.ravel()
_, _, delta_slope_G_z_1_hist = self.ax_dict["delta_slope_G_z_1"].hist(BP_delta_slopes_G[np.isfinite(BP_delta_slopes_G)], animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\mu_G^{(1)}$", bins=30)
imgs.extend(delta_slope_G_z_1_hist)
_, _, signed_distance_G_hist = self.ax_dict["signed_distance_G"].hist(BP_signed_distances_G[np.isfinite(BP_signed_distances_G)], animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\gamma_G$", bins=30)
imgs.extend(signed_distance_G_hist)
G_hidden_layer_weights_np = idd['state_dict_G']["hidden_layer.weight"].cpu().numpy()
G_hidden_layer_biases_np = idd['state_dict_G']["hidden_layer.bias"].cpu().numpy()
G_alt_act_num = 0
if self.attr['args'] is not None:
if hasattr(self.attr['args'], "alt_act_prop"):
if self.attr['args'].alt_act_prop is not None:
G_alt_act_num = np.floor(self.attr['args'].alt_act_prop * self.attr['args'].g_hidden).astype(int)
else:
setattr(self.attr['args'], "alt_act_prop", None)
for i, (w, b) in enumerate(zip(G_hidden_layer_weights_np, G_hidden_layer_biases_np)):
BP_line_points = Get_2D_line_points(w, b, plot_lim=self.attr['args'].plot_lim_z)
if i == G_alt_act_num - 1:
BP_label = "G alt BP"
elif i == G_alt_act_num:
BP_label = "G BP"
else:
BP_label = ""
BP_line_plot, = self.ax_dict["U"].plot(BP_line_points[:, 0], BP_line_points[:, 1], np.ones_like(BP_line_points[:, 0]) * (-50), '-', linewidth=1, color="#00FF00" if i < G_alt_act_num else Get_diverging_color(BP_delta_slopes_G[i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
BP_line_plot, = self.ax_dict["BP_G_z_2"].plot(BP_line_points[:, 0], BP_line_points[:, 1], '-', linewidth=1, color="#00FF00" if i < G_alt_act_num else Get_diverging_color(BP_delta_slopes_G[i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
if idd.get('state_dict_G', None) is not None and is_pure_mlp:
BP_directions_G, BP_signed_distances_G, BP_delta_slopes_G = Get_BP_params(idd['state_dict_G']["hidden_layer.weight"], idd['state_dict_G']["hidden_layer.bias"], idd['state_dict_G']["output_layer.weight"])
_, _, delta_slope_G_z_1_hist = self.ax_dict["delta_slope_G_z_1"].hist(BP_delta_slopes_G[0, :][np.isfinite(BP_delta_slopes_G[0, :])], animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\mu_G^{(1)}$", bins=30)
imgs.extend(delta_slope_G_z_1_hist)
_, _, delta_slope_G_z_2_hist = self.ax_dict["delta_slope_G_z_2"].hist(BP_delta_slopes_G[1, :][np.isfinite(BP_delta_slopes_G[1, :])], animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\mu_G^{(2)}$", bins=30)
imgs.extend(delta_slope_G_z_2_hist)
_, _, signed_distance_G_hist = self.ax_dict["signed_distance_G"].hist(BP_signed_distances_G[np.isfinite(BP_signed_distances_G)], animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\gamma_G$", bins=30)
imgs.extend(signed_distance_G_hist)
G_hidden_layer_weights_np = idd['state_dict_G']["hidden_layer.weight"].cpu().numpy()
G_hidden_layer_biases_np = idd['state_dict_G']["hidden_layer.bias"].cpu().numpy()
G_alt_act_num = 0
if self.attr['args'] is not None:
if hasattr(self.attr['args'], "alt_act_prop"):
if self.attr['args'].alt_act_prop is not None:
G_alt_act_num = np.floor(self.attr['args'].alt_act_prop * self.attr['args'].g_hidden).astype(int)
else:
setattr(self.attr['args'], "alt_act_prop", None)
for i, (w, b) in enumerate(zip(G_hidden_layer_weights_np, G_hidden_layer_biases_np)):
BP_line_points = Get_2D_line_points(w, b, plot_lim=self.attr['args'].plot_lim_z)
if i == G_alt_act_num - 1:
BP_label = "G alt BP"
elif i == G_alt_act_num:
BP_label = "G BP"
else:
BP_label = ""
BP_line_plot, = self.ax_dict["G_z_1"].plot(np.ones_like(BP_line_points[:, 0]) * self.attr["bbox_x"][0], BP_line_points[:, 0], BP_line_points[:, 1], '-', linewidth=1, color="#00FF00" if i < G_alt_act_num else Get_diverging_color(BP_delta_slopes_G[0, i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
BP_line_plot, = self.ax_dict["G_z_2"].plot(BP_line_points[:, 0], BP_line_points[:, 1], np.ones_like(BP_line_points[:, 0]) * self.attr["bbox_x"][0], '-', linewidth=1, color="#00FF00" if i < G_alt_act_num else Get_diverging_color(BP_delta_slopes_G[1, i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
BP_line_plot, = self.ax_dict["BP_G_z_1"].plot(BP_line_points[:, 0], BP_line_points[:, 1], '-', linewidth=1, color="#00FF00" if i < G_alt_act_num else Get_diverging_color(BP_delta_slopes_G[0, i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
BP_line_plot, = self.ax_dict["BP_G_z_2"].plot(BP_line_points[:, 0], BP_line_points[:, 1], '-', linewidth=1, color="#00FF00" if i < G_alt_act_num else Get_diverging_color(BP_delta_slopes_G[1, i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
if idd.get('state_dict_D', None) is not None:
BP_directions_D, BP_signed_distances_D, BP_delta_slopes_D = Get_BP_params(idd['state_dict_D']["hidden_layer.weight"], idd['state_dict_D']["hidden_layer.bias"], idd['state_dict_D']["output_layer.weight"])
_, _, delta_slope_D_hist = self.ax_dict["delta_slope_D"].hist(BP_delta_slopes_D.ravel()[np.isfinite(BP_delta_slopes_D.ravel())], animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\mu_D$", bins=30)
imgs.extend(delta_slope_D_hist)
BP_signed_distances_D_nona = BP_signed_distances_D[np.isfinite(BP_signed_distances_D)]
_, _, signed_distance_D_hist = self.ax_dict["signed_distance_D"].hist(BP_signed_distances_D_nona, animated=True, density=False, color="#000000", alpha=0.7, log=False, label=r"$\gamma_D$", bins=30)
imgs.extend(signed_distance_D_hist)
D_hidden_layer_weights_np = idd['state_dict_D']["hidden_layer.weight"].cpu().numpy()
D_hidden_layer_biases_np = idd['state_dict_D']["hidden_layer.bias"].cpu().numpy()
for i, (w, b) in enumerate(zip(D_hidden_layer_weights_np, D_hidden_layer_biases_np)):
BP_line_points = Get_2D_line_points(w, b, plot_lim=self.attr['args'].plot_lim_x)
BP_label = r"D BPs" if i == 0 else None
BP_line_plot, = self.ax_dict["BP_D_x"].plot(BP_line_points[:, 0], BP_line_points[:, 1], '-', linewidth=1, color=Get_diverging_color(BP_delta_slopes_D[0, i]), animated=True, label=BP_label, alpha=0.7)
imgs.append(BP_line_plot)
""" ====================== Generic Viz ====================== """
""" Eigenvalues scatter """
if (idd.get('eig_vals_Hxx_f', None) is not None) and (idd.get('eig_vals_Hyy_g', None) is not None):
bin_num = "sqrt"
Hxx_f_sym = r"$\lambda(H_{xx}f)$"
Hyy_g_sym = r"$\lambda(H_{yy}g)$"
if len(idd['eig_vals_Hxx_f']) <= 20:
gg_eig_vals_bar = self.ax_dict["eig_vals_Hxx_f"].bar(x=list(range(1, 1 + len(idd['eig_vals_Hxx_f']))), height=np.sort(idd['eig_vals_Hxx_f'].real), color="#2222DD", alpha=0.7, label=Hxx_f_sym, log=False, width=0.6)
imgs.extend(gg_eig_vals_bar)
self.ax_dict["eig_vals_Hxx_f"].set_ylabel("Magnitude")
else:
_, _, gg_eig_vals_hist = self.ax_dict["eig_vals_Hxx_f"].hist(idd['eig_vals_Hxx_f'].real, animated=True, bins=bin_num, density=False, color="#2222DD", alpha=0.7, log=True, label=Hxx_f_sym)
imgs.extend(gg_eig_vals_hist)
gg_eig_vals_hist_text = self.ax_dict["eig_vals_Hxx_f"].text(0.75, 0.5, f"{Hxx_f_sym}\nmax: {np.max(idd['eig_vals_Hxx_f'].real):.4f}\nmin: {np.min(idd['eig_vals_Hxx_f'].real):.4f}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["eig_vals_Hxx_f"].transAxes, fontsize=13)
imgs.append(gg_eig_vals_hist_text)
if len(idd['eig_vals_Hyy_g']) <= 20:
dd_eig_vals_bar = self.ax_dict["eig_vals_Hyy_g"].bar(x=list(range(1, 1 + len(idd['eig_vals_Hyy_g']))), height=np.sort(idd['eig_vals_Hyy_g'].real), color="#DD22DD", alpha=0.7, label=Hyy_g_sym, log=False, width=0.6)
imgs.extend(dd_eig_vals_bar)
self.ax_dict["eig_vals_Hyy_g"].set_ylabel("Magnitude")
else:
_, _, dd_eig_vals_hist = self.ax_dict["eig_vals_Hyy_g"].hist(idd['eig_vals_Hyy_g'].real, animated=True, bins=bin_num, density=False, color="#DD22DD", alpha=0.7, log=True, label=Hyy_g_sym)
imgs.extend(dd_eig_vals_hist)
dd_eig_vals_hist_text = self.ax_dict["eig_vals_Hyy_g"].text(0.75, 0.5, f"{Hyy_g_sym}\nmax: {np.max(idd['eig_vals_Hyy_g'].real):.4f}\nmin: {np.min(idd['eig_vals_Hyy_g'].real):.4f}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["eig_vals_Hyy_g"].transAxes, fontsize=13)
imgs.append(dd_eig_vals_hist_text)
if idd.get('eig_vals_Hxx_f_Schur', None) is not None:
mc1_sym = r"$\lambda(H_{yy}g - H_{yx}g H_{xx}^{-1}f H_{xy}f)$"
# print("idd['eig_vals_Hxx_f_Schur']", idd['eig_vals_Hxx_f_Schur'])
if len(idd['eig_vals_Hxx_f_Schur']) <= 20:
minimax_eig_1_bar = self.ax_dict["eig_vals_Hxx_f"].bar(x=list(range(1, 1 + len(idd['eig_vals_Hxx_f_Schur']))), height=np.sort(idd['eig_vals_Hxx_f_Schur'].real), color="#222222", alpha=0.7, label=mc1_sym, log=False, width=0.3)
imgs.extend(minimax_eig_1_bar)
else:
_, _, minimax_eig_1_hist = self.ax_dict["minimax_eig_1"].hist(idd['eig_vals_Hxx_f_Schur'].real, animated=True, bins=bin_num, density=False, color="#222222", alpha=0.4, log=True, label=mc1_sym)
imgs.extend(minimax_eig_1_hist)
minimax_eig_1_hist_text = self.ax_dict["minimax_eig_1"].text(0.25, 0.5, f"{mc1_sym}\nmax: {np.max(idd['eig_vals_Hxx_f_Schur'].real):.4f}\nmin: {np.min(idd['eig_vals_Hxx_f_Schur'].real):.4f}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["eig_vals_Hxx_f"].transAxes, fontsize=13)
imgs.append(minimax_eig_1_hist_text)
if idd.get('eig_vals_Hyy_g_Schur', None) is not None:
mc2_sym = r"$\lambda(H_{xx}f - H_{xy}f H_{yy}^{-1}g H_{yx}g)$"
# print("idd['eig_vals_Hyy_g_Schur']", idd['eig_vals_Hyy_g_Schur'])
if len(idd['eig_vals_Hyy_g_Schur']) <= 20:
minimax_eig_2_bar = self.ax_dict["eig_vals_Hyy_g"].bar(x=list(range(1, 1 + len(idd['eig_vals_Hyy_g_Schur']))), height=np.sort(idd['eig_vals_Hyy_g_Schur'].real), color="#222222", alpha=0.7, label=mc2_sym, log=False, width=0.3)
imgs.extend(minimax_eig_2_bar)
else:
_, _, minimax_eig_2_hist = self.ax_dict["minimax_eig_2"].hist(idd['eig_vals_Hyy_g_Schur'].real, animated=True, bins=bin_num, density=False, color="#222222", alpha=0.4, log=True, label=mc2_sym)
imgs.extend(minimax_eig_2_hist)
minimax_eig_2_hist_text = self.ax_dict["minimax_eig_2"].text(0.25, 0.5, f"{mc2_sym}\nmax: {np.max(idd['eig_vals_Hyy_g_Schur'].real):.4f}\nmin: {np.min(idd['eig_vals_Hyy_g_Schur'].real):.4f}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["eig_vals_Hyy_g"].transAxes, fontsize=13)
imgs.append(minimax_eig_2_hist_text)
if idd.get('eig_vals_J', None) is not None:
if self.attr['args'].data not in ["mnist", "cifar"]:
eig_vals_scatter = self.ax_dict["eig"].scatter(idd['eig_vals_J'].real, idd['eig_vals_J'].imag, color="#000000", alpha=0.3)
imgs.append(eig_vals_scatter)
""" Eigenvalues histogram """
bin_num = "sqrt"
eig_vals_modula = np.absolute(idd['eig_vals_J'])
# _, _, eig_vals_modula_hist = self.ax_dict["eig_mod"].hist(eig_vals_modula, animated=True, bins=bin_num,
# density=False, color="#000000", alpha=0.7,
# log=True, label=r"$||\lambda||$")
# imgs.extend(eig_vals_modula_hist)
#
# eig_vals_modula_text = self.ax_dict["eig_mod"].text(0.7, 0.5, f"max: {np.max(eig_vals_modula):.4f}\nmin: {np.min(eig_vals_modula):.4f}", {"ha": "center", "va": "center"},
# horizontalalignment="left", verticalalignment="top",
# transform=self.ax_dict["eig_mod"].transAxes, fontsize=13)
# imgs.append(eig_vals_modula_text)
#
#
# _, _, eig_vals_real_hist = self.ax_dict["eig_real"].hist(idd['eig_vals_J'].real, animated=True, bins=bin_num,
# density=False, color="#22DD22", alpha=0.6,
# log=True, label=r"$\Re(\lambda)$")
# imgs.extend(eig_vals_real_hist)
#
# eig_vals_real_text = self.ax_dict["eig_real"].text(0.7, 0.5, f"max: {np.max(idd['eig_vals_J'].real):.4f}\nmin: {np.min(idd['eig_vals_J'].real):.4f}", {"ha": "center", "va": "center"},
# horizontalalignment="left", verticalalignment="top",
# transform=self.ax_dict["eig_real"].transAxes, fontsize=13)
# imgs.append(eig_vals_real_text)
#
#
# _, _, eig_vals_imag_hist = self.ax_dict["eig_imag"].hist(idd['eig_vals_J'].imag, animated=True, bins=bin_num,
# density=False, color="#DD2222", alpha=0.6,
# log=True, label=r"$\Im(\lambda)$")
# imgs.extend(eig_vals_imag_hist)
#
# eig_vals_imag_text = self.ax_dict["eig_imag"].text(0.7, 0.5, f"max: {np.max(idd['eig_vals_J'].imag):.4f}\nmin: {np.min(idd['eig_vals_J'].imag):.4f}", {"ha": "center", "va": "center"},
# horizontalalignment="left", verticalalignment="top",
# transform=self.ax_dict["eig_imag"].transAxes, fontsize=13)
# imgs.append(eig_vals_imag_text)
""" Histogram for phase factor """
phase_factor_list = np.nan_to_num(np.abs(idd['eig_vals_J'].imag / idd['eig_vals_J'].real))
_, _, phase_factor_hist = self.ax_dict["phase_factor"].hist(phase_factor_list, animated=True, bins=bin_num, density=False, color="#AAAA22", alpha=0.6, log=True, label=r"Phase factor")
imgs.extend(phase_factor_hist)
phase_factor_text = self.ax_dict["phase_factor"].text(0.75, 0.5, f"max: {np.max(phase_factor_list):.4f}\nmin: {np.min(phase_factor_list):.4f}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["phase_factor"].transAxes, fontsize=13)
imgs.append(phase_factor_text)
""" Line plot for conditioning factor """
eig_vals_modula_nonzero = eig_vals_modula[eig_vals_modula > 0]
conditioning_factor = np.nan_to_num(np.abs(np.max(eig_vals_modula_nonzero) / np.min(eig_vals_modula_nonzero)))
self.attr_seq['conditioning_factor'].append(conditioning_factor)
conditioning_factor_curve, = self.ax_dict["conditioning_factor"].semilogy(self.attr_seq['iter'], self.attr_seq['conditioning_factor'], '-.', linewidth=1.5, color="#0000FF", animated=True, label=r"Conditioning factor", alpha=0.7)
imgs.append(conditioning_factor_curve)
conditioning_factor_sci = "{:.4E}".format(self.attr_seq['conditioning_factor'][-1])
conditioning_factor_text = self.ax_dict["conditioning_factor"].text(0.75, 0.5, f"value: {conditioning_factor_sci}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["conditioning_factor"].transAxes, fontsize=13)
imgs.append(conditioning_factor_text)
if (self.attr_seq['grad_corr_norm_x'] is not None) and (self.attr_seq['grad_corr_norm_y'] is not None):
if idd.get('corr_norm_x_ma', None) is not None:
x_corr_text = f"x corr norm MA: {idd['corr_norm_x_ma']:.4f}"
else:
x_corr_text = ""
if idd.get('corr_rel_norm_x_ma', None) is not None:
x_corr_text += f"\nrel x corr norm MA: {idd['corr_rel_norm_x_ma']:.4f}"
if idd.get('use_x_corr', None) is not None:
if idd['use_x_corr']:
x_corr_text += f"\nx corr: on"
else:
x_corr_text += f"\nx corr: off"
corr_norm_x_plot, = self.ax_dict["grad_corr_norm"].semilogy(self.attr_seq['wall_time'], self.attr_seq['grad_corr_norm_x'], '-', linewidth=2, color="#000000", animated=line_animated, label=r"$||H_{xy}H_{yy}^{-1}\nabla_y f||_2 / ||\nabla_x f||_2$", alpha=0.7, marker="x")
imgs.append(corr_norm_x_plot)
corr_norm_y_plot, = self.ax_dict["grad_corr_norm"].semilogy(self.attr_seq['wall_time'], self.attr_seq['grad_corr_norm_y'], '--', linewidth=2, color="#000000", animated=line_animated, label=r"$||H_{yy}^{-1}H_{yx}\nabla_x f||_2 / ||\nabla_y f||_2$", alpha=0.7, marker="1")
imgs.append(corr_norm_y_plot)
corr_norm_x_text = self.ax_dict["grad_corr_norm"].text(0.75, 0.5, x_corr_text, {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["grad_corr_norm"].transAxes, fontsize=13)
imgs.append(corr_norm_x_text)
""" Grad norms """
grad_norm_G_plot, = self.ax_dict["grad_norm"].semilogy(self.attr_seq['iter'], self.attr_seq['grad_raw_norm_x'], '-', linewidth=1.0, color="#2222FF", animated=line_animated, label=r"$||\nabla_x f||_2$", alpha=0.7)
imgs.append(grad_norm_G_plot)
if self.attr_seq['update_tot_norm_x'] is not None:
grad_norm_G_plot, = self.ax_dict["grad_norm"].semilogy(self.attr_seq['iter'], self.attr_seq['update_tot_norm_x'], '-.', linewidth=1.5, color="#2222FF", animated=line_animated, label=r"$||\nabla_x \tilde{f}||_2$", alpha=0.7)
imgs.append(grad_norm_G_plot)
grad_norm_D_plot, = self.ax_dict["grad_norm"].semilogy(self.attr_seq['iter'], self.attr_seq['grad_raw_norm_y'], '-', linewidth=1.0, color="#FF22FF", animated=line_animated, label=r"$||\nabla_y g||_2$", alpha=0.7)
imgs.append(grad_norm_D_plot)
if self.attr_seq['update_tot_norm_y'] is not None:
grad_norm_D_plot, = self.ax_dict["grad_norm"].semilogy(self.attr_seq['iter'], self.attr_seq['update_tot_norm_y'], '-.', linewidth=1.5, color="#FF22FF", animated=line_animated, label=r"$||\nabla_y \tilde{g}||_2$", alpha=0.7)
imgs.append(grad_norm_D_plot)
""" Learning curve G """
if self.attr['args'].divergence == "standard":
learning_curve_G, = self.ax_dict["loss_G"].semilogy(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_G']), ':', linewidth=1.5, color="#0000FF", animated=line_animated, label=r"loss_G", alpha=0.7)
imgs.append(learning_curve_G)
learning_curve_G_tot, = self.ax_dict["loss_G"].semilogy(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_G_tot']), '-', linewidth=2.5, color="#0000FF", animated=line_animated, label=r"loss_G_tot")
imgs.append(learning_curve_G_tot)
""" Reference line for optimal G loss """
opt_loss_G_ref, = self.ax_dict["loss_G"].semilogy(self.attr_seq['iter'], np.ones_like(np.array(Torch_loss_list_val_list(self.attr_seq['loss_G']))) * self.opt_loss_G_ref_val, 'r-', linewidth=1, color="#000055", animated=line_animated) # , label=r"loss_G$^*$"
imgs.append(opt_loss_G_ref)
opt_loss_G_val_text = self.ax_dict["loss_G"].text(1, self.opt_loss_G_ref_val, f"opt_loss_G = {self.opt_loss_G_ref_val:.5f}", fontsize=10)
imgs.append(opt_loss_G_val_text)
""" Learning curve D """
learning_curve_D, = self.ax_dict["loss_D"].semilogy(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_D']), ':', linewidth=1.5, color="#FF00FF", animated=line_animated, label=r"loss_D", alpha=0.7)
imgs.append(learning_curve_D)
learning_curve_D_tot, = self.ax_dict["loss_D"].semilogy(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_D_tot']), '-', linewidth=2.5, color="#FF00FF", animated=line_animated, label=r"loss_D_tot")
imgs.append(learning_curve_D_tot)
""" Reference line for optimal D loss """
opt_loss_D_ref, = self.ax_dict["loss_D"].semilogy(self.attr_seq['iter'], np.ones_like(np.array(Torch_loss_list_val_list(self.attr_seq['loss_D']))) * 2 * np.log(2), 'r-', linewidth=1, color="#550055", animated=line_animated, label=r"loss_D$^*$")
imgs.append(opt_loss_D_ref)
opt_loss_D_val_text = self.ax_dict["loss_D"].text(1, 2 * np.log(2), r"opt_loss_D = 1.38629", fontsize=10)
imgs.append(opt_loss_D_val_text)
else:
# print("self.attr_seq['iter']", self.attr_seq['iter'])
# print("self.attr_seq['loss_G']", self.attr_seq['loss_G'])
learning_curve_G, = self.ax_dict["loss_G"].plot(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_G']), ':', linewidth=1.5, color="#0000FF", animated=line_animated, label=r"loss_G", alpha=0.7)
imgs.append(learning_curve_G)
learning_curve_G_tot, = self.ax_dict["loss_G"].plot(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_G_tot']), '-', linewidth=2.5, color="#0000FF", animated=line_animated, label=r"loss_G_tot")
imgs.append(learning_curve_G_tot)
""" Reference line for optimal G loss """
opt_loss_G_ref, = self.ax_dict["loss_G"].plot(self.attr_seq['iter'], np.ones_like(np.array(Torch_loss_list_val_list(self.attr_seq['loss_G']))) * self.opt_loss_G_ref_val, 'r-', linewidth=1, color="#000055", animated=line_animated) # , label=r"loss_G$^*$"
imgs.append(opt_loss_G_ref)
opt_loss_G_val_text = self.ax_dict["loss_G"].text(1, self.opt_loss_G_ref_val, f"opt_loss_G = {self.opt_loss_G_ref_val:.5f}", fontsize=10)
imgs.append(opt_loss_G_val_text)
""" Learning curve D """
learning_curve_D, = self.ax_dict["loss_D"].plot(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_D']), ':', linewidth=1.5, color="#FF00FF", animated=line_animated, label=r"loss_D", alpha=0.7)
imgs.append(learning_curve_D)
learning_curve_D_tot, = self.ax_dict["loss_D"].plot(self.attr_seq['iter'], Torch_loss_list_val_list(self.attr_seq['loss_D_tot']), '-', linewidth=2.5, color="#FF00FF", animated=line_animated, label=r"loss_D_tot")
imgs.append(learning_curve_D_tot)
""" Reference line for optimal D loss """
opt_loss_D_ref, = self.ax_dict["loss_D"].plot(self.attr_seq['iter'], np.ones_like(np.array(Torch_loss_list_val_list(self.attr_seq['loss_D']))) * 2 * np.log(2), 'r-', linewidth=1, color="#550055", animated=line_animated, label=r"loss_D$^*$")
imgs.append(opt_loss_D_ref)
opt_loss_D_val_text = self.ax_dict["loss_D"].text(1, 2 * np.log(2), r"opt_loss_D = 1.38629", fontsize=10)
imgs.append(opt_loss_D_val_text)
""" Text """
iter_info = self.ax_dict["loss_G"].text(0.5, 0.5, f"iter: {self.attr_seq['iter'][-1]}\nloss_G: {Torch_loss_list_val_list(self.attr_seq['loss_G_tot'])[-1]:.4f}\nloss_D: {Torch_loss_list_val_list(self.attr_seq['loss_D_tot'])[-1]:.4f}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["loss_G"].transAxes, fontsize=8)
imgs.append(iter_info)
spanning_init_text = ""
if (idd.get('spanning_init', None) is not None) and self.attr['args'].spanning_init:
if idd['spanning_init']:
spanning_init_text = "\nspanning ..."
else:
spanning_init_text = "\nspanning completed"
time_info = self.ax_dict["loss_G"].text(0.7, 0.5, f"wall time (s): {idd['cumul_training_time']:.2f}\nPer iter (s): {idd['cumul_training_time'] / (idd['iter'] + 1):.4f}{spanning_init_text}", {"ha": "center", "va": "center"}, horizontalalignment="left", verticalalignment="top", transform=self.ax_dict["loss_G"].transAxes, fontsize=8)
imgs.append(time_info)
""" Legends """
if not self.legend_drawn:
for ax_name in self.ax_dict:
if ax_name[:3] != "out":
self.ax_dict[ax_name].legend(loc="upper right")
self.legend_drawn = True
""" ================================== """
self.ims.append(tuple(imgs))
plot_time = time.time() - toc
if idd['iter'] % self.attr["max_iter_div_5"] == 0:
print(f"> [End iter {idd['iter']} / {self.attr['args'].iteration}], plot time taken: {plot_time:.3f}")
def Load_data(self, filename):
if filename[-7:] == ".pickle":
filename = filename[:-7]
self.data_container_generator = Load_all_pickles(filename, data_folder=self.data_folder)
try:
self.data_container_dict = next(self.data_container_generator)
        except Exception:
            print("No data could be read from self.data_container_generator")
self.data_container_dict = None
if self.data_container_dict is None:
return -1
else:
self.attr = self.data_container_dict["attr"]
self.attr_seq = self.data_container_dict["attr_seq"]
print("Data assigned to members.")
self.Calculate_max_t()
if self.attr['args'].data in ["mnist"]:
if self.image_min is None or self.image_max is None:
print("Determining image intensity range")
x_out_list = []
for t in range(self.attr["args"].iteration // self.attr["args"].plot_iter + 1):
try:
idd = next(self.data_container_generator)
except:
continue
x_out_list.append(idd["x_out"])
x_out_list_np = np.array(x_out_list)
if len(x_out_list_np) == 0 or x_out_list_np is None:
self.image_min = 0
self.image_max = 1
else:
self.image_min = np.min(x_out_list_np.ravel())
self.image_max = np.max(x_out_list_np.ravel())
if self.image_min is None:
self.image_min = 0
if self.image_max is None:
self.image_max = 1
print(f"Image intensity range: ({self.image_min:.3f}, {self.image_max:.3f})")
self.Load_data(filename)
return 0
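    # Generate_video_from_file replays a stored run: it probes the stream to count the
    # frames actually present, then rebuilds the artist frames for the requested
    # iteration window, optionally rendering only every `skip_frame`-th frame and only
    # this worker's shard when my_part / num_parts are given.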
def Generate_video_from_file(self, title, my_part=None, num_parts=None, iter_start=0, iter_end=np.inf, skip_frame=1):
generating_start_time = time.time()
self.Load_data(title)
max_t = 0
max_iter = 0
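        # Probe pass: walk the whole stream once to find how many frames were actually
        # written (max_t) and the last iteration index, then reload from the start.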
for t in range(self.attr["max_t"]):
if t % (np.max([self.attr["max_t"] // 5, 1]).astype(int)) == 0:
print("Checking... ", t, self.attr["max_t"], self.attr["args"].iteration, self.attr["args"].plot_iter)
try:
idd = next(self.data_container_generator)
max_iter = idd["iter"]
max_t = t
            except Exception:
                print("file end")
                break  # stream exhausted; stop probing
self.Load_data(title)
self.attr["max_t"] = max_t
self.attr["max_iter_div_5"] = self.attr["max_t"] // 5
if self.attr["max_iter_div_5"] == 0:
self.attr["max_iter_div_5"] += 1
if iter_end == np.inf:
iter_end = max_iter
self.Init_figure()
self.num_parts = num_parts
self.skip_frame = skip_frame
base_start_pos = start_pos = 0
base_end_pos = end_pos = self.attr["max_t"]
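        # Optional sharding: each worker renders only its [start_pos, end_pos) slice of
        # frames, presumably so long runs can be turned into video in parallel.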
if my_part is not None and num_parts is not None:
start_pos, end_pos = Get_start_and_end_pos_for_worker(my_part, num_parts, base_start_pos, base_end_pos)
self.attr["name"] += f"_{my_part}-{num_parts}"
print(f"part {my_part} / {num_parts}: ({start_pos}, {end_pos})")
for t in range(self.attr["max_t"]):
try:
idd = next(self.data_container_generator)
except:
print("file end")
return
if t % skip_frame != 0:
continue
for item in self.attr_seq:
self.attr_seq[item].append(idd.get(item, None))
""" If this part of video is not started from the beginning, plot the previous segments line plots """
if idd["iter"] >= iter_start and idd["iter"] <= iter_end and t >= start_pos and t < end_pos:
if t % (np.max([self.attr["max_t"] // 5, 1]).astype(int)) == 0:
print(f't {t}, max_t {self.attr["max_t"]}, iteration {self.attr["args"].iteration}, plot_iter {self.attr["args"].plot_iter}')
self.Plot_step(idd, loading=True)
self.total_frame += 1
print(f"Video production time: {time.time() - generating_start_time}")
def Save_plot(self, title=None, fps=10, figures_folder=figures_folder):
save_plot_start_time = time.time()
if title is None:
self.attr["name"] += f"_{Now()}"
title = self.attr["name"]
else:
title += f"_{Now()}"
self.fig.suptitle(title)
self.attr['name'] = title
subplots_adjust(top=.95)
# tight_layout()
mywriter = animation.FFMpegWriter(fps=fps, metadata=dict(artist='Me', title=title), bitrate=1000)
ani = animation.ArtistAnimation(self.fig, self.ims, interval=2, blit=False, repeat_delay=1000)
if not os.path.exists(figures_folder):
os.makedirs(figures_folder)
if platform.system() == "Darwin":
print("Using MacOS.")
elif platform.system() == "Linux":
print("Using Linux.")
else:
print("Using Windows.")
output_filename = os.path.join(figures_folder, title + ".mp4")
print(f"output_filename: {output_filename}")
def progress_callback_func(i, n):
prog_num_5 = np.round(self.total_frame / 5, 0).astype(int)
if prog_num_5 == 0:
prog_num_5 += 1
if i % prog_num_5 == 0 or i == self.total_frame - 1:
print(f'Saving frame {i + 1} of {self.total_frame}')
if self.total_frame > 0:
ani.save(output_filename, writer=mywriter, progress_callback=progress_callback_func) # , dpi=50
print(f"video saving time: {time.time() - save_plot_start_time}")
if __name__ == "__main__":
pass
| 0
| 0
| 0
| 57,891
| 644
| 811
| 0
| 87
| 586
|
16846bb53e261f3f34ae5b20388fb957aa7b755f
| 2,126
|
py
|
Python
|
setup.py
|
melissaboiko/uniscripts
|
e2daf1a52f307cda3a22387162d098dc98b0d4ad
|
[
"CC0-1.0"
] | 7
|
2015-05-11T19:53:12.000Z
|
2017-11-10T23:45:18.000Z
|
setup.py
|
leoboiko/uniscripts
|
e2daf1a52f307cda3a22387162d098dc98b0d4ad
|
[
"CC0-1.0"
] | 1
|
2021-12-06T13:01:19.000Z
|
2021-12-06T13:46:21.000Z
|
setup.py
|
leoboiko/uniscripts
|
e2daf1a52f307cda3a22387162d098dc98b0d4ad
|
[
"CC0-1.0"
] | 1
|
2021-10-17T08:51:01.000Z
|
2021-10-17T08:51:01.000Z
|
"""setuptools module for uniscripts.
"""
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# adapted from https://coderwall.com/p/qawuyq/use-markdown-readme-s-in-python-modules
try:
import pypandoc
long_description = pypandoc.convert(path.join(here, 'README.md'), 'rst', format='markdown_github')
except (IOError, ImportError):
    with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='uniscripts',
# PEP440
version='1.0.5',
description='query Unicode script metadata',
long_description=long_description,
url='https://github.com/leoboiko/uniscripts',
# Author details
author='Leonardo Boiko',
author_email='[email protected]',
# Choose your license
license='CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Internationalization',
'Topic :: Software Development :: Localization',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2', # probably?
'Programming Language :: Python :: 3.3', # I hope...
'Programming Language :: Python :: 3.4', # actually tested here
],
keywords='unicode script scripts uax24 hiragana katakana kanji han',
# packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'update']),
packages=['uniscripts'],
# cf. https://packaging.python.org/en/latest/requirements.html
install_requires=[],
extras_require={},
package_data={},
data_files=[],
entry_points={},
)
| 27.973684
| 105
| 0.651929
|
"""setuptools module for uniscripts.
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# adapted from https://coderwall.com/p/qawuyq/use-markdown-readme-s-in-python-modules
try:
import pypandoc
long_description = pypandoc.convert(path.join(here, 'README.md'), 'rst', format='markdown_github')
except (IOError, ImportError):
    with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='uniscripts',
# PEP440
version='1.0.5',
description='query Unicode script metadata',
long_description=long_description,
url='https://github.com/leoboiko/uniscripts',
# Author details
author='Leonardo Boiko',
author_email='[email protected]',
# Choose your license
license='CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Internationalization',
'Topic :: Software Development :: Localization',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2', # probably?
'Programming Language :: Python :: 3.3', # I hope...
'Programming Language :: Python :: 3.4', # actually tested here
],
keywords='unicode script scripts uax24 hiragana katakana kanji han',
# packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'update']),
packages=['uniscripts'],
# cf. https://packaging.python.org/en/latest/requirements.html
install_requires=[],
extras_require={},
package_data={},
data_files=[],
entry_points={},
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0
|
4c269b3844a3d702eeb602612791e1b874638e1f
| 236
|
py
|
Python
|
2020/sparta08/0614/homework/myShop/init.py
|
loveAlakazam/TIL
|
e4f887bc1a6cf5639c361656e22abbe8ccfa314b
|
[
"Apache-2.0"
] | 1
|
2020-06-22T02:51:11.000Z
|
2020-06-22T02:51:11.000Z
|
2020/sparta08/0614/homework/myShop/init.py
|
loveAlakazam/TIL
|
e4f887bc1a6cf5639c361656e22abbe8ccfa314b
|
[
"Apache-2.0"
] | 1
|
2020-10-19T12:22:30.000Z
|
2020-10-19T12:22:30.000Z
|
2020/sparta08/0614/homework/myShop/init.py
|
loveAlakazam/TIL
|
e4f887bc1a6cf5639c361656e22abbe8ccfa314b
|
[
"Apache-2.0"
] | 1
|
2020-12-19T12:46:26.000Z
|
2020-12-19T12:46:26.000Z
|
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.db_cek
if __name__=='__main__':
main()
| 16.857143
| 39
| 0.686441
|
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
db = client.db_cek
def delete_all():
    # Delete everything stored in the database.
db.orders.delete_many({})
def main():
delete_all()
if __name__=='__main__':
main()
| 54
| 0
| 0
| 0
| 0
| 46
| 0
| 0
| 46
|
501ac2a7e8cb9f4c023963af10abb5799f160092
| 136
|
py
|
Python
|
measurement/array-operations/vmul3.py
|
quepas/performance-estimation-array-operations
|
b209ba5efebf5dee60ec5fca0fa711ca2e766e17
|
[
"MIT"
] | null | null | null |
measurement/array-operations/vmul3.py
|
quepas/performance-estimation-array-operations
|
b209ba5efebf5dee60ec5fca0fa711ca2e766e17
|
[
"MIT"
] | null | null | null |
measurement/array-operations/vmul3.py
|
quepas/performance-estimation-array-operations
|
b209ba5efebf5dee60ec5fca0fa711ca2e766e17
|
[
"MIT"
] | null | null | null |
# Element-wise multiplication of three vectors
| 19.428571
| 46
| 0.698529
|
import numpy as np
# Element-wise multiplication of three vectors
def vmul3(V1, V2, V3):
    R = np.multiply(V1, np.multiply(V2, V3))
    return R
| 0
| 0
| 0
| 0
| 0
| 46
| 0
| -3
| 44
|