Dataset schema:

| column | dtype | range | nullable |
|---|---|---|---|
| hexsha | string | length 40..40 | no |
| size | int64 | 6 .. 1.04M | no |
| ext | string | 10 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 4..247 | no |
| max_stars_repo_name | string | length 4..130 | no |
| max_stars_repo_head_hexsha | string | length 40..78 | no |
| max_stars_repo_licenses | list | length 1..10 | no |
| max_stars_count | int64 | 1 .. 368k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24..24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24..24 | yes |
| max_issues_repo_path | string | length 4..247 | no |
| max_issues_repo_name | string | length 4..130 | no |
| max_issues_repo_head_hexsha | string | length 40..78 | no |
| max_issues_repo_licenses | list | length 1..10 | no |
| max_issues_count | int64 | 1 .. 116k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24..24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24..24 | yes |
| max_forks_repo_path | string | length 4..247 | no |
| max_forks_repo_name | string | length 4..130 | no |
| max_forks_repo_head_hexsha | string | length 40..78 | no |
| max_forks_repo_licenses | list | length 1..10 | no |
| max_forks_count | int64 | 1 .. 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24..24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24..24 | yes |
| content | string | length 1 .. 1.04M | no |
| avg_line_length | float64 | 1.53 .. 618k | no |
| max_line_length | int64 | 1 .. 1.02M | no |
| alphanum_fraction | float64 | 0 .. 1 | no |
| original_content | string | length 6 .. 1.04M | no |
| filtered:remove_non_ascii | int64 | 0 .. 538k | no |
| filtered:remove_decorators | int64 | 0 .. 917k | no |
| filtered:remove_async | int64 | 0 .. 722k | no |
| filtered:remove_classes | int64 | -45 .. 1M | no |
| filtered:remove_generators | int64 | 0 .. 814k | no |
| filtered:remove_function_no_docstring | int64 | -102 .. 850k | no |
| filtered:remove_class_no_docstring | int64 | -3 .. 5.46k | no |
| filtered:remove_unused_imports | int64 | -1,350 .. 52.4k | no |
| filtered:remove_delete_markers | int64 | 0 .. 59.6k | no |
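The rows that follow pair each file's filtered `content` with its `original_content`, plus per-filter byte deltas. As a minimal sketch of how rows with this schema could be inspected, assuming they are published as a Hugging Face dataset (the dataset path below is hypothetical):

```python
# Hypothetical dataset path; only the column names come from the schema above.
from datasets import load_dataset

ds = load_dataset("user/filtered-python-code", split="train", streaming=True)
for row in ds.take(3):
    print(row["hexsha"], row["size"], row["max_stars_repo_name"])
    # compare the filtered text against the original
    print(len(row["original_content"]) - len(row["content"]), "chars removed")
```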

hexsha: 5d3f7ba06d8f30ec1f43524b350975670db0b280 | size: 1,597 | ext: py | lang: Python
path: tests/gmprocess/waveform_processing/adjust_highpass_ridder_test.py
repo: usgs/groundmotion-processing- @ ed188e2bb1dcd9b17433ef4677874eac654fdd16 | licenses: ["Unlicense"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
if __name__ == "__main__":
os.environ["CALLED_FROM_PYTEST"] = "True"
test_auto_fchp()
avg_line_length: 27.067797 | max_line_length: 76 | alphanum_fraction: 0.634314
original_content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from gmprocess.core.streamcollection import StreamCollection
from gmprocess.io.read import read_data
from gmprocess.utils.test_utils import read_data_dir
from gmprocess.waveform_processing.adjust_highpass_ridder import ridder_fchp
from gmprocess.utils.config import get_config
def test_auto_fchp():
data_files, origin = read_data_dir("geonet", "us1000778i", "*.V1A")
data_files.sort()
streams = []
for f in data_files:
streams += read_data(f)
sc = StreamCollection(streams)
output_fchp = []
config = get_config()
config["integration"]["frequency"] = True
for st in sc:
for tr in st:
tr.setParameter(
"corner_frequencies",
{"type": "constant", "highpass": 0.001, "lowpass": 20},
)
tmp_st = ridder_fchp(st, config=config)
for tr in tmp_st:
initial_corners = tr.getParameter("corner_frequencies")
output_fchp.append(initial_corners["highpass"])
target_fchp = np.array(
[
0.021345158261480087,
0.022839239726168643,
0.02482398434993213,
0.01399481102242619,
0.026850167635921275,
0.004817661513765862,
0.008204101694236587,
0.006429246474225982,
0.004237087327289796,
]
)
np.testing.assert_allclose(output_fchp, target_fchp, atol=1e-7)
if __name__ == "__main__":
os.environ["CALLED_FROM_PYTEST"] = "True"
test_auto_fchp()
filtered: remove_function_no_docstring=1,124, remove_unused_imports=164, remove_delete_markers=155 (all other filters 0)
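The filtered:* columns appear to record how many bytes each filter removed; a quick cross-check on the row above, a sketch assuming exactly that interpretation:

```python
# Assumption (not confirmed by the schema): `size` is the original file size
# and each filtered:* value counts bytes removed by that filter.
size = 1_597
removed = 1_124 + 164 + 155    # the three nonzero deltas for this row
print(size - removed)          # 154, roughly the length of the filtered snippet
```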

hexsha: 8314dc9023ae51350cf14c4fc6f29ae2621bb8d7 | size: 952 | ext: py | lang: Python
path: supperfeed/importer.py
repo: corydodt/SupperFeed @ 2980e3f6d287f56c6eade06cfe57870d9796c5ea | licenses: ["MIT"]
stars: 2 (2015-10-28T23:53:29.000Z .. 2018-02-27T12:39:54.000Z) | issues: null | forks: null
content:
"""
Import recipes from URLs to our database
"""
from txpx.process import LineGlueProtocol
LineGlueProtocol.MAX_LENGTH=10000
avg_line_length: 24.410256 | max_line_length: 77 | alphanum_fraction: 0.658613
original_content:
"""
Import recipes from URLs to our database
"""
import re
import json
from txpx import background, EchoProcess
from txpx.process import LineGlueProtocol
from supperfeed.build import Recipe
LineGlueProtocol.MAX_LENGTH=10000
class ImportProcess(EchoProcess):
"""
Import a recipe by loading the json data dumped by the downloader process
"""
def __init__(self, *a, **kw):
EchoProcess.__init__(self, *a, **kw)
self.linebuf = []
def outLineReceived(self, line):
if re.match(r'^/\*+/$', line):
return self.finished()
self.linebuf.append(line)
def finished(self):
data = json.loads('\n'.join(self.linebuf))
recipe = Recipe.fromLoadedData(data)
recipe.save()
self.linebuf[:] = []
def importRecipe(url):
d = background(['recipeschema', url], proto=ImportProcess)
d.addCallback(lambda ok: Recipe.objects(importedFrom=url).first())
return d
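A hedged usage sketch for `importRecipe` above: `background` returns a Twisted Deferred, so a caller can wait for the downloader subprocess to finish before reading the saved Recipe (the URL and reactor wiring are illustrative only):

```python
# Sketch only: assumes a `recipeschema` executable on PATH, as the
# `background` call above implies, and a standard Twisted reactor.
from twisted.internet import reactor

def main():
    d = importRecipe("https://example.com/some-recipe")  # hypothetical URL
    d.addCallback(lambda recipe: print(recipe and recipe.importedFrom))
    d.addBoth(lambda _: reactor.stop())

reactor.callWhenRunning(main)
reactor.run()
```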
filtered: remove_classes=529, remove_function_no_docstring=148, remove_unused_imports=11, remove_delete_markers=136 (all other filters 0)

hexsha: 30f5d5f62a940e9c9c56d93b2735b45ae0a23f7e | size: 759 | ext: py | lang: Python
path: src/my_project/medium_problems/from1to50/group_people_give_group_size.py
repo: ivan1016017/LeetCodeAlgorithmProblems | licenses: ["MIT"]
head: f617f30201fb1cd53e32de35084fdeb88ef36023 (stars, issues) / 454284b76634cc34ed41f7fa30d857403cedf1bf (forks)
stars: null | issues: 1 (2021-09-22T12:26:14.000Z .. 2021-09-22T12:26:14.000Z) | forks: null
content:
solution = Solution()
print(solution.groupThePeople(groupSizes = [3,3,3,3,3,1,3]))
avg_line_length: 29.192308 | max_line_length: 71 | alphanum_fraction: 0.546772
original_content:
from typing import List
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
final_groups=[]
count_dict = {}
for ind, val in enumerate(groupSizes):
# print(count_dict)
if val in count_dict.keys():
if len(count_dict[val]) < val:
count_dict[val].append(ind)
else:
final_groups.append(count_dict[val])
count_dict[val] = [ind]
else:
count_dict[val] = [ind]
for key in count_dict.keys():
final_groups.append(count_dict[key])
return final_groups
solution = Solution()
print(solution.groupThePeople(groupSizes = [3,3,3,3,3,1,3]))
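For the sample call above, people 0-4 and 6 ask for groups of size 3 and person 5 for a group of size 1, so the buckets fill and flush into three groups; a quick order-insensitive check:

```python
# expected grouping for groupSizes = [3, 3, 3, 3, 3, 1, 3]
groups = Solution().groupThePeople([3, 3, 3, 3, 3, 1, 3])
assert sorted(map(sorted, groups)) == [[0, 1, 2], [3, 4, 6], [5]]
```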
filtered: remove_classes=626, remove_unused_imports=2, remove_delete_markers=45 (all other filters 0)

hexsha: dbd8f6bc8f256424cc38bdbd062b914922bea024 | size: 1,011 | ext: py | lang: Python
path: tests/bidirectional_lut_test.py
repo: pnarvor/nephelae_simulation @ 7b3f3a2c2aaa49324f8b09a6ab62819c280efa4c | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
#! /usr/bin/python3
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from netCDF4 import MFDataset
import mesonh_probe as cdf
"""
test file for periodiccontainer and netcdfinterface types
- arguments : mesonh (netcdf) files to open
"""
mesonhfiles = sys.argv[slice(1,len(sys.argv))]
atm = MFDataset(mesonhfiles)
lut = cdf.BiDirectionalLUT(atm.variables['VLEV'][:,0,0])
lin = cdf.BiDirectionalLinear(atm.variables['S_N_direction'][:])
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lut.to_output_space(np.linspace(0,160,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lut.to_input_space(np.linspace(0.005,3.95,1000)))
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lin.to_output_space(np.linspace(0,700,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lin.to_input_space(np.linspace(-1,5,1000)))
plt.show(block=False)
avg_line_length: 25.923077 | max_line_length: 66 | alphanum_fraction: 0.732938
original_content:
#! /usr/bin/python3
import sys
import os
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
import imageio
import matplotlib.cm as cm
import time
from netCDF4 import MFDataset
import mesonh_probe as cdf
"""
test file for periodiccontainer and netcdfinterface types
- arguments : mesonh (netcdf) files to open
"""
mesonhfiles = sys.argv[slice(1,len(sys.argv))]
atm = MFDataset(mesonhfiles)
lut = cdf.BiDirectionalLUT(atm.variables['VLEV'][:,0,0])
lin = cdf.BiDirectionalLinear(atm.variables['S_N_direction'][:])
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lut.to_output_space(np.linspace(0,160,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lut.to_input_space(np.linspace(0.005,3.95,1000)))
plot1, axes1 = plt.subplots(1,2)
x = np.linspace(0,160,1000)
axes1[0].plot(x, lin.to_output_space(np.linspace(0,700,1000)))
x = np.linspace(0.005,3.95,1000)
axes1[1].plot(x, lin.to_input_space(np.linspace(-1,5,1000)))
plt.show(block=False)
filtered: remove_unused_imports=-29, remove_delete_markers=66 (all other filters 0)

hexsha: 5c0bef4089d7fb266a9c29954e06a567e8ce9a6d | size: 800 | ext: py | lang: Python
path: constant/constant.py
repo: ZxbMsl160918/covid19-vaccin @ 7ebf3fa1de45fdaec8108e79ff6e090400cde9eb | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#
RESPONSE_OK = 200
# URL
URLS = {
#
"hostUrl": "https://m.r.umiaohealth.com/",
# POST
"vaccinationAddress": "/InstitutionMedicineStock/GetBykeyword_InstitutionMedicineStock",
#
"hospitalTimeRange": "/Reservation/GetByWorkDate_Rsv_TimeRange",
# urlGET
"secVaccination": "/Reservation/Reservation_Create",
# childId
"childId": "/Adult/Index",
#
"userMsg": "/Home/My"
}
#
AREAS = [
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
""
]
#
VACCINE_TYPES = {
"veroCell": 5601, # Vero
"adenovirusVector": 5602 #
# etc...
}
#
SEC_TYPE = VACCINE_TYPES["veroCell"]
avg_line_length: 18.181818 | max_line_length: 92 | alphanum_fraction: 0.6075
original_content:
# request succeeded
RESPONSE_OK = 200
# URLs used by the requests
URLS = {
    # host address
    "hostUrl": "https://m.r.umiaohealth.com/",
    # endpoint listing vaccination sites; POST
    "vaccinationAddress": "/InstitutionMedicineStock/GetBykeyword_InstitutionMedicineStock",
    # bookable time slots of a community hospital on a given day
    "hospitalTimeRange": "/Reservation/GetByWorkDate_Rsv_TimeRange",
    # URL that performs the vaccine reservation request; GET
    "secVaccination": "/Reservation/Reservation_Create",
    # fetch childId
    "childId": "/Adult/Index",
    # fetch user info
    "userMsg": "/Home/My"
}
# area names (districts of Guangzhou)
AREAS = [
    "天河区",
    "白云区",
    "黄埔区",
    "荔湾区",
    "越秀区",
    "海珠区",
    "番禺区",
    "花都区",
    "南沙区",
    "增城区",
    "从化区"
]
# all vaccine types
VACCINE_TYPES = {
    "veroCell": 5601,  # COVID-19 vaccine (Vero cell)
    "adenovirusVector": 5602  # COVID-19 vaccine (adenovirus vector)
    # etc...
}
# vaccine type to reserve
SEC_TYPE = VACCINE_TYPES["veroCell"]
filtered: remove_non_ascii=399 (all other filters 0)

hexsha: 5ebd8f1c512b1380b449a67ec585f3905c6bceac | size: 9,220 | ext: py | lang: Python
path: personal_context_builder/gensim_hdp.py
repo: InternetOfUs/personal-context-builder @ 89e7388d622bc0efbf708542566fdcdca667a4e5 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.hdpmodel.HdpModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api import HdpTransformer
>>>
>>> # Let's extract the distribution of each document in topics
>>> model = HdpTransformer(id2word=common_dictionary)
>>> distr = model.fit_transform(common_corpus)
"""
avg_line_length: 42.100457 | max_line_length: 119 | alphanum_fraction: 0.61974
original_content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.hdpmodel.HdpModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.test.utils import common_dictionary, common_corpus
>>> from gensim.sklearn_api import HdpTransformer
>>>
>>> # Let's extract the distribution of each document in topics
>>> model = HdpTransformer(id2word=common_dictionary)
>>> distr = model.fit_transform(common_corpus)
"""
import numpy as np
from gensim import matutils # type: ignore
from gensim import models # type: ignore
from scipy import sparse # type: ignore
from sklearn.base import BaseEstimator, TransformerMixin # type: ignore
from sklearn.exceptions import NotFittedError # type: ignore
class HdpTransformer(TransformerMixin, BaseEstimator):
"""Base HDP module, wraps :class:`~gensim.models.hdpmodel.HdpModel`.
The inner workings of this class heavily depends on `Wang, Paisley, Blei: "Online Variational
Inference for the Hierarchical Dirichlet Process, JMLR (2011)"
<http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
"""
def __init__(
self,
id2word,
max_chunks=None,
max_time=None,
chunksize=256,
kappa=1.0,
tau=64.0,
K=15,
T=150,
alpha=1,
gamma=1,
eta=0.01,
scale=1.0,
var_converge=0.0001,
outputdir=None,
random_state=None,
):
"""
Parameters
----------
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Mapping between a words ID and the word itself in the vocabulary.
max_chunks : int, optional
Upper bound on how many chunks to process.It wraps around corpus beginning in another corpus pass,
if there are not enough chunks in the corpus.
max_time : int, optional
Upper bound on time in seconds for which model will be trained.
chunksize : int, optional
Number of documents to be processed by the model in each mini-batch.
kappa : float, optional
Learning rate, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical Dirichlet
Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
tau : float, optional
Slow down parameter, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
K : int, optional
Second level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
T : int, optional
Top level truncation level, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
alpha : int, optional
Second level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
gamma : int, optional
First level concentration, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
eta : float, optional
The topic Dirichlet, see `Wang, Paisley, Blei: "Online Variational Inference for the Hierarchical
Dirichlet Process, JMLR (2011)" <http://jmlr.csail.mit.edu/proceedings/papers/v15/wang11a/wang11a.pdf>`_.
scale : float, optional
Weights information from the mini-chunk of corpus to calculate rhot.
var_converge : float, optional
Lower bound on the right side of convergence. Used when updating variational parameters
for a single document.
outputdir : str, optional
Path to a directory where topic and options information will be stored.
random_state : int, optional
Seed used to create a :class:`~np.random.RandomState`. Useful for obtaining reproducible results.
"""
self.gensim_model = None
self.id2word = id2word
self.max_chunks = max_chunks
self.max_time = max_time
self.chunksize = chunksize
self.kappa = kappa
self.tau = tau
self.K = K
self.T = T
self.alpha = alpha
self.gamma = gamma
self.eta = eta
self.scale = scale
self.var_converge = var_converge
self.outputdir = outputdir
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.hdp.HdpTransformer`
The trained model.
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
else:
corpus = X
self.gensim_model = models.HdpModel(
corpus=corpus,
id2word=self.id2word,
max_chunks=self.max_chunks,
max_time=self.max_time,
chunksize=self.chunksize,
kappa=self.kappa,
tau=self.tau,
K=self.K,
T=self.T,
alpha=self.alpha,
gamma=self.gamma,
eta=self.eta,
scale=self.scale,
var_converge=self.var_converge,
outputdir=self.outputdir,
random_state=self.random_state,
)
return self
def transform(self, docs):
"""Infer a matrix of topic distribution for the given document bow, where a_ij
indicates (topic_i, topic_probability_j).
Parameters
----------
docs : {iterable of list of (int, number), list of (int, number)}
Document or sequence of documents in BOW format.
Returns
-------
numpy.ndarray of shape [`len(docs), num_topics`]
Topic distribution for `docs`.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# The input as array of array
if isinstance(docs[0], tuple):
docs = [docs]
distribution, max_num_topics = [], 0
for doc in docs:
topicd = self.gensim_model[doc]
distribution.append(topicd)
max_num_topics = max(max_num_topics, max(topic[0] for topic in topicd) + 1)
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
distribution = [matutils.sparse2full(t, max_num_topics) for t in distribution]
return np.reshape(np.array(distribution), (len(docs), max_num_topics))
def partial_fit(self, X):
"""Train model over a potentially incomplete set of documents.
Uses the parameters set in the constructor.
This method can be used in two ways:
* On an unfitted model in which case the model is initialized and trained on `X`.
* On an already fitted model in which case the model is **updated** by `X`.
Parameters
----------
X : {iterable of list of (int, number), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.hdp.HdpTransformer`
The trained model.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
if self.gensim_model is None:
self.gensim_model = models.HdpModel(
id2word=self.id2word,
max_chunks=self.max_chunks,
max_time=self.max_time,
chunksize=self.chunksize,
kappa=self.kappa,
tau=self.tau,
K=self.K,
T=self.T,
alpha=self.alpha,
gamma=self.gamma,
eta=self.eta,
scale=self.scale,
var_converge=self.var_converge,
outputdir=self.outputdir,
random_state=self.random_state,
)
self.gensim_model.update(corpus=X)
return self
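Beyond the `fit_transform` example in the module docstring, `partial_fit` trains incrementally; a minimal sketch reusing gensim's test corpus from the docstring example:

```python
# Feed the corpus in two chunks: the first call initializes the model,
# later calls update it.
from gensim.test.utils import common_dictionary, common_corpus

model = HdpTransformer(id2word=common_dictionary)
for chunk in (common_corpus[:3], common_corpus[3:]):
    model.partial_fit(chunk)
distribution = model.transform(common_corpus[0])  # a single BOW doc also works
```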
filtered: remove_classes=8,216, remove_unused_imports=69, remove_delete_markers=235 (all other filters 0)

hexsha: b582f1040cc1af3be284ad67fe9c371e838dde5d | size: 2,060 | ext: py | lang: Python
path: zaqar-8.0.0/zaqar/common/policies/claims.py
repo: scottwedge/OpenStack-Stein @ 7077d1f602031dace92916f14e36b124f474de15 | licenses: ["Apache-2.0"]
stars: 97 (2015-01-02T09:35:23.000Z .. 2022-03-25T00:38:45.000Z) | issues: 5 (2019-08-14T06:46:03.000Z .. 2021-12-13T20:01:25.000Z) | forks: 44 (2015-01-28T03:01:28.000Z .. 2021-05-13T18:55:19.000Z)
content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from zaqar.common.policies import base
CLAIMS = 'claims:%s'
rules = [
policy.DocumentedRuleDefault(
name=CLAIMS % 'create',
check_str=base.UNPROTECTED,
description='Claims a set of messages from the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims',
'method': 'POST'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'get',
check_str=base.UNPROTECTED,
description='Queries the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'delete',
check_str=base.UNPROTECTED,
description='Releases the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'DELETE'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'update',
check_str=base.UNPROTECTED,
description='Updates the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'PATCH'
}
]
)
]
avg_line_length: 29.428571 | max_line_length: 76 | alphanum_fraction: 0.591748
original_content:
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from zaqar.common.policies import base
CLAIMS = 'claims:%s'
rules = [
policy.DocumentedRuleDefault(
name=CLAIMS % 'create',
check_str=base.UNPROTECTED,
description='Claims a set of messages from the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims',
'method': 'POST'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'get',
check_str=base.UNPROTECTED,
description='Queries the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'delete',
check_str=base.UNPROTECTED,
description='Releases the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'DELETE'
}
]
),
policy.DocumentedRuleDefault(
name=CLAIMS % 'update',
check_str=base.UNPROTECTED,
description='Updates the specified claim for the specified queue.',
operations=[
{
'path': '/v2/queues/{queue_name}/claims/{claim_id}',
'method': 'PATCH'
}
]
)
]
def list_rules():
return rules
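A hedged sketch of how `list_rules` is typically consumed: OpenStack projects register these defaults with an oslo.policy Enforcer (usually wired up through the `oslo.policy.policies` entry point):

```python
# Sketch only: a minimal enforcer seeded with the rules defined above.
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_defaults(list_rules())
# at request time, e.g.: enforcer.authorize('claims:create', target, credentials)
```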
filtered: remove_function_no_docstring=13, remove_delete_markers=23 (all other filters 0)

hexsha: 48f845ebbaca4afd27efda9fdf590cf268429691 | size: 2,139 | ext: py | lang: Python
path: bot.py
repo: boyuan12/MLH-Helper @ efa98907b89bae2ef1f1c8b551075356b61ad741 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import discord
import discord.utils
client = discord.Client()
SECRET_KEY="secretkey"
BASE_URL="http://0.0.0.0:1234"
client.run("")
avg_line_length: 40.358491 | max_line_length: 198 | alphanum_fraction: 0.625993
original_content:
import discord
import discord.utils
from discord.ext import commands
import requests
import random
client = discord.Client()
SECRET_KEY="secretkey"
BASE_URL="http://0.0.0.0:1234"
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
id = client.user.id
if str(id) in message.content:
# get the question
resp = str(message.content).split(f"<@!{str(id)}> ")[1]
if resp == "checkin":
await message.channel.send(f"Welcome! Please go ahead and go to {BASE_URL}/{message.author.id}. When you finished, please tag me and say finished, and I will send you more information!")
elif resp == "attendees" and "mlh" in [y.name.lower() for y in message.author.roles]:
curr = requests.get("https://mlh-events.now.sh/na-2020").json()[0]["name"]
csv_file = requests.get(f"{BASE_URL}/api/generate/{curr}/{SECRET_KEY}").json()["url"]
channel = await message.author.create_dm()
await channel.send(f"Here's the file link to download: {csv_file}")
elif resp == "attendees" and "mlh" not in [y.name.lower() for y in message.author.roles]:
await message.channel.send(f"Oops, looks like you don't have permission to use this command!")
elif resp == "finished":
resp = requests.get(f"{BASE_URL}/api/current_hack/{message.author.id}").json()
if resp["hack"] in [hack.name for hack in message.guild.roles] and resp["hack"] not in [y.name.lower() for y in message.author.roles]:
role = discord.utils.get(message.guild.roles, name=resp["hack"])
user = message.author
await user.add_roles(role)
else:
guild = message.guild
await guild.create_role(name=resp["hack"], colour=discord.Colour(0x00FF00))
role = discord.utils.get(message.guild.roles, name=resp["hack"])
user = message.author
await user.add_roles(role)
await message.channel.send(resp["resp"])
client.run("")
filtered: remove_decorators=1,896, remove_unused_imports=-3, remove_delete_markers=112 (all other filters 0)

hexsha: f968e78a0a396c803ccb0a25d591d668dacf68bf | size: 553 | ext: py | lang: Python
path: django_pathfinder_statcrunch/urls.py
repo: porowns/django-pathfinder-statcrunch @ 4a31dd014b6e1c27b3e70ae88ca5762841ce72db | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from django.urls import path
from . import views
# SSO
urlpatterns = [
path('reports/', views.list_reports,
name="django-pathfinder-statcrunch-list-reports"),
path('reports/<int:pk>/', views.view_report,
name="django-pathfinder-statcrunch-view-report"),
path('reports/<int:pk>/refresh/', views.refresh_report,
name="django-pathfinder-statcrunch-view-report-refresh"),
]
avg_line_length: 34.5625 | max_line_length: 67 | alphanum_fraction: 0.735986
original_content:
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.contrib import admin
from django.urls import path, re_path
from . import views
# SSO
urlpatterns = [
path('reports/', views.list_reports,
name="django-pathfinder-statcrunch-list-reports"),
path('reports/<int:pk>/', views.view_report,
name="django-pathfinder-statcrunch-view-report"),
path('reports/<int:pk>/refresh/', views.refresh_report,
name="django-pathfinder-statcrunch-view-report-refresh"),
]
filtered: remove_unused_imports=77, remove_delete_markers=66 (all other filters 0)

hexsha: 0ed78fe0d10f673e40978db5dbae120f7d215015 | size: 222 | ext: py | lang: Python
path: adventofcode/2021/1/2.py
repo: jan25/code_sorted @ f405fd0898f72eb3d5428f9e10aefb4a009d5089 | licenses: ["Unlicense"]
stars: 2 (2018-01-18T11:01:36.000Z .. 2021-12-20T18:14:48.000Z) | issues: null | forks: null
content:
import fileinput
nums = list(map(int, fileinput.input()))
print(sum(inc for inc in gen()))
avg_line_length: 17.076923 | max_line_length: 56 | alphanum_fraction: 0.540541
original_content:
import fileinput
nums = list(map(int, fileinput.input()))
def gen():
for i in range(1, len(nums) - 2):
if sum(nums[i:i + 3]) > sum(nums[i - 1: i + 2]):
yield 1
print(sum(inc for inc in gen()))
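Since consecutive three-measurement windows share two elements, the sum comparison in `gen` reduces to comparing the two elements that differ; an equivalent cross-check:

```python
# sum(nums[i:i+3]) > sum(nums[i-1:i+2]) exactly when nums[i+2] > nums[i-1]
print(sum(nums[i + 2] > nums[i - 1] for i in range(1, len(nums) - 2)))
```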
filtered: remove_generators=104, remove_delete_markers=23 (all other filters 0)

hexsha: 2f907545d59c9d2bffc0a13955bd9ba29dc05554 | size: 477 | ext: py | lang: Python
path: openapi-server/python-flask/openapi_server/controllers/health_check_service_controller.py
repo: michilu/proto-api (stars, issues) / michilu/proto-openapi (forks) @ aca02aaa11064e87462ab34674c0c4974cf70372 | licenses: ["Apache-2.0"]
stars: null | issues: 1 (2020-07-15T09:50:06.000Z .. 2020-07-15T09:50:06.000Z) | forks: null
content:
def health_check_service_health_check(): # noqa: E501
"""health_check_service_health_check
# noqa: E501
:rtype: V1HealthCheckServiceHealthCheckResponse
"""
return 'do some magic!'
avg_line_length: 26.5 | max_line_length: 133 | alphanum_fraction: 0.802935
original_content:
import connexion
import six
from openapi_server.models.runtime_error import RuntimeError # noqa: E501
from openapi_server.models.v1_health_check_service_health_check_response import V1HealthCheckServiceHealthCheckResponse # noqa: E501
from openapi_server import util
def health_check_service_health_check(): # noqa: E501
"""health_check_service_health_check
# noqa: E501
:rtype: V1HealthCheckServiceHealthCheckResponse
"""
return 'do some magic!'
filtered: remove_unused_imports=131, remove_delete_markers=139 (all other filters 0)

hexsha: 887fea9a7f42da7c5675403aa05cab40094c6fb6 | size: 1,067 | ext: py | lang: Python
path: src/model/synapses/tensor_backend/VoltageJump.py
repo: Fassial/pku-intern @ 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
"""
Created on 12:39, June. 4th, 2021
Author: fassial
Filename: VoltageJump.py
"""
__all__ = [
"VoltageJump",
]
avg_line_length: 25.404762 | max_line_length: 79 | alphanum_fraction: 0.582943
original_content:
"""
Created on 12:39, June. 4th, 2021
Author: fassial
Filename: VoltageJump.py
"""
import brainpy as bp
__all__ = [
"VoltageJump",
]
class VoltageJump(bp.TwoEndConn):
target_backend = "general"
def __init__(self, pre, post, conn,
weight = 1., delay = 0., **kwargs
):
# init params
self.weight = weight
self.delay = delay
# init connections
self.conn = conn(pre.size, post.size)
self.conn_mat = self.conn.requires("conn_mat")
self.size = bp.ops.shape(self.conn_mat)
# init vars
self.w = bp.ops.ones(self.size) * self.weight
self.Isyn = self.register_constant_delay("Isyn",
size = self.size,
delay_time = self.delay
)
# init super
super(VoltageJump, self).__init__(pre = pre, post = post, **kwargs)
def update(self, _t):
# set Isyn & post.V
Isyn = self.w * bp.ops.unsqueeze(self.pre.spike, 1) * self.conn_mat
self.post.V += bp.ops.sum(Isyn * (1. - self.post.refractory), axis = 0)
filtered: remove_classes=905, remove_unused_imports=-1, remove_delete_markers=45 (all other filters 0)

hexsha: 2356a07736148deea7e9a4b60909d1ff37a28c82 | size: 845 | ext: py | lang: Python
path: clahe_and_augmentation/parseConfig.py
repo: RandomeName745/DD2424---Project-Covid-19 @ 2e1e647e841eeb00760daecb58effeba3ca237c4 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 10:31:44 2020
@author: alex
"""
avg_line_length: 29.137931 | max_line_length: 85 | alphanum_fraction: 0.654438
original_content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 10 10:31:44 2020
@author: alex
"""
import argparse
from clodsa.utils.conf import Conf
def parseConfig(configfile):
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to configuration file")
args = vars(ap.parse_args(args=["-c",configfile]))
config = {}
conf = Conf(args["conf"])
config["problem"] = conf["problem"]
config["annotationMode"] = conf["annotation_mode"]
config["outputMode"] = conf["output_mode"]
config["generationMode"] = conf["generation_mode"]
config["inputPath"] = conf["input_path"]
# parameters = conf["parameters"]
config["outputPath"] = conf["output_path"]
config["augmentationTechniques"] = conf["augmentation_techniques"]
return config
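A usage sketch for `parseConfig`, assuming a CLoDSA-style JSON configuration file (the filename is hypothetical):

```python
# Sketch: the returned dict exposes exactly the keys read above.
config = parseConfig("augmentation_config.json")  # hypothetical file
print(config["inputPath"], "->", config["outputPath"])
for technique in config["augmentationTechniques"]:
    print("applying", technique)
```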
filtered: remove_function_no_docstring=666, remove_unused_imports=7, remove_delete_markers=67 (all other filters 0)

hexsha: d6844ad5af8ac8dd3731b4be397175e5d6c05d9f | size: 3,563 | ext: py | lang: Python
path: clientui/vstrm_server.py
repo: cbk914/BlackMamba @ 826d5e2994368006cad09acaaa7c6bfa047891b5 | licenses: ["MIT"]
stars: 902 (2021-02-09T09:42:57.000Z .. 2022-03-26T09:28:03.000Z) | issues: 11 (2021-02-12T16:46:51.000Z .. 2021-12-20T21:12:14.000Z) | forks: 126 (2021-02-09T12:16:50.000Z .. 2022-02-25T04:19:18.000Z)
content:
####################################################################################
# BLACKMAMBA BY: LOSEYS (https://github.com/loseys)
#
# QT GUI INTERFACE BY: WANDERSON M.PIMENTA (https://github.com/Wanderson-Magalhaes)
# ORIGINAL QT GUI: https://github.com/Wanderson-Magalhaes/Simple_PySide_Base
####################################################################################
"""
Video streaming server.
"""
import sys
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
try:
SERVER_IP = sys.argv[1]
PORT_VIDEO = sys.argv[2]
except:
SERVER_IP = 0
PORT_VIDEO = 0
if __name__ == "__main__":
start_stream()
avg_line_length: 27.407692 | max_line_length: 91 | alphanum_fraction: 0.572271
original_content:
####################################################################################
# BLACKMAMBA BY: LOSEYS (https://github.com/loseys)
#
# QT GUI INTERFACE BY: WANDERSON M.PIMENTA (https://github.com/Wanderson-Magalhaes)
# ORIGINAL QT GUI: https://github.com/Wanderson-Magalhaes/Simple_PySide_Base
####################################################################################
"""
Video streaming server.
"""
import sys
import socket
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import pygame
from zlib import decompress
from cryptography.fernet import Fernet
try:
SERVER_IP = sys.argv[1]
PORT_VIDEO = sys.argv[2]
except:
SERVER_IP = 0
PORT_VIDEO = 0
def crypt(msg, key):
command = str(msg)
command = bytes(command, encoding='utf8')
cipher_suite = Fernet(key)
encoded_text = cipher_suite.encrypt(command)
return encoded_text
def decrypt(msg, key):
cipher_suite = Fernet(key)
decoded_text_f = cipher_suite.decrypt(msg)
return decoded_text_f
def recvall(conn, length):
try:
buf = b''
while len(buf) < length:
data = conn.recv(length - len(buf))
if not data:
return data
buf += data
return buf
except:
pass
def start_stream(host=str(SERVER_IP), port=int(PORT_VIDEO)):
sock = socket.socket()
sock.bind((host, port))
print("Listening ....")
sock.settimeout(15.0)
sock.listen(5)
try:
conn, addr = sock.accept()
except:
print('socket.timeout: timed out')
return
print("Accepted ....", addr)
client_resolution = (conn.recv(50).decode())
client_resolution = str(client_resolution).split(',')
CLIENT_WIDTH = int(client_resolution[0])
CLIENT_HEIGHT = int(client_resolution[1])
with open('bin/profile/vstream_size.txt', 'r') as f:
scsize = f.read()
f.close()
try:
scsize = scsize.split(':')
SERVER_WIDTH = int(scsize[0])
SERVER_HEIGHT = int(
scsize[1])
except:
SERVER_WIDTH = 1000
SERVER_HEIGHT = 600
pygame.init()
pygame.display.set_caption('BlackMamba')
programIcon = pygame.image.load('icons/others/icon_3.png')
pygame.display.set_icon(programIcon)
screen = pygame.display.set_mode((SERVER_WIDTH, SERVER_HEIGHT))
clock = pygame.time.Clock()
watching = True
try:
while watching:
for event in pygame.event.get():
if event.type == pygame.QUIT:
watching = False
break
            # Retrieve the size-field length, the frame size, and the compressed pixels
try:
size_len = int.from_bytes(conn.recv(1), byteorder='big')
size = int.from_bytes(conn.recv(size_len), byteorder='big')
pixels = decompress(recvall(conn, size))
# Create the Surface from raw pixels
img = pygame.image.fromstring(pixels, (CLIENT_WIDTH, CLIENT_HEIGHT), 'RGB')
# resize the client image to match the server's screen dimensions
scaled_img = pygame.transform.scale(img, (SERVER_WIDTH, SERVER_HEIGHT))
# Display the picture
screen.blit(scaled_img, (0, 0))
pygame.display.flip()
#clock.tick(60)
clock.tick(120)
except:
break
finally:
pygame.quit()
sock.close()
if __name__ == "__main__":
start_stream()
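The server above implies a simple wire format: the client first sends its resolution as `"W,H"`, then for each frame a 1-byte length prefix for the size field, the big-endian frame size, and the zlib-compressed raw RGB pixels. A minimal matching sender, a sketch under that assumption (`grab_rgb_frame` is a hypothetical frame source):

```python
import socket
from zlib import compress

def stream_frames(host, port, grab_rgb_frame, width, height):
    # resolution handshake: the server reads up to 50 bytes of "W,H"
    sock = socket.create_connection((host, port))
    sock.sendall(f"{width},{height}".encode())
    while True:
        payload = compress(grab_rgb_frame())  # raw RGB bytes for one frame
        size = len(payload).to_bytes((len(payload).bit_length() + 7) // 8, "big")
        # frame header: 1 byte holding len(size), then the size, then the data
        sock.sendall(len(size).to_bytes(1, "big") + size + payload)
```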
filtered: remove_function_no_docstring=2,719, remove_unused_imports=7, remove_delete_markers=181 (all other filters 0)

hexsha: 58f8d59cc2c4eb8e7df9a61a133e9fc628033ad5 | size: 4,775 | ext: py | lang: Python
path: Unit3SC_0205/northwind.py
repo: TemsyChen/DS-Unit-3-Sprint-2-SQL-and-Databases @ cba1e0f5476f5e7a13e10ad450474a565d302b33 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import sqlite3
# Connect to the sqlite3 file
connection = sqlite3.connect("northwind_small.sqlite3")
cursor = connection.cursor()
# Queries
# `expensive_items`: What are the ten most expensive items (per unit price) in the database?
price_query = f""" SELECT UnitPrice, ProductName
FROM product
ORDER BY UnitPrice DESC
LIMIT 10;"""
expensive_items = cursor.execute(price_query).fetchall()
print("Expensive items:", expensive_items)
# Expensive items: [(263.5, 'Cte de Blaye'), (123.79, 'Thringer Rostbratwurst'),
# (97, 'Mishi Kobe Niku'), (81, "Sir Rodney's Marmalade"), (62.5, 'Carnarvon Tigers'),
# (55, 'Raclette Courdavault'), (53, 'Manjimup Dried Apples'), (49.3, 'Tarte au sucre'),
# (46, 'Ipoh Coffee'), (45.6, 'Rssle Sauerkraut')]
# `avg_hire_age`: What is the average age of an employee at the time of their hiring?
# ONLY RAN THIS THE FIRST TIME, then commented it out
# add_age_column = f"""
# ALTER TABLE Employee
# ADD age INT AS (hiredate - birthdate)
# """
# cursor.execute(add_age_column)
avghire_query = f"""SELECT AVG(age) from employee"""
avg_hire_age = cursor.execute(avghire_query).fetchone()[0]
print("Average hire age:", avg_hire_age)
# Average hire age: 37.22222222222222
# (*Stretch*) `avg_age_by_city`: How does the average age of employee at hire vary by city?
avg_by_city_query = f"""SELECT AVG(age), city FROM employee
GROUP BY city
"""
avg_age_by_city = cursor.execute(avg_by_city_query).fetchall()
print("Average age by city:", avg_age_by_city)
# Average age by city: [(29.0, 'Kirkland'), (32.5, 'London'),
# (56.0, 'Redmond'), (40.0, 'Seattle'), (40.0, 'Tacoma')]
# - `ten_most_expensive`: What are the ten most expensive items (per unit price) in the database
# *and* their suppliers?
# COMMENTING OUT AFTER RUNNING ONCE
# suppliers_prices_table = f"""CREATE TABLE suppliers_prices AS
# SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id
# """
# cursor.execute(suppliers_prices_table)
# insertion_query = f"""SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id"""
# cursor.execute(insertion_query)
price_supplier_query = f"""SELECT unitprice, companyname
FROM suppliers_prices
ORDER BY unitprice DESC
LIMIT 10;
"""
price_supplier_topten = cursor.execute(price_supplier_query).fetchall()
print("Top most expensive items and their suppliers:", price_supplier_topten)
# Top most expensive items and their suppliers: [(263.5, 'Aux
# joyeux ecclsiastiques'), (123.79, 'Plutzer Lebensmittelgromrkte AG'),
# (97, 'Tokyo Traders'), (81, 'Specialty Biscuits, Ltd.'),
# (62.5, 'Pavlova, Ltd.'), (55, 'Gai pturage'), (53, "G'day, Mate"),
# (49.3, "Forts d'rables"), (46, 'Leka Trading'), (45.6, 'Plutzer Lebensmittelgromrkte AG')]
# - `largest_category`: What is the largest category (by number of unique products in it)?
largest_category_query = f"""SELECT CategoryId, COUNT(DISTINCT ProductName) FROM Product
GROUP BY CategoryId
ORDER BY COUNT(DISTINCT ProductName) DESC"""
largest_category = cursor.execute(largest_category_query).fetchone()[0]
print("Largest category:", largest_category)
# Largest category: 3
# - (*Stretch*) `most_territories`: Who's the employee with the most territories?
# Use `TerritoryId` (not name, region, or other fields) as the unique
# identifier for territories.
# COMMENT OUT AFTER RUNNING ONCE
# employee_territory_table = f"""CREATE TABLE employee_territory AS
# SELECT Employee.FirstName, Employee.LastName,
# EmployeeTerritory.EmployeeId, EmployeeTerritory.TerritoryId
# FROM Employee
# JOIN EmployeeTerritory ON Employee.Id = EmployeeTerritory.EmployeeId;"""
# cursor.execute(employee_territory_table)
territory_query = f"""SELECT COUNT(DISTINCT TerritoryId), FirstName, LastName, EmployeeId from employee_territory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC"""
employee_territory = cursor.execute(territory_query).fetchone()
print("Which employee has the most territory?", employee_territory)
# Which employee has the most territory? (10, 'Robert', 'King', 7)
connection.commit()
connection.close()
avg_line_length: 45.47619 | max_line_length: 113 | alphanum_fraction: 0.660314
original_content:
import sqlite3
# Connect to the sqlite3 file
connection = sqlite3.connect("northwind_small.sqlite3")
cursor = connection.cursor()
# Queries
# `expensive_items`: What are the ten most expensive items (per unit price) in the database?
price_query = f""" SELECT UnitPrice, ProductName
FROM product
ORDER BY UnitPrice DESC
LIMIT 10;"""
expensive_items = cursor.execute(price_query).fetchall()
print("Expensive items:", expensive_items)
# Expensive items: [(263.5, 'Côte de Blaye'), (123.79, 'Thüringer Rostbratwurst'),
# (97, 'Mishi Kobe Niku'), (81, "Sir Rodney's Marmalade"), (62.5, 'Carnarvon Tigers'),
# (55, 'Raclette Courdavault'), (53, 'Manjimup Dried Apples'), (49.3, 'Tarte au sucre'),
# (46, 'Ipoh Coffee'), (45.6, 'Rössle Sauerkraut')]
# `avg_hire_age`: What is the average age of an employee at the time of their hiring?
# ONLY RAN THIS THE FIRST TIME, then commented it out
# add_age_column = f"""
# ALTER TABLE Employee
# ADD age INT AS (hiredate - birthdate)
# """
# cursor.execute(add_age_column)
avghire_query = f"""SELECT AVG(age) from employee"""
avg_hire_age = cursor.execute(avghire_query).fetchone()[0]
print("Average hire age:", avg_hire_age)
# Average hire age: 37.22222222222222
# (*Stretch*) `avg_age_by_city`: How does the average age of employee at hire vary by city?
avg_by_city_query = f"""SELECT AVG(age), city FROM employee
GROUP BY city
"""
avg_age_by_city = cursor.execute(avg_by_city_query).fetchall()
print("Average age by city:", avg_age_by_city)
# Average age by city: [(29.0, 'Kirkland'), (32.5, 'London'),
# (56.0, 'Redmond'), (40.0, 'Seattle'), (40.0, 'Tacoma')]
# - `ten_most_expensive`: What are the ten most expensive items (per unit price) in the database
# *and* their suppliers?
# COMMENTING OUT AFTER RUNNING ONCE
# suppliers_prices_table = f"""CREATE TABLE suppliers_prices AS
# SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id
# """
# cursor.execute(suppliers_prices_table)
# insertion_query = f"""SELECT Product.ProductName, Product.UnitPrice, Supplier.CompanyName
# FROM Product
# LEFT JOIN Supplier ON Product.SupplierId = Supplier.Id"""
# cursor.execute(insertion_query)
price_supplier_query = f"""SELECT unitprice, companyname
FROM suppliers_prices
ORDER BY unitprice DESC
LIMIT 10;
"""
price_supplier_topten = cursor.execute(price_supplier_query).fetchall()
print("Top most expensive items and their suppliers:", price_supplier_topten)
# Top most expensive items and their suppliers: [(263.5, 'Aux
# joyeux ecclésiastiques'), (123.79, 'Plutzer Lebensmittelgroßmärkte AG'),
# (97, 'Tokyo Traders'), (81, 'Specialty Biscuits, Ltd.'),
# (62.5, 'Pavlova, Ltd.'), (55, 'Gai pâturage'), (53, "G'day, Mate"),
# (49.3, "Forêts d'érables"), (46, 'Leka Trading'), (45.6, 'Plutzer Lebensmittelgroßmärkte AG')]
# - `largest_category`: What is the largest category (by number of unique products in it)?
largest_category_query = f"""SELECT CategoryId, COUNT(DISTINCT ProductName) FROM Product
GROUP BY CategoryId
ORDER BY COUNT(DISTINCT ProductName) DESC"""
largest_category = cursor.execute(largest_category_query).fetchone()[0]
print("Largest category:", largest_category)
# Largest category: 3
# - (*Stretch*) `most_territories`: Who's the employee with the most territories?
# Use `TerritoryId` (not name, region, or other fields) as the unique
# identifier for territories.
# COMMENT OUT AFTER RUNNING ONCE
# employee_territory_table = f"""CREATE TABLE employee_territory AS
# SELECT Employee.FirstName, Employee.LastName,
# EmployeeTerritory.EmployeeId, EmployeeTerritory.TerritoryId
# FROM Employee
# JOIN EmployeeTerritory ON Employee.Id = EmployeeTerritory.EmployeeId;"""
# cursor.execute(employee_territory_table)
territory_query = f"""SELECT COUNT(DISTINCT TerritoryId), FirstName, LastName, EmployeeId from employee_territory
GROUP BY EmployeeId
ORDER BY COUNT(DISTINCT TerritoryId) DESC"""
employee_territory = cursor.execute(territory_query).fetchone()
print("Which employee has the most territory?", employee_territory)
# Which employee has the most territory? (10, 'Robert', 'King', 7)
connection.commit()
connection.close()
filtered: remove_non_ascii=22 (all other filters 0)

hexsha: 1c787236266f19826db1b1ea01fce5c806ce4267 | size: 8,541 | ext: py | lang: Python
path: lib/model/complete_net.py
repo: chensjtu/poxture @ f6abea1216c987f0e4c628b250054d764eaecf2e | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import torch.nn as nn
# from .DepthNormalizer import DepthNormalizer
# from iPERCore.models.networks.criterions import VGGLoss
avg_line_length: 44.717277 | max_line_length: 164 | alphanum_fraction: 0.619482
original_content:
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
from .SurfaceClassifier import conv1_1, period_loss
# from .DepthNormalizer import DepthNormalizer
from ..net_util import *
# from iPERCore.models.networks.criterions import VGGLoss
from lib.model.Models import NestedUNet
import numpy as np
class Pint_Model(nn.Module):
def __init__(self, opt):
super(Pint_Model, self).__init__()
self.period_loss = period_loss()
self.feat_uv_error = nn.SmoothL1Loss() # A feature with B uvmap
self.opt = opt
self.NUnet = NestedUNet(in_ch=3, out_ch=3)
norm_type = get_norm_layer(norm_type=opt.norm_color)
self.image_filter = ResnetFilter(opt, norm_layer=norm_type)
# self.conv = conv1_1(input_layers=256, output_layers=16)
init_net(self)
def filter(self, images):
'''
Filter the input images
store all intermediate features.
:param images: [B, C, H, W] input images
'''
self.im_feat = self.image_filter(images)
def forward(self, uv_A, uv_B, part_uv_B, index):
'''
this function is made for pint total train.
'''
complete_feat = self.NUnet(uv_A)
complete_feat_B = self.NUnet(uv_B)
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B)
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# part_uv_B.requires_grad=True # to make uvb as one leaf
# A_feat = complete_feat[:,0:3,:,:]
# part_uv_B = F.interpolate(part_uv_B, scale_factor=0.25, mode='bilinear', align_corners=True)
A_vis_feat = complete_feat[index==1]
B_vis_uv = part_uv_B[index==1]
loss1 = self.feat_uv_error(A_vis_feat, B_vis_uv.detach())
# loss2 = self.vgg_loss(complete_feat[:,:3], complete_feat_B[:,:3].detach())
# loss2 = self.period_loss(complete_feat, complete_feat_B.detach())
loss2=0
return complete_feat, complete_feat_B, loss1, loss2
# def pint_forward(self, uv_A, uv_B):
# '''
# this function is made for pint total train.
# '''
# im_feat = self.image_filter(uv_A) # B C H W for 512 uv_B, B 256 128 128
# self.complete_feat = self.conv(im_feat) # B 16 128 128 -> b 16 512 512 [0:3] loss
# im_feat_B = self.image_filter(uv_B.squeeze(1))
# complete_feat_B = self.conv(im_feat_B)
# A_feat = F.interpolate(self.complete_feat[:,0:3,:,:], scale_factor=4, mode='bilinear', align_corners=True) # in this param, A_feat means complete feature.
# uv_B_feat = uv_B.squeeze(1).expand_as(A_feat)
# uv_B_feat.requires_grad=True # to make uvb as one leaf
# A_vis_feat = A_feat[uv_B_feat != 0.0]
# B_vis_uv = uv_B_feat[uv_B_feat != 0.0]
# loss_content = self.feat_uv_error(A_vis_feat, B_vis_uv) * 100
# loss_content1 = self.feat_uv_error(A_feat, uv_A)*100
# # loss_feat = self.error_term(self.complete_feat, complete_feat_B)
# return A_feat, A_vis_feat, B_vis_uv, self.complete_feat, complete_feat_B, loss_content+loss_content1
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, last)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, last=False):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if last:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class ResnetFilter(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, opt, input_nc=3, output_nc=256, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False,
n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert (n_blocks >= 0)
super(ResnetFilter, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
if i == n_blocks - 1:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias, last=True)]
else:
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer,
use_dropout=use_dropout, use_bias=use_bias)]
if opt.use_tanh:
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
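A quick shape check for `ResnetFilter` in isolation, a sketch assuming a minimal stand-in for `opt` (only `use_tanh` is read in `__init__`):

```python
# Two stride-2 downsampling convs take 128x128 inputs to 32x32,
# with channels 3 -> 64 -> 128 -> 256.
import torch
from types import SimpleNamespace

opt = SimpleNamespace(use_tanh=False)
net = ResnetFilter(opt)
x = torch.randn(1, 3, 128, 128)
print(net(x).shape)  # expected: torch.Size([1, 256, 32, 32])
```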
filtered: remove_classes=8,146, remove_unused_imports=44, remove_delete_markers=223 (all other filters 0)

hexsha: 91b66cd5922aac62b6769b7d844bbbf6732242a0 | size: 13,831 | ext: py | lang: Python
path: ActiveCell_RealMorphology_Burst_Inh_cluster.py
repo: EilamLeleo/burst @ 538dbf6845f4de5519c0392368d611d1e54608e1 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
#!/usr/lib/python-exec/python2.7/python
import os
import sys
os.chdir('C:/Users/Leleo/Documents/Active Cell Real Morphology/')
from neuron import h
from neuron import gui
#%%
import numpy as np
import time
import math
import cPickle as pickle
#%%
sk = False
if sk==True:
from sklearn import decomposition
from sklearn import cluster
from sklearn import linear_model
from sklearn import ensemble
from sklearn import cross_validation
#%%
h.load_file('nrngui.hoc')
h.load_file("import3d.hoc")
cvode = h.CVode()
cvode.active(0)
morphologyFilename = "morphologies/cell1.asc"
#morphologyFilename = "morphologies/cell2.asc"
#morphologyFilename = "morphologies/cell3.asc"
#biophysicalModelFilename = "L5PCbiophys1.hoc"
#biophysicalModelFilename = "L5PCbiophys2.hoc"
#biophysicalModelFilename = "L5PCbiophys3.hoc"
#biophysicalModelFilename = "L5PCbiophys4.hoc"
#biophysicalModelFilename = "L5PCbiophys5.hoc"
biophysicalModelFilename = "L5PCbiophys5b.hoc"
#biophysicalModelTemplateFilename = "L5PCtemplate.hoc"
biophysicalModelTemplateFilename = "L5PCtemplate_2.hoc"
#%%
h.load_file(biophysicalModelFilename)
h.load_file(biophysicalModelTemplateFilename)
L5PC = h.L5PCtemplate(morphologyFilename)
h.celsius = 34
#%% set dendritic VDCC g=0
#secs = h.allsec
VDCC_g = 1
if VDCC_g==0:
for sec in h.allsec():
if hasattr(sec, 'gCa_HVAbar_Ca_HVA'):
sec.gCa_HVAbar_Ca_HVA = 0
#%% helper functions
#%% create length-weighted random section list
#%% add some random NMDA synapses and plot a somatic trace just to see all things are alive and kicking
#%% run simulation on some parameter pair, plot the space
L5PC = h.L5PCtemplate(morphologyFilename)
name = 'inh_secdt_meds62_exc60dt0sd0num15'
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/coincidence/wgh1/'+name+'/'
saveDir = 'C:/Users/Leleo/Documents/coincidence/wgh1/'+name+'/'
if not os.path.exists(saveDir):
os.makedirs(saveDir)
try:
randomSeed = int(sys.argv[1])
print 'random seed selected by user - %d' %(randomSeed)
except:
randomSeed = np.random.randint(100000)
print 'randomly chose seed - %d' %(randomSeed)
np.random.seed(randomSeed)
#ind = 1
#a = np.linspace(-50,-25,num=6),np.linspace(-20,20,num=21),np.linspace(25,100,num=16)
ApicalBasalInterval = [0]#np.linspace(-10,10,num=11) #[x for xs in a for x in xs]
numBasal = 50 #35 #np.linspace(0,200,num=81)
numApical = 30 #np.linspace(0,20,num=11)#50,num=21)#
numInh = 20 #0
#numOblique = 40-numApical
#totalSyn = [20,50,100,200,400,600,800]#[80,120,150,180]#np.linspace(0,200,num=5)#41)
partApical = 2 #[5,10,20,50,100,200,500]#[i for i in np.linspace(10,100,num=10)]+[200,300,400,500]#np.logspace(0,7,num=29,base=2)
medSegment = [0,36,60,63]#[36]+[i for i in np.linspace(60,65,num=6)]#37,44,num=8)] ##40#60 #
#secInh = [60[0.5],60[1],61[0],62[0],63[0],64[0],67[0]] #optimal planned inh at prox junc
#secInh = [60[1],61[0],63[1]] #encapsulating inh for partApi=20
#random.choice(secInh)
treeTime = 0 #0.1*np.logspace(3,10,num=22,base=2)
numExperiments = 20
spks = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
frqs = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
#trc = [[[] for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
i = 0
j = 0
start = time.time()
for ApiBasInd in ApicalBasalInterval:#treeT in treeTime:#
print "Running for interval: %s [ms]" % (int(ApiBasInd))#treeTime: %.2f [ms]" % (treeT)#
#for numB in numBasal:#totalS in totalSyn:#
# print "Running for %s basal synapses" % (int(numB))
# for partApi in partApical:
for medS in medSegment:
# for numA in numApical:#np.linspace(0,totalS,num=41):#
print "Running for inhibition in sec: %s" % (int(medS)) #partApi=%s" % (float(partApi)) #
# numA = int(totalS*0.4)
spks[j][i],frqs[j][i] = runSim(L5PC,ApiBasInd,treeTime,numBasal,numInh,numApical,medS,partApical,numExperiments)
j = j+1
j = 0
i = i+1
pickle.dump(spks,open(saveDir+name+'_spks'+str(randomSeed)+".npy","wb"),protocol=2)
pickle.dump(frqs,open(saveDir+name+'_frqs'+str(randomSeed)+".npy","wb"),protocol=2)
print "Saved as ", saveDir+name+'_spks'+str(randomSeed)+".npy"
print "Total running time was: ", (time.time()-start)/3600, "hours"
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/concidence/'
#pickle.dump(spks1,open(saveDir+'dt_treet_30tot_hires_spks',"wb"),protocol=2)
#pickle.dump(frqs1,open(saveDir+'dt_treet_30tot_hires_frqs',"wb"),protocol=2)
| 41.041543
| 150
| 0.670161
|
#!/usr/lib/python-exec/python2.7/python
import os
import sys
os.chdir('C:/Users/Leleo/Documents/Active Cell Real Morphology/')
import neuron  # needed: runSim below calls neuron.run()
from neuron import h
from neuron import gui
#%%
import numpy as np
import time
import math
import cPickle as pickle
#%%
sk = False
if sk==True:
from sklearn import decomposition
from sklearn import cluster
from sklearn import linear_model
from sklearn import ensemble
from sklearn import cross_validation
#%%
h.load_file('nrngui.hoc')
h.load_file("import3d.hoc")
cvode = h.CVode()
cvode.active(0)
morphologyFilename = "morphologies/cell1.asc"
#morphologyFilename = "morphologies/cell2.asc"
#morphologyFilename = "morphologies/cell3.asc"
#biophysicalModelFilename = "L5PCbiophys1.hoc"
#biophysicalModelFilename = "L5PCbiophys2.hoc"
#biophysicalModelFilename = "L5PCbiophys3.hoc"
#biophysicalModelFilename = "L5PCbiophys4.hoc"
#biophysicalModelFilename = "L5PCbiophys5.hoc"
biophysicalModelFilename = "L5PCbiophys5b.hoc"
#biophysicalModelTemplateFilename = "L5PCtemplate.hoc"
biophysicalModelTemplateFilename = "L5PCtemplate_2.hoc"
#%%
h.load_file(biophysicalModelFilename)
h.load_file(biophysicalModelTemplateFilename)
L5PC = h.L5PCtemplate(morphologyFilename)
h.celsius = 34
#%% set dendritic VDCC g=0
#secs = h.allsec
VDCC_g = 1
if VDCC_g==0:
for sec in h.allsec():
if hasattr(sec, 'gCa_HVAbar_Ca_HVA'):
sec.gCa_HVAbar_Ca_HVA = 0
#%% helper functions
def Add_NMDA_SingleSynapticEventToSegment(segment, activationTime, synapseWeight, exc_inh):
# synapse = h.ProbAMPANMDA2(segment)
# synapse = h.ProbAMPANMDA_EMS(segLoc,sec=section)
if exc_inh==0: # inhibitory
synapse = h.ProbGABAAB_EMS(segment) #GABAA/B
synapse.tau_r_GABAA = 0.2
synapse.tau_d_GABAA = 8
synapse.tau_r_GABAB = 3.5
synapse.tau_d_GABAB = 260.9
# synapse.gmax = .001
synapse.e_GABAA = -80
synapse.e_GABAB = -97
synapse.GABAB_ratio = 0.0
# synapse.Use = 1
# synapse.u0 = 0
# synapse.Dep = 0
# synapse.Fac = 0
else: # excitatory
synapse = h.ProbAMPANMDA2(segment)
synapse.gmax = .0004
# synapse = h.ProbAMPANMDA_EMS(segLoc,sec=section)
synapse.Use = 1.0
synapse.Dep = 0
synapse.Fac = 0
netStimulation = h.NetStim()
netStimulation.number = 1
netStimulation.start = activationTime
netConnection = h.NetCon(netStimulation,synapse)
netConnection.delay = 0
netConnection.weight[0] = synapseWeight
return netStimulation,netConnection,synapse
#%% create length-weighted random section list
def randSecWeight(obj,medSeg,part,num):
allLen = []
for i in range(len(obj)):
allLen.append(obj[i].L)
randSecList = [0 for i in range(num)]
h.distance(sec=obj[medSeg]) # define distance measure from medSeg
# draw from cumulative length a seg for syn
x = np.sum(allLen[:medSeg])+(np.random.rand(num)-0.5)*np.sum(allLen)/part
j=0
farbug=0
while j<num:
# redraw boundary crossers
if x[j]<0 or x[j]>np.sum(allLen):
x[j] = np.sum(allLen[:medSeg])+(np.random.rand()-0.5)*np.sum(allLen)/part
continue
# find sec
for i in range(len(obj)):
if x[j]<np.sum(allLen[:i+1]):
randSecList[j]=i
break
# check that sec is sufficiently close to medseg
if h.distance(obj[randSecList[j]](1))>sum(allLen)/part and farbug<5:#obj[medSeg].L+obj[randSecList[j]].L:#
x[j] = np.sum(allLen[:medSeg])+(np.random.rand()-0.5)*np.sum(allLen)/part
farbug+=1
continue
j+=1
farbug=0
return randSecList
#%% add some random NMDA synapses and plot a somatic trace just to see all things are alive and kicking
def runSim(cell,ApiBasInt,treeT,numBas,numApi,partApi,medSeg,inSec,numExp):
simulationTime = 400
silentTimeAtStart = 100
delayTime = 200
silentTimeAtEnd = 100
origNumSamplesPerMS = 40 #20 # was 20!!!
totalSimDuration = simulationTime + silentTimeAtStart + silentTimeAtEnd
listOfSomaTraces = []
spikes = []
numSpikes = 0
numSpikesPerExp = [0]*numExp
freq = [0]*numExp
for experiment in range(numExp):
startTime = time.time()
listOfRandBasalSectionInds = randSecWeight(cell.dend,44,1,int(numBas))#np.random.randint(0,len(cell.dend),int(numBas))
listOfRandApicalSectionInds = randSecWeight(cell.apic,62,20,20)#int(numApi)) #medSeg + np.random.randint(-distance,distance,int(numApi))
if partApi>15:
listOfRandInhSectionInds = randSecWeight(cell.apic,medSeg,partApi,numApi)
else:
listOfRandInhSectionInds = randSecWeight(cell.apic,medSeg,partApi,numApi)
# listOfRandApicalSectionInds = randSecWeight(cell.apic,np.random.randint(37,78),partApi,10)#int(numApi))
# listOfRandObliqueSectionInds = np.random.randint(0,len(cell.apic)/partApi,0)#int(40-numApi)) #obliques
listOfBasalSections = [cell.dend[x] for x in listOfRandBasalSectionInds]
listOfApicalSections = [cell.apic[x] for x in listOfRandApicalSectionInds]
listOfInhSections = [cell.apic[x] for x in listOfRandInhSectionInds]
# listOfObliqueSections = [cell.apic[x] for x in listOfRandObliqueSectionInds]
# listOfSections = listOfApicalSections + listOfBasalSections
listOfRandBasalLocationsInSection = np.random.rand(len(listOfRandBasalSectionInds))
listOfRandApicalLocationsInSection = np.random.rand(len(listOfRandApicalSectionInds))
# listOfRandInhLocationsInSection = float(inSec)/4 + 0.25*np.random.rand(len(listOfRandInhSectionInds))
if partApi>30:
listOfRandInhLocationsInSection = [1]*numApi #min(1,float(7440)/partApi/cell.apic[medSeg].L)*np.random.rand(len(listOfRandInhSectionInds))
else:
listOfRandInhLocationsInSection = np.random.rand(len(listOfRandInhSectionInds))
# listOfRandObliqueLocationsInSection = np.random.rand(len(listOfRandObliqueSectionInds))
# listOfSegLocs = list(listOfRandApicalLocationsInSection) + list(listOfRandBasalLocationsInSection)
listOfEvents = []
for k, section in enumerate(listOfApicalSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1)
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandApicalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfBasalSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandBasalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfInhSections):
eventTime = silentTimeAtStart + 100*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandInhLocationsInSection[k]), eventTime, 1, 0))
for k, section in enumerate(listOfApicalSections):
eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(0,1) #gauss(0.5,0.2)
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandApicalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfBasalSections):
eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandBasalLocationsInSection[k]), eventTime, 1, 1))
for k, section in enumerate(listOfInhSections):
eventTime = silentTimeAtStart + delayTime + ApiBasInt + treeT*np.random.normal(0,1) #simulationTime/2*np.random.rand(1)[0]
listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandInhLocationsInSection[k]), eventTime, 1, 0))
#add obliques
# for k, section in enumerate(listOfObliqueSections):
# eventTime = silentTimeAtStart + delayTime + treeT*np.random.normal(1,0.2) #simulationTime/2*np.random.rand(1)[0]
# listOfEvents.append(Add_NMDA_SingleSynapticEventToSegment(section(listOfRandObliqueLocationsInSection[k]), eventTime, 2))
##%% run the simulation
h.dt = 0.025
recTime = h.Vector()
recTime.record(h._ref_t)
recVoltage = h.Vector()
recVoltage.record(cell.soma[0](0.5)._ref_v)
cvode.cache_efficient(1)
h.finitialize(-76)
stopTime = totalSimDuration
neuron.run(stopTime)
# plot the trace
origRecordingTime = np.array(recTime.to_python()) # ugly workaround to recTime.as_numpy()
origSomaVoltage = np.array(recVoltage.to_python()) # ugly workaround to recVoltage.as_numpy()
recordingTime = np.arange(0,totalSimDuration,1.0/origNumSamplesPerMS)
somaVoltage = np.interp(recordingTime, origRecordingTime, origSomaVoltage)
listOfSomaTraces.append(somaVoltage)
origSpikes = []
tempSpikes = 0
k = (silentTimeAtStart+delayTime-50)*origNumSamplesPerMS #int(np.min([0,ApiBasInt]))
while k < (totalSimDuration-silentTimeAtEnd)*origNumSamplesPerMS:
if somaVoltage[k]>-10:
tempTime = float(k)/origNumSamplesPerMS
if tempSpikes>0 and tempTime-origSpikes[-1]>20:
break
origSpikes.append(tempTime)
# numSpikesPerExp[experiment] = tempSpikes + 1
numSpikes = numSpikes + 1
tempSpikes += 1 # numSpikesPerExp[experiment]
k = k+origNumSamplesPerMS*3
else:
k = k+5 # was 1 before
# spikes = []
spikes.append(origSpikes)
if tempSpikes>1:
freq[experiment] = tempSpikes/(origSpikes[-1]-origSpikes[-tempSpikes])
# plt.figure()
# plt.plot(recordingTime, somaVoltage)
# plt.xlabel('Time [ms]'); plt.ylabel('Voltage [mV]')
# plt.axis(xmin=0, xmax=stopTime, ymin=min(somaVoltage)-5, ymax=max(somaVoltage)+5)
#listOfEvents = []
if (experiment+1)%10==0 or (time.time()-startTime)/60>5 or numExp<5:
print "Dt %s treeTime %s exp. # %s took %.3f minutes" % (ApiBasInt,treeT,experiment+1, (time.time()-startTime)/60)
print "Mean no. of spikes: %s" % (float(numSpikes)/numExp)
return float(numSpikes)/numExp,np.mean(freq)#, listOfSomaTraces, recordingTime
#%% run simulation on some parameter pair, plot the space
L5PC = h.L5PCtemplate(morphologyFilename)
name = 'inh_secdt_meds62_exc60dt0sd0num15'
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/coincidence/wgh1/'+name+'/'
saveDir = 'C:/Users/Leleo/Documents/coincidence/wgh1/'+name+'/'
if not os.path.exists(saveDir):
os.makedirs(saveDir)
try:
randomSeed = int(sys.argv[1])
print 'random seed selected by user - %d' %(randomSeed)
except:
randomSeed = np.random.randint(100000)
print 'randomly chose seed - %d' %(randomSeed)
np.random.seed(randomSeed)
#ind = 1
#a = np.linspace(-50,-25,num=6),np.linspace(-20,20,num=21),np.linspace(25,100,num=16)
ApicalBasalInterval = [0]#np.linspace(-10,10,num=11) #[x for xs in a for x in xs]
numBasal = 50 #35 #np.linspace(0,200,num=81)
numApical = 30 #np.linspace(0,20,num=11)#50,num=21)#
numInh = 20 #0
#numOblique = 40-numApical
#totalSyn = [20,50,100,200,400,600,800]#[80,120,150,180]#np.linspace(0,200,num=5)#41)
partApical = 2 #[5,10,20,50,100,200,500]#[i for i in np.linspace(10,100,num=10)]+[200,300,400,500]#np.logspace(0,7,num=29,base=2)
medSegment = [0,36,60,63]#[36]+[i for i in np.linspace(60,65,num=6)]#37,44,num=8)] ##40#60 #
#secInh = [60[0.5],60[1],61[0],62[0],63[0],64[0],67[0]] #optimal planned inh at prox junc
#secInh = [60[1],61[0],63[1]] #encapsulating inh for partApi=20
#random.choice(secInh)
treeTime = 0 #0.1*np.logspace(3,10,num=22,base=2)
numExperiments = 20
spks = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
frqs = [[0 for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
#trc = [[[] for i in range(len(ApicalBasalInterval))] for j in range(len(medSegment))]#*4)]
i = 0
j = 0
start = time.time()
for ApiBasInd in ApicalBasalInterval:#treeT in treeTime:#
print "Running for interval: %s [ms]" % (int(ApiBasInd))#treeTime: %.2f [ms]" % (treeT)#
#for numB in numBasal:#totalS in totalSyn:#
# print "Running for %s basal synapses" % (int(numB))
# for partApi in partApical:
for medS in medSegment:
# for numA in numApical:#np.linspace(0,totalS,num=41):#
print "Running for inhibition in sec: %s" % (int(medS)) #partApi=%s" % (float(partApi)) #
# numA = int(totalS*0.4)
        # positional mapping into runSim(cell, ApiBasInt, treeT, numBas, numApi, partApi, medSeg, inSec, numExp):
        # numInh fills numApi (inhibitory count), numApical fills partApi, and partApical fills inSec here
        spks[j][i],frqs[j][i] = runSim(L5PC,ApiBasInd,treeTime,numBasal,numInh,numApical,medS,partApical,numExperiments)
j = j+1
j = 0
i = i+1
pickle.dump(spks,open(saveDir+name+'_spks'+str(randomSeed)+".npy","wb"),protocol=2)
pickle.dump(frqs,open(saveDir+name+'_frqs'+str(randomSeed)+".npy","wb"),protocol=2)
print "Saved as ", saveDir+name+'_spks'+str(randomSeed)+".npy"
print "Total running time was: ", (time.time()-start)/3600, "hours"
#saveDir = '/ems/elsc-labs/segev-i/eilam.goldenberg/Documents/concidence/'
#pickle.dump(spks1,open(saveDir+'dt_treet_30tot_hires_spks',"wb"),protocol=2)
#pickle.dump(frqs1,open(saveDir+'dt_treet_30tot_hires_frqs',"wb"),protocol=2)
| 0
| 0
| 0
| 0
| 0
| 9,194
| 0
| 0
| 69
|
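Editor's note: the heart of randSecWeight above is drawing dendritic sections with probability proportional to their length. The NEURON-free sketch below shows just that cumulative-length trick; it deliberately omits the script's extra re-draw loop that keeps synapses within sum(allLen)/part of medSeg, so it is an illustration, not the study's routine.
# Hedged sketch: length-weighted random choice of section indices.
import numpy as np

def length_weighted_sections(section_lengths, num, rng=None):
    """Pick `num` section indices with probability proportional to length."""
    rng = np.random.default_rng() if rng is None else rng
    cum = np.cumsum(np.asarray(section_lengths, dtype=float))  # cumulative cable length
    draws = rng.random(num) * cum[-1]  # uniform positions along the total length
    return np.searchsorted(cum, draws, side='right')  # map each position to its section

# Example: section 2 (length 50) is drawn far more often than section 0 (length 5).
print(length_weighted_sections([5.0, 10.0, 50.0], num=8))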
ec42f79f6e9bebd0d6cc7d934779f60d52434f4f
| 1,263
|
py
|
Python
|
sharesansar/driver.py
|
prajwal-stha/web-scrapers
|
bfcc5a065e859c69f1a9a2065c9c857b22af42c0
|
[
"MIT"
] | null | null | null |
sharesansar/driver.py
|
prajwal-stha/web-scrapers
|
bfcc5a065e859c69f1a9a2065c9c857b22af42c0
|
[
"MIT"
] | null | null | null |
sharesansar/driver.py
|
prajwal-stha/web-scrapers
|
bfcc5a065e859c69f1a9a2065c9c857b22af42c0
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
import time
from datetime import date
from selenium.webdriver.common.keys import Keys
from scrape_table_all import scrape_table
from return_dates import return_dates
# Open the link
PATH = "/Users/prajwalshrestha/Desktop/PythonApp/thesis/web-scrapers/sharesansar/chromedriver"
browser = webdriver.Chrome(PATH)
browser.maximize_window()
browser.get("https://www.sharesansar.com/today-share-price")
# Select the type of data to scrape
searchBar = browser.find_element_by_id('sector')
browser.implicitly_wait(20)
# Select Commercial Bank
searchBar.send_keys('Commercial Bank')
sdate = date(2020, 3, 23)
edate = date(2021, 5, 13)
dates = return_dates(sdate, edate)
for day in dates:
# Enter the date
date_box = browser.find_elements_by_id('fromdate')
date_box[0].clear()
date_box[0].send_keys(day)
# Click Search
searchBar = browser.find_element_by_id('btn_todayshareprice_submit')
searchBar.click()
time.sleep(3)
    # Needed for this site
    searchBar.send_keys(Keys.ENTER)
    # Wait for the data to show up; the longer wait gives the table time to load before scraping begins
time.sleep(8)
# Scrape the table
html = browser.page_source
scrape_table(data=html, date=day)
browser.close()
| 30.071429
| 94
| 0.760095
|
from selenium import webdriver
import time
from datetime import date
from selenium.webdriver.common.keys import Keys
from scrape_table_all import scrape_table
from return_dates import return_dates
# Open the link
PATH = "/Users/prajwalshrestha/Desktop/PythonApp/thesis/web-scrapers/sharesansar/chromedriver"
browser = webdriver.Chrome(PATH)
browser.maximize_window()
browser.get("https://www.sharesansar.com/today-share-price")
# Select the type of data to scrape
searchBar = browser.find_element_by_id('sector')
browser.implicitly_wait(20)
# Select Commercial Bank
searchBar.send_keys('Commercial Bank')
sdate = date(2020, 3, 23)
edate = date(2021, 5, 13)
dates = return_dates(sdate, edate)
for day in dates:
# Enter the date
date_box = browser.find_elements_by_id('fromdate')
date_box[0].clear()
date_box[0].send_keys(day)
# Click Search
searchBar = browser.find_element_by_id('btn_todayshareprice_submit')
searchBar.click()
time.sleep(3)
    # Needed for this site
    searchBar.send_keys(Keys.ENTER)
    # Wait for the data to show up; the longer wait gives the table time to load before scraping begins
time.sleep(8)
# Scrape the table
html = browser.page_source
scrape_table(data=html, date=day)
browser.close()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
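Editor's note: two hedged sketches related to the scraper above. The first replaces the fixed time.sleep() pauses with an explicit wait (newer Selenium releases also drop find_element_by_id in favour of find_element(By.ID, ...)); the readiness condition is an assumption about the page. The second shows what the imported but unseen return_dates(sdate, edate) helper plausibly does, given that its output is typed into a date box; the date format is an assumption.
# Hedged sketch 1: wait for the results table instead of sleeping blindly.
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

def wait_for_table(browser, timeout=20):
    return WebDriverWait(browser, timeout).until(
        EC.presence_of_element_located((By.TAG_NAME, "table"))
    )

# Hedged sketch 2: a plausible return_dates, one string per calendar day.
from datetime import timedelta

def return_dates_sketch(sdate, edate):
    days = (edate - sdate).days + 1
    return [(sdate + timedelta(n)).strftime("%Y-%m-%d") for n in range(days)]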
b8886f82232200a3b8d715152e899cad7d68af4a
| 2,038
|
py
|
Python
|
ots/main/urls.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | 3
|
2021-12-01T15:56:42.000Z
|
2021-12-23T15:49:48.000Z
|
ots/main/urls.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | null | null | null |
ots/main/urls.py
|
rashikbuksh/Optimal-Transportation-System
|
18c6d5341c6d3ecbb1c8fcfba8e46ca2ba493882
|
[
"MIT"
] | 2
|
2021-09-24T19:49:28.000Z
|
2021-12-22T10:25:38.000Z
|
from django.conf.urls import url
from django.urls import path
from . import views
app_name = 'articles'
urlpatterns = [
url(r'^$', views.homepage, name="list"),
url(r'^about/$', views.about, name="about"),
url(r'^contact/$', views.contact, name="contact"),
url(r'^bolaka/$', views.bolaka, name="bolaka"),
url(r'^offers_page/$', views.offers, name="offers_page"),
url(r'^bolakareview/$', views.bolakareview, name="bolakareview"),
url(r'^ticket/$', views.ticket, name="ticket"),
    path('deletebalaka/<str:pk>/', views.deletebalaka, name="deletebalaka"),  # path() patterns are literal, so the regex '$' was dropped
url(r'^ticket_page/$', views.ticket_page, name="ticket_page"),
# Air
url(r'^Air_Biman_Bangladesh/$', views.Air_Biman_Bangladesh, name="Air_Biman_Bangladesh"),
url(r'^Air_Novoair/$', views.Air_Novoair, name="Air_Novoair"),
url(r'^Air_US_Bangla/$', views.Air_US_Bangla, name="Air_US_Bangla"),
# Bus
url(r'^Bus_Akash/$', views.Bus_Akash, name="Bus_Akash"),
url(r'^Bus_Alif/$', views.Bus_Alif, name="Bus_Alif"),
url(r'^Bus_Anabil/$', views.Bus_Anabil, name="Bus_Anabil"),
url(r'^Bus_BRTC/$', views.Bus_BRTC, name="Bus_BRTC"),
url(r'^Bus_Green_Dhaka/$', views.Bus_Green_Dhaka, name="Bus_Green_Dhaka"),
url(r'^Bus_Raida/$', views.Bus_Raida, name="Bus_Raida"),
url(r'^Bus_Skyline/$', views.Bus_Skyline, name="Bus_Skyline"),
url(r'^Bus_Supravat/$', views.Bus_Supravat, name="Bus_Supravat"),
url(r'^Bus_VIP/$', views.Bus_VIP, name="Bus_VIP"),
# Train
url(r'^Train_Chitra_Express/$', views.Train_Chitra_Express, name="Train_Chitra_Express"),
url(r'^Train_Ekota_Express/$', views.Train_Ekota_Express, name="Train_Ekota_Express"),
url(r'^Train_Mahanagar_Godhuli/$', views.Train_Mahanagar_Godhuli, name="Train_Mahanagar_Godhuli"),
url(r'^Train_Suborno_Express/$', views.Train_Suborno_Express, name="Train_Suborno_Express"),
url(r'^Train_Tista_Express/$', views.Train_Tista_Express, name="Train_Tista_Express"),
url(r'^(?P<slug>[\w-]+)/$', views.homepage, name="list"),
]
| 44.304348
| 102
| 0.686948
|
from django.conf.urls import url
from django.urls import path
from . import views
app_name = 'articles'
urlpatterns = [
url(r'^$', views.homepage, name="list"),
url(r'^about/$', views.about, name="about"),
url(r'^contact/$', views.contact, name="contact"),
url(r'^bolaka/$', views.bolaka, name="bolaka"),
url(r'^offers_page/$', views.offers, name="offers_page"),
url(r'^bolakareview/$', views.bolakareview, name="bolakareview"),
url(r'^ticket/$', views.ticket, name="ticket"),
    path('deletebalaka/<str:pk>/', views.deletebalaka, name="deletebalaka"),  # path() patterns are literal, so the regex '$' was dropped
url(r'^ticket_page/$', views.ticket_page, name="ticket_page"),
# Air
url(r'^Air_Biman_Bangladesh/$', views.Air_Biman_Bangladesh, name="Air_Biman_Bangladesh"),
url(r'^Air_Novoair/$', views.Air_Novoair, name="Air_Novoair"),
url(r'^Air_US_Bangla/$', views.Air_US_Bangla, name="Air_US_Bangla"),
# Bus
url(r'^Bus_Akash/$', views.Bus_Akash, name="Bus_Akash"),
url(r'^Bus_Alif/$', views.Bus_Alif, name="Bus_Alif"),
url(r'^Bus_Anabil/$', views.Bus_Anabil, name="Bus_Anabil"),
url(r'^Bus_BRTC/$', views.Bus_BRTC, name="Bus_BRTC"),
url(r'^Bus_Green_Dhaka/$', views.Bus_Green_Dhaka, name="Bus_Green_Dhaka"),
url(r'^Bus_Raida/$', views.Bus_Raida, name="Bus_Raida"),
url(r'^Bus_Skyline/$', views.Bus_Skyline, name="Bus_Skyline"),
url(r'^Bus_Supravat/$', views.Bus_Supravat, name="Bus_Supravat"),
url(r'^Bus_VIP/$', views.Bus_VIP, name="Bus_VIP"),
# Train
url(r'^Train_Chitra_Express/$', views.Train_Chitra_Express, name="Train_Chitra_Express"),
url(r'^Train_Ekota_Express/$', views.Train_Ekota_Express, name="Train_Ekota_Express"),
url(r'^Train_Mahanagar_Godhuli/$', views.Train_Mahanagar_Godhuli, name="Train_Mahanagar_Godhuli"),
url(r'^Train_Suborno_Express/$', views.Train_Suborno_Express, name="Train_Suborno_Express"),
url(r'^Train_Tista_Express/$', views.Train_Tista_Express, name="Train_Tista_Express"),
url(r'^(?P<slug>[\w-]+)/$', views.homepage, name="list"),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
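Editor's note: url() here is the old regex-based django.conf.urls.url, deprecated since Django 3.1 and removed in 4.0. Below is a hedged sketch of the same routes on current Django, using the views imported above; only a representative subset is shown.
# Hedged sketch: path()/re_path() equivalents for a few routes above.
from django.urls import path, re_path
from . import views

urlpatterns_sketch = [
    path('', views.homepage, name='list'),
    path('about/', views.about, name='about'),
    path('deletebalaka/<str:pk>/', views.deletebalaka, name='deletebalaka'),
    re_path(r'^(?P<slug>[\w-]+)/$', views.homepage, name='list'),
]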
b81f72c1a629494935d113e4876d1627760656f2
| 53
|
py
|
Python
|
bana/OpenMayaFX/__init__.py
|
christophercrouzet/bana
|
8087df05ba9844b4d78d3c4699948ca61cf7621d
|
[
"MIT"
] | 24
|
2017-01-11T15:57:46.000Z
|
2020-09-23T06:18:30.000Z
|
bana/OpenMayaFX/__init__.py
|
christophercrouzet/bana
|
8087df05ba9844b4d78d3c4699948ca61cf7621d
|
[
"MIT"
] | null | null | null |
bana/OpenMayaFX/__init__.py
|
christophercrouzet/bana
|
8087df05ba9844b4d78d3c4699948ca61cf7621d
|
[
"MIT"
] | 2
|
2017-03-06T23:52:08.000Z
|
2020-09-23T06:19:03.000Z
|
"""Extensions for the ``maya.OpenMayaFX`` module."""
| 26.5
| 52
| 0.679245
|
"""Extensions for the ``maya.OpenMayaFX`` module."""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8b0fe30871875ffa461d0b5d638a14149f7fd68f
| 378
|
py
|
Python
|
apps/metadata/users/models.py
|
DiegoCorrea/ouvidoMusical
|
e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5
|
[
"MIT"
] | 1
|
2021-10-06T19:35:48.000Z
|
2021-10-06T19:35:48.000Z
|
apps/metadata/users/models.py
|
DiegoCorrea/ouvido_musical-Back
|
e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5
|
[
"MIT"
] | null | null | null |
apps/metadata/users/models.py
|
DiegoCorrea/ouvido_musical-Back
|
e8bdb993e2c6ef2fe4a78e844bc60be2738a5ba5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
| 17.181818
| 39
| 0.589947
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import models
class User(models.Model):
id = models.CharField(
max_length=255,
unique=True,
db_index=True,
primary_key=True,
default=uuid.uuid1().hex
)
def as_json(self):
return dict(
user_id=self.id
)
| 0
| 0
| 0
| 247
| 0
| 0
| 0
| -3
| 69
|
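Editor's note: in the original model above, default=uuid.uuid1().hex is evaluated once at import time, so every row created without an explicit id would share the same default. Django accepts a callable for default, which is evaluated per row; a hedged sketch follows (the model and helper names are illustrative).
# Hedged sketch: a per-row callable default instead of a fixed import-time value.
import uuid
from django.db import models

def make_user_id():
    # uuid4 also avoids the MAC address/timestamp that uuid1 embeds
    return uuid.uuid4().hex

class UserSketch(models.Model):
    id = models.CharField(max_length=255, primary_key=True, default=make_user_id)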
1477ad88726678c8460cc5fe89ba40da27efa1cb
| 626
|
py
|
Python
|
ansible/collections/ansible_collections/nhsd/apigee/plugins/filter/filters.py
|
uk-gov-mirror/nhsdigital.api-management-utils
|
4ee5489f7ce7595c371e2f4e83fc0c753308905a
|
[
"MIT"
] | null | null | null |
ansible/collections/ansible_collections/nhsd/apigee/plugins/filter/filters.py
|
uk-gov-mirror/nhsdigital.api-management-utils
|
4ee5489f7ce7595c371e2f4e83fc0c753308905a
|
[
"MIT"
] | 20
|
2020-05-27T15:00:31.000Z
|
2021-09-13T11:38:58.000Z
|
ansible/collections/ansible_collections/nhsd/apigee/plugins/filter/filters.py
|
uk-gov-mirror/nhsdigital.api-management-utils
|
4ee5489f7ce7595c371e2f4e83fc0c753308905a
|
[
"MIT"
] | 3
|
2021-04-11T07:31:36.000Z
|
2022-01-24T11:23:18.000Z
|
from ansible_collections.nhsd.apigee.plugins.module_utils import constants
def org_from_env(environment) -> str:
"""Get nhsd apigee organization name from environment name."""
for org, envs in constants.APIGEE_ORG_TO_ENV.items():
if environment in envs:
return org
valid_envs = []
for v in constants.APIGEE_ORG_TO_ENV.values():
valid_envs = valid_envs + v
raise ValueError(f"Unknown environment {environment}, valid environments are {valid_envs}")
| 28.454545
| 95
| 0.683706
|
from ansible_collections.nhsd.apigee.plugins.module_utils import constants
def org_from_env(environment) -> str:
"""Get nhsd apigee organization name from environment name."""
for org, envs in constants.APIGEE_ORG_TO_ENV.items():
if environment in envs:
return org
valid_envs = []
for v in constants.APIGEE_ORG_TO_ENV.values():
valid_envs = valid_envs + v
raise ValueError(f"Unknown environment {environment}, valid environments are {valid_envs}")
class FilterModule:
@staticmethod
def filters():
return {
'org_from_env': org_from_env
}
| 0
| 79
| 0
| -2
| 0
| 0
| 0
| 0
| 50
|
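Editor's note: a hedged, self-contained rehearsal of org_from_env above. The real constants.APIGEE_ORG_TO_ENV mapping is not part of this record, so the dictionary below is a stand-in; in a playbook, the FilterModule class exposes the function as a Jinja2 filter.
# Hedged sketch with a stand-in mapping (the real one lives in module_utils.constants).
APIGEE_ORG_TO_ENV = {
    "nhsd-nonprod": ["internal-dev", "internal-qa"],
    "nhsd-prod": ["prod"],
}

def org_from_env(environment):
    for org, envs in APIGEE_ORG_TO_ENV.items():
        if environment in envs:
            return org
    valid_envs = [e for envs in APIGEE_ORG_TO_ENV.values() for e in envs]
    raise ValueError(f"Unknown environment {environment}, valid environments are {valid_envs}")

print(org_from_env("prod"))  # -> nhsd-prod
# In a playbook (filter name as registered above): {{ my_env | org_from_env }}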
9821d58fd217247d13910323dd73ffca785b11ca
| 11,055
|
py
|
Python
|
IPython/deathrow/astyle.py
|
dchichkov/ipython
|
8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4
|
[
"BSD-3-Clause-Clear"
] | 11
|
2019-03-20T07:38:35.000Z
|
2021-06-18T09:42:46.000Z
|
IPython/deathrow/astyle.py
|
dchichkov/ipython
|
8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4
|
[
"BSD-3-Clause-Clear"
] | 3
|
2015-04-01T13:14:57.000Z
|
2015-05-26T16:01:37.000Z
|
IPython/deathrow/astyle.py
|
dchichkov/ipython
|
8096bb8640ee7e7c5ebdf3f428fe69cd390e1cd4
|
[
"BSD-3-Clause-Clear"
] | 5
|
2019-06-29T03:13:02.000Z
|
2020-04-23T04:47:11.000Z
|
"""
``astyle`` provides classes for adding style (foreground and background color;
bold; blink; etc.) to terminal and curses output.
"""
import os
try:
import curses
except ImportError:
curses = None
COLOR_BLACK = 0
COLOR_RED = 1
COLOR_GREEN = 2
COLOR_YELLOW = 3
COLOR_BLUE = 4
COLOR_MAGENTA = 5
COLOR_CYAN = 6
COLOR_WHITE = 7
A_BLINK = 1<<0 # Blinking text
A_BOLD = 1<<1 # Extra bright or bold text
A_DIM = 1<<2 # Half bright text
A_REVERSE = 1<<3 # Reverse-video text
A_STANDOUT = 1<<4 # The best highlighting mode available
A_UNDERLINE = 1<<5 # Underlined text
def switchstyle(s1, s2):
"""
Return the ANSI escape sequence needed to switch from style ``s1`` to
style ``s2``.
"""
attrmask = (A_BLINK|A_BOLD|A_UNDERLINE|A_REVERSE)
a1 = s1.attrs & attrmask
a2 = s2.attrs & attrmask
args = []
if s1 != s2:
# do we have to get rid of the bold/underline/blink bit?
# (can only be done by a reset)
# use reset when our target color is the default color
# (this is shorter than 37;40)
if (a1 & ~a2 or s2==style_default):
args.append("0")
s1 = style_default
a1 = 0
# now we know that old and new color have the same boldness,
# or the new color is bold and the old isn't,
# i.e. we only might have to switch bold on, not off
if not (a1 & A_BOLD) and (a2 & A_BOLD):
args.append("1")
# Fix underline
if not (a1 & A_UNDERLINE) and (a2 & A_UNDERLINE):
args.append("4")
# Fix blink
if not (a1 & A_BLINK) and (a2 & A_BLINK):
args.append("5")
# Fix reverse
if not (a1 & A_REVERSE) and (a2 & A_REVERSE):
args.append("7")
# Fix foreground color
if s1.fg != s2.fg:
args.append("3%d" % s2.fg)
# Finally fix the background color
if s1.bg != s2.bg:
args.append("4%d" % s2.bg)
if args:
return "\033[%sm" % ";".join(args)
return ""
try:
import ipipe
except ImportError:
pass
else:
ipipe.xrepr.when_type(Text)(xrepr_astyle_text)
def streamstyle(stream, styled=None):
"""
If ``styled`` is ``None``, return whether ``stream`` refers to a terminal.
If this can't be determined (either because ``stream`` doesn't refer to a
real OS file, or because you're on Windows) return ``False``. If ``styled``
is not ``None`` ``styled`` will be returned unchanged.
"""
if styled is None:
try:
styled = os.isatty(stream.fileno())
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
styled = False
return styled
def write(stream, styled, *texts):
"""
Write ``texts`` to ``stream``.
"""
text = Text(*texts)
text.write(stream, streamstyle(stream, styled))
def writeln(stream, styled, *texts):
"""
Write ``texts`` to ``stream`` and finish with a line feed.
"""
write(stream, styled, *texts)
stream.write("\n")
stdout = stdout()
stderr = stderr()
if curses is not None:
# This is probably just range(8)
COLOR2CURSES = [
COLOR_BLACK,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE,
]
A2CURSES = {
A_BLINK: curses.A_BLINK,
A_BOLD: curses.A_BOLD,
A_DIM: curses.A_DIM,
A_REVERSE: curses.A_REVERSE,
A_STANDOUT: curses.A_STANDOUT,
A_UNDERLINE: curses.A_UNDERLINE,
}
# default style
style_default = Style.fromstr("white:black")
# Styles for datatypes
style_type_none = Style.fromstr("magenta:black")
style_type_bool = Style.fromstr("magenta:black")
style_type_number = Style.fromstr("yellow:black")
style_type_datetime = Style.fromstr("magenta:black")
style_type_type = Style.fromstr("cyan:black")
# Style for URLs and file/directory names
style_url = Style.fromstr("green:black")
style_dir = Style.fromstr("cyan:black")
style_file = Style.fromstr("green:black")
# Style for ellipsis (when an output has been shortened)
style_ellisis = Style.fromstr("red:black")
# Style for displaying exceptions
style_error = Style.fromstr("red:black")
# Style for displaying non-existing attributes
style_nodata = Style.fromstr("red:black")
| 27.568579
| 92
| 0.569064
|
"""
``astyle`` provides classes for adding style (foreground and background color;
bold; blink; etc.) to terminal and curses output.
"""
import sys, os
try:
import curses
except ImportError:
curses = None
COLOR_BLACK = 0
COLOR_RED = 1
COLOR_GREEN = 2
COLOR_YELLOW = 3
COLOR_BLUE = 4
COLOR_MAGENTA = 5
COLOR_CYAN = 6
COLOR_WHITE = 7
A_BLINK = 1<<0 # Blinking text
A_BOLD = 1<<1 # Extra bright or bold text
A_DIM = 1<<2 # Half bright text
A_REVERSE = 1<<3 # Reverse-video text
A_STANDOUT = 1<<4 # The best highlighting mode available
A_UNDERLINE = 1<<5 # Underlined text
class Style(object):
"""
Store foreground color, background color and attribute (bold, underlined
etc.).
"""
__slots__ = ("fg", "bg", "attrs")
COLORNAMES = {
"black": COLOR_BLACK,
"red": COLOR_RED,
"green": COLOR_GREEN,
"yellow": COLOR_YELLOW,
"blue": COLOR_BLUE,
"magenta": COLOR_MAGENTA,
"cyan": COLOR_CYAN,
"white": COLOR_WHITE,
}
ATTRNAMES = {
"blink": A_BLINK,
"bold": A_BOLD,
"dim": A_DIM,
"reverse": A_REVERSE,
"standout": A_STANDOUT,
"underline": A_UNDERLINE,
}
def __init__(self, fg, bg, attrs=0):
"""
Create a ``Style`` object with ``fg`` as the foreground color,
``bg`` as the background color and ``attrs`` as the attributes.
Examples:
>>> Style(COLOR_RED, COLOR_BLACK)
<Style fg=red bg=black attrs=0>
>>> Style(COLOR_YELLOW, COLOR_BLUE, A_BOLD|A_UNDERLINE)
<Style fg=yellow bg=blue attrs=bold|underline>
"""
self.fg = fg
self.bg = bg
self.attrs = attrs
def __call__(self, *args):
text = Text()
for arg in args:
if isinstance(arg, Text):
text.extend(arg)
else:
text.append((self, arg))
return text
def __eq__(self, other):
return self.fg == other.fg and self.bg == other.bg and self.attrs == other.attrs
    def __ne__(self, other):  # fixed: Python dispatches to __ne__, not __neq__
return self.fg != other.fg or self.bg != other.bg or self.attrs != other.attrs
def __repr__(self):
color2name = ("black", "red", "green", "yellow", "blue", "magenta", "cyan", "white")
attrs2name = ("blink", "bold", "dim", "reverse", "standout", "underline")
return "<%s fg=%s bg=%s attrs=%s>" % (
self.__class__.__name__, color2name[self.fg], color2name[self.bg],
"|".join([attrs2name[b] for b in xrange(6) if self.attrs&(1<<b)]) or 0)
def fromstr(cls, value):
"""
Create a ``Style`` object from a string. The format looks like this:
``"red:black:bold|blink"``.
"""
# defaults
fg = COLOR_WHITE
bg = COLOR_BLACK
attrs = 0
parts = value.split(":")
if len(parts) > 0:
fg = cls.COLORNAMES[parts[0].lower()]
if len(parts) > 1:
bg = cls.COLORNAMES[parts[1].lower()]
if len(parts) > 2:
for strattr in parts[2].split("|"):
attrs |= cls.ATTRNAMES[strattr.lower()]
return cls(fg, bg, attrs)
fromstr = classmethod(fromstr)
def fromenv(cls, name, default):
"""
Create a ``Style`` from an environment variable named ``name``
(using ``default`` if the environment variable doesn't exist).
"""
return cls.fromstr(os.environ.get(name, default))
fromenv = classmethod(fromenv)
def switchstyle(s1, s2):
"""
Return the ANSI escape sequence needed to switch from style ``s1`` to
style ``s2``.
"""
attrmask = (A_BLINK|A_BOLD|A_UNDERLINE|A_REVERSE)
a1 = s1.attrs & attrmask
a2 = s2.attrs & attrmask
args = []
if s1 != s2:
# do we have to get rid of the bold/underline/blink bit?
# (can only be done by a reset)
# use reset when our target color is the default color
# (this is shorter than 37;40)
if (a1 & ~a2 or s2==style_default):
args.append("0")
s1 = style_default
a1 = 0
# now we know that old and new color have the same boldness,
# or the new color is bold and the old isn't,
# i.e. we only might have to switch bold on, not off
if not (a1 & A_BOLD) and (a2 & A_BOLD):
args.append("1")
# Fix underline
if not (a1 & A_UNDERLINE) and (a2 & A_UNDERLINE):
args.append("4")
# Fix blink
if not (a1 & A_BLINK) and (a2 & A_BLINK):
args.append("5")
# Fix reverse
if not (a1 & A_REVERSE) and (a2 & A_REVERSE):
args.append("7")
# Fix foreground color
if s1.fg != s2.fg:
args.append("3%d" % s2.fg)
# Finally fix the background color
if s1.bg != s2.bg:
args.append("4%d" % s2.bg)
if args:
return "\033[%sm" % ";".join(args)
return ""
class Text(list):
"""
A colored string. A ``Text`` object is a sequence, the sequence
items will be ``(style, string)`` tuples.
"""
def __init__(self, *args):
list.__init__(self)
self.append(*args)
def __repr__(self):
return "%s.%s(%s)" % (
self.__class__.__module__, self.__class__.__name__,
list.__repr__(self)[1:-1])
def append(self, *args):
for arg in args:
if isinstance(arg, Text):
self.extend(arg)
elif isinstance(arg, tuple): # must be (style, string)
list.append(self, arg)
elif isinstance(arg, unicode):
list.append(self, (style_default, arg))
else:
list.append(self, (style_default, str(arg)))
def insert(self, index, *args):
self[index:index] = Text(*args)
def __add__(self, other):
new = Text()
new.append(self)
new.append(other)
return new
def __iadd__(self, other):
self.append(other)
return self
def format(self, styled=True):
"""
This generator yields the strings that will make up the final
colorized string.
"""
if styled:
oldstyle = style_default
for (style, string) in self:
if not isinstance(style, (int, long)):
switch = switchstyle(oldstyle, style)
if switch:
yield switch
if string:
yield string
oldstyle = style
switch = switchstyle(oldstyle, style_default)
if switch:
yield switch
else:
for (style, string) in self:
if not isinstance(style, (int, long)):
yield string
def string(self, styled=True):
"""
Return the resulting string (with escape sequences, if ``styled``
is true).
"""
return "".join(self.format(styled))
def __str__(self):
"""
Return ``self`` as a string (without ANSI escape sequences).
"""
return self.string(False)
def write(self, stream, styled=True):
"""
Write ``self`` to the output stream ``stream`` (with escape sequences,
if ``styled`` is true).
"""
for part in self.format(styled):
stream.write(part)
try:
import ipipe
except ImportError:
pass
else:
def xrepr_astyle_text(self, mode="default"):
yield (-1, True)
for info in self:
yield info
ipipe.xrepr.when_type(Text)(xrepr_astyle_text)
def streamstyle(stream, styled=None):
"""
If ``styled`` is ``None``, return whether ``stream`` refers to a terminal.
If this can't be determined (either because ``stream`` doesn't refer to a
real OS file, or because you're on Windows) return ``False``. If ``styled``
is not ``None`` ``styled`` will be returned unchanged.
"""
if styled is None:
try:
styled = os.isatty(stream.fileno())
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
styled = False
return styled
def write(stream, styled, *texts):
"""
Write ``texts`` to ``stream``.
"""
text = Text(*texts)
text.write(stream, streamstyle(stream, styled))
def writeln(stream, styled, *texts):
"""
Write ``texts`` to ``stream`` and finish with a line feed.
"""
write(stream, styled, *texts)
stream.write("\n")
class Stream(object):
"""
Stream wrapper that adds color output.
"""
def __init__(self, stream, styled=None):
self.stream = stream
self.styled = streamstyle(stream, styled)
def write(self, *texts):
write(self.stream, self.styled, *texts)
def writeln(self, *texts):
writeln(self.stream, self.styled, *texts)
def __getattr__(self, name):
return getattr(self.stream, name)
class stdout(object):
"""
Stream wrapper for ``sys.stdout`` that adds color output.
"""
def write(self, *texts):
write(sys.stdout, None, *texts)
def writeln(self, *texts):
writeln(sys.stdout, None, *texts)
def __getattr__(self, name):
return getattr(sys.stdout, name)
stdout = stdout()
class stderr(object):
"""
Stream wrapper for ``sys.stderr`` that adds color output.
"""
def write(self, *texts):
write(sys.stderr, None, *texts)
def writeln(self, *texts):
writeln(sys.stderr, None, *texts)
def __getattr__(self, name):
return getattr(sys.stdout, name)
stderr = stderr()
if curses is not None:
# This is probably just range(8)
COLOR2CURSES = [
COLOR_BLACK,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE,
]
A2CURSES = {
A_BLINK: curses.A_BLINK,
A_BOLD: curses.A_BOLD,
A_DIM: curses.A_DIM,
A_REVERSE: curses.A_REVERSE,
A_STANDOUT: curses.A_STANDOUT,
A_UNDERLINE: curses.A_UNDERLINE,
}
# default style
style_default = Style.fromstr("white:black")
# Styles for datatypes
style_type_none = Style.fromstr("magenta:black")
style_type_bool = Style.fromstr("magenta:black")
style_type_number = Style.fromstr("yellow:black")
style_type_datetime = Style.fromstr("magenta:black")
style_type_type = Style.fromstr("cyan:black")
# Style for URLs and file/directory names
style_url = Style.fromstr("green:black")
style_dir = Style.fromstr("cyan:black")
style_file = Style.fromstr("green:black")
# Style for ellipsis (when an output has been shortened)
style_ellisis = Style.fromstr("red:black")
# Style for displaying exceptions
style_error = Style.fromstr("red:black")
# Style for displaying non-existing attributes
style_nodata = Style.fromstr("red:black")
| 0
| 0
| 0
| 6,404
| 97
| 0
| 0
| 5
| 141
|
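Editor's note: the module above is Python 2 (it relies on unicode, long and xrange), and its switchstyle() ultimately just emits "\033[...m" escape sequences. Below is a hedged Python 3 miniature of that idea, without the attribute diffing the real code performs.
# Hedged sketch: the ANSI escape mechanics behind switchstyle(), reduced.
def ansi(fg, bg, bold=False):
    codes = ["1"] if bold else []
    codes.append("3%d" % fg)  # foreground codes are 30-37
    codes.append("4%d" % bg)  # background codes are 40-47
    return "\033[%sm" % ";".join(codes)

COLOR_RED, COLOR_BLACK = 1, 0
print(ansi(COLOR_RED, COLOR_BLACK, bold=True) + "error text" + "\033[0m")  # 0 resets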
0ba81da5ce04ec487462a1396a9e594fbd6ac8ba
| 853
|
py
|
Python
|
0000_examples/gelsight/tst.py
|
Photon26/wrs-main_0614
|
c0d0e38deac9785e9c382305f65f3ac5f221787d
|
[
"MIT"
] | null | null | null |
0000_examples/gelsight/tst.py
|
Photon26/wrs-main_0614
|
c0d0e38deac9785e9c382305f65f3ac5f221787d
|
[
"MIT"
] | null | null | null |
0000_examples/gelsight/tst.py
|
Photon26/wrs-main_0614
|
c0d0e38deac9785e9c382305f65f3ac5f221787d
|
[
"MIT"
] | null | null | null |
import robot_sim.robots.ur3_dual.ur3_dual as ur3d
import rbt_con.force_control as ur3dx
# import robot_con.ur.ur3_dual_x as ur3dx
import visualization.panda.world as wd
import modeling.geometric_model as gm
import numpy as np
ur_dual_x = ur3dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.100')
base = wd.World(cam_pos=[2,1,3], lookat_pos=[0,0,1.1])
gm.gen_frame().attach_to(base)
robot_s = ur3d.UR3Dual()
jnt = ur_dual_x.get_jnt_values("lft_arm")
robot_s.fk(component_name="lft_arm",jnt_values= np.array(jnt))
robot_meshmodel = robot_s.gen_meshmodel(toggle_tcpcs=True)
robot_meshmodel.attach_to(base)
base.run()
| 32.807692
| 98
| 0.805393
|
import pickle
import robot_sim.robots.ur3_dual.ur3_dual as ur3d
import rbt_con.force_control as ur3dx
# import robot_con.ur.ur3_dual_x as ur3dx
import visualization.panda.world as wd
import modeling.geometric_model as gm
import motion.optimization_based.incremental_nik as inik
import numpy as np
import modeling.collision_model as cm
import cv2
import img_to_depth as itd
import time
import motion.probabilistic.rrt_connect as rrtc
ur_dual_x = ur3dx.UR3DualX(lft_robot_ip='10.2.0.50', rgt_robot_ip='10.2.0.51', pc_ip='10.2.0.100')
base = wd.World(cam_pos=[2,1,3], lookat_pos=[0,0,1.1])
gm.gen_frame().attach_to(base)
robot_s = ur3d.UR3Dual()
jnt = ur_dual_x.get_jnt_values("lft_arm")
robot_s.fk(component_name="lft_arm",jnt_values= np.array(jnt))
robot_meshmodel = robot_s.gen_meshmodel(toggle_tcpcs=True)
robot_meshmodel.attach_to(base)
base.run()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 53
| 154
|
3fd17c06364dbbcfbf61a1c852f8a863c6286b83
| 4,656
|
py
|
Python
|
galaxy_model/spiral_arms/three_kpc.py
|
K-Monty/galaxy-model
|
9515d7c87c31e68338466d2044d8e9c679bf8648
|
[
"MIT"
] | null | null | null |
galaxy_model/spiral_arms/three_kpc.py
|
K-Monty/galaxy-model
|
9515d7c87c31e68338466d2044d8e9c679bf8648
|
[
"MIT"
] | null | null | null |
galaxy_model/spiral_arms/three_kpc.py
|
K-Monty/galaxy-model
|
9515d7c87c31e68338466d2044d8e9c679bf8648
|
[
"MIT"
] | null | null | null |
"""
This module uses the SpiralArm superclass, with some modifications,
to create 3-kpc arm.
"""
| 40.486957
| 78
| 0.597938
|
"""
This module uses the SpiralArm superclass, with some modifications,
to create 3-kpc arm.
"""
from shapely.geometry.polygon import Polygon
from descartes import PolygonPatch
from .spiral_parameters import Three_Kpc
from . import spiral_property as spiral_eq
from .spiral_arm_superclass import SpiralArm
class ThreeKpcArm(SpiralArm):
def __init__(self):
self.params = Three_Kpc
self._color = 'yellow'
self.tuning_window = 3
self._spine_r_kpc, self.x_spine, self.y_spine, self._B_spine, \
self._width_kpc = \
self.spine_radii_coords_b_range_and_width_with_smoothing()
self._poly_coords_inner, self._poly_coords_outer = self._poly_coords()
self._polygon_near = Polygon(self._poly_coords_inner)
self._polygon_far = Polygon(self._poly_coords_outer)
self.polypatch_near = PolygonPatch(self._polygon_near,
color=self._color,
alpha=0.2)
self.polypatch_far = PolygonPatch(self._polygon_far,
color=self._color,
alpha=0.2)
def __repr__(self):
return "ThreeKpc"
def _radii_factory(self, B):
return self._spine_radius_at_B_and_psi(
B, self.params['B-kink'],
self.params['psi'],
self.params['R-kink'])
def _width_factory(self, B, r):
return spiral_eq.CylinderSize.width_kpc(
self.params['w-kink'], r, self.params['R-kink']) + 0.1
def spine_radii_coords_b_range_and_width_with_smoothing(self):
r_spine_near = []
x_spine_near = []
y_spine_near = []
width_kpc_near = []
B_list_near = [x for x in range(self.params['B-begin-near'],
self.params['B-end-near'])]
r_spine_far = []
x_spine_far = []
y_spine_far = []
width_kpc_far = []
B_list_far = [x for x in range(self.params['B-begin-far'],
self.params['B-end-far'])]
for B_near in B_list_near:
r_near = self._radii_factory(B_near)
w_near = self._width_factory(B_near, r_near)
r_spine_near.append(r_near)
width_kpc_near.append(w_near)
for B_far in B_list_far:
r_far = self._radii_factory(B_far)
w_far = self._width_factory(B_far, r_far)
r_spine_far.append(r_far)
width_kpc_far.append(w_far)
for B_near, r_near in zip(B_list_near, r_spine_near):
cartesian_coords = spiral_eq.polar_to_cartesian(r_near, B_near)
x_spine_near.append(cartesian_coords[0])
y_spine_near.append(cartesian_coords[1])
for B_far, r_far in zip(B_list_far, r_spine_far):
cartesian_coords = spiral_eq.polar_to_cartesian(r_far, B_far)
x_spine_far.append(cartesian_coords[0])
y_spine_far.append(cartesian_coords[1])
r_spine = [r_spine_near, r_spine_far]
x_spine = [x_spine_near, x_spine_far]
y_spine = [y_spine_near, y_spine_far]
B_list = [B_list_near, B_list_far]
width_kpc = [width_kpc_near, width_kpc_far]
return (r_spine, x_spine, y_spine, B_list, width_kpc)
def _poly_coords(self):
x_border_inner_near, y_border_inner_near, x_border_outer_near, \
y_border_outer_near = \
self._border_coords(self.x_spine[0],
self.y_spine[0],
self._B_spine[0],
self._width_kpc[0])
x_border_inner_far, y_border_inner_far, x_border_outer_far, \
y_border_outer_far = \
self._border_coords(self.x_spine[1],
self.y_spine[1],
self._B_spine[1],
self._width_kpc[1])
x_poly_edge_coords_near = x_border_inner_near \
+ x_border_outer_near[::-1]
y_poly_edge_coords_near = y_border_inner_near \
+ y_border_outer_near[::-1]
poly_edge_coords_near = [xy for xy in zip(x_poly_edge_coords_near,
y_poly_edge_coords_near)]
x_poly_edge_coords_far = x_border_inner_far + x_border_outer_far[::-1]
y_poly_edge_coords_far = y_border_inner_far + y_border_outer_far[::-1]
poly_edge_coords_far = [xy for xy in zip(x_poly_edge_coords_far,
y_poly_edge_coords_far)]
return poly_edge_coords_near, poly_edge_coords_far
| 0
| 0
| 0
| 4,328
| 0
| 0
| 0
| 99
| 135
|
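Editor's note: the spine coordinates above come from spiral_eq.polar_to_cartesian(r, B), whose implementation lives outside this record. A conventional sketch is below; treating B as an angle in degrees is an assumption, since the module may orient the galactic frame differently.
# Hedged sketch of a polar -> cartesian conversion like the one called above.
import math

def polar_to_cartesian(r, theta_deg):
    theta = math.radians(theta_deg)
    return (r * math.cos(theta), r * math.sin(theta))

print(polar_to_cartesian(3.0, 90.0))  # -> (~0.0, 3.0)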
dbeff05f1c61b09708b8a95665c71d6764ac49fd
| 2,045
|
py
|
Python
|
rbi/rbi_budgets_scraper.py
|
cbgaindia/scrappers
|
f8aab1d0f0a52007d8f0ab94e7ea38047e6e46a9
|
[
"MIT"
] | null | null | null |
rbi/rbi_budgets_scraper.py
|
cbgaindia/scrappers
|
f8aab1d0f0a52007d8f0ab94e7ea38047e6e46a9
|
[
"MIT"
] | null | null | null |
rbi/rbi_budgets_scraper.py
|
cbgaindia/scrappers
|
f8aab1d0f0a52007d8f0ab94e7ea38047e6e46a9
|
[
"MIT"
] | null | null | null |
'Code for scraping RBI Data'
import logging
from logging.config import fileConfig
fileConfig('scrappers/logging_config.ini')
logger = logging.getLogger()
OUT_FOLDER = "rbi"
if __name__ == '__main__':
obj = RBIBudgetScraper()
for year in range(2002,2015):
year = str(year)
url1 = "https://www.rbi.org.in/scripts/AnnualPublications.aspx?head=Handbook%20of%20Statistics%20on%20Indian%20Economy"
url2 = "https://rbi.org.in/Scripts/AnnualPublications.aspx?head=State+Finances+%3a+A+Study+of+Budgets"
obj.fetch_docs_for_year(url1, year)
obj.fetch_docs_for_year(url2, year)
| 44.456522
| 127
| 0.630318
|
'Code for scraping RBI Data'
from datetime import date
from lxml import etree
import logging
from logging.config import fileConfig
from scrappers.scrapping_utils import ScrappingUtils
fileConfig('scrappers/logging_config.ini')
logger = logging.getLogger()
OUT_FOLDER = "rbi"
class RBIBudgetScraper(ScrappingUtils):
def fetch_docs_for_year(self, url, year=None):
'''Fetches all documents for a budget year
'''
if not year:
current_year = date.today().year
year = "%s" % (current_year)
page_dom = self.get_page_dom(url)
title = self.get_text_from_element(page_dom, xpath="//h2[@class='page_title']/text()")
download_dir = "%s/%s/%s" % (OUT_FOLDER, year, title)
file_dir = download_dir
for node in page_dom.xpath("//table[@class='tablebg']/tr"):
node_title = self.get_text_from_element(node, xpath="./td[@class='tableheader']//text()")
if node_title:
file_dir = "%s/%s" % (download_dir, node_title)
continue
node_title = self.get_text_from_element(node, xpath="./td[@style]//text()")
file_path = "%s/%s" % (file_dir, node_title)
file_link = node.xpath("./td[2]/a[@target]/@href")
if file_link:
self.fetch_and_save_file(file_link[0].replace('http://', 'https://'), file_path + ".xls")
file_link = node.xpath("./td[3]/a[@target]/@href")
if file_link:
self.fetch_and_save_file(file_link[0].replace('http://', 'https://'), file_path + ".pdf")
if __name__ == '__main__':
obj = RBIBudgetScraper()
for year in range(2002,2015):
year = str(year)
url1 = "https://www.rbi.org.in/scripts/AnnualPublications.aspx?head=Handbook%20of%20Statistics%20on%20Indian%20Economy"
url2 = "https://rbi.org.in/Scripts/AnnualPublications.aspx?head=State+Finances+%3a+A+Study+of+Budgets"
obj.fetch_docs_for_year(url1, year)
obj.fetch_docs_for_year(url2, year)
| 0
| 0
| 0
| 1,292
| 0
| 0
| 0
| 36
| 99
|
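Editor's note: get_page_dom, get_text_from_element and fetch_and_save_file come from the inherited ScrappingUtils, which is not part of this record. The sketch below is a plausible requests/lxml reading of the two fetch helpers, labelled as an assumption rather than the project's actual utilities.
# Hedged sketch of the ScrappingUtils helpers used above.
import os
import requests
from lxml import html

def get_page_dom(url):
    return html.fromstring(requests.get(url, timeout=30).content)

def fetch_and_save_file(url, path):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as out:
        out.write(requests.get(url, timeout=60).content)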
30b7600879ed7470a3a4e7671d503041ddaee708
| 404
|
py
|
Python
|
0x11-python-network_1/2-post_email.py
|
Nahi-Terefe/alx-higher_level_programming
|
c67a78a6f79e853918963971f8352979e7691541
|
[
"MIT"
] | null | null | null |
0x11-python-network_1/2-post_email.py
|
Nahi-Terefe/alx-higher_level_programming
|
c67a78a6f79e853918963971f8352979e7691541
|
[
"MIT"
] | null | null | null |
0x11-python-network_1/2-post_email.py
|
Nahi-Terefe/alx-higher_level_programming
|
c67a78a6f79e853918963971f8352979e7691541
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
""" post email """
import urllib.request
import urllib.parse
import sys
if __name__ == "__main__":
value = {'email': sys.argv[2]}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(sys.argv[1], data)
with urllib.request.urlopen(req) as response:
res = response.read().decode(encoding='UTF-8')
print(res)
| 25.25
| 54
| 0.65099
|
#!/usr/bin/python3
""" post email """
import urllib.request
import urllib.parse
import sys
if __name__ == "__main__":
value = {'email': sys.argv[2]}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(sys.argv[1], data)
with urllib.request.urlopen(req) as response:
res = response.read().decode(encoding='UTF-8')
print(res)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
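Editor's note: the script above takes the target URL as argv[1] and the email as argv[2]. For comparison, the same form-encoded POST with the third-party requests library; the URL and address below are placeholders, not values from the exercise.
# Hedged sketch: equivalent POST via requests.
import requests

resp = requests.post("http://example.com/post_email", data={"email": "[email protected]"})
print(resp.text)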
98dd68e4820e061630336d4d9dc6896eb5df6fb1
| 2,534
|
py
|
Python
|
modules/selection.py
|
psp-codes/reduced-decoy-ensemble
|
096926d8d44a6d7bdbf9c49dd52fb83ff86e8b3f
|
[
"MIT"
] | 2
|
2020-04-22T04:16:03.000Z
|
2020-08-19T13:50:20.000Z
|
modules/selection.py
|
psp-codes/reduced-decoy-ensemble
|
096926d8d44a6d7bdbf9c49dd52fb83ff86e8b3f
|
[
"MIT"
] | null | null | null |
modules/selection.py
|
psp-codes/reduced-decoy-ensemble
|
096926d8d44a6d7bdbf9c49dd52fb83ff86e8b3f
|
[
"MIT"
] | 1
|
2020-08-19T13:50:26.000Z
|
2020-08-19T13:50:26.000Z
|
# selection.py
# since: 10/2018
# Developed by: Shehu Lab
"""Module for selecting next generation from current generation.
This module provides methods to select next generation from
current generation.
Available Functions:
- truncation: Selects next generation via elitism truncation selection.
"""
def truncation(parent_population, child_population, parents_scores,
children_scores, elitism_rate):
"""Selects next generation using elitism truncation selection.
This function implements truncation selection while ensuring elitism
to select a specific number of members for the next generation.
Args:
parent_population: A list containing members of parent
population.
child_population: A list containing members of offspring
population.
parents_scores: A list containing scores of each member of the
parent population. The format is:
[member 1 score, member 2 score, ....]
The order of members has to be consistent with
parent_population argument.
children_scores: A list containing scores of each member of the
offspring population. The format is:
[member 1 score, member 2 score, ....]
The order of members has to be consistent with
child_population argument.
elitism_rate: A float indicating the elitism percentage.
Returns:
A list of members for the next generation of population.
"""
population_size = len(parent_population)
population_indices = list(range(population_size))
sorted_parents_indices = [x for _, x in sorted(zip(
parents_scores, population_indices
))]
sorted_parents_scores = sorted(parents_scores)
# Slice parent population using elitism rate
slice_index = int(population_size * elitism_rate)
selected_parents_indices = sorted_parents_indices[:slice_index]
selected_parents = [parent_population[i] for i in selected_parents_indices]
combined_population = selected_parents + child_population
combined_scores = sorted_parents_scores[:slice_index] + children_scores
combined_population_indices = list(range(len(combined_population)))
sorted_population_indices = [x for _, x in sorted(zip(
combined_scores, combined_population_indices
))]
selected_population_indices = sorted_population_indices[:population_size]
# Truncate and return
return [combined_population[i] for i in selected_population_indices]
| 37.820896
| 79
| 0.723757
|
# selection.py
# since: 10/2018
# Developed by: Shehu Lab
"""Module for selecting next generation from current generation.
This module provides methods to select next generation from
current generation.
Available Functions:
- truncation: Selects next generation via elitism truncation selection.
"""
def truncation(parent_population, child_population, parents_scores,
children_scores, elitism_rate):
"""Selects next generation using elitism truncation selection.
This function implements truncation selection while ensuring elitism
to select a specific number of members for the next generation.
Args:
parent_population: A list containing members of parent
population.
child_population: A list containing members of offspring
population.
parents_scores: A list containing scores of each member of the
parent population. The format is:
[member 1 score, member 2 score, ....]
The order of members has to be consistent with
parent_population argument.
children_scores: A list containing scores of each member of the
offspring population. The format is:
[member 1 score, member 2 score, ....]
The order of members has to be consistent with
child_population argument.
elitism_rate: A float indicating the elitism percentage.
Returns:
A list of members for the next generation of population.
"""
population_size = len(parent_population)
population_indices = list(range(population_size))
sorted_parents_indices = [x for _, x in sorted(zip(
parents_scores, population_indices
))]
sorted_parents_scores = sorted(parents_scores)
# Slice parent population using elitism rate
slice_index = int(population_size * elitism_rate)
selected_parents_indices = sorted_parents_indices[:slice_index]
selected_parents = [parent_population[i] for i in selected_parents_indices]
combined_population = selected_parents + child_population
combined_scores = sorted_parents_scores[:slice_index] + children_scores
combined_population_indices = list(range(len(combined_population)))
sorted_population_indices = [x for _, x in sorted(zip(
combined_scores, combined_population_indices
))]
selected_population_indices = sorted_population_indices[:population_size]
# Truncate and return
return [combined_population[i] for i in selected_population_indices]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
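Editor's note: a small worked call of truncation() above, assuming the repository root is on sys.path so modules/selection.py imports cleanly. Scores are treated as "lower is better", since the function sorts ascending and keeps the front of the list.
# Worked example: elitism keeps one parent, then the best four overall survive.
from modules.selection import truncation

parents = ['a', 'b', 'c', 'd']
children = ['e', 'f', 'g', 'h']
next_gen = truncation(parents, children,
                      parents_scores=[3.0, 1.0, 4.0, 2.0],
                      children_scores=[2.5, 0.5, 5.0, 1.5],
                      elitism_rate=0.25)
# slice_index = int(4 * 0.25) = 1, so only the best parent 'b' (score 1.0)
# joins the pool ['b', 'e', 'f', 'g', 'h'] with scores [1.0, 2.5, 0.5, 5.0, 1.5];
# the best four are kept:
print(next_gen)  # -> ['f', 'b', 'h', 'e']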
3f46e12fb5d861255b9732050d691bdff492592b
| 129
|
py
|
Python
|
Desafios/desafio030.py
|
VanessaCML/python
|
56133b9000ba89154f37038e11a3c2d1aa6d1094
|
[
"MIT"
] | null | null | null |
Desafios/desafio030.py
|
VanessaCML/python
|
56133b9000ba89154f37038e11a3c2d1aa6d1094
|
[
"MIT"
] | null | null | null |
Desafios/desafio030.py
|
VanessaCML/python
|
56133b9000ba89154f37038e11a3c2d1aa6d1094
|
[
"MIT"
] | null | null | null |
n = int(input('Digite um número: '))
if n % 2 == 0:
    print(f'O número {n} é par.')
else:
    print(f'O número {n} é ímpar.')
| 18.428571
| 36
| 0.542636
|
n = int(input('Digite um número: '))
if n % 2 == 0:
print(f'O número {n} é par.')
else:
print(f'O número {n} é ímpar.')
| 12
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
be760d7ba6e881b8660d24ef1857485b859c9f5b
| 1,487
|
py
|
Python
|
sorted_insert_position.py
|
UPstartDeveloper/Problem_Solving_Practice
|
bd61333b3b056e82a94297e02bc05a17552e3496
|
[
"MIT"
] | null | null | null |
sorted_insert_position.py
|
UPstartDeveloper/Problem_Solving_Practice
|
bd61333b3b056e82a94297e02bc05a17552e3496
|
[
"MIT"
] | null | null | null |
sorted_insert_position.py
|
UPstartDeveloper/Problem_Solving_Practice
|
bd61333b3b056e82a94297e02bc05a17552e3496
|
[
"MIT"
] | null | null | null |
import math
def find_index(sorted_list, target):
"""Finds the index where the target value is expected in a sorted list."""
def binary_search(low_index, hi_index):
"""Searches for a value in a list, throwing away half each call"""
# locate the middle index
mid_index = math.ceil((low_index + hi_index) / 2)
# obtain values from all three indices
low_val, mid_val, high_val = (
sorted_list[low_index],
sorted_list[mid_index],
sorted_list[hi_index],
)
# Base case: the target value is found
if mid_val == target:
return mid_index
# target value not found:
elif mid_val > target:
            # if target lies "before" the array (>= also stops crossed bounds from recursing forever)
            if low_index >= hi_index:
# return the 0 index
return mid_index
# otherwise search the lower half of the array
return binary_search(low_index, mid_index - 1)
elif mid_val < target:
            # if target lies "after" the last value (>= also stops crossed bounds)
            if low_index >= hi_index:
return mid_index + 1
# otherwise search the larger half of the array
return binary_search(mid_index + 1, hi_index)
# store the array length
ARRAY_LENGTH = len(sorted_list)
# execute binary search on the array
return binary_search(0, ARRAY_LENGTH - 1)
if __name__ == "__main__":
print(find_index([1, 3, 5, 6], 5))
| 34.581395
| 78
| 0.597848
|
import math
def find_index(sorted_list, target):
"""Finds the index where the target value is expected in a sorted list."""
def binary_search(low_index, hi_index):
"""Searches for a value in a list, throwing away half each call"""
# locate the middle index
mid_index = math.ceil((low_index + hi_index) / 2)
# obtain values from all three indices
low_val, mid_val, high_val = (
sorted_list[low_index],
sorted_list[mid_index],
sorted_list[hi_index],
)
# Base case: the target value is found
if mid_val == target:
return mid_index
# target value not found:
elif mid_val > target:
            # if target lies "before" the array (>= also stops crossed bounds from recursing forever)
            if low_index >= hi_index:
# return the 0 index
return mid_index
# otherwise search the lower half of the array
return binary_search(low_index, mid_index - 1)
elif mid_val < target:
            # if target lies "after" the last value (>= also stops crossed bounds)
            if low_index >= hi_index:
return mid_index + 1
# otherwise search the larger half of the array
return binary_search(mid_index + 1, hi_index)
# store the array length
ARRAY_LENGTH = len(sorted_list)
# execute binary search on the array
return binary_search(0, ARRAY_LENGTH - 1)
if __name__ == "__main__":
print(find_index([1, 3, 5, 6], 5))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
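Editor's note: the recursion above needs its low_index >= hi_index guards; with a plain equality check, a target strictly between two elements (for example 4 in [1, 3, 5, 6]) bounces between a crossed low/high range forever. The standard library covers the same need directly, and for the sorted, duplicate-free input used in the record, bisect_left agrees with find_index.
# Hedged sketch: standard-library insert position.
import bisect

print(bisect.bisect_left([1, 3, 5, 6], 5))  # -> 2, matching find_index([1, 3, 5, 6], 5)
print(bisect.bisect_left([1, 3, 5, 6], 2))  # -> 1 (insert position)
print(bisect.bisect_left([1, 3, 5, 6], 7))  # -> 4 (append position)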
b4bd721ab4429c8eb5ef6b2554f6ed76ab1c49a3
| 404
|
py
|
Python
|
2021/Day_8/AoCSignals.py
|
ArrowThunder/AoC
|
f4649115fc83b989745c83251a85710e76eb0368
|
[
"MIT"
] | 1
|
2021-12-04T12:44:51.000Z
|
2021-12-04T12:44:51.000Z
|
2021/Day_8/AoCSignals.py
|
ArrowThunder/AoC2021
|
f4649115fc83b989745c83251a85710e76eb0368
|
[
"MIT"
] | null | null | null |
2021/Day_8/AoCSignals.py
|
ArrowThunder/AoC2021
|
f4649115fc83b989745c83251a85710e76eb0368
|
[
"MIT"
] | null | null | null |
with open('input.txt') as file:
total = 0
for line in file:
inputs, outputs = parse_line(line)
for code in outputs:
if len(code) == 2 or len(code) == 3 or len(code) == 4 or len(code) == 7:
total += 1
print(total)
| 28.857143
| 84
| 0.549505
|
def parse_line(line):
line = line.split('|')
inputs = line[0].split()
outputs = line[1].split()
return inputs, outputs
with open('input.txt') as file:
total = 0
for line in file:
inputs, outputs = parse_line(line)
for code in outputs:
if len(code) == 2 or len(code) == 3 or len(code) == 4 or len(code) == 7:
total += 1
print(total)
| 0
| 0
| 0
| 0
| 0
| 113
| 0
| 0
| 22
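The length check in AoCSignals.py works because four seven-segment digits have unique segment counts: 1 uses 2 segments, 7 uses 3, 4 uses 4, and 8 uses all 7. A sketch making that mapping explicit (the signal strings are illustrative, not puzzle input):
# Segment counts that uniquely identify a digit on a seven-segment display.
UNIQUE_LENGTHS = {2: 1, 3: 7, 4: 4, 7: 8}
codes = ['ab', 'dab', 'eafb', 'acedgfb', 'cdfbe']  # illustrative output codes
easy_digits = sum(1 for code in codes if len(code) in UNIQUE_LENGTHS)
print(easy_digits)  # -> 4 (only 'cdfbe', with 5 segments, is ambiguous)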
|
13d4af942e0928ac2e926dad1c9830ea003345d6
| 1,885
|
py
|
Python
|
4.Graph_pangenome/1.construction_graph_genome/02_scripts/prepareAugmentFiles.py
|
YaoZhou89/TGG
|
b9b30f6a1bf365895c39cb6fa4dddf0588d3c5dd
|
[
"MIT"
] | 7
|
2022-01-19T14:17:23.000Z
|
2022-02-08T12:17:39.000Z
|
4.Graph_pangenome/1.construction_graph_genome/02_scripts/prepareAugmentFiles.py
|
YaoZhou89/TGG
|
b9b30f6a1bf365895c39cb6fa4dddf0588d3c5dd
|
[
"MIT"
] | null | null | null |
4.Graph_pangenome/1.construction_graph_genome/02_scripts/prepareAugmentFiles.py
|
YaoZhou89/TGG
|
b9b30f6a1bf365895c39cb6fa4dddf0588d3c5dd
|
[
"MIT"
] | null | null | null |
import vcf
import argparse
from pyfaidx import Fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import MutableSeq
parser = argparse.ArgumentParser(description='Make fasta for each variant to align/augment.')
parser.add_argument('-v', help='the input VCF file.', required=True)
parser.add_argument('-r', help='the reference FASTA file.', required=True)
parser.add_argument('-s', help='the output FASTA file with SV sequence to align/augment', required=True)
parser.add_argument('-f', default=50000, type=int,
help='the flank size. Default 50000.')
args = parser.parse_args()
# get chromosome length
ref = Fasta(args.r)
# read vcf
vcfi = open(args.v, 'r')
vcf_reader = vcf.Reader(vcfi)
fa_outf = open(args.s, 'w')
tail_buff = 1000 # tail buffer: no sequence extracted from a buffer at the chunk tails to ensure they stay untouched
for record in vcf_reader:
chr_len = len(ref[record.CHROM])
# retrieve alt allele with flanks
# left flank sequence
fl1_e = record.POS - 1
    if fl1_e < tail_buff:
        fl1_s = tail_buff // 2
else:
fl1_s = fl1_e - args.f
fl1_s = max(0, fl1_s) + tail_buff
fl1_seq = ref[record.CHROM][fl1_s:fl1_e]
fl1_seq = fl1_seq.seq
# Get flank 2 sequence
fl2_s = record.POS + len(record.REF) - 1
if fl2_s > chr_len - tail_buff:
fl2_e = (chr_len + fl2_s)/2
else:
fl2_e = fl2_s + args.f
fl2_e = min(fl2_e, len(ref[record.CHROM])) - tail_buff
fl2_seq = ref[record.CHROM][int(fl2_s):int(fl2_e)]
fl2_seq = fl2_seq.seq
# Fasta record
oseq = fl1_seq + str(record.ALT[0]) + fl2_seq
svid = '{}_{}_{}_{}'.format(record.CHROM, int(fl1_s), int(fl2_e), record.ID)
orec = SeqRecord(MutableSeq(oseq.upper()), id=svid,
description='')
SeqIO.write(orec, fa_outf, "fasta")
fa_outf.close()
vcfi.close()
| 35.566038
| 116
| 0.669496
|
import vcf
import argparse
from pyfaidx import Fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import MutableSeq
parser = argparse.ArgumentParser(description='Make fasta for each variant to align/augment.')
parser.add_argument('-v', help='the input VCF file.', required=True)
parser.add_argument('-r', help='the reference FASTA file.', required=True)
parser.add_argument('-s', help='the output FASTA file with SV sequence to align/augment', required=True)
parser.add_argument('-f', default=50000, type=int,
help='the flank size. Default 50000.')
args = parser.parse_args()
# get chromosome length
ref = Fasta(args.r)
# read vcf
vcfi = open(args.v, 'r')
vcf_reader = vcf.Reader(vcfi)
fa_outf = open(args.s, 'w')
tail_buff = 1000 # tail buffer: no sequence extracted from a buffer at the chunk tails to ensure they stay untouched
for record in vcf_reader:
chr_len = len(ref[record.CHROM])
# retrieve alt allele with flanks
# left flank sequence
fl1_e = record.POS - 1
    if fl1_e < tail_buff:
        fl1_s = tail_buff // 2
else:
fl1_s = fl1_e - args.f
fl1_s = max(0, fl1_s) + tail_buff
fl1_seq = ref[record.CHROM][fl1_s:fl1_e]
fl1_seq = fl1_seq.seq
# Get flank 2 sequence
fl2_s = record.POS + len(record.REF) - 1
if fl2_s > chr_len - tail_buff:
fl2_e = (chr_len + fl2_s)/2
else:
fl2_e = fl2_s + args.f
fl2_e = min(fl2_e, len(ref[record.CHROM])) - tail_buff
fl2_seq = ref[record.CHROM][int(fl2_s):int(fl2_e)]
fl2_seq = fl2_seq.seq
# Fasta record
oseq = fl1_seq + str(record.ALT[0]) + fl2_seq
svid = '{}_{}_{}_{}'.format(record.CHROM, int(fl1_s), int(fl2_e), record.ID)
orec = SeqRecord(MutableSeq(oseq.upper()), id=svid,
description='')
SeqIO.write(orec, fa_outf, "fasta")
fa_outf.close()
vcfi.close()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
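To make the flank-window arithmetic in prepareAugmentFiles.py concrete, a worked sketch with toy coordinates (all numbers illustrative; ref_len stands in for len(record.REF), and neither end of the window hits a chunk tail):
# Illustrative flank computation for a variant at POS=60000 on a 200000 bp
# chromosome, with the script's defaults (f=50000, tail_buff=1000).
pos, ref_len, f, tail_buff, chr_len = 60000, 1, 50000, 1000, 200000
fl1_e = pos - 1                               # 59999: left flank ends before REF
fl1_s = max(0, fl1_e - f) + tail_buff         # 10999: left flank start, buffered
fl2_s = pos + ref_len - 1                     # 60000: right flank starts after REF
fl2_e = min(fl2_s + f, chr_len) - tail_buff   # 109000: right flank end, buffered
print(fl1_s, fl1_e, fl2_s, fl2_e)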
|
878b92fc40120b934c575d753a800a9538d9a14d
| 22,851
|
py
|
Python
|
gateware/daqnet/ethernet/rmii.py
|
dvdfreitag/daqnet
|
6a84185d2cf35d99dd620d1e09b4df7fb0630784
|
[
"MIT"
] | null | null | null |
gateware/daqnet/ethernet/rmii.py
|
dvdfreitag/daqnet
|
6a84185d2cf35d99dd620d1e09b4df7fb0630784
|
[
"MIT"
] | null | null | null |
gateware/daqnet/ethernet/rmii.py
|
dvdfreitag/daqnet
|
6a84185d2cf35d99dd620d1e09b4df7fb0630784
|
[
"MIT"
] | null | null | null |
"""
Ethernet RMII Interface
Copyright 2018-2019 Adam Greig
Released under the MIT license; see LICENSE for details.
"""
| 30.921516
| 78
| 0.495427
|
"""
Ethernet RMII Interface
Copyright 2018-2019 Adam Greig
Released under the MIT license; see LICENSE for details.
"""
from nmigen import Elaboratable, Module, Signal, Cat
from .crc import CRC32
from .mac_address_match import MACAddressMatch
class RMIIRx(Elaboratable):
"""
RMII receive module
Receives incoming packets and saves them to a memory. Validates incoming
frame check sequence and only asserts `rx_valid` when an entire valid
packet has been saved to the port.
This module must be run in the RMII ref_clk domain, and the memory port
and inputs and outputs must also be in that clock domain.
Parameters:
* `mac_addr`: 6-byte MAC address (list of ints)
Ports:
* `write_port`: a write-capable memory port, 8 bits wide by 2048,
running in the RMII ref_clk domain
Pins:
* `crs_dv`: RMII carrier sense/data valid
* `rxd0`: RMII receive data 0
* `rxd1`: RMII receive data 1
Outputs:
* `rx_valid`: pulsed when a valid packet is in memory
* `rx_offset`: n-bit start address of received packet
* `rx_len`: 11-bit length of received packet
"""
def __init__(self, mac_addr, write_port, crs_dv, rxd0, rxd1):
# Outputs
self.rx_valid = Signal()
self.rx_offset = Signal(write_port.addr.width)
self.rx_len = Signal(11)
# Store arguments
self.mac_addr = mac_addr
self.write_port = write_port
self.crs_dv = crs_dv
self.rxd0 = rxd0
self.rxd1 = rxd1
def elaborate(self, platform):
m = Module()
m.submodules.crc = crc = CRC32()
m.submodules.mac_match = mac_match = MACAddressMatch(self.mac_addr)
m.submodules.rxbyte = rxbyte = RMIIRxByte(
self.crs_dv, self.rxd0, self.rxd1)
adr = Signal(self.write_port.addr.width)
with m.FSM() as fsm:
m.d.comb += [
self.write_port.addr.eq(adr),
self.write_port.data.eq(rxbyte.data),
self.write_port.en.eq(rxbyte.data_valid),
crc.data.eq(rxbyte.data),
crc.data_valid.eq(rxbyte.data_valid),
crc.reset.eq(fsm.ongoing("IDLE")),
mac_match.data.eq(rxbyte.data),
mac_match.data_valid.eq(rxbyte.data_valid),
mac_match.reset.eq(fsm.ongoing("IDLE")),
]
# Idle until we see data valid
with m.State("IDLE"):
m.d.sync += self.rx_len.eq(0)
m.d.sync += self.rx_valid.eq(0)
with m.If(rxbyte.dv):
m.d.sync += self.rx_offset.eq(adr)
m.next = "DATA"
# Save incoming data to memory
with m.State("DATA"):
with m.If(rxbyte.data_valid):
m.d.sync += adr.eq(adr + 1)
m.d.sync += self.rx_len.eq(self.rx_len + 1)
with m.Elif(~rxbyte.dv):
m.next = "EOF"
with m.State("EOF"):
with m.If(crc.crc_match & mac_match.mac_match):
m.d.sync += self.rx_valid.eq(1)
m.next = "IDLE"
return m
class RMIIRxByte(Elaboratable):
"""
RMII Receive Byte De-muxer
Handles receiving a byte dibit-by-dibit.
This submodule must be in the RMII ref_clk clock domain,
and its outputs are likewise in that domain.
Pins:
* `crs_dv`: Data valid, input
* `rxd0`: RX data 0, input
* `rxd1`: RX data 1, input
Outputs:
* `data`: 8-bit wide output data
* `data_valid`: Asserted for one cycle when `data` is valid
* `dv`: RMII Data valid recovered signal
* `crs`: RMII Carrier sense recovered signal
"""
def __init__(self, crs_dv, rxd0, rxd1):
# Outputs
self.data = Signal(8)
self.data_valid = Signal()
self.dv = Signal()
self.crs = Signal()
self.crs_dv = crs_dv
self.rxd0 = rxd0
self.rxd1 = rxd1
def elaborate(self, platform):
m = Module()
# Sample RMII signals on rising edge of ref_clk
crs_dv_reg = Signal()
rxd_reg = Signal(2)
m.d.sync += [
crs_dv_reg.eq(self.crs_dv),
rxd_reg.eq(Cat(self.rxd0, self.rxd1)),
]
with m.FSM():
with m.State("IDLE"):
m.d.sync += [
self.crs.eq(0),
self.dv.eq(0),
self.data_valid.eq(0),
]
with m.If(crs_dv_reg & (rxd_reg == 0b01)):
m.next = "PREAMBLE_SFD"
with m.State("PREAMBLE_SFD"):
m.d.sync += [
self.crs.eq(1),
self.dv.eq(1),
self.data_valid.eq(0),
]
with m.If(rxd_reg == 0b11):
m.next = "NIBBLE1"
with m.Elif(rxd_reg != 0b01):
m.next = "IDLE"
with m.State("NIBBLE1"):
m.d.sync += [
self.data[0:2].eq(rxd_reg),
self.data_valid.eq(0),
]
with m.If(self.dv):
m.d.sync += self.crs.eq(crs_dv_reg)
m.next = "NIBBLE2"
with m.Else():
m.next = "IDLE"
with m.State("NIBBLE2"):
m.d.sync += [
self.data[2:4].eq(rxd_reg),
self.data_valid.eq(0),
]
with m.If(self.dv):
m.d.sync += self.dv.eq(crs_dv_reg)
m.next = "NIBBLE3"
with m.Else():
m.next = "IDLE"
with m.State("NIBBLE3"):
m.d.sync += [
self.data[4:6].eq(rxd_reg),
self.data_valid.eq(0),
]
with m.If(self.dv):
m.d.sync += self.crs.eq(crs_dv_reg)
m.next = "NIBBLE4"
with m.Else():
m.next = "IDLE"
with m.State("NIBBLE4"):
m.d.sync += [
self.data[6:8].eq(rxd_reg),
self.data_valid.eq(0),
]
with m.If(self.dv):
m.d.sync += [
self.dv.eq(crs_dv_reg),
self.data_valid.eq(1),
]
m.next = "NIBBLE1"
with m.Else():
m.d.sync += self.data_valid.eq(1),
m.next = "IDLE"
return m
class RMIITx(Elaboratable):
"""
RMII transmit module
Transmits outgoing packets from a memory. Adds preamble, start of frame
delimiter, and frame check sequence (CRC32) automatically.
This module must be run in the RMII ref_clk domain, and the memory port
and inputs and outputs must also be in that clock domain.
Ports:
* `read_port`: a read memory port, 8 bits wide by 2048,
running in the RMII ref_clk domain
Pins:
* `txen`: RMII transmit enable
* `txd0`: RMII transmit data 0
* `txd1`: RMII transmit data 1
Inputs:
* `tx_start`: Pulse high to begin transmission of a packet
* `tx_offset`: n-bit address offset of packet to transmit
* `tx_len`: 11-bit length of packet to transmit
Outputs:
* `tx_ready`: Asserted while ready to transmit a new packet
"""
def __init__(self, read_port, txen, txd0, txd1):
# Inputs
self.tx_start = Signal()
self.tx_offset = Signal(read_port.addr.width)
self.tx_len = Signal(11)
# Outputs
self.tx_ready = Signal()
self.read_port = read_port
self.txen = txen
self.txd0 = txd0
self.txd1 = txd1
def elaborate(self, platform):
m = Module()
# Transmit byte counter
tx_idx = Signal(self.read_port.addr.width)
# Transmit length latch
tx_len = Signal(11)
# Transmit offset latch
tx_offset = Signal(self.read_port.addr.width)
m.submodules.crc = crc = CRC32()
m.submodules.txbyte = txbyte = RMIITxByte(
self.txen, self.txd0, self.txd1)
with m.FSM() as fsm:
m.d.comb += [
self.read_port.addr.eq(tx_idx + tx_offset),
crc.data.eq(txbyte.data),
crc.reset.eq(fsm.ongoing("IDLE")),
crc.data_valid.eq(
(fsm.ongoing("DATA") | fsm.ongoing("PAD"))
& txbyte.ready),
self.tx_ready.eq(fsm.ongoing("IDLE")),
txbyte.data_valid.eq(
~(fsm.ongoing("IDLE") | fsm.ongoing("IPG"))),
]
with m.State("IDLE"):
m.d.comb += txbyte.data.eq(0)
m.d.sync += [
tx_idx.eq(0),
tx_offset.eq(self.tx_offset),
tx_len.eq(self.tx_len),
]
with m.If(self.tx_start):
m.next = "PREAMBLE"
with m.State("PREAMBLE"):
m.d.comb += txbyte.data.eq(0x55)
with m.If(txbyte.ready):
with m.If(tx_idx == 6):
m.d.sync += tx_idx.eq(0)
m.next = "SFD"
with m.Else():
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.State("SFD"):
m.d.comb += txbyte.data.eq(0xD5)
with m.If(txbyte.ready):
m.next = "DATA"
with m.State("DATA"):
m.d.comb += txbyte.data.eq(self.read_port.data)
with m.If(txbyte.ready):
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.If(tx_idx == tx_len - 1):
with m.If(tx_len < 60):
m.next = "PAD"
with m.Else():
m.next = "FCS1"
with m.State("PAD"):
m.d.comb += txbyte.data.eq(0x00)
with m.If(txbyte.ready):
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.If(tx_idx == 59):
m.next = "FCS1"
with m.State("FCS1"):
m.d.comb += txbyte.data.eq(crc.crc_out[0:8])
with m.If(txbyte.ready):
m.next = "FCS2"
with m.State("FCS2"):
m.d.comb += txbyte.data.eq(crc.crc_out[8:16])
with m.If(txbyte.ready):
m.next = "FCS3"
with m.State("FCS3"):
m.d.comb += txbyte.data.eq(crc.crc_out[16:24])
with m.If(txbyte.ready):
m.next = "FCS4"
with m.State("FCS4"):
m.d.comb += txbyte.data.eq(crc.crc_out[24:32])
with m.If(txbyte.ready):
m.d.sync += tx_idx.eq(0)
m.next = "IPG"
with m.State("IPG"):
m.d.sync += tx_idx.eq(tx_idx + 1)
with m.If(tx_idx == 48):
m.next = "IDLE"
return m
class RMIITxByte(Elaboratable):
"""
RMII Transmit Byte Muxer
Handles transmitting a byte dibit-by-dibit.
This submodule must be in the RMII ref_clk clock domain,
and its inputs and outputs are likewise in that domain.
Pins:
* `txen`: RMII Transmit enable
* `txd0`: TMII Transmit data 0
* `txd1`: TMII Transmit data 1
Inputs:
* `data`: 8-bit wide data to transmit. Latched internally so you may
update it to the next word after asserting `data_valid`.
* `data_valid`: Assert while valid data is present at `data`.
Outputs:
* `ready`: Asserted when ready to receive new data. This is asserted
while the final dibit is being transmitted so that new data
can be produced on the next clock cycle.
"""
def __init__(self, txen, txd0, txd1):
# Inputs
self.data = Signal(8)
self.data_valid = Signal()
# Outputs
self.ready = Signal()
self.txen = txen
self.txd0 = txd0
self.txd1 = txd1
def elaborate(self, platform):
m = Module()
# Register input data on the data_valid signal
data_reg = Signal(8)
with m.FSM() as fsm:
m.d.comb += [
self.ready.eq(fsm.ongoing("IDLE") | fsm.ongoing("NIBBLE4")),
self.txen.eq(~fsm.ongoing("IDLE")),
]
with m.State("IDLE"):
m.d.comb += [
self.txd0.eq(0),
self.txd1.eq(0),
]
m.d.sync += data_reg.eq(self.data)
with m.If(self.data_valid):
m.next = "NIBBLE1"
with m.State("NIBBLE1"):
m.d.comb += [
self.txd0.eq(data_reg[0]),
self.txd1.eq(data_reg[1]),
]
m.next = "NIBBLE2"
with m.State("NIBBLE2"):
m.d.comb += [
self.txd0.eq(data_reg[2]),
self.txd1.eq(data_reg[3]),
]
m.next = "NIBBLE3"
with m.State("NIBBLE3"):
m.d.comb += [
self.txd0.eq(data_reg[4]),
self.txd1.eq(data_reg[5]),
]
m.next = "NIBBLE4"
with m.State("NIBBLE4"):
m.d.comb += [
self.txd0.eq(data_reg[6]),
self.txd1.eq(data_reg[7]),
]
m.d.sync += data_reg.eq(self.data)
with m.If(self.data_valid):
m.next = "NIBBLE1"
with m.Else():
m.next = "IDLE"
return m
def test_rmii_rx():
import random
from nmigen.back import pysim
from nmigen import Memory
crs_dv = Signal()
rxd0 = Signal()
rxd1 = Signal()
mem = Memory(width=8, depth=128)
mem_port = mem.write_port()
mac_addr = [random.randint(0, 255) for _ in range(6)]
rmii_rx = RMIIRx(mac_addr, mem_port, crs_dv, rxd0, rxd1)
def testbench():
def tx_packet():
yield (crs_dv.eq(1))
# Preamble
for _ in range(random.randint(10, 40)):
yield (rxd0.eq(1))
yield (rxd1.eq(0))
yield
# SFD
yield (rxd0.eq(1))
yield (rxd1.eq(1))
yield
# Data
for txbyte in txbytes:
for dibit in range(0, 8, 2):
yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
yield
yield (crs_dv.eq(0))
# Finish clocking
for _ in range(6):
yield
for _ in range(10):
yield
txbytes = [
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xF0, 0xDE, 0xF1, 0x38, 0x89,
0x40, 0x08, 0x00, 0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
0x40, 0x01, 0xB6, 0xD0, 0xC0, 0xA8, 0x01, 0x88, 0xC0, 0xA8, 0x01,
0x00, 0x08, 0x00, 0x0D, 0xD9, 0x12, 0x1E, 0x00, 0x07, 0x3B, 0x3E,
0x0C, 0x5C, 0x00, 0x00, 0x00, 0x00, 0x13, 0x03, 0x0F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x65,
0x6C, 0x6C, 0x6F, 0x20, 0x57, 0x6F, 0x72, 0x6C, 0x64, 0x48, 0x52,
0x32, 0x1F, 0x9E
]
# Transmit first packet
yield from tx_packet()
# Check packet was received
assert (yield rmii_rx.rx_valid)
assert (yield rmii_rx.rx_len) == 102
assert (yield rmii_rx.rx_offset) == 0
mem_contents = []
for idx in range(102):
mem_contents.append((yield mem[idx]))
assert mem_contents == txbytes
# Pause (inter-frame gap)
for _ in range(20):
yield
assert (yield rmii_rx.rx_valid) == 0
# Transmit a second packet
yield from tx_packet()
# Check packet was received
assert (yield rmii_rx.rx_valid)
assert (yield rmii_rx.rx_len) == 102
assert (yield rmii_rx.rx_offset) == 102
mem_contents = []
for idx in range(102):
mem_contents.append((yield mem[(102+idx) % 128]))
assert mem_contents == txbytes
yield
mod = Module()
mod.submodules += rmii_rx, mem_port
vcdf = open("rmii_rx.vcd", "w")
with pysim.Simulator(mod, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
def test_rmii_rx_byte():
import random
from nmigen.back import pysim
crs_dv = Signal()
rxd0 = Signal()
rxd1 = Signal()
rmii_rx_byte = RMIIRxByte(crs_dv, rxd0, rxd1)
def testbench():
for _ in range(10):
yield
txbytes = [random.randint(0, 255) for _ in range(8)]
rxbytes = []
yield (crs_dv.eq(1))
# Preamble
for _ in range(random.randint(10, 40)):
yield (rxd0.eq(1))
yield (rxd1.eq(0))
yield
# SFD
yield (rxd0.eq(1))
yield (rxd1.eq(1))
yield
# Data (except last two bytes), with CRS=1 DV=1
for txbyte in txbytes[:-2]:
for dibit in range(0, 8, 2):
yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
yield
if (yield rmii_rx_byte.data_valid):
rxbytes.append((yield rmii_rx_byte.data))
# Data (last two bytes), with CRS=0 DV=1
for txbyte in txbytes[-2:]:
for dibit in range(0, 8, 2):
yield (rxd0.eq((txbyte >> (dibit + 0)) & 1))
yield (rxd1.eq((txbyte >> (dibit + 1)) & 1))
if dibit in (0, 4):
# CRS=0
yield (crs_dv.eq(0))
else:
# DV=1
yield (crs_dv.eq(1))
yield
if (yield rmii_rx_byte.data_valid):
rxbytes.append((yield rmii_rx_byte.data))
yield (crs_dv.eq(0))
for _ in range(10):
yield
if (yield rmii_rx_byte.data_valid):
rxbytes.append((yield rmii_rx_byte.data))
assert rxbytes == txbytes
vcdf = open("rmii_rx_byte.vcd", "w")
with pysim.Simulator(rmii_rx_byte, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
def test_rmii_tx():
from nmigen.back import pysim
from nmigen import Memory
txen = Signal()
txd0 = Signal()
txd1 = Signal()
txbytes = [
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x44, 0x4e, 0x30, 0x76,
0x9e, 0x08, 0x06, 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
0x02, 0x44, 0x4e, 0x30, 0x76, 0x9e, 0xc0, 0xa8, 0x02, 0xc8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x02, 0xc8
]
preamblebytes = [0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0xD5]
padbytes = [0x00] * (60 - len(txbytes))
crcbytes = [0x44, 0x5E, 0xB4, 0xD2]
txnibbles = []
rxnibbles = []
for txbyte in preamblebytes + txbytes + padbytes + crcbytes:
txnibbles += [
(txbyte & 0b11),
((txbyte >> 2) & 0b11),
((txbyte >> 4) & 0b11),
((txbyte >> 6) & 0b11),
]
# Put the transmit bytes into memory at some offset, and fill the rest of
# memory with all-1s (to ensure we're not relying on memory being zeroed).
txbytes_zp = txbytes + [0xFF]*(128 - len(txbytes))
txoffset = 120
txbytes_mem = txbytes_zp[-txoffset:] + txbytes_zp[:-txoffset]
mem = Memory(width=8, depth=128, init=txbytes_mem)
mem_port = mem.read_port()
rmii_tx = RMIITx(mem_port, txen, txd0, txd1)
def testbench():
for _ in range(10):
yield
yield (rmii_tx.tx_start.eq(1))
yield (rmii_tx.tx_offset.eq(txoffset))
yield (rmii_tx.tx_len.eq(len(txbytes)))
yield
yield (rmii_tx.tx_start.eq(0))
yield (rmii_tx.tx_offset.eq(0))
yield (rmii_tx.tx_len.eq(0))
for _ in range((len(txbytes) + 12) * 4 + 120):
if (yield txen):
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
print(len(txnibbles), len(rxnibbles))
print(txnibbles)
print(rxnibbles)
assert txnibbles == rxnibbles
mod = Module()
mod.submodules += rmii_tx, mem_port
vcdf = open("rmii_tx.vcd", "w")
with pysim.Simulator(mod, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
def test_rmii_tx_byte():
import random
from nmigen.back import pysim
txen = Signal()
txd0 = Signal()
txd1 = Signal()
rmii_tx_byte = RMIITxByte(txen, txd0, txd1)
data = rmii_tx_byte.data
data_valid = rmii_tx_byte.data_valid
def testbench():
for _ in range(10):
yield
txbytes = [random.randint(0, 255) for _ in range(8)]
txnibbles = []
rxnibbles = []
yield (data_valid.eq(1))
for txbyte in txbytes:
txnibbles += [
(txbyte & 0b11),
((txbyte >> 2) & 0b11),
((txbyte >> 4) & 0b11),
((txbyte >> 6) & 0b11),
]
yield (data.eq(txbyte))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
yield (data_valid.eq(0))
yield
rxnibbles.append((yield txd0) | ((yield txd1) << 1))
rxnibbles = rxnibbles[1:]
assert txnibbles == rxnibbles
for _ in range(10):
yield
vcdf = open("rmii_tx_byte.vcd", "w")
with pysim.Simulator(rmii_tx_byte, vcd_file=vcdf) as sim:
sim.add_clock(1/50e6)
sim.add_sync_process(testbench())
sim.run()
| 0
| 0
| 0
| 13,801
| 8,613
| 0
| 0
| 57
| 251
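Beyond the pysim testbenches included above, modules like these can also be converted to Verilog standalone. A minimal sketch using nMigen's verilog backend (requires a yosys installation; the port list is an assumption inferred from RMIIRxByte's constructor arguments and outputs):
# Hypothetical standalone conversion of RMIIRxByte to Verilog.
from nmigen import Signal
from nmigen.back import verilog

crs_dv, rxd0, rxd1 = Signal(), Signal(), Signal()
rx = RMIIRxByte(crs_dv, rxd0, rxd1)
with open("rmii_rx_byte.v", "w") as f:
    f.write(verilog.convert(rx, ports=[crs_dv, rxd0, rxd1,
                                       rx.data, rx.data_valid, rx.dv, rx.crs]))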
|
d60d3beec1a8bf5f5b1156875db9e3b65d35b8d6
| 536
|
py
|
Python
|
cogs/roll.py
|
morozoffnor/govnoed_grisha_rewritten
|
6a34336cede03a081954479f998d5a8162e1a31d
|
[
"Apache-2.0"
] | null | null | null |
cogs/roll.py
|
morozoffnor/govnoed_grisha_rewritten
|
6a34336cede03a081954479f998d5a8162e1a31d
|
[
"Apache-2.0"
] | null | null | null |
cogs/roll.py
|
morozoffnor/govnoed_grisha_rewritten
|
6a34336cede03a081954479f998d5a8162e1a31d
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.path.insert(1, '../functions')
| 23.304348
| 73
| 0.697761
|
import discord
from discord.ext import commands
import random
import sys
sys.path.insert(1, '../functions')
from functions.cmd_print import cmd_print
class Roll(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
async def roll(self, ctx, *, number=100):
generatedNumber = random.randrange(1, number, 1)
await ctx.send(generatedNumber)
await cmd_print('debug', f'Generated number - {generatedNumber}')
def setup(client):
client.add_cog(Roll(client))
| 0
| 215
| 0
| 93
| 0
| 30
| 0
| 16
| 134
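One behavioral detail in the roll cog above: random.randrange(1, number, 1) excludes the upper bound, so the default !roll yields 1-99 and never 100, whereas random.randint(1, number) is inclusive. A quick comparison sketch:
import random
print(max(random.randrange(1, 100) for _ in range(10_000)))  # at most 99
print(max(random.randint(1, 100) for _ in range(10_000)))    # 100, with overwhelming probability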
|
e3ae88a557bc39da15fb4fb98b0be693a0a7911c
| 377
|
py
|
Python
|
scripts/aggregation_test_script.py
|
michaelfaerber/Agnos
|
b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf
|
[
"MIT"
] | null | null | null |
scripts/aggregation_test_script.py
|
michaelfaerber/Agnos
|
b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf
|
[
"MIT"
] | 3
|
2021-12-10T01:22:05.000Z
|
2021-12-14T21:33:16.000Z
|
scripts/aggregation_test_script.py
|
michaelfaerber/Agnos
|
b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf
|
[
"MIT"
] | null | null | null |
sentences = [['a', 'b', 'c'], ['a', 'd','e']]
default_val = ''
entity_embeddings_dict = {}
entity_embeddings_dict = {sentence[0]: doThis(sentence) + entity_embeddings_dict.get(sentence[0], default_val) \
for sentence in sentences }
print(entity_embeddings_dict)
| 25.133333
| 112
| 0.681698
|
sentences = [['a', 'b', 'c'], ['a', 'd','e']]
def doThis(sentence):
ret = None
for x in sentence:
if ret is None:
ret = x
else:
ret += x
return ret
default_val = ''
entity_embeddings_dict = {}
entity_embeddings_dict = {sentence[0]: doThis(sentence) + entity_embeddings_dict.get(sentence[0], default_val) \
for sentence in sentences }
print(entity_embeddings_dict)
| 0
| 0
| 0
| 0
| 0
| 93
| 0
| 0
| 22
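Note what the comprehension above actually computes: both sentences share the key 'a', duplicate keys in a dict comprehension collapse to the last one, and the .get() call reads the previous, still-empty binding of entity_embeddings_dict, so the printed result is {'a': 'ade'} rather than an accumulation. If accumulating across duplicate keys is the intent (an assumption), a plain loop does it:
# Accumulating variant: concatenates doThis() results per leading token.
entity_embeddings_dict = {}
for sentence in sentences:
    key = sentence[0]
    entity_embeddings_dict[key] = entity_embeddings_dict.get(key, default_val) + doThis(sentence)
print(entity_embeddings_dict)  # -> {'a': 'abcade'}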
|
bd662dc3d2fdbefbc6e614c5d255badccde8b474
| 5,481
|
py
|
Python
|
scripts/mod_grav/plot_limits.py
|
charlesblakemore/opt_lev_analysis
|
704f174e9860907de349688ed82b5812bbb07c2d
|
[
"MIT"
] | null | null | null |
scripts/mod_grav/plot_limits.py
|
charlesblakemore/opt_lev_analysis
|
704f174e9860907de349688ed82b5812bbb07c2d
|
[
"MIT"
] | null | null | null |
scripts/mod_grav/plot_limits.py
|
charlesblakemore/opt_lev_analysis
|
704f174e9860907de349688ed82b5812bbb07c2d
|
[
"MIT"
] | 1
|
2019-11-27T19:10:25.000Z
|
2019-11-27T19:10:25.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import grav_util_3 as gu
import bead_util as bu
import warnings
warnings.filterwarnings("ignore")
theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'
data_dirs = [#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz', \
#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz_elec-term', \
#\
#'/data/20180704/bead1/grav_data/shield', \
#'/data/20180704/bead1/grav_data/shield_1s_1h', \
#'/data/20180704/bead1/grav_data/shield2', \
#'/data/20180704/bead1/grav_data/shield3', \
#'/data/20180704/bead1/grav_data/shield4', \
'/data/20180704/no_bead/grav_data/shield', \
#\
#'/data/20180808/bead4/grav_data/shield1'
]
fit_type = 'Gaussian'
#fit_type = 'Planar'
p0_bead_dict = {'20180625': [19.0,40.0,20.0], \
'20180704': [18.7,40.0,20.0], \
'20180808': [18,40.0,23.0] \
}
load_agg = True
harms = [1,2,3,4,5,6]
#opt_ext = 'TEST'
opt_ext = '_6harm-full'
if fit_type == 'Gaussian':
data_ind = 2
err_ind = 4
if fit_type == 'Planar':
data_ind = 0
err_ind = 1
for ddir in data_dirs:
print()
parts = ddir.split('/')
date = parts[2]
p0_bead = p0_bead_dict[date]
nobead = ('no_bead' in parts) or ('nobead' in parts) or ('no-bead' in parts)
if nobead:
opt_ext += '_NO-BEAD'
agg_path = '/processed_data/aggdat/' + date + '_' + parts[-1] + opt_ext + '.agg'
alpha_arr_path = '/processed_data/alpha_arrs/' + date + '_' + parts[-1] + opt_ext + '.arr'
lambda_path = alpha_arr_path[:-4] + '_lambdas.arr'
if load_agg:
print(agg_path)
agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms)
agg_dat.load(agg_path)
agg_dat.reload_grav_funcs()
#agg_dat.fit_alpha_xyz_vs_alldim(weight_planar=False, plot=False, plot_hists=True)
alpha_arr = agg_dat.alpha_xyz_best_fit
lambdas = agg_dat.lambdas
np.save(open(alpha_arr_path, 'wb'), alpha_arr)
np.save(open(lambda_path, 'wb'), agg_dat.lambdas)
else:
alpha_arr = np.load(open(alpha_arr_path, 'rb'))
lambdas = np.load(open(lambda_path, 'rb'))
Ncomp = alpha_arr.shape[-2]
comp_colors = bu.get_color_map(Ncomp, cmap='viridis')
alpha_w = np.sum(alpha_arr[:,0:2,:,data_ind]*alpha_arr[:,0:2,:,err_ind]**(-2), axis=1) / \
np.sum(alpha_arr[:,0:2,:,err_ind]**(-2), axis=1)
#alpha_w = np.sum(alpha_arr[:,0:2,:,2], axis=1) * 0.5
errs_x = np.zeros_like(alpha_arr[:,0,0,0])
N = 0
for ind in range(Ncomp - 1):
errs_x += alpha_w[:,ind+1]**2
N += 1
errs_x = np.sqrt(errs_x / N)
sigma_alpha_w = 1.0 / np.sqrt( np.sum(alpha_arr[:,:2,:,3]**(-2), axis=1) )
N_w = np.sum(alpha_arr[:,:2,:,7], axis=1)
plt.figure(1)
if nobead:
plt.title(date + '_' + 'no-bead' + ': Result of %s Fitting' % fit_type, fontsize=16)
else:
plt.title(date + '_' + parts[-1] + ': Result of %s Fitting' % fit_type, fontsize=16)
plt.loglog(lambdas, np.abs(alpha_w[:,0]), lw=4, \
label='Template basis vector')
plt.loglog(lambdas, errs_x, '--', lw=2, \
label='Quadrature sum of other vectors')
plt.loglog(gu.limitdata[:,0], gu.limitdata[:,1], '--', label=gu.limitlab, \
linewidth=3, color='r')
plt.loglog(gu.limitdata2[:,0], gu.limitdata2[:,1], '--', label=gu.limitlab2, \
linewidth=3, color='k')
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-7, 1e-3)
plt.ylim(1e4, 1e14)
plt.legend()
plt.grid()
plt.show()
for ind in range(Ncomp):
fig2 = plt.figure(2)
plt.title("%s fit for Basis Vector: %i" % (fit_type, ind))
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,data_ind]), \
color=comp_colors[ind], ls='--', label='$\\alpha_x$')
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,err_ind]), \
color=comp_colors[ind], ls='--', label='$\sigma_{\\alpha_x}$', \
alpha=0.5)
plt.loglog(lambdas, np.abs(alpha_w[:,ind]), \
color=comp_colors[ind], ls='-', lw=3, label='Weighted mean')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,data_ind]), \
color=comp_colors[ind], ls='-.', label='$\\alpha_y$')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,err_ind]), \
color=comp_colors[ind], ls='-.', label='$\sigma_{\\alpha_y}$', \
alpha=0.5)
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-6, 1e-3)
plt.ylim(1e6, 1e15)
plt.legend()
plt.grid()
fig_title = '/home/charles/plots/' + date + '/' + parts[-1] + '/' \
+ date + '_' + parts[-1] + '_%s-fit_comp%i.png' % (fit_type, ind)
fig2.savefig(fig_title)
plt.close(fig2)
#plt.show()
#for fig_num in [1,2,3]:
# plt.figure(fig_num)
# plt.xlabel('Length Scale: $\lambda$ [m]')
# plt.ylabel('Strength: |$\\alpha$| [arb]')
# plt.legend()
# plt.grid()
#plt.show()
| 31.142045
| 94
| 0.560482
|
import dill as pickle
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import grav_util_3 as gu
import bead_util as bu
import configuration as config
import warnings
warnings.filterwarnings("ignore")
theory_data_dir = '/data/grav_sim_data/2um_spacing_data/'
data_dirs = [#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz', \
#'/data/20180625/bead1/grav_data/shield/X50-75um_Z15-25um_17Hz_elec-term', \
#\
#'/data/20180704/bead1/grav_data/shield', \
#'/data/20180704/bead1/grav_data/shield_1s_1h', \
#'/data/20180704/bead1/grav_data/shield2', \
#'/data/20180704/bead1/grav_data/shield3', \
#'/data/20180704/bead1/grav_data/shield4', \
'/data/20180704/no_bead/grav_data/shield', \
#\
#'/data/20180808/bead4/grav_data/shield1'
]
fit_type = 'Gaussian'
#fit_type = 'Planar'
p0_bead_dict = {'20180625': [19.0,40.0,20.0], \
'20180704': [18.7,40.0,20.0], \
'20180808': [18,40.0,23.0] \
}
load_agg = True
harms = [1,2,3,4,5,6]
#opt_ext = 'TEST'
opt_ext = '_6harm-full'
if fit_type == 'Gaussian':
data_ind = 2
err_ind = 4
if fit_type == 'Planar':
data_ind = 0
err_ind = 1
for ddir in data_dirs:
print()
parts = ddir.split('/')
date = parts[2]
p0_bead = p0_bead_dict[date]
nobead = ('no_bead' in parts) or ('nobead' in parts) or ('no-bead' in parts)
if nobead:
opt_ext += '_NO-BEAD'
agg_path = '/processed_data/aggdat/' + date + '_' + parts[-1] + opt_ext + '.agg'
alpha_arr_path = '/processed_data/alpha_arrs/' + date + '_' + parts[-1] + opt_ext + '.arr'
lambda_path = alpha_arr_path[:-4] + '_lambdas.arr'
if load_agg:
print(agg_path)
agg_dat = gu.AggregateData([], p0_bead=p0_bead, harms=harms)
agg_dat.load(agg_path)
agg_dat.reload_grav_funcs()
#agg_dat.fit_alpha_xyz_vs_alldim(weight_planar=False, plot=False, plot_hists=True)
alpha_arr = agg_dat.alpha_xyz_best_fit
lambdas = agg_dat.lambdas
np.save(open(alpha_arr_path, 'wb'), alpha_arr)
np.save(open(lambda_path, 'wb'), agg_dat.lambdas)
else:
alpha_arr = np.load(open(alpha_arr_path, 'rb'))
lambdas = np.load(open(lambda_path, 'rb'))
Ncomp = alpha_arr.shape[-2]
comp_colors = bu.get_color_map(Ncomp, cmap='viridis')
alpha_w = np.sum(alpha_arr[:,0:2,:,data_ind]*alpha_arr[:,0:2,:,err_ind]**(-2), axis=1) / \
np.sum(alpha_arr[:,0:2,:,err_ind]**(-2), axis=1)
#alpha_w = np.sum(alpha_arr[:,0:2,:,2], axis=1) * 0.5
errs_x = np.zeros_like(alpha_arr[:,0,0,0])
N = 0
for ind in range(Ncomp - 1):
errs_x += alpha_w[:,ind+1]**2
N += 1
errs_x = np.sqrt(errs_x / N)
sigma_alpha_w = 1.0 / np.sqrt( np.sum(alpha_arr[:,:2,:,3]**(-2), axis=1) )
N_w = np.sum(alpha_arr[:,:2,:,7], axis=1)
plt.figure(1)
if nobead:
plt.title(date + '_' + 'no-bead' + ': Result of %s Fitting' % fit_type, fontsize=16)
else:
plt.title(date + '_' + parts[-1] + ': Result of %s Fitting' % fit_type, fontsize=16)
plt.loglog(lambdas, np.abs(alpha_w[:,0]), lw=4, \
label='Template basis vector')
plt.loglog(lambdas, errs_x, '--', lw=2, \
label='Quadrature sum of other vectors')
plt.loglog(gu.limitdata[:,0], gu.limitdata[:,1], '--', label=gu.limitlab, \
linewidth=3, color='r')
plt.loglog(gu.limitdata2[:,0], gu.limitdata2[:,1], '--', label=gu.limitlab2, \
linewidth=3, color='k')
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-7, 1e-3)
plt.ylim(1e4, 1e14)
plt.legend()
plt.grid()
plt.show()
for ind in range(Ncomp):
fig2 = plt.figure(2)
plt.title("%s fit for Basis Vector: %i" % (fit_type, ind))
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,data_ind]), \
color=comp_colors[ind], ls='--', label='$\\alpha_x$')
plt.loglog(lambdas, np.abs(alpha_arr[:,0,ind,err_ind]), \
color=comp_colors[ind], ls='--', label='$\sigma_{\\alpha_x}$', \
alpha=0.5)
plt.loglog(lambdas, np.abs(alpha_w[:,ind]), \
color=comp_colors[ind], ls='-', lw=3, label='Weighted mean')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,data_ind]), \
color=comp_colors[ind], ls='-.', label='$\\alpha_y$')
plt.loglog(lambdas, np.abs(alpha_arr[:,1,ind,err_ind]), \
color=comp_colors[ind], ls='-.', label='$\sigma_{\\alpha_y}$', \
alpha=0.5)
plt.xlabel('Length Scale: $\lambda$ [m]')
plt.ylabel('Strength: |$\\alpha$| [arb]')
plt.xlim(1e-6, 1e-3)
plt.ylim(1e6, 1e15)
plt.legend()
plt.grid()
fig_title = '/home/charles/plots/' + date + '/' + parts[-1] + '/' \
+ date + '_' + parts[-1] + '_%s-fit_comp%i.png' % (fit_type, ind)
fig2.savefig(fig_title)
plt.close(fig2)
#plt.show()
#for fig_num in [1,2,3]:
# plt.figure(fig_num)
# plt.xlabel('Length Scale: $\lambda$ [m]')
# plt.ylabel('Strength: |$\\alpha$| [arb]')
# plt.legend()
# plt.grid()
#plt.show()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 67
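The alpha_w assignment above is an inverse-variance weighted mean over the x and y fit components. Written out, with $\alpha_i$ the per-axis best-fit values and $\sigma_i$ their uncertainties:

$$\alpha_w = \frac{\sum_i \alpha_i\,\sigma_i^{-2}}{\sum_i \sigma_i^{-2}}, \qquad \sigma_{\alpha_w} = \Big(\sum_i \sigma_i^{-2}\Big)^{-1/2},$$

which matches the form of the sigma_alpha_w line computed a few statements later.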
|
6a7f23c80626351657198207fd0b08ed99b96da9
| 1,095
|
py
|
Python
|
Intro to us.py
|
DarshPro/Minecraft-in-python
|
3a52a60e9a36107252aafc971b3a32fc84b135df
|
[
"MIT"
] | 1
|
2021-03-04T15:42:36.000Z
|
2021-03-04T15:42:36.000Z
|
Intro to us.py
|
DarshPro/Minecraft-in-python
|
3a52a60e9a36107252aafc971b3a32fc84b135df
|
[
"MIT"
] | null | null | null |
Intro to us.py
|
DarshPro/Minecraft-in-python
|
3a52a60e9a36107252aafc971b3a32fc84b135df
|
[
"MIT"
] | null | null | null |
# Test Cube
# Test button
# update is run every frame
# basic window
app = Ursina()
# basic cube
cube = Entity(model='quad', color=color.orange, scale = (2,5), position = (5,1))
# quad with texture
#sans_image = load_texture('Sans.png')
#sans = Entity(model = 'quad', texture = sans_image)
#sans = Entity(model = 'quad', texture = 'Sans.png')
# creating a block properly
test = Test_cube()
# creating a button
btn = Test_button()
punch_sound = Audio('assets/punch', loop=False, autoplay=False)
app.run()
| 20.277778
| 80
| 0.660274
|
from ursina import *
# Test Cube
class Test_cube(Entity):
def __init__(self):
super().__init__(
parent = scene,
model = 'cube',
texture = 'white_cube',
rotation = Vec3(45,45,45))
# Test button
class Test_button(Button):
def __init__(self,scale = 0.1):
super().__init__(
parent = scene,
model = 'cube',
texture = 'brick',
color = color.white,
highlight_color = color.red,
pressed_color = color.lime)
def input(self,key):
if self.hovered:
if key == 'left mouse down':
punch_sound.play()
# update is run every frame
def update():
#print('test')
if held_keys['a']:
cube.x -= 1 * time.dt
# basic window
app = Ursina()
# basic cube
cube = Entity(model='quad', color=color.orange, scale = (2,5), position = (5,1))
# quad with texture
#sans_image = load_texture('Sans.png')
#sans = Entity(model = 'quad', texture = sans_image)
#sans = Entity(model = 'quad', texture = 'Sans.png')
# creating a block properly
test = Test_cube()
# creating a button
btn = Test_button()
punch_sound = Audio('assets/punch', loop=False, autoplay=False)
app.run()
| 0
| 0
| 0
| 441
| 0
| 52
| 0
| -1
| 88
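A Python detail that makes the Ursina script above work: punch_sound is referenced inside Test_button.input() well before it is assigned at module scope, which is fine because global names are resolved when the handler runs, not when the class is defined. A minimal illustration (names are hypothetical):
# Late binding of globals: 'sound' need not exist until handler() is called.
def handler():
    print(sound)

sound = 'punch'
handler()  # -> punch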
|
dc32ad09fb9c4eae33279bcdb6ba48542e65e16b
| 20,087
|
py
|
Python
|
Python/utils/rvs/expressions.py
|
sgiguere/Fairness-Gaurantees-under-Demographic-Shift
|
d081307d34cde75ca74e07ddbe059e8273095aee
|
[
"MIT"
] | 1
|
2022-03-22T20:13:02.000Z
|
2022-03-22T20:13:02.000Z
|
Python/utils/rvs/expressions.py
|
sgiguere/Fairness-Gaurantees-under-Demographic-Shift
|
d081307d34cde75ca74e07ddbe059e8273095aee
|
[
"MIT"
] | null | null | null |
Python/utils/rvs/expressions.py
|
sgiguere/Fairness-Gaurantees-under-Demographic-Shift
|
d081307d34cde75ca74e07ddbe059e8273095aee
|
[
"MIT"
] | null | null | null |
def parse_value(value):
''' Attempts to interpret <value> as a number. '''
if isinstance(value, str):
try:
value = int(value)
except ValueError:
value = float(value)
return value
| 29.980597
| 129
| 0.697366
|
import numpy as np
from copy import copy, deepcopy
from utils.rvs.utils import COMPARATOR_NEGATIONS
def get_constant_name(counter={'c':0}):
name = 'c%d' % counter['c']
counter['c'] += 1
return name
def get_variable_name(counter={'v':0}):
name = 'v%d' % counter['v']
counter['v'] += 1
return name
def get_expression_name(counter={'e':0}):
    name = 'e%d' % counter['e']
    counter['e'] += 1
    return name
class Expression():
def __init__(self):
self.trivial_bounds = None
self._terms = []
def __eq__(self, E):
return isinstance(E, self.__class__) and all([ T==_T for (T,_T) in zip(self._terms,E._terms)])
class CommutativeExpression(Expression):
def __init__(self):
super().__init__()
def __eq__(self,E):
if not(isinstance(E, self.__class__)):
return False
terms, _terms = copy(self._terms), copy(E._terms)
try:
for term in terms:
_terms.remove(term)
except ValueError:
return False
return len(_terms) == 0
class NoncommutativeExpression(Expression):
def __init__(self):
super().__init__()
def __eq__(self,E):
return isinstance(E, self.__class__) and all([ T==_T for (T,_T) in zip(self._terms,E._terms) ])
class SingleTermExpression():
pass
class SampleSet(Expression):
def __init__(self, expression, condition=None):
super().__init__()
self.expression = expression
self.condition = condition
class ConstantExpression(Expression, SingleTermExpression):
def __init__(self, name, value):
super().__init__()
self.name = get_constant_name()
self.value = value
def __repr__(self):
return str(self.value)
def __eq__(self, E):
return isinstance(E,self.__class__) and self.value == E.value
class VariableExpression(Expression, SingleTermExpression):
def __init__(self, name):
super().__init__()
if name.startswith('#'):
self.name = name[1:]
self._special = 'index'
else:
self.name = name
self._special = None
def __repr__(self):
return self.name
def __eq__(self, E):
return isinstance(E,self.__class__) and self.name == E.name and self._special == E._special
class SampleSet(Expression, SingleTermExpression):
def __init__(self, expression, condition=None):
super().__init__()
name = '%r' % expression
if not(condition is None):
name += '|%r' % condition
self.name = '[%s]' % name
self.expression = expression
self.condition = condition
def __repr__(self):
return self.name
def __eq__(self, E):
return isinstance(E,self.__class__) and (self.expression == E.expression) and (self.condition == E.condition)
class ExpectedValue(Expression, SingleTermExpression):
def __init__(self, sample_set, is_func=None, is_expr=None):
super().__init__()
if is_func is None:
self.name = 'E%s' % sample_set.name
else:
self.name = 'E{%s(%s)}%s' % (is_func, is_expr.name, sample_set.name)
# self.name = 'E{%s}%s' % () + sample_set.name
self.sample_set = sample_set
self._is_func = is_func
self._is_expr = is_expr
def __repr__(self):
return self.name
def __eq__(self, E):
if not(isinstance(E,self.__class__)):
return False
if not(self.sample_set == E.sample_set):
return False
if self._is_func is None and E._is_func is None:
return True
else:
return (self._is_func == E._is_func) and (self._is_expr == E._is_expr)
class ComparatorExpression(VariableExpression):
def __init__(self, term1, comp, term2):
name = '%r %s %r' % (term1, comp, term2)
super().__init__(name)
self.variable = term1
self.comparator = comp
self.value = term2
class NegativeExpression(NoncommutativeExpression, SingleTermExpression):
def __init__(self, expression):
super().__init__()
self._terms = [expression]
def __repr__(self):
if isinstance(self._terms[0], SumExpression):
return '-(%r)' % self._terms[0]
return '-%r' % self._terms[0]
def __eq__(self, E):
if isinstance(E,self.__class__) and (self._terms[0]==E._terms[0]):
return True
if isinstance(E, SumExpression):
return E == self
return False
class NotExpression(NoncommutativeExpression, SingleTermExpression):
def __init__(self, expression):
super().__init__()
self._terms = [expression]
def __repr__(self):
return '~(%r)' % self._terms[0]
def __eq__(self, E):
if isinstance(E,self.__class__) and (self._terms[0]==E._terms[0]):
return True
return False
class AbsExpression(NoncommutativeExpression, SingleTermExpression):
def __init__(self, expression):
super().__init__()
self._terms = [expression]
def __repr__(self):
return '|%r|' % self._terms[0]
def __eq__(self, E):
return isinstance(E,self.__class__) and (self._terms[0]==E._terms[0])
class FractionExpression(NoncommutativeExpression):
def __init__(self, num, den):
super().__init__()
self._terms = [num, den]
def __repr__(self):
num, den = self._terms
num_str = '(%r)'%num if isinstance(num, SumExpression) else '%r'%num
den_str = '%r'%den if isinstance(den, SingleTermExpression) else '(%r)'%den
return '%s/%s' % (num_str, den_str)
def __eq__(self, E):
return isinstance(E, self.__class__) and (self._terms[0]==E._terms[0]) and (self._terms[1]==E._terms[1])
class SumExpression(CommutativeExpression):
def __init__(self, expressions):
super().__init__()
self._terms = list(expressions)
def __repr__(self):
string = '%r' % self._terms[0]
for t in self._terms[1:]:
string += '%r'%t if isinstance(t, NegativeExpression) else '+%r'%t
return string
def __eq__(self, E):
if super().__eq__(E):
return True
if isinstance(E, NegativeExpression):
return E == RVFuncs.negative(SumExpression([ RVFuncs.negative(e) for e in self._terms ]))
return False
class AndExpression(CommutativeExpression):
def __init__(self, comparisons):
super().__init__()
self._terms = list(comparisons)
self.name = ','.join('%s'%c.name for c in comparisons)
def __repr__(self):
return ','.join([('(%r)' % t) if isinstance(t,OrExpression) else ('%r' % t) for t in self._terms])
def __eq__(self, E):
return super().__eq__(E)
class OrExpression(CommutativeExpression):
def __init__(self, comparisons):
super().__init__()
self._terms = list(comparisons)
self.name = '||'.join('%s'%c.name for c in comparisons)
def __repr__(self):
return '||'.join([('(%r)' % t) if isinstance(t,AndExpression) else ('%r' % t) for t in self._terms])
def __eq__(self, E):
return super().__eq__(E)
class ProductExpression(CommutativeExpression):
def __init__(self, expressions):
super().__init__()
self._terms = list(expressions)
def __repr__(self):
string = '(%r)'%self._terms[0] if (isinstance(self._terms[0], SumExpression) and len(self._terms) > 1) else '%r'%self._terms[0]
for t in self._terms[1:]:
string += '*(%r)'%t if isinstance(t, SumExpression) else '*%r'%t
return string
class MaxExpression(CommutativeExpression, SingleTermExpression):
def __init__(self, expressions):
super().__init__()
self._terms = list(expressions)
def __repr__(self):
return 'MAX{%s}' % ', '.join([ '%r'%t for t in self._terms ])
class MaxRecipExpression(Expression, SingleTermExpression):
def __init__(self, expression):
super().__init__()
self._terms = [expression]
def __repr__(self):
return 'MAX{%s, %s}' % (self._terms[0], RVFuncs.fraction(RVFuncs.constant(1), self._terms[0]))
class NANMaxExpression(CommutativeExpression, SingleTermExpression):
def __init__(self, expressions):
super().__init__()
self._terms = list(expressions)
def __repr__(self):
return 'NANMAX{%s}' % ', '.join([ '%r'%t for t in self._terms ])
def safesum(a, b):
a_inf, a_nan = np.isinf(a), np.isnan(a)
b_inf, b_nan = np.isinf(b), np.isnan(b)
if (a_nan or b_nan):
return np.nan
if a_inf and b_inf and (np.sign(a) != np.sign(b)):
return np.nan
return a + b
def safeprod(a, b):
a_inf, a_nan = np.isinf(a), np.isnan(a)
b_inf, b_nan = np.isinf(b), np.isnan(b)
if (a_nan or b_nan):
return np.nan
if (a_inf and b==0) or (b_inf and a==0):
return 0.0
return a * b
def safediv(a, b):
a_inf, a_nan = np.isinf(a), np.isnan(a)
b_inf, b_nan = np.isinf(b), np.isnan(b)
if (a_nan or b_nan) or (a_inf and b_inf):
return np.nan
if (b==0):
return np.nan
return a / b
def parse_value(value):
''' Attempts to interpret <value> as a number. '''
if isinstance(value, str):
try:
value = int(value)
except ValueError:
value = float(value)
return value
class RVFuncs():
@staticmethod
def constant(value_raw):
value = parse_value(value_raw)
return ConstantExpression('c', value)
@staticmethod
def variable(name):
return VariableExpression(name)
@staticmethod
def comparator_variable(term1, comp, term2):
return ComparatorExpression(term1, comp, term2)
@staticmethod
def sample_set(variable, condition=None):
return SampleSet(variable, condition)
@staticmethod
def expected_value(sampleset, is_func=None, is_expr=None):
return ExpectedValue(sampleset, is_func=is_func, is_expr=is_expr)
@staticmethod
def negative(e):
''' Returns the negative of <e>, reducing nested negatives. '''
n_negatives = 1
while isinstance(e, NegativeExpression):
e = e._terms[0]
n_negatives += 1
if isinstance(e, ConstantExpression):
return RVFuncs.constant(-e.value if (n_negatives % 2 == 1) else e.value)
return NegativeExpression(e) if (n_negatives % 2 == 1) else e
@staticmethod
def logical_not(e):
n_nots = 1
while isinstance(e, NotExpression):
e = e._terms[0]
n_nots += 1
if (n_nots % 2 == 0):
return e
if isinstance(e, ComparatorExpression):
return ComparatorExpression(e.variable, COMPARATOR_NEGATIONS[e.comparator], e.value)
return NotExpression(e)
@staticmethod
def sum(*expressions):
''' Returns the sum of <expressions>, factoring out constants and shared factors. '''
# Aggregate terms that are sums themselves
exps = []
for e in expressions:
if isinstance(e, SumExpression):
exps.extend(e._terms)
else:
exps.append(e)
expressions = exps
# Aggregate terms that are constants
cval = 0
exps = []
for e in expressions:
if isinstance(e, ConstantExpression):
cval += e.value
elif isinstance(e, NegativeExpression) and isinstance(e._terms[0], ConstantExpression):
cval -= e._terms[0].value
else:
exps.append(e)
if cval != 0 or len(exps) == 0:
const = RVFuncs.constant(cval)
exps = [ const, *exps]
expressions = exps
if len(expressions) == 1:
return expressions[0]
# Check if all terms share a common denominator and factor it out
def split_as_fraction(e):
if isinstance(e, FractionExpression):
return [e._terms[0], e._terms[1]]
elif isinstance(e, NegativeExpression) and isinstance(e._terms[0],FractionExpression):
return [RVFuncs.negative(e._terms[0]._terms[0]), e._terms[0]._terms[1]]
return [e, None]
nums, dens = zip(*[ split_as_fraction(e) for e in exps ])
if all([ not(dens[0] is None) and d==dens[0] for d in dens ]):
exps = nums
common_den = dens[0]
else:
common_den = None
# Check if any terms have shared product factors and factor them out
def extract_unsigned_terms(e):
if isinstance(e, NegativeExpression) or isinstance(e, FractionExpression):
return extract_unsigned_terms(e._terms[0])
if isinstance(e, ProductExpression):
return e._terms
return [e]
def remove_terms(e, terms):
if isinstance(e, NegativeExpression):
return RVFuncs.negative(remove_terms(e._terms[0], terms))
if isinstance(e, FractionExpression):
return RVFuncs.fraction(remove_terms(e._terms[0], terms), e._terms[1])
if isinstance(e, ProductExpression):
remaining = e._terms.copy()
for t in terms:
remaining.remove(t)
return RVFuncs.product(*remaining) if len(remaining) > 0 else RVFuncs.constant(1)
return RVFuncs.constant(1) if len(terms) > 0 else e
has_negative = [ isinstance(e,NegativeExpression) for e in exps ]
unsigned_terms = [ extract_unsigned_terms(e) for e in exps ]
unsigned_terms_tmp = deepcopy(unsigned_terms)
shared_terms = []
for st in unsigned_terms[0]:
if isinstance(st, ConstantExpression) and (st.value == 1):
continue
if all([ (st in terms) for terms in unsigned_terms_tmp[1:] ]):
shared_terms.append(st)
for terms in unsigned_terms_tmp:
terms.remove(st)
if len(shared_terms) > 0:
remainder = RVFuncs.sum(*[ remove_terms(e, shared_terms) for e in exps ])
else:
remainder = SumExpression(exps)
# Return the product of the common factor and the remainder sum
if len(shared_terms) > 0 and common_den is None:
common_factor = RVFuncs.product(*shared_terms)
return RVFuncs.product(common_factor, remainder)
elif len(shared_terms) > 0:
common_factor = RVFuncs.fraction(RVFuncs.product(*shared_terms), common_den)
return RVFuncs.product(common_factor, remainder)
return remainder
@staticmethod
def diff(e0, e1):
return RVFuncs.sum(e0, RVFuncs.negative(e1))
@staticmethod
def max(*expressions):
if len(expressions) == 1:
return expressions[0]
exps = []
for e in expressions:
if isinstance(e, MaxExpression):
exps.extend(e._terms)
else:
exps.append(e)
if len(expressions) == 2:
e1, e2 = expressions
# If the max *happens* to be Max(E, 1/E) for some E, reduce to a MaxRecip
if e1 == RVFuncs.fraction(RVFuncs.constant(1), e2):
return MaxRecipExpression(e1)
# If the max *happens* to be Max(E, -E) for some E, reduce to Abs
elif e1 == RVFuncs.negative(e2):
return AbsExpression(e1)
return MaxExpression(exps)
def nanmax(*expressions):
if len(expressions) == 1:
return expressions[0]
exps = []
for e in expressions:
if isinstance(e, MaxExpression):
exps.extend(e._terms)
else:
exps.append(e)
return NANMaxExpression(exps)
@staticmethod
def min(*expressions):
if len(expressions) == 1:
return expressions[0]
exps = []
for e in expressions:
if isinstance(e, MaxExpression):
exps.extend(e._terms)
else:
exps.append(e)
# Convert to a negative max
exps = [ RVFuncs.negative(e) for e in exps ]
return RVFuncs.negative(RVFuncs.max(*exps))
@staticmethod
def abs(e):
if isinstance(e, NegativeExpression):
e = e._terms[0]
return AbsExpression(e)
@staticmethod
def pow(e, c):
return e # fix
@staticmethod
def logical_and(expressions):
events = []
for e in expressions:
if isinstance(e, AndExpression):
events.extend(e._terms)
else:
events.append(e)
return AndExpression(events)
@staticmethod
def logical_or(expressions):
events = []
for e in expressions:
if isinstance(e, OrExpression):
events.extend(e._terms)
else:
events.append(e)
return OrExpression(events)
@staticmethod
def product(*expressions):
# Strip negatives from input expressions
n_negatives = 0
exps = []
for e in expressions:
if isinstance(e, NegativeExpression):
exps.append(e._terms[0])
n_negatives += 1
else:
exps.append(e)
expressions = exps
# Remove and input expressions that are a constant 1
exps = []
for e in expressions:
if not(isinstance(e, ConstantExpression) and (e.value == 1)):
exps.append(e)
expressions = exps
# # If there is only one input expression remaining, just return it
# if len(expressions) == 1:
# return RVFuncs.negative(expressions[0]) if n_negatives % 2 == 1 else expressions[0]
# If any of the input expressions are a constant equal to 0, return 0
if any([ isinstance(e,ConstantExpression) and (e.value==0) for e in expressions ]):
return RVFuncs.constant(0)
# Aggregate input expressions that are products or fractions
num_exps = []
den_exps = []
for e in expressions:
if isinstance(e, ProductExpression):
num_exps.extend(e._terms)
elif isinstance(e, FractionExpression):
num_exps.append(e._terms[0])
den_exps.append(e._terms[1])
else:
num_exps.append(e)
if len(den_exps) > 0:
# We have a fraction
num = RVFuncs.product(*num_exps) if len(num_exps) > 1 else num_exps[0]
den = RVFuncs.product(*den_exps) if len(den_exps) > 1 else den_exps[0]
expr = RVFuncs.fraction(num, den)
else:
# We have a non-fraction product
# Aggregate constants
cval = 1
_exps = []
for e in num_exps:
if isinstance(e, ConstantExpression):
cval = safeprod(cval, e.value)
else:
_exps.append(e)
if len(_exps) == 0:
expr = RVFuncs.constant(cval)
elif cval != 1:
_exps.append(RVFuncs.constant(cval))
expr = ProductExpression(_exps)
elif len(_exps) > 1:
expr = ProductExpression(_exps)
else:
expr = _exps[0]
return expr if (n_negatives % 2 == 0) else RVFuncs.negative(expr)
@staticmethod
def fraction(num, den):
''' Process the numerator and denominator to produce a reduced expression of one of the following forms, in this priority:
Constant or Variable
Negative(Product(PositiveConstant, Fraction))
Product(PositiveConstant, Fraction)
Negative(Fraction).
Assumes that num and den are already processed into Negative(Product(Constant, Expression)) form. '''
# Simplify negative signs in the numerator/denominator
n_negatives = 0
if isinstance(num, NegativeExpression):
num = num._terms[0]
n_negatives += 1
if isinstance(den, NegativeExpression):
den = den._terms[0]
n_negatives += 1
# Remove any constants in front of the numerator or denominator
num_val = 1
den_val = 1
if isinstance(num, ProductExpression) and isinstance(num._terms[0], ConstantExpression):
num_val = num._terms[0].value
num = RVFuncs.product(*num._terms[1:]) if len(num._terms) > 1 else RVFuncs.constant(1)
if isinstance(den, ProductExpression) and isinstance(den._terms[0], ConstantExpression):
den_val = den._terms[0].value
den = RVFuncs.product(*den._terms[1:]) if len(den._terms) > 1 else RVFuncs.constant(1)
cval = safediv(num_val, den_val)
if cval < 0:
n_negatives += 1
cval = -cval
# Aggregate terms in the numerator/denominator if one or both are already a fraction
if isinstance(num, FractionExpression) and isinstance(den, FractionExpression):
_num = RVFuncs.product(num._terms[0], den._terms[1])
_den = RVFuncs.product(num._terms[1], den._terms[0])
num, den = _num, _den
elif isinstance(num, FractionExpression):
_num = num._terms[0]
_den = RVFuncs.product(num._terms[1], den)
num, den = _num, _den
elif isinstance(den, FractionExpression):
_num = RVFuncs.product(den._terms[1], num)
_den = den._terms[0]
num, den = _num, _den
# Remove terms in products that are present in both the numerator and denominator
expr = None
if num == den:
expr = RVFuncs.constant(1)
elif isinstance(den, ConstantExpression) and den.value == 1:
expr = num
elif isinstance(num, ProductExpression) and isinstance(den, ProductExpression):
nterms, dterms = copy(num._terms), copy(den._terms)
for term in nterms:
if term in den._terms:
num._terms.remove(term)
den._terms.remove(term)
num = RVFuncs.constant(1) if len(num._terms) == 0 else RVFuncs.product(*num._terms)
den = RVFuncs.constant(1) if len(den._terms) == 0 else RVFuncs.product(*den._terms)
if isinstance(num, ConstantExpression) and isinstance(den, ConstantExpression):
expr = RVFuncs.constant(safediv(num.value, den.value))
elif isinstance(num, ProductExpression) and isinstance(den, SingleTermExpression):
if den in num._terms:
num._terms.remove(den)
expr = RVFuncs.product(*num._terms)
elif isinstance(den, ProductExpression) and isinstance(num, SingleTermExpression):
if num in den._terms:
den._terms.remove(num)
den = RVFuncs.product(*den._terms)
if isinstance(den, ConstantExpression):
print(safediv(1,den.value), RVFuncs.constant(safediv(1,den.value)).value)
expr = RVFuncs.constant(safediv(1,den.value))
else:
expr = FractionExpression(RVFuncs.constant(1), RVFuncs.product(*den._terms))
if expr is None:
expr = FractionExpression(num, den)
# Add a constant scaling factor if it is not 1
if cval != 1:
constant = RVFuncs.constant(cval)
expr = RVFuncs.product(constant, expr)
return RVFuncs.negative(expr) if n_negatives % 2 == 1 else expr
| 0
| 11,095
| 0
| 7,220
| 0
| 805
| 0
| 34
| 712
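parse_value above tries int() first and falls back to float(); a string that is neither (e.g. 'abc') lets the second ValueError propagate. Quick checks of that contract:
assert parse_value('3') == 3 and isinstance(parse_value('3'), int)
assert parse_value('3.5') == 3.5
assert parse_value(7) == 7          # non-strings pass through unchanged
# parse_value('abc') would raise ValueError from the float() fallback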
|
aa30c170932818c11a70361a198633d3d3e3f96b
| 1,659
|
py
|
Python
|
docs/conf.py
|
Qiskit/qiskit-aqt-provider
|
276b10bde45027e0d33c80a6942887f7de0204da
|
[
"Apache-2.0"
] | 5
|
2019-10-21T02:57:22.000Z
|
2020-04-09T00:03:42.000Z
|
docs/conf.py
|
Qiskit/qiskit-aqt-provider
|
276b10bde45027e0d33c80a6942887f7de0204da
|
[
"Apache-2.0"
] | 8
|
2019-09-25T19:48:34.000Z
|
2020-02-27T16:30:41.000Z
|
docs/conf.py
|
Qiskit/qiskit-aqt-provider
|
276b10bde45027e0d33c80a6942887f7de0204da
|
[
"Apache-2.0"
] | 10
|
2019-09-25T18:47:44.000Z
|
2020-06-05T17:45:35.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Sphinx documentation builder
"""
project = 'Qiskit AQT Provider'
copyright = '2021, Qiskit and AQT development teams' # pylint: disable=redefined-builtin
author = 'Qiskit and AQT development teams'
# The short X.Y version
version = '0.5.0'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'jupyter_sphinx',
]
templates_path = ["_templates"]
html_static_path = ['_static']
html_css_files = []
autosummary_generate = True
autosummary_generate_overwrite = False
autoclass_content = "both"
numfig = True
numfig_format = {
'table': 'Table %s'
}
language = None
exclude_patterns = ['_build', '**.ipynb_checkpoints']
pygments_style = 'colorful'
add_module_names = False
modindex_common_prefix = ['qiskit_aqt.']
html_theme = 'qiskit_sphinx_theme'
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
}
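# A minimal sketch of how this configuration gets consumed: Sphinx imports
# conf.py from the source directory at build time. This does not belong in
# conf.py itself and the "docs" paths are assumptions, not the repository
# layout; it is shown only as a usage illustration.
from sphinx.cmd.build import main as sphinx_build

# Equivalent to: sphinx-build -b html docs docs/_build/html
exit_code = sphinx_build(["-b", "html", "docs", "docs/_build/html"])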
| 24.397059
| 89
| 0.724533
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Sphinx documentation builder
"""
project = 'Qiskit AQT Provider'
copyright = '2021, Qiskit and AQT development teams' # pylint: disable=redefined-builtin
author = 'Qiskit and AQT development teams'
# The short X.Y version
version = '0.5.0'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
extensions = [
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
'jupyter_sphinx',
]
templates_path = ["_templates"]
html_static_path = ['_static']
html_css_files = []
autosummary_generate = True
autosummary_generate_overwrite = False
autoclass_content = "both"
numfig = True
numfig_format = {
'table': 'Table %s'
}
language = None
exclude_patterns = ['_build', '**.ipynb_checkpoints']
pygments_style = 'colorful'
add_module_names = False
modindex_common_prefix = ['qiskit_aqt.']
html_theme = 'qiskit_sphinx_theme'
html_last_updated_fmt = '%Y/%m/%d'
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': True,
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4af823cb5f863d54376d0a7984be2ad7dd1c341a
| 448
|
py
|
Python
|
backend/api/ImageSearch/migrations/0009_imagemetadata_image_fpath.py
|
eth-library-lab/open-image-search
|
7be76cd4b7730dd76623e15f034f1c337ab99f84
|
[
"MIT"
] | 5
|
2021-06-14T10:49:52.000Z
|
2022-02-16T15:56:49.000Z
|
backend/api/ImageSearch/migrations/0009_imagemetadata_image_fpath.py
|
eth-library-lab/open-image-search
|
7be76cd4b7730dd76623e15f034f1c337ab99f84
|
[
"MIT"
] | 11
|
2021-06-11T16:12:49.000Z
|
2021-12-03T16:41:13.000Z
|
backend/api/ImageSearch/migrations/0009_imagemetadata_image_fpath.py
|
eth-library-lab/open-image-search
|
7be76cd4b7730dd76623e15f034f1c337ab99f84
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.2 on 2021-09-20 13:36
| 23.578947
| 98
| 0.629464
|
# Generated by Django 3.1.2 on 2021-09-20 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ImageSearch', '0008_auto_20210916_0909'),
]
operations = [
migrations.AddField(
model_name='imagemetadata',
name='image_fpath',
field=models.CharField(max_length=300, null=True, verbose_name='local path to image'),
),
]
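# A minimal sketch of the model state this AddField operation produces. The
# field definition is taken from the migration above; the rest of the class
# is an assumption for illustration, not the repository's actual models.py.
from django.db import models

class ImageMetadata(models.Model):
    # Added by migration 0009; fields from migrations 0001-0008 omitted.
    image_fpath = models.CharField(max_length=300, null=True,
                                   verbose_name='local path to image')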
| 0
| 0
| 0
| 334
| 0
| 0
| 0
| 19
| 46
|
244d8e1d804d4ab001743ae24cbdc613ee9ca42f
| 9,566
|
py
|
Python
|
test_interest.py
|
castacks/interestingness
|
b614818ab11dcc15c5fe6b55fe993882add3e8e6
|
[
"BSD-3-Clause"
] | 1
|
2021-07-20T14:58:36.000Z
|
2021-07-20T14:58:36.000Z
|
test_interest.py
|
castacks/interestingness
|
b614818ab11dcc15c5fe6b55fe993882add3e8e6
|
[
"BSD-3-Clause"
] | null | null | null |
test_interest.py
|
castacks/interestingness
|
b614818ab11dcc15c5fe6b55fe993882add3e8e6
|
[
"BSD-3-Clause"
] | 1
|
2021-04-17T08:25:05.000Z
|
2021-04-17T08:25:05.000Z
|
#!/usr/bin/env python3
# Copyright <2019> <Chen Wang [https://chenwang.site], Carnegie Mellon University>
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
import time
import torch
import os.path
import argparse
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
import torchvision.transforms as transforms
from dataset import DroneFilming, SubTF, PersonalVideo
from torchutil import count_parameters, Timer, MovAvg
from torchutil import ConvLoss, CorrelationLoss, FiveSplit2d
if __name__ == "__main__":
    # Arguments
parser = argparse.ArgumentParser(description='Test Interestingness Networks')
parser.add_argument("--data-root", type=str, default='/data/datasets', help="dataset root folder")
parser.add_argument("--model-save", type=str, default='saves/ae.pt.SubTF.n1000.mse', help="read model")
parser.add_argument("--test-data", type=int, default=2, help='test data ID.')
parser.add_argument("--seed", type=int, default=0, help='Random seed.')
parser.add_argument("--crop-size", type=int, default=320, help='crop size')
parser.add_argument("--num-interest", type=int, default=10, help='loss compute by grid')
parser.add_argument("--skip-frames", type=int, default=1, help='number of skip frame')
parser.add_argument("--window-size", type=int, default=1, help='smooth window size >=1')
    parser.add_argument('--dataset', type=str, default='SubTF', help='dataset type (SubTF, DroneFilming, PersonalVideo)')
parser.add_argument('--save-flag', type=str, default='n1000', help='save name flag')
parser.add_argument("--rr", type=float, default=5, help="reading rate")
parser.add_argument("--wr", type=float, default=5, help="writing rate")
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--drawbox', dest='drawbox', action='store_true')
parser.set_defaults(debug=False)
parser.set_defaults(drawbox=False)
    args = parser.parse_args()
    print(args)
torch.manual_seed(args.seed)
os.makedirs('results', exist_ok=True)
if args.debug is True and not os.path.exists('images/%s-%d'%(args.dataset,args.test_data)):
os.makedirs('images/%s-%d'%(args.dataset,args.test_data))
transform = transforms.Compose([
# transforms.CenterCrop(args.crop_size),
transforms.Resize((args.crop_size,args.crop_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
timer = Timer()
test_name = '%s-%d-%s-%s'%(args.dataset, args.test_data, time.strftime('%Y-%m-%d-%H:%M:%S'), args.save_flag)
if args.dataset == 'DroneFilming':
test_data = DroneFilming(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
elif args.dataset == 'SubTF':
test_data = SubTF(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
elif args.dataset == 'PersonalVideo':
test_data = PersonalVideo(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
test_loader = Data.DataLoader(dataset=test_data, batch_size=1, shuffle=False)
net = torch.load(args.model_save)
net.set_train(False)
net.memory.set_learning_rate(rr=args.rr, wr=args.wr)
interest = Interest(args.num_interest, 'results/%s.txt'%(test_name))
movavg = MovAvg(args.window_size)
if torch.cuda.is_available():
net = net.cuda()
drawbox = ConvLoss(input_size=args.crop_size, kernel_size=args.crop_size//2, stride=args.crop_size//4)
criterion = CorrelationLoss(args.crop_size//2, reduce=False, accept_translation=False)
fivecrop = FiveSplit2d(args.crop_size//2)
print('number of parameters:', count_parameters(net))
val_loss = performance(test_loader, net)
print('Done.')
| 44.493023
| 137
| 0.681685
|
#!/usr/bin/env python3
# Copyright <2019> <Chen Wang [https://chenwang.site], Carnegie Mellon University>
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific prior
# written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import os
import cv2
import copy
import time
import math
import torch
import os.path
import argparse
import torchvision
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision import models
import torch.utils.data as Data
from torch.autograd import Variable
from torch.nn import functional as F
from torchvision.models.vgg import VGG
import torchvision.transforms as transforms
from torchvision.datasets import CocoDetection
from torch.optim.lr_scheduler import ReduceLROnPlateau
from interestingness import AE, VAE, AutoEncoder, Interestingness
from dataset import ImageData, Dronefilm, DroneFilming, SubT, SubTF, PersonalVideo
from torchutil import count_parameters, show_batch, show_batch_origin, Timer, MovAvg
from torchutil import ConvLoss, CosineLoss, CorrelationLoss, Split2d, Merge2d, PearsonLoss, FiveSplit2d
class Interest():
'''
Maintain top K interests
'''
def __init__(self, K, filename):
self.K = K
self.interests = []
self.filename = filename
f = open(self.filename, 'w')
f.close()
def add_interest(self, tensor, loss, batch_idx, visualize_window=None):
f = open(self.filename, 'a+')
f.write("%d %f\n" % (batch_idx, loss))
f.close()
self.interests.append((loss, tensor, batch_idx))
self.interests.sort(key=self._sort_loss, reverse=True)
self._maintain()
interests = np.concatenate([self.interests[i][1] for i in range(len(self.interests))], axis=1)
if visualize_window is not None:
cv2.imshow(visualize_window, interests)
return interests
def _sort_loss(self, val):
return val[0]
def _maintain(self):
if len(self.interests) > self.K:
self.interests = self.interests[:self.K]
def performance(loader, net):
test_loss, time_use = 0, 0
with torch.no_grad():
for batch_idx, inputs in enumerate(loader):
if batch_idx % args.skip_frames !=0:
continue
if torch.cuda.is_available():
inputs = inputs.cuda()
timer.tic()
inputs = Variable(inputs)
outputs, loss = net(inputs)
loss = movavg.append(loss)
time_use += timer.end()
if args.drawbox is True:
drawbox(inputs, outputs)
test_loss += loss.item()
frame = show_batch_box(inputs, batch_idx, loss.item())
top_interests = interest.add_interest(frame, loss, batch_idx, visualize_window='Top Interests')
if args.debug is True:
image = show_batch(torch.cat([outputs], dim=0), 'reconstruction')
recon = show_batch(torch.cat([(inputs-outputs).abs()], dim=0), 'difference')
cv2.imwrite('images/%s-%d/%s-interestingness-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), frame*255)
cv2.imwrite('images/%s-%d/%s-reconstruction-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), image*255)
cv2.imwrite('images/%s-%d/%s-difference-%06d.png'%(args.dataset,args.test_data,args.save_flag,batch_idx), recon*255)
print('batch_idx:', batch_idx, 'loss:%.6f'%(loss.item()))
print("Total time using: %.2f seconds, %.2f ms/frame"%(time_use, 1000*time_use/(batch_idx+1)))
cv2.imwrite('results/%s.png'%(test_name), 255*top_interests)
return test_loss/(batch_idx+1)
def level_height(bar, ranges=[0.02, 0.08]):
h = min(max(0,(bar-ranges[0])/(ranges[1]-ranges[0])),1)
return (np.tanh(np.tan(math.pi/2*(2*h-1))-0.8)+1)/2
def boxbar(height, bar, ranges=[0.02, 0.08], threshold=[0.05, 0.06]):
width = 15
box = np.zeros((height,width,3), np.uint8)
h = level_height(bar, ranges)
x1, y1 = 0, int((1-h)*height)
x2, y2 = int(width), int(height)
cv2.rectangle(box,(x1,y1),(x2,y2),(0,1,0),-1)
for i in threshold:
x1, y1 = 0, int((1.0-i/ranges[1])*height)
x2, y2 = width, int((1.0-i/ranges[1])*height)
cv2.line(box,(x1, y1), (x2, y2), (1,0,0), 3)
return box
def show_batch_box(batch, batch_idx, loss, box_id=None, show_now=True):
min_v = torch.min(batch)
range_v = torch.max(batch) - min_v
if range_v > 0:
batch = (batch - min_v) / range_v
else:
batch = torch.zeros(batch.size())
grid = torchvision.utils.make_grid(batch).cpu()
img = grid.numpy()[::-1].transpose((1, 2, 0))
box = boxbar(grid.size(-2), loss, threshold=[])
frame = np.hstack([img, box])
if show_now:
cv2.imshow('interestingness', frame)
cv2.waitKey(1)
return frame
if __name__ == "__main__":
    # Arguments
parser = argparse.ArgumentParser(description='Test Interestingness Networks')
parser.add_argument("--data-root", type=str, default='/data/datasets', help="dataset root folder")
parser.add_argument("--model-save", type=str, default='saves/ae.pt.SubTF.n1000.mse', help="read model")
parser.add_argument("--test-data", type=int, default=2, help='test data ID.')
parser.add_argument("--seed", type=int, default=0, help='Random seed.')
parser.add_argument("--crop-size", type=int, default=320, help='crop size')
parser.add_argument("--num-interest", type=int, default=10, help='loss compute by grid')
parser.add_argument("--skip-frames", type=int, default=1, help='number of skip frame')
parser.add_argument("--window-size", type=int, default=1, help='smooth window size >=1')
    parser.add_argument('--dataset', type=str, default='SubTF', help='dataset type (SubTF, DroneFilming, PersonalVideo)')
parser.add_argument('--save-flag', type=str, default='n1000', help='save name flag')
parser.add_argument("--rr", type=float, default=5, help="reading rate")
parser.add_argument("--wr", type=float, default=5, help="writing rate")
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--drawbox', dest='drawbox', action='store_true')
parser.set_defaults(debug=False)
parser.set_defaults(drawbox=False)
    args = parser.parse_args()
    print(args)
torch.manual_seed(args.seed)
os.makedirs('results', exist_ok=True)
if args.debug is True and not os.path.exists('images/%s-%d'%(args.dataset,args.test_data)):
os.makedirs('images/%s-%d'%(args.dataset,args.test_data))
transform = transforms.Compose([
# transforms.CenterCrop(args.crop_size),
transforms.Resize((args.crop_size,args.crop_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
timer = Timer()
test_name = '%s-%d-%s-%s'%(args.dataset, args.test_data, time.strftime('%Y-%m-%d-%H:%M:%S'), args.save_flag)
if args.dataset == 'DroneFilming':
test_data = DroneFilming(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
elif args.dataset == 'SubTF':
test_data = SubTF(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
elif args.dataset == 'PersonalVideo':
test_data = PersonalVideo(root=args.data_root, train=False, test_data=args.test_data, transform=transform)
test_loader = Data.DataLoader(dataset=test_data, batch_size=1, shuffle=False)
net = torch.load(args.model_save)
net.set_train(False)
net.memory.set_learning_rate(rr=args.rr, wr=args.wr)
interest = Interest(args.num_interest, 'results/%s.txt'%(test_name))
movavg = MovAvg(args.window_size)
if torch.cuda.is_available():
net = net.cuda()
drawbox = ConvLoss(input_size=args.crop_size, kernel_size=args.crop_size//2, stride=args.crop_size//4)
criterion = CorrelationLoss(args.crop_size//2, reduce=False, accept_translation=False)
fivecrop = FiveSplit2d(args.crop_size//2)
print('number of parameters:', count_parameters(net))
val_loss = performance(test_loader, net)
print('Done.')
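# A minimal standalone usage sketch of the Interest class defined above: it
# logs every (batch_idx, loss) pair and keeps only the K highest-loss entries.
# The dummy arrays stand in for rendered frames; shapes and values are
# illustrative, not taken from the datasets used by this script.
import os
import numpy as np

os.makedirs('results', exist_ok=True)        # Interest opens results/<name>.txt
demo = Interest(3, 'results/demo.txt')
for idx in range(5):
    frame = np.random.rand(64, 64, 3)        # stand-in for a frame image
    demo.add_interest(frame, float(np.random.rand()), idx)
# demo.interests now holds the 3 entries with the largest losses.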
| 0
| 0
| 0
| 933
| 0
| 2,747
| 0
| 222
| 380
|
9a70fd2b8f51d98c7016e821a8b8f28b97a4e005
| 108,582
|
py
|
Python
|
src/eclipse_jdt/final_results/preliminary_experiment/plot_results.py
|
Ericsson/oss-automatic-bug-assignment
|
f4965babd0491118713d7b19bd7ddd30fa39254f
|
[
"MIT"
] | 3
|
2018-09-25T02:29:54.000Z
|
2020-02-12T12:35:55.000Z
|
src/eclipse_jdt/final_results/preliminary_experiment/plot_results.py
|
dbreddyAI/oss-automatic-bug-assignment
|
f4965babd0491118713d7b19bd7ddd30fa39254f
|
[
"MIT"
] | null | null | null |
src/eclipse_jdt/final_results/preliminary_experiment/plot_results.py
|
dbreddyAI/oss-automatic-bug-assignment
|
f4965babd0491118713d7b19bd7ddd30fa39254f
|
[
"MIT"
] | 3
|
2017-10-26T13:50:21.000Z
|
2019-12-17T03:40:11.000Z
|
import matplotlib.pyplot as plt
def plot_learning_curve(title, computed_score, train_sizes, \
train_scores_mean, train_scores_std, test_scores_mean, \
test_scores_std):
"""Generate a plot of the test and training learning curves.
Parameters
----------
title: string
Contains the title of the chart.
computed_score: string
Contains the name of the computed score.
train_sizes: a one dimension numpy.ndarray
An array containing the various sizes of the training set for
which the scores have been computed.
train_scores_mean: a one dimension numpy.ndarray
An array containing the various means of the scores related
to each element in train_sizes. These scores should have been
computed on the training set.
train_scores_std: a one dimension numpy.ndarray
An array containing the various standard deviations of the
scores related to each element in train_sizes. These scores
should have been computed on the training set.
test_scores_mean: a one dimension numpy.ndarray
An array containing the various means of the scores related
to each element in train_sizes. These scores should have been
computed on the test set.
test_scores_std: a one dimension numpy.ndarray
An array containing the various standard deviations of the
scores related to each element in train_sizes. These scores
should have been computed on the test set.
    Note: the axis limits are fixed inside the function to x in [0, 25000]
    and y in [0, 1]; there is no ylim parameter.
"""
fig = plt.figure(figsize=(20.0, 12.5))
plt.title(title, size=31)
plt.xlim(xmin=0, xmax=25000)
plt.ylim(ymin=0.0, ymax=1.0)
plt.xlabel("Training examples", size=28)
plt.ylabel(computed_score.capitalize(), size=28)
plt.grid(linewidth=3)
plt.fill_between(train_sizes, train_scores_mean - \
train_scores_std, train_scores_mean + train_scores_std, \
alpha=0.3, color="r")
plt.fill_between(train_sizes, test_scores_mean - \
test_scores_std, test_scores_mean + test_scores_std, \
alpha=0.3, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", \
label="Training {}".format(computed_score), \
linewidth=5.0, markersize=13.0)
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", \
label="Test {}".format(computed_score), \
linewidth=5.0, markersize=13.0)
plt.legend(loc="best", prop={'size': 26})
plt.tick_params(axis='both', which='major', labelsize=22)
return fig
if __name__ == "__main__":
main()
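# A minimal synthetic usage of plot_learning_curve, matching the shapes the
# docstring describes; the array values below are made up for illustration.
import numpy as np

sizes      = np.array([5000, 10000, 15000, 20000, 25000])
train_mean = np.array([0.92, 0.88, 0.86, 0.84, 0.83])
train_std  = np.array([0.010, 0.009, 0.008, 0.006, 0.005])
test_mean  = np.array([0.15, 0.19, 0.21, 0.22, 0.23])
test_std   = np.array([0.05, 0.05, 0.04, 0.04, 0.03])

fig = plot_learning_curve("Learning curves (synthetic)", "accuracy",
                          sizes, train_mean, train_std, test_mean, test_std)
fig.savefig("learning_curve_demo.png")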
| 236.562092
| 26,139
| 0.795611
|
from numpy import array
import os
import inspect
import matplotlib.pyplot as plt
def plot_learning_curve(title, computed_score, train_sizes, \
train_scores_mean, train_scores_std, test_scores_mean, \
test_scores_std):
"""Generate a plot of the test and training learning curves.
Parameters
----------
title: string
Contains the title of the chart.
computed_score: string
Contains the name of the computed score.
train_sizes: a one dimension numpy.ndarray
An array containing the various sizes of the training set for
which the scores have been computed.
train_scores_mean: a one dimension numpy.ndarray
An array containing the various means of the scores related
to each element in train_sizes. These scores should have been
computed on the training set.
train_scores_std: a one dimension numpy.ndarray
An array containing the various standard deviations of the
scores related to each element in train_sizes. These scores
should have been computed on the training set.
test_scores_mean: a one dimension numpy.ndarray
An array containing the various means of the scores related
to each element in train_sizes. These scores should have been
computed on the test set.
test_scores_std: a one dimension numpy.ndarray
An array containing the various standard deviations of the
scores related to each element in train_sizes. These scores
should have been computed on the test set.
    Note: the axis limits are fixed inside the function to x in [0, 25000]
    and y in [0, 1]; there is no ylim parameter.
"""
fig = plt.figure(figsize=(20.0, 12.5))
plt.title(title, size=31)
plt.xlim(xmin=0, xmax=25000)
plt.ylim(ymin=0.0, ymax=1.0)
plt.xlabel("Training examples", size=28)
plt.ylabel(computed_score.capitalize(), size=28)
plt.grid(linewidth=3)
plt.fill_between(train_sizes, train_scores_mean - \
train_scores_std, train_scores_mean + train_scores_std, \
alpha=0.3, color="r")
plt.fill_between(train_sizes, test_scores_mean - \
test_scores_std, test_scores_mean + test_scores_std, \
alpha=0.3, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", \
label="Training {}".format(computed_score), \
linewidth=5.0, markersize=13.0)
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", \
label="Test {}".format(computed_score), \
linewidth=5.0, markersize=13.0)
plt.legend(loc="best", prop={'size': 26})
plt.tick_params(axis='both', which='major', labelsize=22)
return fig
def main():
current_dir = os.path.dirname(os.path.abspath( \
inspect.getfile(inspect.currentframe())))
incremental_train_accuracy_per_size_4_folds = {18336: [0.82422556719022688], 12224: [0.8534031413612565, 0.84031413612565442], 6112: [0.90068717277486909, 0.88890706806282727, 0.88219895287958117]}
incremental_test_accuracy_per_size_4_folds = {18336: [0.094210009813542689], 12224: [0.098135426889106966, 0.224967277486911], 6112: [0.10091593065096501, 0.23707460732984292, 0.24803664921465968]}
incremental_train_sizes_4_folds = array([ 6112, 12224, 18336])
incremental_train_scores_mean_4_folds = array([ 0.89059773, 0.84685864, 0.82422557])
incremental_train_scores_std_4_folds = array([ 0.00764187, 0.0065445, 0. ])
incremental_test_scores_mean_4_folds = array([ 0.1953424, 0.16155135, 0.09421001])
incremental_test_scores_std_4_folds = array([ 0.0669194, 0.06341593, 0. ])
incremental_train_accuracy_per_size_6_folds = {20375: [0.82012269938650306], 16300: [0.83558282208588952, 0.82993865030674852], 12225: [0.85824130879345606, 0.84932515337423309, 0.84040899795501023], 8150: [0.88981595092024535, 0.88110429447852756, 0.86932515337423311, 0.86687116564417177], 4075: [0.92122699386503071, 0.91533742331288348, 0.90625766871165647, 0.90134969325153369, 0.89447852760736202]}
incremental_test_accuracy_per_size_6_folds = {20375: [0.054969325153374236], 16300: [0.056441717791411043, 0.21398773006134969], 12225: [0.05865030674846626, 0.22331288343558281, 0.24957055214723928], 8150: [0.063803680981595098, 0.23018404907975459, 0.26085889570552145, 0.27558282208588958], 4075: [0.075828220858895706, 0.22012269938650306, 0.25840490797546012, 0.2723926380368098, 0.23484662576687115]}
incremental_train_sizes_6_folds = array([ 4075, 8150, 12225, 16300, 20375])
incremental_train_scores_mean_6_folds = array([ 0.90773006, 0.87677914, 0.84932515, 0.83276074, 0.8201227 ])
incremental_train_scores_std_6_folds = array([ 0.00957621, 0.00925196, 0.00728001, 0.00282209, 0. ])
incremental_test_scores_mean_6_folds = array([ 0.21231902, 0.20760736, 0.17717791, 0.13521472, 0.05496933])
incremental_test_scores_std_6_folds = array([ 0.07061286, 0.08462505, 0.08449442, 0.07877301, 0. ])
incremental_train_accuracy_per_size_8_folds = {21392: [0.81740837696335078], 18336: [0.82869764397905754, 0.82422556719022688], 15280: [0.84332460732984293, 0.83776178010471203, 0.83082460732984298], 12224: [0.86338350785340312, 0.8534031413612565, 0.84710405759162299, 0.84031413612565442], 9168: [0.88568935427574169, 0.87565445026178013, 0.87041884816753923, 0.85907504363001741, 0.8586387434554974], 6112: [0.90085078534031415, 0.90068717277486909, 0.89725130890052351, 0.88890706806282727, 0.88334424083769636, 0.88219895287958117], 3056: [0.91819371727748689, 0.9240837696335078, 0.92702879581151831, 0.91852094240837701, 0.91819371727748689, 0.92833769633507857, 0.90150523560209428]}
incremental_test_accuracy_per_size_8_folds = {21392: [0.039241334205362979], 18336: [0.039568345323741004, 0.17604712041884818], 15280: [0.043819489862655332, 0.17702879581151831, 0.2486910994764398], 12224: [0.048724656638325703, 0.18160994764397906, 0.25327225130890052, 0.26897905759162305], 9168: [0.055591890124264222, 0.18422774869109948, 0.26734293193717279, 0.27945026178010474, 0.30268324607329844], 6112: [0.072596468279921514, 0.18259162303664922, 0.27192408376963351, 0.28435863874345552, 0.29286649214659688, 0.26145287958115182], 3056: [0.11118378024852844, 0.1806282722513089, 0.2581806282722513, 0.27290575916230364, 0.27748691099476441, 0.26897905759162305, 0.25785340314136124]}
incremental_train_sizes_8_folds = array([ 3056, 6112, 9168, 12224, 15280, 18336, 21392])
incremental_train_scores_mean_8_folds = array([ 0.91940912, 0.89220659, 0.86989529, 0.85105121, 0.83730366, 0.82646161,
0.81740838])
incremental_train_scores_std_8_folds = array([ 0.00831456, 0.00776394, 0.01026335, 0.00849238, 0.00511337, 0.00223604,
0. ])
incremental_test_scores_mean_8_folds = array([ 0.23245969, 0.2276317, 0.21785922, 0.18814648, 0.15651313, 0.10780773,
0.03924133])
incremental_test_scores_std_8_folds = array([ 0.05818413, 0.07814916, 0.09044222, 0.0869719, 0.08488723, 0.06823939,
0. ])
incremental_train_accuracy_per_size_10_folds = {22005: [0.81799591002044991], 19560: [0.82617586912065444, 0.82249488752556232], 17115: [0.83651767455448434, 0.83102541630148996, 0.82810400233713122], 14670: [0.84935241990456711, 0.84226312201772324, 0.83974096796182685, 0.83278800272665299], 12225: [0.86633946830265851, 0.85758691206543969, 0.85251533742331287, 0.84621676891615538, 0.84040899795501023], 9780: [0.88548057259713697, 0.87525562372188137, 0.87300613496932511, 0.86124744376278117, 0.85429447852760731, 0.8535787321063395], 7335: [0.89788684389911388, 0.89570552147239269, 0.88943421949556922, 0.88657123381049763, 0.87607361963190189, 0.87075664621676896, 0.87457396046353097], 4890: [0.91942740286298563, 0.90817995910020455, 0.9122699386503067, 0.90817995910020455, 0.90122699386503069, 0.88568507157464216, 0.90081799591002043, 0.88670756646216764], 2445: [0.92842535787321068, 0.93006134969325149, 0.93660531697341509, 0.93047034764826175, 0.93987730061349695, 0.91574642126789363, 0.91983640081799589, 0.9235173824130879, 0.91002044989775055]}
incremental_test_accuracy_per_size_10_folds = {22005: [0.11820040899795502], 19560: [0.11738241308793455, 0.095296523517382409], 17115: [0.12106339468302658, 0.10061349693251534, 0.25439672801635993], 14670: [0.12269938650306748, 0.10224948875255624, 0.2593047034764826, 0.23640081799591003], 12225: [0.12719836400817996, 0.11615541922290389, 0.25807770961145193, 0.23885480572597137, 0.29284253578732106], 9780: [0.14069529652351739, 0.11942740286298568, 0.26625766871165646, 0.24498977505112474, 0.30224948875255625, 0.31533742331288345], 7335: [0.16196319018404909, 0.12392638036809817, 0.2593047034764826, 0.2560327198364008, 0.31083844580777098, 0.31124744376278118, 0.2523517382413088], 4890: [0.17995910020449898, 0.1329243353783231, 0.2523517382413088, 0.26339468302658486, 0.29734151329243352, 0.29979550102249491, 0.26134969325153373, 0.26871165644171779], 2445: [0.19386503067484662, 0.13006134969325153, 0.2392638036809816, 0.24498977505112474, 0.29202453987730059, 0.27034764826175867, 0.24130879345603273, 0.24989775051124744, 0.25071574642126787]}
incremental_train_sizes_10_folds = array([ 2445, 4890, 7335, 9780, 12225, 14670, 17115, 19560, 22005])
incremental_train_scores_mean_10_folds = array([ 0.92606226, 0.90281186, 0.88442886, 0.86714383, 0.8526135, 0.84103613,
0.83188236, 0.82433538, 0.81799591])
incremental_train_scores_std_10_folds = array([ 0.00914095, 0.0110811, 0.00994113, 0.01169251, 0.00897791, 0.005924,
0.00348791, 0.00184049, 0. ])
incremental_test_scores_mean_10_folds = array([ 0.23471938, 0.24447853, 0.23938066, 0.23149284, 0.20662577, 0.1801636,
0.15869121, 0.10633947, 0.11820041])
incremental_test_scores_std_10_folds = array([ 0.04451149, 0.05448996, 0.06594014, 0.07553149, 0.07157226, 0.06855415,
0.06818705, 0.01104294, 0. ])
incremental_train_accuracy_per_size_15_folds = {22820: [0.81849255039439084], 21190: [0.82298253893345918, 0.81727229825389336], 19560: [0.83052147239263807, 0.82264826175869121, 0.82249488752556232], 17930: [0.83736754043502515, 0.830117122141662, 0.82861126603457891, 0.82381483547127721], 16300: [0.84699386503067486, 0.83619631901840485, 0.83668711656441719, 0.83073619631901841, 0.82993865030674852], 14670: [0.85405589638718471, 0.8477164280845263, 0.84226312201772324, 0.83871847307430125, 0.83653715064758005, 0.83278800272665299], 13040: [0.86618098159509205, 0.85552147239263798, 0.85421779141104293, 0.84739263803680986, 0.84631901840490797, 0.84072085889570547, 0.8377300613496933], 11410: [0.87765118317265556, 0.86958808063102544, 0.86126205083260299, 0.86038562664329532, 0.85723049956178787, 0.85021910604732687, 0.84469763365468886, 0.84522348816827342], 9780: [0.88588957055214723, 0.87924335378323104, 0.87525562372188137, 0.87044989775051129, 0.87157464212678937, 0.86124744376278117, 0.8563394683026585, 0.85685071574642124, 0.8535787321063395], 8150: [0.89447852760736202, 0.89042944785276079, 0.88858895705521468, 0.88638036809815945, 0.88110429447852756, 0.87754601226993867, 0.87067484662576689, 0.86907975460122699, 0.86453987730061355, 0.86687116564417177], 6520: [0.90567484662576692, 0.90000000000000002, 0.89892638036809813, 0.89984662576687113, 0.89309815950920246, 0.89401840490797546, 0.88696319018404912, 0.88312883435582823, 0.87423312883435578, 0.87944785276073623, 0.87898773006134967], 4890: [0.91390593047034763, 0.91451942740286296, 0.90817995910020455, 0.91267893660531696, 0.90899795501022496, 0.90817995910020455, 0.90061349693251536, 0.9002044989775051, 0.88568507157464216, 0.89836400817995909, 0.90040899795501017, 0.88670756646216764], 3260: [0.91932515337423315, 0.92638036809815949, 0.92699386503067482, 0.92791411042944782, 0.91748466257668715, 0.92392638036809815, 0.93006134969325149, 0.91625766871165648, 0.90490797546012269, 0.90674846625766869, 0.91901840490797548, 0.90889570552147236, 0.89785276073619635], 1630: [0.93496932515337428, 0.94049079754601228, 0.94601226993865029, 0.94969325153374229, 0.93619631901840494, 0.92883435582822083, 0.93803680981595094, 0.94907975460122695, 0.91717791411042948, 0.92699386503067482, 0.9319018404907975, 0.94723926380368095, 0.91533742331288348, 0.92638036809815949]}
incremental_test_accuracy_per_size_15_folds = {22820: [0.15276073619631902], 21190: [0.150920245398773, 0.060122699386503067], 19560: [0.15337423312883436, 0.059509202453987733, 0.12331288343558282], 17930: [0.15214723926380369, 0.058282208588957052, 0.13190184049079753, 0.2607361963190184], 16300: [0.15398773006134969, 0.061963190184049083, 0.13190184049079753, 0.27116564417177913, 0.25398773006134967], 14670: [0.15460122699386503, 0.066257668711656448, 0.13374233128834356, 0.27116564417177913, 0.25398773006134967, 0.2411042944785276], 13040: [0.15766871165644172, 0.068711656441717797, 0.14294478527607363, 0.27300613496932513, 0.25398773006134967, 0.24049079754601227, 0.29877300613496932], 11410: [0.16809815950920245, 0.066871165644171782, 0.15337423312883436, 0.26625766871165646, 0.26380368098159507, 0.2460122699386503, 0.30858895705521472, 0.33128834355828218], 9780: [0.17668711656441718, 0.073619631901840496, 0.15398773006134969, 0.27484662576687119, 0.26503067484662579, 0.25644171779141106, 0.30613496932515338, 0.32822085889570551, 0.31411042944785278], 8150: [0.18466257668711655, 0.078527607361963195, 0.16073619631901839, 0.26687116564417179, 0.27423312883435585, 0.26932515337423313, 0.31226993865030672, 0.33742331288343558, 0.31963190184049078, 0.26748466257668713], 6520: [0.19570552147239265, 0.095092024539877307, 0.16809815950920245, 0.2588957055214724, 0.26993865030674846, 0.26564417177914113, 0.30797546012269938, 0.33680981595092024, 0.31963190184049078, 0.27361963190184047, 0.27975460122699386], 4890: [0.20429447852760735, 0.11288343558282209, 0.17055214723926379, 0.26503067484662579, 0.26319018404907973, 0.2822085889570552, 0.30184049079754599, 0.32699386503067485, 0.30306748466257671, 0.28650306748466259, 0.27607361963190186, 0.26625766871165646], 3260: [0.21717791411042944, 0.12208588957055215, 0.16380368098159509, 0.25950920245398773, 0.252760736196319, 0.27300613496932513, 0.29815950920245399, 0.33128834355828218, 0.29509202453987732, 0.27975460122699386, 0.28343558282208586, 0.26503067484662579, 0.27300613496932513], 1630: [0.23803680981595093, 0.15889570552147239, 0.15705521472392639, 0.23251533742331287, 0.25521472392638039, 0.25582822085889573, 0.28159509202453986, 0.31901840490797545, 0.2570552147239264, 0.26196319018404907, 0.26319018404907973, 0.23558282208588957, 0.25214723926380367, 0.25950920245398773]}
incremental_train_sizes_15_folds = array([ 1630, 3260, 4890, 6520, 8150, 9780, 11410, 13040, 14670, 16300, 17930, 19560,
21190, 22820])
incremental_train_scores_mean_15_folds = array([ 0.93488168, 0.91736668, 0.90320382, 0.8903932, 0.87896933, 0.86782549,
0.85828221, 0.84972612, 0.84201318, 0.83611043, 0.82997769, 0.82522154,
0.82012742, 0.81849255])
incremental_train_scores_std_15_folds = array([ 0.01074397, 0.00967823, 0.00931587, 0.00992714, 0.01023275, 0.01070537,
0.01082517, 0.00899664, 0.00711293, 0.00609528, 0.00485997, 0.00374814,
0.00285512, 0. ])
incremental_test_scores_mean_15_folds = array([ 0.2448291, 0.25493157, 0.25490798, 0.25192415, 0.24711656, 0.23878664,
0.22553681, 0.20508326, 0.18680982, 0.17460123, 0.15076687, 0.11206544,
0.10552147, 0.15276074])
incremental_test_scores_std_15_folds = array([ 0.04097555, 0.05470141, 0.05920192, 0.06843836, 0.07712113, 0.08085383,
0.08314464, 0.07722247, 0.07412044, 0.07818233, 0.07246458, 0.03913685,
0.04539877, 0. ])
incremental_train_accuracy_per_size_25_folds = {23472: [0.81799591002044991], 22494: [0.8191517738063484, 0.81821819151773811], 21516: [0.824595649749024, 0.82041271611823763, 0.81692693809258221], 20538: [0.82753919563735512, 0.82520206446586819, 0.81892102444249681, 0.81862888304606096], 19560: [0.83195296523517381, 0.82878323108384455, 0.82356850715746421, 0.82014314928425358, 0.82249488752556232], 18582: [0.8366698955978904, 0.83397911957808635, 0.82741362608976432, 0.82692928640619956, 0.82348509310085027, 0.82396943278441503], 17604: [0.84003635537377863, 0.83702567598273114, 0.83242444898886614, 0.83094751192910699, 0.82969779595546467, 0.826630311292888, 0.82509656896159966], 16626: [0.84620473956453746, 0.84187417298207623, 0.83585949717310237, 0.83549861662456393, 0.8338145073980513, 0.83297245278479493, 0.82828100565379525, 0.8286418862023337], 15648: [0.85090746421267893, 0.84809560327198363, 0.84189672801635995, 0.83876533742331283, 0.83825408997955009, 0.83640081799591004, 0.8339723926380368, 0.83167177914110424, 0.83032975460122704], 14670: [0.85821404226312203, 0.85262440354464897, 0.84723926380368098, 0.84464894342194952, 0.84226312201772324, 0.84137695978186777, 0.83749147920927058, 0.83715064758009539, 0.83360599863667351, 0.83278800272665299], 13692: [0.86240140227870288, 0.86064855390008765, 0.8523225241016652, 0.85210341805433831, 0.84845165059888983, 0.84567630733274901, 0.8437043529068069, 0.84041776219690334, 0.83939526730937775, 0.83384458077709611, 0.83494011101373067], 12714: [0.871558911436212, 0.86644643699858426, 0.85921031933301872, 0.85614283467044205, 0.85456976561271036, 0.85189554821456659, 0.84867075664621672, 0.84827748938178382, 0.84481673745477426, 0.84135598552776469, 0.83687273871322954, 0.83852446122384772], 11736: [0.87585207907293794, 0.87491479209270617, 0.86699045671438313, 0.86255964553510567, 0.859492160872529, 0.859492160872529, 0.85506134969325154, 0.85565780504430811, 0.85216428084526241, 0.84688139059304701, 0.84398432174505789, 0.83980913428766191, 0.84355828220858897], 10758: [0.88213422569250788, 0.8812046848856665, 0.87488380739914484, 0.87153746049451575, 0.86642498605688789, 0.86558839933073062, 0.8642870422011526, 0.86242796058746984, 0.85833798103736758, 0.85489868005205427, 0.85015802193716306, 0.84960029745305821, 0.84690462911321807, 0.84987915969511063], 9780: [0.88486707566462164, 0.88926380368098157, 0.88159509202453989, 0.87770961145194271, 0.87525562372188137, 0.87259713701431496, 0.87157464212678937, 0.87044989775051129, 0.86543967280163603, 0.86124744376278117, 0.85664621676891617, 0.85521472392638032, 0.85644171779141109, 0.85419222903885483, 0.8535787321063395], 8802: [0.88559418314019545, 0.89036582594864799, 0.88798000454442172, 0.88479890933878669, 0.8827539195637355, 0.87991365598727567, 0.87855032947057488, 0.87832310838445804, 0.87502840263576465, 0.86980231765507843, 0.86639400136332656, 0.86321290615769142, 0.86332651670074978, 0.8626448534423995, 0.85810043172006367, 0.86332651670074978], 7824: [0.89187116564417179, 0.89570552147239269, 0.89327709611451944, 0.89199897750511248, 0.89059304703476483, 0.88905930470347649, 0.88854805725971375, 0.8880368098159509, 0.88036809815950923, 0.88087934560327197, 0.87359406952965235, 0.87397750511247441, 0.87282719836400813, 0.86707566462167684, 0.86809815950920244, 0.87103783231083842, 0.86950408997955009], 6846: [0.89789658194566169, 0.90213263219398188, 0.89702015775635413, 0.89789658194566169, 0.89775051124744376, 0.89599766286882854, 0.89322231960268772, 
0.89307624890446979, 0.89176161262050835, 0.88913234005258546, 0.88504236050248319, 0.88080631025416301, 0.88036809815950923, 0.87554776511831722, 0.87394098743791993, 0.87773882559158634, 0.87642418930762489, 0.87452527023079174], 5868: [0.90184049079754602, 0.91104294478527603, 0.90252215405589642, 0.90235173824130877, 0.90593047034764829, 0.9038854805725971, 0.90081799591002043, 0.8989434219495569, 0.8979209270620313, 0.8989434219495569, 0.89383094751192915, 0.8933197000681663, 0.88957055214723924, 0.88599182004089982, 0.87917518745739609, 0.8887184730743013, 0.8873551465576005, 0.88650306748466257, 0.8834355828220859], 4890: [0.89959100204498976, 0.91676891615541922, 0.91554192229038855, 0.91083844580777096, 0.90817995910020455, 0.91206543967280163, 0.91349693251533748, 0.90531697341513295, 0.90490797546012269, 0.90817995910020455, 0.90081799591002043, 0.90368098159509203, 0.90163599182004095, 0.89345603271983642, 0.88568507157464216, 0.89345603271983642, 0.89938650306748469, 0.90000000000000002, 0.89775051124744376, 0.88670756646216764], 3912: [0.90030674846625769, 0.91768916155419222, 0.92075664621676889, 0.92382413087934556, 0.91589979550102252, 0.91641104294478526, 0.92663599182004086, 0.91385480572597133, 0.91155419222903888, 0.91794478527607359, 0.91641104294478526, 0.91308793456032722, 0.91411042944785281, 0.90720858895705525, 0.89800613496932513, 0.90030674846625769, 0.9066973415132924, 0.91206543967280163, 0.91359918200408996, 0.89979550102249484, 0.89340490797546013], 2934: [0.90149965916837083, 0.92638036809815949, 0.92092706203135655, 0.93387866394001362, 0.92876618950238587, 0.92706203135650989, 0.93626448534423989, 0.92842535787321068, 0.91990456714383095, 0.92603953646898429, 0.91922290388548056, 0.93456032719836402, 0.92160872528970683, 0.91717791411042948, 0.91240627130197682, 0.91717791411042948, 0.91342876618950242, 0.92058623040218135, 0.92808452624403548, 0.91615541922290389, 0.9035446489434219, 0.90558963871847309], 1956: [0.90286298568507162, 0.93762781186094069, 0.93098159509202449, 0.93711656441717794, 0.93813905930470343, 0.93865030674846628, 0.94734151329243355, 0.93762781186094069, 0.93302658486707568, 0.92944785276073616, 0.92484662576687116, 0.93404907975460127, 0.94529652351738236, 0.92331288343558282, 0.91666666666666663, 0.93456032719836402, 0.92995910020449901, 0.92586912065439675, 0.93404907975460127, 0.93660531697341509, 0.91922290388548056, 0.91768916155419222, 0.9253578732106339], 978: [0.89059304703476483, 0.95194274028629855, 0.94887525562372188, 0.95194274028629855, 0.93762781186094069, 0.94478527607361962, 0.95910020449897748, 0.95501022494887522, 0.9253578732106339, 0.95092024539877296, 0.93456032719836402, 0.9468302658486708, 0.95705521472392641, 0.9468302658486708, 0.91411042944785281, 0.94478527607361962, 0.93149284253578735, 0.94069529652351735, 0.93456032719836402, 0.96319018404907975, 0.93865030674846628, 0.93558282208588961, 0.93149284253578735, 0.92126789366053174]}
incremental_test_accuracy_per_size_25_folds = {23472: [0.10633946830265849], 22494: [0.10531697341513292, 0.22903885480572597], 21516: [0.10736196319018405, 0.22392638036809817, 0.070552147239263799], 20538: [0.10736196319018405, 0.22699386503067484, 0.07259713701431493, 0.13905930470347649], 19560: [0.10531697341513292, 0.22903885480572597, 0.068507157464212681, 0.13803680981595093, 0.1492842535787321], 18582: [0.10736196319018405, 0.22290388548057261, 0.06646216768916155, 0.14519427402862986, 0.15235173824130879, 0.26687116564417179], 17604: [0.11349693251533742, 0.22903885480572597, 0.06646216768916155, 0.14519427402862986, 0.15644171779141106, 0.26789366053169733, 0.28936605316973413], 16626: [0.11042944785276074, 0.22392638036809817, 0.07259713701431493, 0.14519427402862986, 0.15848670756646216, 0.27505112474437626, 0.29345603271983639, 0.254601226993865], 15648: [0.1165644171779141, 0.22494887525562371, 0.07259713701431493, 0.14621676891615543, 0.15950920245398773, 0.26993865030674846, 0.29550102249488752, 0.25153374233128833, 0.31083844580777098], 14670: [0.11349693251533742, 0.22699386503067484, 0.07259713701431493, 0.14621676891615543, 0.15848670756646216, 0.27402862985685073, 0.29038854805725972, 0.24846625766871167, 0.30981595092024539, 0.21881390593047034], 13692: [0.11451942740286299, 0.23210633946830267, 0.070552147239263799, 0.14723926380368099, 0.16666666666666666, 0.27198364008179959, 0.29550102249488752, 0.25869120654396727, 0.31799591002044991, 0.22392638036809817, 0.2822085889570552], 12714: [0.11758691206543967, 0.23210633946830267, 0.075664621676891614, 0.14826175869120656, 0.17484662576687116, 0.27505112474437626, 0.29652351738241312, 0.2658486707566462, 0.31901840490797545, 0.23517382413087934, 0.28629856850715746, 0.34049079754601225], 11736: [0.1196319018404908, 0.24028629856850717, 0.075664621676891614, 0.15337423312883436, 0.18609406952965235, 0.26993865030674846, 0.28732106339468305, 0.26278118609406953, 0.31186094069529652, 0.22699386503067484, 0.28936605316973413, 0.34049079754601225, 0.34049079754601225], 10758: [0.130879345603272, 0.2607361963190184, 0.078732106339468297, 0.15848670756646216, 0.18711656441717792, 0.27607361963190186, 0.28936605316973413, 0.26278118609406953, 0.32106339468302658, 0.22801635991820041, 0.29141104294478526, 0.34458077709611451, 0.34151329243353784, 0.32617586912065438], 9780: [0.1329243353783231, 0.27709611451942739, 0.079754601226993863, 0.15439672801635992, 0.18813905930470348, 0.28527607361963192, 0.29141104294478526, 0.27198364008179959, 0.32515337423312884, 0.24335378323108384, 0.29243353783231085, 0.34662576687116564, 0.34458077709611451, 0.33435582822085891, 0.32515337423312884], 8802: [0.13190184049079753, 0.28629856850715746, 0.078732106339468297, 0.16564417177914109, 0.19325153374233128, 0.28118609406952966, 0.28834355828220859, 0.28834355828220859, 0.32924335378323111, 0.25153374233128833, 0.28834355828220859, 0.35276073619631904, 0.34253578732106338, 0.31697341513292432, 0.33333333333333331, 0.28323108384458079], 7824: [0.13496932515337423, 0.29550102249488752, 0.085889570552147243, 0.16871165644171779, 0.19836400817995911, 0.27300613496932513, 0.29243353783231085, 0.27811860940695299, 0.32822085889570551, 0.26482617586912066, 0.29447852760736198, 0.35787321063394684, 0.34867075664621677, 0.33128834355828218, 0.32310838445807771, 0.2822085889570552, 0.28732106339468305], 6846: [0.13701431492842536, 0.29447852760736198, 0.096114519427402859, 0.16973415132924335, 0.19836400817995911, 0.27402862985685073, 
0.28527607361963192, 0.27402862985685073, 0.32719836400817998, 0.25153374233128833, 0.29345603271983639, 0.34969325153374231, 0.34969325153374231, 0.33026584867075665, 0.32617586912065438, 0.29243353783231085, 0.29550102249488752, 0.29141104294478526], 5868: [0.14723926380368099, 0.30572597137014312, 0.097137014314928424, 0.17177914110429449, 0.20449897750511248, 0.26380368098159507, 0.28118609406952966, 0.27607361963190186, 0.32515337423312884, 0.27300613496932513, 0.29754601226993865, 0.34049079754601225, 0.34969325153374231, 0.32310838445807771, 0.32924335378323111, 0.29243353783231085, 0.28629856850715746, 0.30572597137014312, 0.27096114519427406], 4890: [0.15132924335378323, 0.31697341513292432, 0.11247443762781185, 0.17791411042944785, 0.21370143149284254, 0.26278118609406953, 0.28732106339468305, 0.2822085889570552, 0.32310838445807771, 0.27709611451942739, 0.28732106339468305, 0.35378323108384457, 0.32924335378323111, 0.31799591002044991, 0.31288343558282211, 0.28834355828220859, 0.29447852760736198, 0.31083844580777098, 0.27198364008179959, 0.27505112474437626], 3912: [0.15746421267893659, 0.32515337423312884, 0.12678936605316973, 0.20040899795501022, 0.21267893660531698, 0.25664621676891614, 0.28834355828220859, 0.26993865030674846, 0.30981595092024539, 0.28118609406952966, 0.28016359918200406, 0.34969325153374231, 0.33946830265848671, 0.30981595092024539, 0.31492842535787319, 0.28425357873210633, 0.29141104294478526, 0.31186094069529652, 0.2822085889570552, 0.27811860940695299, 0.30163599182004092], 2934: [0.17586912065439672, 0.33435582822085891, 0.14314928425357873, 0.21676891615541921, 0.19938650306748465, 0.25255623721881393, 0.28732106339468305, 0.27096114519427406, 0.29959100204498978, 0.27709611451942739, 0.26380368098159507, 0.35378323108384457, 0.34049079754601225, 0.29038854805725972, 0.30470347648261759, 0.27096114519427406, 0.2658486707566462, 0.32208588957055212, 0.28834355828220859, 0.26278118609406953, 0.31492842535787319, 0.25869120654396727], 1956: [0.19325153374233128, 0.34253578732106338, 0.15848670756646216, 0.24130879345603273, 0.17382413087934559, 0.22290388548057261, 0.31390593047034765, 0.26482617586912066, 0.30674846625766872, 0.2822085889570552, 0.26789366053169733, 0.34049079754601225, 0.33231083844580778, 0.28936605316973413, 0.28016359918200406, 0.28425357873210633, 0.25766871165644173, 0.30470347648261759, 0.28527607361963192, 0.24335378323108384, 0.27198364008179959, 0.26482617586912066, 0.29243353783231085], 978: [0.19529652351738241, 0.34253578732106338, 0.18507157464212678, 0.24233128834355827, 0.21370143149284254, 0.20961145194274028, 0.27811860940695299, 0.27505112474437626, 0.25869120654396727, 0.2556237218813906, 0.2310838445807771, 0.29447852760736198, 0.29038854805725972, 0.27198364008179959, 0.24846625766871167, 0.24539877300613497, 0.20143149284253578, 0.25869120654396727, 0.25255623721881393, 0.26993865030674846, 0.18916155419222905, 0.22903885480572597, 0.28323108384458079, 0.23415132924335377]}
incremental_train_sizes_25_folds = array([ 978, 1956, 2934, 3912, 4890, 5868, 6846, 7824, 8802, 9780, 10758, 11736,
12714, 13692, 14670, 15648, 16626, 17604, 18582, 19560, 20538, 21516, 22494, 23472])
incremental_train_scores_mean_25_folds = array([ 0.93988582, 0.93044812, 0.9208496, 0.91140812, 0.90287321, 0.89537366,
0.88779336, 0.881556, 0.87438224, 0.86840491, 0.86273338, 0.85664753,
0.8515285, 0.84671872, 0.84274029, 0.83892155, 0.83539336, 0.8316941,
0.82874108, 0.82538855, 0.82257279, 0.8206451, 0.81868498, 0.81799591])
incremental_train_scores_std_25_folds = array([ 0.01574382, 0.00986344, 0.00941513, 0.0086523, 0.00859881, 0.00843904,
0.0094164, 0.00978435, 0.0102025, 0.01149665, 0.01132038, 0.011076,
0.01026567, 0.00910232, 0.00777419, 0.00661961, 0.00574061, 0.00496077,
0.00492852, 0.00433164, 0.00388806, 0.00313505, 0.00046679, 0. ])
incremental_test_scores_mean_25_folds = array([ 0.24816803, 0.27020539, 0.27244841, 0.27485636, 0.27234151, 0.27058444,
0.26868893, 0.26735234, 0.26322853, 0.2595092, 0.24978089, 0.23879188,
0.2305726, 0.21649005, 0.20593047, 0.20529425, 0.19171779, 0.18112767,
0.16019087, 0.13803681, 0.13650307, 0.13394683, 0.16717791, 0.10633947])
incremental_test_scores_std_25_folds = array([ 0.03709919, 0.0474123, 0.05100164, 0.0554582, 0.06067133, 0.06643791,
0.07039766, 0.07419473, 0.0772398, 0.08042228, 0.07961067, 0.08014752,
0.08023328, 0.07686982, 0.0755778, 0.07961578, 0.07611923, 0.07668308,
0.06730022, 0.05350297, 0.05728938, 0.06537574, 0.06186094, 0. ])
incremental_train_accuracy_per_size_50_folds = {23961: [0.81770376862401406], 23472: [0.81842194955691894, 0.81799591002044991], 22983: [0.81877909759387368, 0.81821346212417878, 0.81890962885611107], 22494: [0.82177469547434867, 0.8191517738063484, 0.819196230105806, 0.81821819151773811], 22005: [0.82394910247670983, 0.82140422631220178, 0.81972279027493755, 0.81904112701658716, 0.81799591002044991], 21516: [0.82594348391894401, 0.824595649749024, 0.82250418293363081, 0.82041271611823763, 0.81911135898865961, 0.81692693809258221], 21027: [0.8270319113520711, 0.82679412184334422, 0.82455890046131164, 0.82260902648975132, 0.81966043658153798, 0.81832881533266755, 0.81766300470823228], 20538: [0.82958418541240631, 0.82753919563735512, 0.82753919563735512, 0.82520206446586819, 0.82232934073424868, 0.81892102444249681, 0.81819067095140718, 0.81862888304606096], 20049: [0.8316624270537184, 0.82891914808718636, 0.82822085889570551, 0.82762232530300761, 0.82448002394134368, 0.82168686717542017, 0.81874407701132224, 0.81944236620280309, 0.82058955558880742], 19560: [0.83333333333333337, 0.83195296523517381, 0.83067484662576685, 0.82878323108384455, 0.82617586912065444, 0.82356850715746421, 0.82249488752556232, 0.82014314928425358, 0.82055214723926384, 0.82249488752556232], 19071: [0.83682030307797184, 0.83320224424518907, 0.83377903623302396, 0.83141943264642648, 0.82858790834250962, 0.82612343348539663, 0.82476010696869595, 0.82344921608725286, 0.82093230559488228, 0.82240050338209847, 0.82318703791096426], 18582: [0.83785383704660421, 0.8366698955978904, 0.83435582822085885, 0.83397911957808635, 0.83118071251749004, 0.82741362608976432, 0.82800559681412123, 0.82692928640619956, 0.82488429663114837, 0.82348509310085027, 0.82386180174362289, 0.82396943278441503], 18093: [0.84115403747305584, 0.83722986790471454, 0.8366771679655115, 0.83474271817830104, 0.83280826839109046, 0.83087381860387999, 0.82998949870115513, 0.82860774885314759, 0.82706018902337919, 0.82656275907809651, 0.82484938926656715, 0.82595478914497322, 0.8232465594428785], 17604: [0.84350147693705979, 0.84003635537377863, 0.83844580777096112, 0.83702567598273114, 0.83441263349238814, 0.83242444898886614, 0.83145875937286984, 0.83094751192910699, 0.83037945921381506, 0.82969779595546467, 0.82833446943876388, 0.826630311292888, 0.8259486480345376, 0.82509656896159966], 17115: [0.84691790826760149, 0.84317849839322234, 0.84224364592462753, 0.83827052293309967, 0.83651767455448434, 0.83447268477943326, 0.83342097575226415, 0.83353783231083844, 0.83196026877008478, 0.83102541630148996, 0.83219398188723337, 0.82921413964358748, 0.82629272567922873, 0.8269354367513877, 0.82810400233713122], 16626: [0.84885119692048594, 0.84620473956453746, 0.84536268495128108, 0.84187417298207623, 0.83844580777096112, 0.83585949717310237, 0.83489714904366652, 0.83549861662456393, 0.83417538794658963, 0.8338145073980513, 0.83267171899434622, 0.83297245278479493, 0.82966438108985929, 0.82828100565379525, 0.82972452784794903, 0.8286418862023337], 16137: [0.85077771580839068, 0.84792712400074366, 0.84817500154923464, 0.84513850158021941, 0.84166821590134477, 0.83881762409369776, 0.83695854248001489, 0.83708248125426044, 0.83621490983454172, 0.83584309351180519, 0.83454173638222717, 0.83435582822085885, 0.83274462415566708, 0.83156720580033461, 0.83125735886472085, 0.83038978744500214, 0.83082357315486155], 15648: [0.8539749488752556, 0.85090746421267893, 0.84924591002044991, 0.84809560327198363, 0.84502811860940696, 0.84189672801635995, 0.83946830265848671, 
0.83876533742331283, 0.83819018404907975, 0.83825408997955009, 0.83780674846625769, 0.83640081799591004, 0.83416411042944782, 0.8339723926380368, 0.83467535787321068, 0.83167177914110424, 0.83141615541922287, 0.83032975460122704], 15159: [0.85744442245530705, 0.85421201926248436, 0.85196912725113794, 0.84959430041559469, 0.84814301734942943, 0.84444884227191763, 0.843723200738835, 0.84148030872748858, 0.84075466719440595, 0.83956725377663433, 0.84002902566132331, 0.83818193812256747, 0.83659872023220527, 0.83574114387492582, 0.83587307869912264, 0.83448776304505579, 0.83277261033049677, 0.83217890362161095, 0.83125535985223298], 14670: [0.8597818677573279, 0.85821404226312203, 0.85555555555555551, 0.85262440354464897, 0.84935241990456711, 0.84723926380368098, 0.84689843217450578, 0.84464894342194952, 0.84294478527607364, 0.84226312201772324, 0.84192229038854804, 0.84137695978186777, 0.83871847307430125, 0.83749147920927058, 0.83974096796182685, 0.83715064758009539, 0.83640081799591004, 0.83360599863667351, 0.83251533742331285, 0.83278800272665299], 14181: [0.86150483040688242, 0.86002397574219025, 0.85995345885339536, 0.85508779352654962, 0.85057471264367812, 0.85050419575488334, 0.8488117904238065, 0.84860023975742194, 0.84648473309357586, 0.84648473309357586, 0.84380509131937098, 0.84458077709611457, 0.8404907975460123, 0.84161906776673012, 0.84098441576757632, 0.83978562865806361, 0.83886890910373035, 0.83752908821662786, 0.83329807488893592, 0.83414427755447429, 0.83160566955785908], 13692: [0.86634531113058721, 0.86240140227870288, 0.86364300321355536, 0.86064855390008765, 0.8555360794624598, 0.8523225241016652, 0.8507157464212679, 0.85210341805433831, 0.85159217061057557, 0.84845165059888983, 0.84786736780601812, 0.84567630733274901, 0.84421560035056964, 0.8437043529068069, 0.84341221151037105, 0.84041776219690334, 0.84290096406660819, 0.83939526730937775, 0.83822670172363423, 0.83384458077709611, 0.83333333333333337, 0.83494011101373067], 13203: [0.87002953874119515, 0.86737862606983263, 0.86609103991517078, 0.86344012724380825, 0.85942588805574494, 0.85465424524729228, 0.85442702416117544, 0.85435128379913661, 0.85359388017874727, 0.85344239945466938, 0.84965538135272289, 0.84995834280087856, 0.84511095963038707, 0.84647428614708775, 0.84556540180262063, 0.84397485419980312, 0.84382337347572522, 0.84382337347572522, 0.83958191320154507, 0.84071801863212903, 0.83458304930697569, 0.83624933727183215, 0.83655229871998793], 12714: [0.8728173666823974, 0.871558911436212, 0.86967122856693413, 0.86644643699858426, 0.8619631901840491, 0.85921031933301872, 0.85779455718106024, 0.85614283467044205, 0.85677206229353464, 0.85456976561271036, 0.85472707251848357, 0.85189554821456659, 0.84977190498662891, 0.84867075664621672, 0.84945729117508262, 0.84827748938178382, 0.84812018247601073, 0.84481673745477426, 0.84324366839704268, 0.84135598552776469, 0.84143463898065129, 0.83687273871322954, 0.83600755073147714, 0.83852446122384772], 12225: [0.87427402862985681, 0.874601226993865, 0.87337423312883433, 0.87108384458077714, 0.86633946830265851, 0.86298568507157469, 0.86159509202453988, 0.85946830265848673, 0.85881390593047036, 0.85758691206543969, 0.8555419222903885, 0.85652351738241306, 0.85194274028629857, 0.85284253578732105, 0.85251533742331287, 0.8512883435582822, 0.85194274028629857, 0.8489979550102249, 0.84719836400817994, 0.84621676891615538, 0.84220858895705519, 0.84261758691206545, 0.83918200408997956, 0.84016359918200412, 0.84040899795501023], 11736: [0.87857873210633952, 
0.87585207907293794, 0.87849352419904569, 0.87491479209270617, 0.87125085207907293, 0.86699045671438313, 0.86486025903203823, 0.86255964553510567, 0.86170756646216773, 0.859492160872529, 0.85932174505794134, 0.859492160872529, 0.85591342876618948, 0.85506134969325154, 0.85753237900477164, 0.85565780504430811, 0.85582822085889576, 0.85216428084526241, 0.85046012269938653, 0.84688139059304701, 0.84654055896387181, 0.84398432174505789, 0.84492160872528965, 0.83980913428766191, 0.84262099522835721, 0.84355828220858897], 11247: [0.88094603005245842, 0.88085711745354311, 0.88005690406330572, 0.87970125366764473, 0.87329954654574549, 0.87009869298479592, 0.87018760558371122, 0.86805370320974484, 0.86520850004445626, 0.8627189472748289, 0.86378589846181209, 0.86458611185204948, 0.86031830710411661, 0.86014048190628611, 0.85765092913665864, 0.85791766693340443, 0.85889570552147243, 0.85622832755401446, 0.85373877478438698, 0.8513381346136748, 0.84689250466791144, 0.84787054325597933, 0.84680359206899614, 0.84635902907441984, 0.84431403929936877, 0.84573664088201295, 0.84493642749177555], 10758: [0.87999628183677259, 0.88213422569250788, 0.88315672058003347, 0.8812046848856665, 0.8789737869492471, 0.87488380739914484, 0.87265290946272545, 0.87153746049451575, 0.86921360847741214, 0.86642498605688789, 0.86605316973415136, 0.86558839933073062, 0.86335750139431122, 0.8642870422011526, 0.86475181260457334, 0.86242796058746984, 0.8608477412158394, 0.85833798103736758, 0.85796616471463094, 0.85489868005205427, 0.85201710355084592, 0.85015802193716306, 0.85127347090537275, 0.84960029745305821, 0.84848484848484851, 0.84690462911321807, 0.84885666480758504, 0.84987915969511063], 10269: [0.88207225630538511, 0.88178011490894925, 0.88723342097575231, 0.88479890933878669, 0.88109845165059886, 0.87730061349693256, 0.87642418930762489, 0.87525562372188137, 0.87321063394683029, 0.86951017625864246, 0.87136040510273638, 0.86853637160385622, 0.86542019670854031, 0.86892589346577076, 0.86814684974194178, 0.86512805531210435, 0.86629662089784787, 0.86220664134774561, 0.86064855390008765, 0.8603564125036518, 0.85470834550589148, 0.85597429155711369, 0.85353977992014807, 0.85305287759275494, 0.85042360502483205, 0.85315025805823352, 0.85052098549031063, 0.84983932223196024, 0.85032622455935336], 9780: [0.88466257668711656, 0.88486707566462164, 0.88670756646216764, 0.88926380368098157, 0.88548057259713697, 0.88159509202453989, 0.87985685071574637, 0.87770961145194271, 0.87934560327198363, 0.87525562372188137, 0.87300613496932511, 0.87259713701431496, 0.86912065439672803, 0.87157464212678937, 0.87300613496932511, 0.87044989775051129, 0.87106339468302663, 0.86543967280163603, 0.86431492842535784, 0.86124744376278117, 0.85899795501022491, 0.85664621676891617, 0.85838445807770958, 0.85521472392638032, 0.85429447852760731, 0.85644171779141109, 0.85480572597137017, 0.85419222903885483, 0.85255623721881391, 0.8535787321063395], 9291: [0.884834786352384, 0.88612635884188995, 0.88806371757614899, 0.88870950382090197, 0.88849424173931757, 0.88440426218921542, 0.88257453449574852, 0.88085243784307399, 0.88268216553654077, 0.88009902055752876, 0.87708535141534816, 0.87697772037455601, 0.87278010978366161, 0.87460983747712839, 0.87471746851792054, 0.87568614788505006, 0.8743945753955441, 0.87073512000861053, 0.86729092670326124, 0.86761381982563768, 0.86126358841890005, 0.86169411258206863, 0.86072543321493922, 0.86061780217414707, 0.85911096760305672, 0.85954149176622541, 0.85857281239909589, 0.85749650199117422, 
0.85609729846087612, 0.85502098805295446, 0.85642019158325255], 8802: [0.88604862531242901, 0.88559418314019545, 0.89184276300840715, 0.89036582594864799, 0.89059304703476483, 0.88798000454442172, 0.88809361508748008, 0.88479890933878669, 0.88570779368325381, 0.8827539195637355, 0.88457168825266985, 0.87991365598727567, 0.87843671892751651, 0.87855032947057488, 0.87809588729834132, 0.87832310838445804, 0.87752783458304928, 0.87502840263576465, 0.87321063394683029, 0.86980231765507843, 0.86628039082026809, 0.86639400136332656, 0.86537150647580097, 0.86321290615769142, 0.86275846398545786, 0.86332651670074978, 0.86366734832992498, 0.8626448534423995, 0.85969097932288119, 0.85810043172006367, 0.86025903203817311, 0.86332651670074978], 8313: [0.89125466137375198, 0.88812702995308557, 0.89281847708408513, 0.89305906411644409, 0.8923373030193672, 0.8923373030193672, 0.89101407434139301, 0.88920967159870079, 0.8898111391795982, 0.88776614940454712, 0.88848791050162401, 0.88632262721039334, 0.88271382172500901, 0.88235294117647056, 0.88379646337062434, 0.88295440875736797, 0.88151088656321419, 0.87934560327198363, 0.87922530975580415, 0.87561650427041982, 0.86960182846144596, 0.86948153494526648, 0.86815830626729218, 0.86767713220257425, 0.86743654517021529, 0.86839889329965114, 0.86659449055695903, 0.86479008781426681, 0.86358715265247199, 0.86178274990977988, 0.86527126187898473, 0.86442920726572836, 0.86707566462167684], 7824: [0.89391615541922287, 0.89187116564417179, 0.8936605316973415, 0.89570552147239269, 0.89608895705521474, 0.89327709611451944, 0.89302147239263807, 0.89199897750511248, 0.89378834355828218, 0.89059304703476483, 0.89008179959100209, 0.88905930470347649, 0.88842024539877296, 0.88854805725971375, 0.8871421267893661, 0.8880368098159509, 0.8834355828220859, 0.88036809815950923, 0.88407464212678932, 0.88087934560327197, 0.8778118609406953, 0.87359406952965235, 0.87359406952965235, 0.87397750511247441, 0.87448875255623726, 0.87282719836400813, 0.86848159509202449, 0.86707566462167684, 0.86490286298568508, 0.86809815950920244, 0.86975971370143146, 0.87103783231083842, 0.86848159509202449, 0.86950408997955009], 7335: [0.89747784594410362, 0.89461486025903203, 0.89720518064076349, 0.89679618268575323, 0.89788684389911388, 0.8958418541240627, 0.89706884798909337, 0.89447852760736202, 0.89693251533742335, 0.89570552147239269, 0.89188820722563056, 0.89338786639400136, 0.89011588275391962, 0.89202453987730057, 0.88943421949556922, 0.88820722563053855, 0.8877982276755283, 0.88575323790047722, 0.88684389911383776, 0.88657123381049763, 0.88084526244035444, 0.88152692569870483, 0.87580095432856164, 0.8768916155419223, 0.87607361963190189, 0.87648261758691204, 0.87484662576687111, 0.87239263803680978, 0.86830265848670751, 0.87075664621676896, 0.87280163599182004, 0.87566462167689163, 0.87389229720518069, 0.87171097477845949, 0.87457396046353097], 6846: [0.90037978381536665, 0.89789658194566169, 0.90300905638328954, 0.90213263219398188, 0.90037978381536665, 0.89702015775635413, 0.9000876424189308, 0.89789658194566169, 0.89964943032427691, 0.89775051124744376, 0.89628980426526439, 0.89599766286882854, 0.8952673093777388, 0.89322231960268772, 0.89249196611159798, 0.89307624890446979, 0.89073911773298275, 0.89176161262050835, 0.89176161262050835, 0.88913234005258546, 0.88635699678644464, 0.88504236050248319, 0.8834355828220859, 0.88080631025416301, 0.88124452234881678, 0.88036809815950923, 0.87934560327198363, 0.87554776511831722, 0.87306456324861237, 0.87394098743791993, 0.87832310838445804, 
0.87773882559158634, 0.87730061349693256, 0.87642418930762489, 0.87583990651475319, 0.87452527023079174], 6357: [0.89948088721094854, 0.90184049079754602, 0.90671700487651408, 0.90593047034764829, 0.90593047034764829, 0.89806512505899005, 0.90073934245713383, 0.90073934245713383, 0.90215510460909232, 0.90136857008022653, 0.89995280792826804, 0.90136857008022653, 0.89806512505899005, 0.89712128362435106, 0.89633474909548527, 0.89664936290703157, 0.89476168003775369, 0.8933459178857952, 0.89633474909548527, 0.89444706622620729, 0.89004247286455873, 0.88831209690105395, 0.88799748308950766, 0.88878401761837345, 0.88469403806827118, 0.88563787950291017, 0.8823344344816737, 0.88060405851816892, 0.87399716847569608, 0.87871637564889093, 0.88186251376435421, 0.88500865187981748, 0.88060405851816892, 0.88186251376435421, 0.8834355828220859, 0.87808714802579835, 0.87777253421425205], 5868: [0.8979209270620313, 0.90184049079754602, 0.90933878663940015, 0.91104294478527603, 0.90814587593728702, 0.90252215405589642, 0.90320381731424682, 0.90235173824130877, 0.90490797546012269, 0.90593047034764829, 0.90405589638718475, 0.9038854805725971, 0.9038854805725971, 0.90081799591002043, 0.89655760054533062, 0.8989434219495569, 0.90013633265167003, 0.8979209270620313, 0.90115882753919563, 0.8989434219495569, 0.89468302658486709, 0.89383094751192915, 0.89417177914110424, 0.8933197000681663, 0.89246762099522836, 0.88957055214723924, 0.88786639400136336, 0.88599182004089982, 0.88019768234492157, 0.87917518745739609, 0.8873551465576005, 0.8887184730743013, 0.8876959781867757, 0.8873551465576005, 0.88752556237218816, 0.88650306748466257, 0.88394683026584864, 0.8834355828220859], 5379: [0.8947759806655512, 0.89960959286112663, 0.91094999070459193, 0.912994980479643, 0.91318088864101132, 0.90797546012269936, 0.908347276445436, 0.90462911321807027, 0.90686001115448966, 0.9072318274772263, 0.908347276445436, 0.90760364379996283, 0.9072318274772263, 0.9094627254136457, 0.90258412344301919, 0.90016731734523148, 0.9038854805725971, 0.90295593976575572, 0.90407138873396542, 0.90425729689533374, 0.90128276631344117, 0.89719278676333891, 0.89644915411786574, 0.89998140918386316, 0.89459007250418299, 0.89607733779512921, 0.89105781743818557, 0.89105781743818557, 0.88417921546755907, 0.88455103179029559, 0.88622420524261014, 0.8947759806655512, 0.89273099089050012, 0.89533370514965605, 0.8936605316973415, 0.89459007250418299, 0.8925450827291318, 0.8903141847927124, 0.8823201338538762], 4890: [0.89427402862985683, 0.89959100204498976, 0.91186094069529655, 0.91676891615541922, 0.91942740286298563, 0.91554192229038855, 0.91165644171779137, 0.91083844580777096, 0.90961145194274029, 0.90817995910020455, 0.90756646216768921, 0.91206543967280163, 0.91329243353783229, 0.91349693251533748, 0.9122699386503067, 0.90531697341513295, 0.90531697341513295, 0.90490797546012269, 0.90920245398773003, 0.90817995910020455, 0.9057259713701431, 0.90081799591002043, 0.90163599182004095, 0.90368098159509203, 0.90122699386503069, 0.90163599182004095, 0.8991820040899795, 0.89345603271983642, 0.89161554192229042, 0.88568507157464216, 0.89284253578732109, 0.89345603271983642, 0.89877300613496935, 0.89938650306748469, 0.90081799591002043, 0.90000000000000002, 0.89959100204498976, 0.89775051124744376, 0.88936605316973416, 0.88670756646216764], 4401: [0.89502385821404229, 0.90024994319472851, 0.90956600772551688, 0.91660986139513745, 0.91820040899795496, 0.9177459668257214, 0.9177459668257214, 0.91638264030902072, 0.91660986139513745, 0.91092933424221767, 
0.91070211315610083, 0.91433765053396954, 0.9184276300840718, 0.91933651442853892, 0.91274710293115202, 0.91229266075891846, 0.91161099750056807, 0.9082026812088162, 0.91297432401726886, 0.91297432401726886, 0.91092933424221767, 0.90593047034764829, 0.90547602817541473, 0.90956600772551688, 0.9038854805725971, 0.91024767098386727, 0.90252215405589642, 0.90024994319472851, 0.89479663712792545, 0.8936605316973415, 0.89706884798909337, 0.89934105885026128, 0.89956827993637811, 0.90365825948648038, 0.90570324926153145, 0.90706657577823224, 0.90865712338104976, 0.9038854805725971, 0.89888661667802772, 0.8900249943194728, 0.88798000454442172], 3912: [0.89570552147239269, 0.90030674846625769, 0.90925357873210633, 0.91768916155419222, 0.92050102249488752, 0.92075664621676889, 0.92459100204498978, 0.92382413087934556, 0.92484662576687116, 0.91589979550102252, 0.91180981595092025, 0.91641104294478526, 0.92331288343558282, 0.92663599182004086, 0.91871165644171782, 0.91385480572597133, 0.91538854805725967, 0.91155419222903888, 0.91666666666666663, 0.91794478527607359, 0.91538854805725967, 0.91641104294478526, 0.91078732106339466, 0.91308793456032722, 0.91155419222903888, 0.91411042944785281, 0.91155419222903888, 0.90720858895705525, 0.89979550102249484, 0.89800613496932513, 0.90005112474437632, 0.90030674846625769, 0.90465235173824132, 0.9066973415132924, 0.91308793456032722, 0.91206543967280163, 0.91666666666666663, 0.91359918200408996, 0.90260736196319014, 0.89979550102249484, 0.8936605316973415, 0.89340490797546013], 3423: [0.89862693543675143, 0.90154834940111017, 0.91527899503359622, 0.9199532573765703, 0.92112182296231371, 0.91936897458369848, 0.92900964066608238, 0.92988606485539005, 0.92608822670172364, 0.92433537832310841, 0.92112182296231371, 0.91586327782646804, 0.92638036809815949, 0.93280747881974879, 0.9301782062518259, 0.91907683318726263, 0.91703184341221156, 0.91907683318726263, 0.920537540169442, 0.92229038854805723, 0.92112182296231371, 0.91703184341221156, 0.92345895413380075, 0.92141396435874967, 0.91381828805141685, 0.91820040899795496, 0.91732398480864741, 0.913526146654981, 0.90651475314051999, 0.90213263219398188, 0.90739117732982766, 0.90885188431200703, 0.9038854805725971, 0.91177329827636577, 0.91557113643003218, 0.91878469179082678, 0.92112182296231371, 0.92550394390885193, 0.91323400525854515, 0.90534618755477647, 0.90271691498685369, 0.89628980426526439, 0.89775051124744376], 2934: [0.89604635310156777, 0.90149965916837083, 0.91547375596455349, 0.92638036809815949, 0.92263122017723243, 0.92092706203135655, 0.92638036809815949, 0.93387866394001362, 0.93353783231083842, 0.92876618950238587, 0.9253578732106339, 0.92706203135650989, 0.92569870483980909, 0.93626448534423989, 0.93490115882753921, 0.92842535787321068, 0.92092706203135655, 0.91990456714383095, 0.92842535787321068, 0.92603953646898429, 0.92126789366053174, 0.91922290388548056, 0.92672119972733469, 0.93456032719836402, 0.92229038854805723, 0.92160872528970683, 0.91956373551465576, 0.91717791411042948, 0.91240627130197682, 0.91240627130197682, 0.9147920927062031, 0.91717791411042948, 0.91513292433537829, 0.91342876618950242, 0.91717791411042948, 0.92058623040218135, 0.92808452624403548, 0.92808452624403548, 0.9253578732106339, 0.91615541922290389, 0.90865712338104976, 0.9035446489434219, 0.89843217450579416, 0.90558963871847309], 2445: [0.89775051124744376, 0.9038854805725971, 0.92229038854805723, 0.93047034764826175, 0.92842535787321068, 0.92147239263803682, 0.92801635991820042, 0.93742331288343561, 
0.93946830265848669, 0.93006134969325149, 0.93210633946830268, 0.93251533742331283, 0.93537832310838442, 0.93701431492842535, 0.93660531697341509, 0.93047034764826175, 0.93251533742331283, 0.92842535787321068, 0.92760736196319016, 0.93047034764826175, 0.92801635991820042, 0.92106339468302656, 0.92638036809815949, 0.93333333333333335, 0.93987730061349695, 0.93333333333333335, 0.92515337423312882, 0.91901840490797548, 0.91411042944785281, 0.91574642126789363, 0.92515337423312882, 0.92678936605316975, 0.92229038854805723, 0.92024539877300615, 0.91983640081799589, 0.92433537832310841, 0.92760736196319016, 0.93456032719836402, 0.93742331288343561, 0.9235173824130879, 0.91901840490797548, 0.91615541922290389, 0.90838445807770962, 0.90920245398773003, 0.91002044989775055], 1956: [0.89570552147239269, 0.90286298568507162, 0.92126789366053174, 0.93762781186094069, 0.93200408997955009, 0.93098159509202449, 0.93149284253578735, 0.93711656441717794, 0.94478527607361962, 0.93813905930470343, 0.93404907975460127, 0.93865030674846628, 0.94171779141104295, 0.94734151329243355, 0.94427402862985688, 0.93762781186094069, 0.93098159509202449, 0.93302658486707568, 0.93711656441717794, 0.92944785276073616, 0.93251533742331283, 0.92484662576687116, 0.9253578732106339, 0.93404907975460127, 0.93813905930470343, 0.94529652351738236, 0.93762781186094069, 0.92331288343558282, 0.91922290388548056, 0.91666666666666663, 0.93251533742331283, 0.93456032719836402, 0.92433537832310841, 0.92995910020449901, 0.92638036809815949, 0.92586912065439675, 0.92842535787321068, 0.93404907975460127, 0.93967280163599187, 0.93660531697341509, 0.92944785276073616, 0.91922290388548056, 0.91564417177914115, 0.91768916155419222, 0.91615541922290389, 0.9253578732106339], 1467: [0.89229720518064082, 0.89843217450579416, 0.92774369461486028, 0.94274028629856854, 0.94478527607361962, 0.94069529652351735, 0.9468302658486708, 0.93524199045671441, 0.94069529652351735, 0.94546693933197001, 0.93865030674846628, 0.93660531697341509, 0.95023858214042267, 0.95023858214042267, 0.95637355146557601, 0.94001363326516696, 0.93660531697341509, 0.92842535787321068, 0.94546693933197001, 0.92910702113156096, 0.93524199045671441, 0.93456032719836402, 0.92978868438991136, 0.93865030674846628, 0.93796864349011588, 0.95023858214042267, 0.95092024539877296, 0.93047034764826175, 0.92024539877300615, 0.92160872528970683, 0.93592365371506481, 0.94137695978186775, 0.93387866394001362, 0.92842535787321068, 0.94069529652351735, 0.93456032719836402, 0.93592365371506481, 0.92978868438991136, 0.94887525562372188, 0.94410361281526922, 0.93933197000681667, 0.92774369461486028, 0.91002044989775055, 0.92706203135650989, 0.93183367416496254, 0.93319700068166322, 0.92978868438991136], 978: [0.91104294478527603, 0.89059304703476483, 0.92842535787321068, 0.95194274028629855, 0.95501022494887522, 0.94887525562372188, 0.95705521472392641, 0.95194274028629855, 0.94171779141104295, 0.93762781186094069, 0.94989775051124747, 0.94478527607361962, 0.94989775051124747, 0.95910020449897748, 0.94785276073619629, 0.95501022494887522, 0.93762781186094069, 0.9253578732106339, 0.94989775051124747, 0.95092024539877296, 0.92638036809815949, 0.93456032719836402, 0.94887525562372188, 0.9468302658486708, 0.94171779141104295, 0.95705521472392641, 0.95398773006134974, 0.9468302658486708, 0.92126789366053174, 0.91411042944785281, 0.93865030674846628, 0.94478527607361962, 0.94274028629856854, 0.93149284253578735, 0.93865030674846628, 0.94069529652351735, 0.94989775051124747, 0.93456032719836402, 
0.94171779141104295, 0.96319018404907975, 0.94785276073619629, 0.93865030674846628, 0.92433537832310841, 0.93558282208588961, 0.93149284253578735, 0.93149284253578735, 0.93558282208588961, 0.92126789366053174], 489: [0.91820040899795496, 0.91002044989775055, 0.91206543967280163, 0.96932515337423308, 0.97750511247443761, 0.95501022494887522, 0.95910020449897748, 0.95296523517382414, 0.96114519427402867, 0.93660531697341509, 0.96523517382413093, 0.9468302658486708, 0.95501022494887522, 0.95910020449897748, 0.95092024539877296, 0.95296523517382414, 0.96523517382413093, 0.90593047034764829, 0.96319018404907975, 0.95910020449897748, 0.95910020449897748, 0.91411042944785281, 0.94069529652351735, 0.97137014314928427, 0.93865030674846628, 0.95705521472392641, 0.95501022494887522, 0.95910020449897748, 0.95092024539877296, 0.90184049079754602, 0.93660531697341509, 0.9468302658486708, 0.95501022494887522, 0.94069529652351735, 0.94274028629856854, 0.95296523517382414, 0.94069529652351735, 0.95296523517382414, 0.94069529652351735, 0.96114519427402867, 0.9795501022494888, 0.94069529652351735, 0.92433537832310841, 0.93660531697341509, 0.93047034764826175, 0.93047034764826175, 0.96932515337423308, 0.92024539877300615, 0.94887525562372188]}
incremental_test_accuracy_per_size_50_folds = {23961: [0.096114519427402859], 23472: [0.096114519427402859, 0.15132924335378323], 22983: [0.096114519427402859, 0.15132924335378323, 0.26993865030674846], 22494: [0.096114519427402859, 0.1492842535787321, 0.27607361963190186, 0.22699386503067484], 22005: [0.09815950920245399, 0.15132924335378323, 0.27607361963190186, 0.22290388548057261, 0.16359918200408999], 21516: [0.096114519427402859, 0.15337423312883436, 0.27198364008179959, 0.22085889570552147, 0.16768916155419222, 0.096114519427402859], 21027: [0.09815950920245399, 0.1492842535787321, 0.27607361963190186, 0.22085889570552147, 0.16973415132924335, 0.096114519427402859, 0.15746421267893659], 20538: [0.096114519427402859, 0.15337423312883436, 0.27811860940695299, 0.22699386503067484, 0.16564417177914109, 0.10020449897750511, 0.15541922290388549, 0.15132924335378323], 20049: [0.096114519427402859, 0.15132924335378323, 0.27402862985685073, 0.22903885480572597, 0.16768916155419222, 0.096114519427402859, 0.15132924335378323, 0.15541922290388549, 0.20449897750511248], 19560: [0.09202453987730061, 0.1492842535787321, 0.27607361963190186, 0.22699386503067484, 0.15950920245398773, 0.094069529652351741, 0.15132924335378323, 0.14723926380368099, 0.20245398773006135, 0.14723926380368099], 19071: [0.096114519427402859, 0.15337423312883436, 0.27402862985685073, 0.22494887525562371, 0.16359918200408999, 0.094069529652351741, 0.15132924335378323, 0.15950920245398773, 0.20245398773006135, 0.15132924335378323, 0.21676891615541921], 18582: [0.094069529652351741, 0.15337423312883436, 0.28016359918200406, 0.22290388548057261, 0.16359918200408999, 0.09202453987730061, 0.1492842535787321, 0.16155419222903886, 0.20040899795501022, 0.15337423312883436, 0.21472392638036811, 0.32310838445807771], 18093: [0.096114519427402859, 0.15950920245398773, 0.28016359918200406, 0.22699386503067484, 0.16359918200408999, 0.085889570552147243, 0.1492842535787321, 0.16155419222903886, 0.20245398773006135, 0.15337423312883436, 0.21676891615541921, 0.32106339468302658, 0.28016359918200406], 17604: [0.09815950920245399, 0.16564417177914109, 0.2822085889570552, 0.22903885480572597, 0.17177914110429449, 0.087934560327198361, 0.14723926380368099, 0.16768916155419222, 0.20654396728016361, 0.15337423312883436, 0.21676891615541921, 0.32924335378323111, 0.29038854805725972, 0.30061349693251532], 17115: [0.096114519427402859, 0.16155419222903886, 0.27811860940695299, 0.22903885480572597, 0.17586912065439672, 0.094069529652351741, 0.14519427402862986, 0.16768916155419222, 0.21063394683026584, 0.15746421267893659, 0.21676891615541921, 0.32719836400817998, 0.28425357873210633, 0.30061349693251532, 0.27607361963190186], 16626: [0.09202453987730061, 0.15950920245398773, 0.28629856850715746, 0.22494887525562371, 0.16973415132924335, 0.094069529652351741, 0.14314928425357873, 0.16564417177914109, 0.20449897750511248, 0.16359918200408999, 0.22085889570552147, 0.33537832310838445, 0.28016359918200406, 0.29856850715746419, 0.2822085889570552, 0.24130879345603273], 16137: [0.094069529652351741, 0.15746421267893659, 0.2822085889570552, 0.2310838445807771, 0.17177914110429449, 0.096114519427402859, 0.14519427402862986, 0.17586912065439672, 0.21063394683026584, 0.16359918200408999, 0.22290388548057261, 0.32719836400817998, 0.28834355828220859, 0.30265848670756645, 0.28016359918200406, 0.2474437627811861, 0.33537832310838445], 15648: [0.09202453987730061, 0.16973415132924335, 0.2822085889570552, 0.22699386503067484, 0.17995910020449898, 
0.094069529652351741, 0.14519427402862986, 0.17177914110429449, 0.21267893660531698, 0.16155419222903886, 0.22494887525562371, 0.32924335378323111, 0.29038854805725972, 0.30674846625766872, 0.29038854805725972, 0.24130879345603273, 0.33333333333333331, 0.28629856850715746], 15159: [0.096114519427402859, 0.16768916155419222, 0.28425357873210633, 0.22085889570552147, 0.17177914110429449, 0.09202453987730061, 0.1411042944785276, 0.17791411042944785, 0.21676891615541921, 0.15746421267893659, 0.21676891615541921, 0.33128834355828218, 0.29243353783231085, 0.29856850715746419, 0.29038854805725972, 0.23517382413087934, 0.33946830265848671, 0.27811860940695299, 0.2392638036809816], 14670: [0.094069529652351741, 0.16359918200408999, 0.28629856850715746, 0.22494887525562371, 0.17177914110429449, 0.09202453987730061, 0.14519427402862986, 0.17586912065439672, 0.21881390593047034, 0.15950920245398773, 0.21881390593047034, 0.33128834355828218, 0.29038854805725972, 0.29652351738241312, 0.29447852760736198, 0.22903885480572597, 0.34969325153374231, 0.28834355828220859, 0.24130879345603273, 0.25153374233128833], 14181: [0.096114519427402859, 0.17177914110429449, 0.28834355828220859, 0.2310838445807771, 0.17382413087934559, 0.094069529652351741, 0.14519427402862986, 0.17586912065439672, 0.20654396728016361, 0.17177914110429449, 0.21267893660531698, 0.33537832310838445, 0.29447852760736198, 0.30265848670756645, 0.29243353783231085, 0.24539877300613497, 0.34764826175869118, 0.2822085889570552, 0.23517382413087934, 0.25971370143149286, 0.26789366053169733], 13692: [0.10020449897750511, 0.16564417177914109, 0.29038854805725972, 0.23517382413087934, 0.17995910020449898, 0.087934560327198361, 0.14519427402862986, 0.17791411042944785, 0.20858895705521471, 0.16973415132924335, 0.21881390593047034, 0.33128834355828218, 0.29856850715746419, 0.30061349693251532, 0.28834355828220859, 0.24130879345603273, 0.35173824130879344, 0.28834355828220859, 0.24130879345603273, 0.26175869120654399, 0.27402862985685073, 0.32310838445807771], 13203: [0.09815950920245399, 0.17382413087934559, 0.29038854805725972, 0.24539877300613497, 0.17995910020449898, 0.089979550102249492, 0.1492842535787321, 0.17791411042944785, 0.21472392638036811, 0.17177914110429449, 0.21676891615541921, 0.33128834355828218, 0.30674846625766872, 0.30061349693251532, 0.28834355828220859, 0.24539877300613497, 0.35378323108384457, 0.28425357873210633, 0.24335378323108384, 0.26175869120654399, 0.27811860940695299, 0.32310838445807771, 0.3619631901840491], 12714: [0.10224948875255624, 0.16768916155419222, 0.28834355828220859, 0.23312883435582821, 0.17177914110429449, 0.096114519427402859, 0.15950920245398773, 0.17177914110429449, 0.21267893660531698, 0.16768916155419222, 0.21676891615541921, 0.33946830265848671, 0.30061349693251532, 0.29856850715746419, 0.29243353783231085, 0.25153374233128833, 0.34355828220858897, 0.29038854805725972, 0.24539877300613497, 0.26380368098159507, 0.2822085889570552, 0.32310838445807771, 0.35787321063394684, 0.35378323108384457], 12225: [0.10224948875255624, 0.16768916155419222, 0.28834355828220859, 0.2392638036809816, 0.17382413087934559, 0.094069529652351741, 0.15541922290388549, 0.17382413087934559, 0.21881390593047034, 0.17586912065439672, 0.21676891615541921, 0.33742331288343558, 0.29652351738241312, 0.29243353783231085, 0.28834355828220859, 0.25153374233128833, 0.35173824130879344, 0.27402862985685073, 0.25153374233128833, 0.26175869120654399, 0.28425357873210633, 0.32515337423312884, 0.36400817995910023, 0.34969325153374231, 
0.33333333333333331], 11736: [0.10224948875255624, 0.17177914110429449, 0.29447852760736198, 0.24335378323108384, 0.17382413087934559, 0.10224948875255624, 0.14723926380368099, 0.17791411042944785, 0.22494887525562371, 0.17177914110429449, 0.22290388548057261, 0.33742331288343558, 0.29652351738241312, 0.29038854805725972, 0.29652351738241312, 0.25153374233128833, 0.34969325153374231, 0.27811860940695299, 0.25357873210633947, 0.25153374233128833, 0.28425357873210633, 0.32719836400817998, 0.35991820040899797, 0.35378323108384457, 0.34764826175869118, 0.3783231083844581], 11247: [0.10224948875255624, 0.18200408997955012, 0.30470347648261759, 0.24948875255623723, 0.17177914110429449, 0.10020449897750511, 0.14519427402862986, 0.16973415132924335, 0.21472392638036811, 0.16768916155419222, 0.22699386503067484, 0.33742331288343558, 0.29652351738241312, 0.29038854805725972, 0.28629856850715746, 0.25766871165644173, 0.35582822085889571, 0.30265848670756645, 0.25153374233128833, 0.2556237218813906, 0.28016359918200406, 0.33537832310838445, 0.3721881390593047, 0.35991820040899797, 0.33742331288343558, 0.37423312883435583, 0.33742331288343558], 10758: [0.10633946830265849, 0.18813905930470348, 0.30470347648261759, 0.26175869120654399, 0.18404907975460122, 0.10429447852760736, 0.15132924335378323, 0.18609406952965235, 0.21063394683026584, 0.17586912065439672, 0.22903885480572597, 0.33537832310838445, 0.30265848670756645, 0.30265848670756645, 0.29652351738241312, 0.25971370143149286, 0.35787321063394684, 0.29038854805725972, 0.26789366053169733, 0.25153374233128833, 0.27811860940695299, 0.33128834355828218, 0.36809815950920244, 0.35787321063394684, 0.34969325153374231, 0.36400817995910023, 0.32719836400817998, 0.33333333333333331], 10269: [0.10838445807770961, 0.18200408997955012, 0.30470347648261759, 0.27607361963190186, 0.18200408997955012, 0.10224948875255624, 0.15950920245398773, 0.18609406952965235, 0.21881390593047034, 0.17586912065439672, 0.23721881390593047, 0.34151329243353784, 0.30061349693251532, 0.29447852760736198, 0.30061349693251532, 0.25357873210633947, 0.35173824130879344, 0.30265848670756645, 0.25971370143149286, 0.25766871165644173, 0.26993865030674846, 0.32515337423312884, 0.36605316973415131, 0.35787321063394684, 0.34969325153374231, 0.36605316973415131, 0.34151329243353784, 0.33742331288343558, 0.30265848670756645], 9780: [0.10838445807770961, 0.19427402862985685, 0.30674846625766872, 0.28425357873210633, 0.18200408997955012, 0.10633946830265849, 0.16564417177914109, 0.18404907975460122, 0.21881390593047034, 0.18609406952965235, 0.22699386503067484, 0.34969325153374231, 0.30470347648261759, 0.29038854805725972, 0.30674846625766872, 0.26175869120654399, 0.34969325153374231, 0.30470347648261759, 0.2658486707566462, 0.27402862985685073, 0.28629856850715746, 0.33537832310838445, 0.35173824130879344, 0.35582822085889571, 0.34355828220858897, 0.36605316973415131, 0.32924335378323111, 0.33946830265848671, 0.30265848670756645, 0.36400817995910023], 9291: [0.11042944785276074, 0.18813905930470348, 0.31697341513292432, 0.29243353783231085, 0.18404907975460122, 0.10838445807770961, 0.17791411042944785, 0.18813905930470348, 0.21881390593047034, 0.18813905930470348, 0.22290388548057261, 0.34969325153374231, 0.30061349693251532, 0.29447852760736198, 0.31083844580777098, 0.2658486707566462, 0.3456032719836401, 0.31083844580777098, 0.26993865030674846, 0.26175869120654399, 0.28425357873210633, 0.32310838445807771, 0.35173824130879344, 0.35582822085889571, 0.35582822085889571, 0.36809815950920244, 
0.32719836400817998, 0.32310838445807771, 0.31288343558282211, 0.36605316973415131, 0.29856850715746419], 8802: [0.11451942740286299, 0.19222903885480572, 0.32515337423312884, 0.29447852760736198, 0.19222903885480572, 0.10020449897750511, 0.18404907975460122, 0.19018404907975461, 0.21676891615541921, 0.18813905930470348, 0.22699386503067484, 0.34969325153374231, 0.30061349693251532, 0.28834355828220859, 0.30061349693251532, 0.26993865030674846, 0.35378323108384457, 0.31083844580777098, 0.2822085889570552, 0.27607361963190186, 0.29038854805725972, 0.32515337423312884, 0.35378323108384457, 0.37014314928425357, 0.35787321063394684, 0.35787321063394684, 0.33537832310838445, 0.33128834355828218, 0.30470347648261759, 0.3783231083844581, 0.30061349693251532, 0.2822085889570552], 8313: [0.1165644171779141, 0.19631901840490798, 0.32924335378323111, 0.29038854805725972, 0.19018404907975461, 0.10838445807770961, 0.19631901840490798, 0.18813905930470348, 0.22903885480572597, 0.18200408997955012, 0.22903885480572597, 0.34151329243353784, 0.29856850715746419, 0.30061349693251532, 0.29652351738241312, 0.26993865030674846, 0.35173824130879344, 0.30879345603271985, 0.28425357873210633, 0.27811860940695299, 0.29243353783231085, 0.31901840490797545, 0.35582822085889571, 0.3783231083844581, 0.35378323108384457, 0.36605316973415131, 0.3456032719836401, 0.34355828220858897, 0.31492842535787319, 0.36605316973415131, 0.29447852760736198, 0.29038854805725972, 0.32719836400817998], 7824: [0.1165644171779141, 0.19427402862985685, 0.33742331288343558, 0.29652351738241312, 0.19427402862985685, 0.11042944785276074, 0.20040899795501022, 0.19222903885480572, 0.22494887525562371, 0.18813905930470348, 0.2310838445807771, 0.33742331288343558, 0.30265848670756645, 0.29038854805725972, 0.28629856850715746, 0.2658486707566462, 0.35582822085889571, 0.30061349693251532, 0.30061349693251532, 0.28834355828220859, 0.29243353783231085, 0.32515337423312884, 0.35991820040899797, 0.37423312883435583, 0.35787321063394684, 0.36605316973415131, 0.33537832310838445, 0.34355828220858897, 0.31492842535787319, 0.3619631901840491, 0.28834355828220859, 0.28834355828220859, 0.32106339468302658, 0.28425357873210633], 7335: [0.12269938650306748, 0.19222903885480572, 0.33537832310838445, 0.30061349693251532, 0.19427402862985685, 0.11451942740286299, 0.20245398773006135, 0.19631901840490798, 0.24539877300613497, 0.19222903885480572, 0.23517382413087934, 0.33333333333333331, 0.29652351738241312, 0.29856850715746419, 0.30674846625766872, 0.26380368098159507, 0.36809815950920244, 0.29652351738241312, 0.30061349693251532, 0.2822085889570552, 0.29652351738241312, 0.31901840490797545, 0.35787321063394684, 0.36400817995910023, 0.35378323108384457, 0.36400817995910023, 0.32924335378323111, 0.35378323108384457, 0.30061349693251532, 0.35991820040899797, 0.30061349693251532, 0.27811860940695299, 0.33742331288343558, 0.29652351738241312, 0.25971370143149286], 6846: [0.12883435582822086, 0.19631901840490798, 0.33946830265848671, 0.29038854805725972, 0.19836400817995911, 0.12678936605316973, 0.21472392638036811, 0.19222903885480572, 0.24335378323108384, 0.19222903885480572, 0.22903885480572597, 0.3456032719836401, 0.30470347648261759, 0.28629856850715746, 0.30470347648261759, 0.26380368098159507, 0.35991820040899797, 0.29652351738241312, 0.31901840490797545, 0.26789366053169733, 0.30265848670756645, 0.32310838445807771, 0.35991820040899797, 0.35787321063394684, 0.35378323108384457, 0.36605316973415131, 0.33537832310838445, 0.34764826175869118, 
0.30879345603271985, 0.36400817995910023, 0.29243353783231085, 0.28834355828220859, 0.32924335378323111, 0.29038854805725972, 0.26993865030674846, 0.35378323108384457], 6357: [0.1329243353783231, 0.19836400817995911, 0.33333333333333331, 0.30470347648261759, 0.20040899795501022, 0.12269938650306748, 0.21676891615541921, 0.18813905930470348, 0.24130879345603273, 0.19222903885480572, 0.22494887525562371, 0.34151329243353784, 0.30470347648261759, 0.28629856850715746, 0.29243353783231085, 0.2658486707566462, 0.3619631901840491, 0.27402862985685073, 0.32924335378323111, 0.28016359918200406, 0.29652351738241312, 0.31492842535787319, 0.34764826175869118, 0.36605316973415131, 0.34764826175869118, 0.37014314928425357, 0.32924335378323111, 0.35787321063394684, 0.30061349693251532, 0.37014314928425357, 0.30061349693251532, 0.29447852760736198, 0.33128834355828218, 0.30061349693251532, 0.27198364008179959, 0.34969325153374231, 0.29243353783231085], 5868: [0.1411042944785276, 0.20245398773006135, 0.35173824130879344, 0.30879345603271985, 0.21063394683026584, 0.12065439672801637, 0.21472392638036811, 0.19018404907975461, 0.23721881390593047, 0.19836400817995911, 0.21063394683026584, 0.33128834355828218, 0.31901840490797545, 0.28834355828220859, 0.28425357873210633, 0.27402862985685073, 0.35582822085889571, 0.29652351738241312, 0.31901840490797545, 0.26993865030674846, 0.2822085889570552, 0.33333333333333331, 0.35582822085889571, 0.35787321063394684, 0.35582822085889571, 0.3721881390593047, 0.32924335378323111, 0.34969325153374231, 0.28834355828220859, 0.36605316973415131, 0.30674846625766872, 0.30470347648261759, 0.33946830265848671, 0.29652351738241312, 0.2658486707566462, 0.35582822085889571, 0.2822085889570552, 0.27607361963190186], 5379: [0.14314928425357873, 0.20449897750511248, 0.3456032719836401, 0.31492842535787319, 0.21676891615541921, 0.12678936605316973, 0.22085889570552147, 0.19018404907975461, 0.24335378323108384, 0.20449897750511248, 0.21063394683026584, 0.33333333333333331, 0.32719836400817998, 0.29243353783231085, 0.29038854805725972, 0.27811860940695299, 0.34969325153374231, 0.29856850715746419, 0.32515337423312884, 0.28629856850715746, 0.29038854805725972, 0.33742331288343558, 0.34969325153374231, 0.35173824130879344, 0.33946830265848671, 0.35378323108384457, 0.32106339468302658, 0.33537832310838445, 0.28016359918200406, 0.35378323108384457, 0.30265848670756645, 0.29038854805725972, 0.34151329243353784, 0.30879345603271985, 0.27607361963190186, 0.35173824130879344, 0.28425357873210633, 0.27402862985685073, 0.30674846625766872], 4890: [0.15132924335378323, 0.20858895705521471, 0.34969325153374231, 0.31697341513292432, 0.2310838445807771, 0.13905930470347649, 0.2310838445807771, 0.19427402862985685, 0.2474437627811861, 0.22085889570552147, 0.21881390593047034, 0.33742331288343558, 0.33742331288343558, 0.30470347648261759, 0.31083844580777098, 0.28016359918200406, 0.34764826175869118, 0.30061349693251532, 0.34151329243353784, 0.29038854805725972, 0.2822085889570552, 0.32106339468302658, 0.34764826175869118, 0.37627811860940696, 0.33946830265848671, 0.35378323108384457, 0.34764826175869118, 0.35173824130879344, 0.28425357873210633, 0.35582822085889571, 0.28629856850715746, 0.2822085889570552, 0.33128834355828218, 0.31083844580777098, 0.27402862985685073, 0.35991820040899797, 0.29038854805725972, 0.28834355828220859, 0.31083844580777098, 0.28016359918200406], 4401: [0.15337423312883436, 0.21472392638036811, 0.3619631901840491, 0.33128834355828218, 0.2474437627811861, 0.14723926380368099, 
0.24539877300613497, 0.20858895705521471, 0.24335378323108384, 0.21881390593047034, 0.21881390593047034, 0.32310838445807771, 0.32515337423312884, 0.30265848670756645, 0.30879345603271985, 0.28425357873210633, 0.33742331288343558, 0.2822085889570552, 0.32924335378323111, 0.28629856850715746, 0.28425357873210633, 0.31492842535787319, 0.35582822085889571, 0.3783231083844581, 0.34151329243353784, 0.36605316973415131, 0.34151329243353784, 0.3456032719836401, 0.28016359918200406, 0.3619631901840491, 0.30061349693251532, 0.27607361963190186, 0.33537832310838445, 0.31697341513292432, 0.27811860940695299, 0.34969325153374231, 0.27811860940695299, 0.28629856850715746, 0.30879345603271985, 0.2822085889570552, 0.32515337423312884], 3912: [0.17791411042944785, 0.21472392638036811, 0.36809815950920244, 0.32515337423312884, 0.24948875255623723, 0.15337423312883436, 0.22903885480572597, 0.21063394683026584, 0.24335378323108384, 0.21267893660531698, 0.22085889570552147, 0.31901840490797545, 0.31697341513292432, 0.29447852760736198, 0.29447852760736198, 0.2658486707566462, 0.35173824130879344, 0.28016359918200406, 0.33333333333333331, 0.29652351738241312, 0.28834355828220859, 0.31288343558282211, 0.34355828220858897, 0.37014314928425357, 0.34355828220858897, 0.35991820040899797, 0.33333333333333331, 0.33742331288343558, 0.27402862985685073, 0.36605316973415131, 0.31697341513292432, 0.28834355828220859, 0.32515337423312884, 0.30674846625766872, 0.26993865030674846, 0.36400817995910023, 0.28016359918200406, 0.29856850715746419, 0.29652351738241312, 0.28629856850715746, 0.33128834355828218, 0.30470347648261759], 3423: [0.19631901840490798, 0.21267893660531698, 0.3619631901840491, 0.33537832310838445, 0.2474437627811861, 0.1492842535787321, 0.24539877300613497, 0.23721881390593047, 0.2556237218813906, 0.21063394683026584, 0.22290388548057261, 0.33128834355828218, 0.32719836400817998, 0.29447852760736198, 0.31697341513292432, 0.24948875255623723, 0.34764826175869118, 0.2822085889570552, 0.31288343558282211, 0.29038854805725972, 0.30879345603271985, 0.30061349693251532, 0.35787321063394684, 0.3824130879345603, 0.34764826175869118, 0.35173824130879344, 0.33333333333333331, 0.33128834355828218, 0.26380368098159507, 0.3619631901840491, 0.29447852760736198, 0.30061349693251532, 0.33742331288343558, 0.30061349693251532, 0.26789366053169733, 0.3783231083844581, 0.29243353783231085, 0.28425357873210633, 0.30470347648261759, 0.27811860940695299, 0.34969325153374231, 0.29038854805725972, 0.22290388548057261], 2934: [0.19222903885480572, 0.24539877300613497, 0.36605316973415131, 0.33946830265848671, 0.23517382413087934, 0.17586912065439672, 0.2556237218813906, 0.2556237218813906, 0.25153374233128833, 0.20654396728016361, 0.21472392638036811, 0.31288343558282211, 0.31288343558282211, 0.31288343558282211, 0.30470347648261759, 0.25971370143149286, 0.34764826175869118, 0.27607361963190186, 0.31492842535787319, 0.29038854805725972, 0.28425357873210633, 0.28834355828220859, 0.36400817995910023, 0.37423312883435583, 0.33946830265848671, 0.37627811860940696, 0.33333333333333331, 0.31901840490797545, 0.27402862985685073, 0.33946830265848671, 0.29038854805725972, 0.27811860940695299, 0.32106339468302658, 0.27607361963190186, 0.24130879345603273, 0.3783231083844581, 0.28016359918200406, 0.29652351738241312, 0.30879345603271985, 0.2658486707566462, 0.33537832310838445, 0.30470347648261759, 0.23517382413087934, 0.30879345603271985], 2445: [0.19222903885480572, 0.24335378323108384, 0.38036809815950923, 0.34151329243353784, 
0.24539877300613497, 0.18813905930470348, 0.25153374233128833, 0.25153374233128833, 0.26175869120654399, 0.21676891615541921, 0.20654396728016361, 0.29447852760736198, 0.30879345603271985, 0.32515337423312884, 0.30879345603271985, 0.25153374233128833, 0.3456032719836401, 0.28425357873210633, 0.31697341513292432, 0.29652351738241312, 0.29856850715746419, 0.31288343558282211, 0.34355828220858897, 0.39468302658486709, 0.34151329243353784, 0.3721881390593047, 0.31901840490797545, 0.30674846625766872, 0.27402862985685073, 0.32106339468302658, 0.29447852760736198, 0.30265848670756645, 0.30879345603271985, 0.25971370143149286, 0.23721881390593047, 0.37423312883435583, 0.28834355828220859, 0.30879345603271985, 0.30674846625766872, 0.26789366053169733, 0.31083844580777098, 0.28629856850715746, 0.21881390593047034, 0.28425357873210633, 0.29652351738241312], 1956: [0.20245398773006135, 0.26993865030674846, 0.35991820040899797, 0.33333333333333331, 0.25357873210633947, 0.17791411042944785, 0.2474437627811861, 0.28425357873210633, 0.29038854805725972, 0.17177914110429449, 0.19222903885480572, 0.29652351738241312, 0.29856850715746419, 0.33946830265848671, 0.32515337423312884, 0.25766871165644173, 0.33742331288343558, 0.29038854805725972, 0.34151329243353784, 0.28834355828220859, 0.28834355828220859, 0.29038854805725972, 0.33333333333333331, 0.35991820040899797, 0.31901840490797545, 0.36400817995910023, 0.33742331288343558, 0.31492842535787319, 0.26380368098159507, 0.31492842535787319, 0.29243353783231085, 0.28834355828220859, 0.30879345603271985, 0.2658486707566462, 0.2310838445807771, 0.3783231083844581, 0.27607361963190186, 0.30265848670756645, 0.31697341513292432, 0.24130879345603273, 0.26993865030674846, 0.2556237218813906, 0.19427402862985685, 0.31492842535787319, 0.31901840490797545, 0.31697341513292432], 1467: [0.21063394683026584, 0.26993865030674846, 0.33742331288343558, 0.32924335378323111, 0.26993865030674846, 0.19018404907975461, 0.26380368098159507, 0.28834355828220859, 0.31083844580777098, 0.19427402862985685, 0.17382413087934559, 0.28425357873210633, 0.28629856850715746, 0.32310838445807771, 0.28016359918200406, 0.24539877300613497, 0.32106339468302658, 0.26175869120654399, 0.32310838445807771, 0.28629856850715746, 0.26380368098159507, 0.28629856850715746, 0.31492842535787319, 0.3456032719836401, 0.32924335378323111, 0.34151329243353784, 0.35378323108384457, 0.30061349693251532, 0.24539877300613497, 0.31492842535787319, 0.28629856850715746, 0.28425357873210633, 0.31288343558282211, 0.25971370143149286, 0.24539877300613497, 0.34355828220858897, 0.26175869120654399, 0.29243353783231085, 0.32310838445807771, 0.24948875255623723, 0.26380368098159507, 0.22903885480572597, 0.20858895705521471, 0.30061349693251532, 0.30674846625766872, 0.28629856850715746, 0.2822085889570552], 978: [0.22085889570552147, 0.26789366053169733, 0.33946830265848671, 0.35378323108384457, 0.27402862985685073, 0.19631901840490798, 0.26175869120654399, 0.28834355828220859, 0.29856850715746419, 0.20858895705521471, 0.17791411042944785, 0.29652351738241312, 0.28016359918200406, 0.31901840490797545, 0.25153374233128833, 0.26380368098159507, 0.31901840490797545, 0.24130879345603273, 0.31492842535787319, 0.27811860940695299, 0.26789366053169733, 0.25766871165644173, 0.30265848670756645, 0.32310838445807771, 0.30879345603271985, 0.31288343558282211, 0.32310838445807771, 0.29038854805725972, 0.23517382413087934, 0.29652351738241312, 0.2310838445807771, 0.26175869120654399, 0.29652351738241312, 0.22290388548057261, 
0.20654396728016361, 0.30879345603271985, 0.24130879345603273, 0.2658486707566462, 0.29652351738241312, 0.26789366053169733, 0.2392638036809816, 0.18813905930470348, 0.19631901840490798, 0.27607361963190186, 0.25971370143149286, 0.30265848670756645, 0.25766871165644173, 0.23312883435582821], 489: [0.21472392638036811, 0.21267893660531698, 0.32924335378323111, 0.35173824130879344, 0.29652351738241312, 0.20245398773006135, 0.24130879345603273, 0.30061349693251532, 0.29038854805725972, 0.20449897750511248, 0.18813905930470348, 0.23721881390593047, 0.27198364008179959, 0.2822085889570552, 0.20040899795501022, 0.21676891615541921, 0.29652351738241312, 0.18609406952965235, 0.28425357873210633, 0.2474437627811861, 0.23517382413087934, 0.21267893660531698, 0.26380368098159507, 0.26789366053169733, 0.26380368098159507, 0.25766871165644173, 0.2658486707566462, 0.24130879345603273, 0.19631901840490798, 0.22903885480572597, 0.19427402862985685, 0.23312883435582821, 0.29038854805725972, 0.18813905930470348, 0.16973415132924335, 0.25357873210633947, 0.17791411042944785, 0.20245398773006135, 0.25153374233128833, 0.23517382413087934, 0.2556237218813906, 0.13905930470347649, 0.15950920245398773, 0.24539877300613497, 0.22085889570552147, 0.23517382413087934, 0.25357873210633947, 0.17382413087934559, 0.39672801635991822]}
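# Note (assumption): the two dicts above appear to map a training-set size to
# the list of accuracies obtained at that size, one entry per fold of an
# incremental 50-fold evaluation. The largest size (23961) is reached by a
# single fold while the smallest (489) accumulates a score from every fold,
# which is why the lists grow as the size shrinks. `array` in the literals
# below presumably refers to `numpy.array`, imported earlier in this module.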
incremental_train_sizes_50_folds = array([ 489, 978, 1467, 1956, 2445, 2934, 3423, 3912, 4401, 4890, 5379, 5868,
6357, 6846, 7335, 7824, 8313, 8802, 9291, 9780, 10269, 10758, 11247, 11736,
12225, 12714, 13203, 13692, 14181, 14670, 15159, 15648, 16137, 16626, 17115, 17604,
18093, 18582, 19071, 19560, 20049, 20538, 21027, 21516, 22005, 22494, 22983, 23472,
23961])
incremental_train_scores_mean_50_folds = array([ 0.94641292, 0.94018405, 0.93508245, 0.92998133, 0.92539877, 0.92040807,
0.91610107, 0.91143247, 0.90711645, 0.903318, 0.89938555, 0.89571898,
0.89192165, 0.88847908, 0.88510274, 0.8819883, 0.87861655, 0.87507101,
0.87178365, 0.8690559, 0.86611193, 0.8634239, 0.86069042, 0.85786338,
0.85518855, 0.85250511, 0.85012629, 0.84780429, 0.84546392, 0.84356169,
0.84149767, 0.83968132, 0.83789902, 0.83605873, 0.83428571, 0.83245285,
0.83075052, 0.82938238, 0.8276965, 0.82601738, 0.82459641, 0.82349182,
0.82237803, 0.82158239, 0.82042263, 0.81958522, 0.81863406, 0.81820893,
0.81770377])
incremental_train_scores_std_50_folds = array([ 0.01849963, 0.01378789, 0.01221196, 0.01036566, 0.00951653, 0.0095293,
0.00920113, 0.00879358, 0.00817018, 0.00829866, 0.00816914, 0.00840314,
0.00921439, 0.00955778, 0.00966554, 0.01008684, 0.01065165, 0.0106473,
0.0110683, 0.01155044, 0.01137906, 0.01131299, 0.01164905, 0.01127409,
0.01067386, 0.01048885, 0.00997217, 0.00943623, 0.00849798, 0.00799113,
0.00751208, 0.00688614, 0.00636693, 0.00622729, 0.00592422, 0.00536307,
0.00523293, 0.00500458, 0.0051211, 0.00461855, 0.00442537, 0.00428366,
0.00362497, 0.0031117, 0.00208263, 0.00132304, 0.00030215, 0.00021302,
0. ])
incremental_test_scores_mean_50_folds = array([ 0.24009849, 0.26921438, 0.28260018, 0.28949942, 0.29202454, 0.29280535,
0.2946212, 0.29418639, 0.29457828, 0.29335378, 0.28850087, 0.28796685,
0.28740397, 0.28732106, 0.28425358, 0.28317094, 0.28282828, 0.27952454,
0.27653539, 0.27484663, 0.26937452, 0.26701724, 0.26168295, 0.25727544,
0.25071575, 0.24710293, 0.24290922, 0.23545269, 0.23001266, 0.22617587,
0.22354967, 0.22438082, 0.21953567, 0.2101227, 0.20804363, 0.20333041,
0.19207173, 0.18404908, 0.17159323, 0.16462168, 0.16950693, 0.1658998,
0.16681274, 0.16768916, 0.18241309, 0.18711656, 0.1724608, 0.12372188,
0.09611452])
incremental_test_scores_std_50_folds = array([ 0.04942445, 0.04132467, 0.04251402, 0.04883771, 0.04769812, 0.04882399,
0.05244483, 0.05212685, 0.05434039, 0.05685099, 0.05870295, 0.06312129,
0.06397167, 0.06475891, 0.06619849, 0.06858608, 0.07052225, 0.07211568,
0.07247594, 0.07426551, 0.0754153, 0.07650368, 0.08005695, 0.07839793,
0.07575411, 0.07573291, 0.0747032, 0.07206771, 0.07047588, 0.07223881,
0.07240794, 0.07394336, 0.0739362, 0.07114794, 0.07094068, 0.07254348,
0.06833713, 0.06574783, 0.05170119, 0.05388267, 0.05510474, 0.05699836,
0.05972988, 0.06344963, 0.06136878, 0.06931128, 0.07251949, 0.02760736,
0. ])
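# A minimal sketch (assumption) of how the *_sizes, *_scores_mean and
# *_scores_std arrays above can be rebuilt from the per-size dicts: sort the
# sizes ascending, then take the mean and the population standard deviation
# of each fold-score list. `summarize` is a hypothetical helper, not part of
# the original module.
import numpy as np


def summarize(per_size_scores):
    """Return (sizes, means, stds) for a {size: [fold scores]} dict,
    ordered by ascending training-set size."""
    sizes = np.array(sorted(per_size_scores))
    means = np.array([np.mean(per_size_scores[s]) for s in sizes])
    stds = np.array([np.std(per_size_scores[s]) for s in sizes])
    return sizes, means, stds


# e.g. summarize(incremental_train_accuracy_per_size_50_folds) should
# reproduce incremental_train_sizes_50_folds,
# incremental_train_scores_mean_50_folds and
# incremental_train_scores_std_50_folds (up to repr rounding; a single
# score per size yields a std of exactly 0, as in the trailing entries).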
normal_train_accuracy_per_size_4_folds = {18336: [0.82422556719022688], 12224: [0.8534031413612565], 6112: [0.90068717277486909]}
normal_test_accuracy_per_size_4_folds = {18336: [0.094210009813542689], 12224: [0.098135426889106966], 6112: [0.10091593065096501]}
normal_train_sizes_4_folds = array([ 6112, 12224, 18336])
normal_train_scores_mean_4_folds = array([ 0.90068717, 0.85340314, 0.82422557])
normal_train_scores_std_4_folds = array([ 0., 0., 0.])
normal_test_scores_mean_4_folds = array([ 0.10091593, 0.09813543, 0.09421001])
normal_test_scores_std_4_folds = array([ 0., 0., 0.])
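# Note: each normal_* run above and below records exactly one score per
# training size (a single fold evaluation), which is why every
# normal_*_scores_std_* array in this section is all zeros.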
normal_train_accuracy_per_size_6_folds = {20375: [0.82012269938650306], 16300: [0.83558282208588952], 12225: [0.85824130879345606], 8150: [0.88981595092024535], 4075: [0.92122699386503071]}
normal_test_accuracy_per_size_6_folds = {20375: [0.054969325153374236], 16300: [0.056441717791411043], 12225: [0.05865030674846626], 8150: [0.063803680981595098], 4075: [0.075828220858895706]}
normal_train_sizes_6_folds = array([ 4075, 8150, 12225, 16300, 20375])
normal_train_scores_mean_6_folds = array([ 0.92122699, 0.88981595, 0.85824131, 0.83558282, 0.8201227 ])
normal_train_scores_std_6_folds = array([ 0., 0., 0., 0., 0.])
normal_test_scores_mean_6_folds = array([ 0.07582822, 0.06380368, 0.05865031, 0.05644172, 0.05496933])
normal_test_scores_std_6_folds = array([ 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_8_folds = {21392: [0.81740837696335078], 18336: [0.82869764397905754], 15280: [0.84332460732984293], 12224: [0.86338350785340312], 9168: [0.88568935427574169], 6112: [0.90085078534031415], 3056: [0.91819371727748689]}
normal_test_accuracy_per_size_8_folds = {21392: [0.039241334205362979], 18336: [0.039568345323741004], 15280: [0.043819489862655332], 12224: [0.048724656638325703], 9168: [0.055591890124264222], 6112: [0.072596468279921514], 3056: [0.11118378024852844]}
normal_train_sizes_8_folds = array([ 3056, 6112, 9168, 12224, 15280, 18336, 21392])
normal_train_scores_mean_8_folds = array([ 0.91819372, 0.90085079, 0.88568935, 0.86338351, 0.84332461, 0.82869764,
0.81740838])
normal_train_scores_std_8_folds = array([ 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_8_folds = array([ 0.11118378, 0.07259647, 0.05559189, 0.04872466, 0.04381949, 0.03956835,
0.03924133])
normal_test_scores_std_8_folds = array([ 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_10_folds = {22005: [0.81799591002044991], 19560: [0.82617586912065444], 17115: [0.83651767455448434], 14670: [0.84935241990456711], 12225: [0.86633946830265851], 9780: [0.88548057259713697], 7335: [0.89788684389911388], 4890: [0.91942740286298563], 2445: [0.92842535787321068]}
normal_test_accuracy_per_size_10_folds = {22005: [0.11820040899795502], 19560: [0.11738241308793455], 17115: [0.12106339468302658], 14670: [0.12269938650306748], 12225: [0.12719836400817996], 9780: [0.14069529652351739], 7335: [0.16196319018404909], 4890: [0.17995910020449898], 2445: [0.19386503067484662]}
normal_train_sizes_10_folds = array([ 2445, 4890, 7335, 9780, 12225, 14670, 17115, 19560, 22005])
normal_train_scores_mean_10_folds = array([ 0.92842536, 0.9194274, 0.89788684, 0.88548057, 0.86633947, 0.84935242,
0.83651767, 0.82617587, 0.81799591])
normal_train_scores_std_10_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_10_folds = array([ 0.19386503, 0.1799591, 0.16196319, 0.1406953, 0.12719836, 0.12269939,
0.12106339, 0.11738241, 0.11820041])
normal_test_scores_std_10_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_15_folds = {22820: [0.81849255039439084], 21190: [0.82298253893345918], 19560: [0.83052147239263807], 17930: [0.83736754043502515], 16300: [0.84699386503067486], 14670: [0.85405589638718471], 13040: [0.86618098159509205], 11410: [0.87765118317265556], 9780: [0.88588957055214723], 8150: [0.89447852760736202], 6520: [0.90567484662576692], 4890: [0.91390593047034763], 3260: [0.91932515337423315], 1630: [0.93496932515337428]}
normal_test_accuracy_per_size_15_folds = {22820: [0.15276073619631902], 21190: [0.150920245398773], 19560: [0.15337423312883436], 17930: [0.15214723926380369], 16300: [0.15398773006134969], 14670: [0.15460122699386503], 13040: [0.15766871165644172], 11410: [0.16809815950920245], 9780: [0.17668711656441718], 8150: [0.18466257668711655], 6520: [0.19570552147239265], 4890: [0.20429447852760735], 3260: [0.21717791411042944], 1630: [0.23803680981595093]}
normal_train_sizes_15_folds = array([ 1630, 3260, 4890, 6520, 8150, 9780, 11410, 13040, 14670, 16300, 17930, 19560,
21190, 22820])
normal_train_scores_mean_15_folds = array([ 0.93496933, 0.91932515, 0.91390593, 0.90567485, 0.89447853, 0.88588957,
0.87765118, 0.86618098, 0.8540559, 0.84699387, 0.83736754, 0.83052147,
0.82298254, 0.81849255])
normal_train_scores_std_15_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_15_folds = array([ 0.23803681, 0.21717791, 0.20429448, 0.19570552, 0.18466258, 0.17668712,
0.16809816, 0.15766871, 0.15460123, 0.15398773, 0.15214724, 0.15337423,
0.15092025, 0.15276074])
normal_test_scores_std_15_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_train_accuracy_per_size_25_folds = {23472: [0.81799591002044991], 22494: [0.8191517738063484], 21516: [0.824595649749024], 20538: [0.82753919563735512], 19560: [0.83195296523517381], 18582: [0.8366698955978904], 17604: [0.84003635537377863], 16626: [0.84620473956453746], 15648: [0.85090746421267893], 14670: [0.85821404226312203], 13692: [0.86240140227870288], 12714: [0.871558911436212], 11736: [0.87585207907293794], 10758: [0.88213422569250788], 9780: [0.88486707566462164], 8802: [0.88559418314019545], 7824: [0.89187116564417179], 6846: [0.89789658194566169], 5868: [0.90184049079754602], 4890: [0.89959100204498976], 3912: [0.90030674846625769], 2934: [0.90149965916837083], 1956: [0.90286298568507162], 978: [0.89059304703476483]}
normal_test_accuracy_per_size_25_folds = {23472: [0.10633946830265849], 22494: [0.10531697341513292], 21516: [0.10736196319018405], 20538: [0.10736196319018405], 19560: [0.10531697341513292], 18582: [0.10736196319018405], 17604: [0.11349693251533742], 16626: [0.11042944785276074], 15648: [0.1165644171779141], 14670: [0.11349693251533742], 13692: [0.11451942740286299], 12714: [0.11758691206543967], 11736: [0.1196319018404908], 10758: [0.130879345603272], 9780: [0.1329243353783231], 8802: [0.13190184049079753], 7824: [0.13496932515337423], 6846: [0.13701431492842536], 5868: [0.14723926380368099], 4890: [0.15132924335378323], 3912: [0.15746421267893659], 2934: [0.17586912065439672], 1956: [0.19325153374233128], 978: [0.19529652351738241]}
normal_train_sizes_25_folds = array([ 978, 1956, 2934, 3912, 4890, 5868, 6846, 7824, 8802, 9780, 10758, 11736,
12714, 13692, 14670, 15648, 16626, 17604, 18582, 19560, 20538, 21516, 22494, 23472])
normal_train_scores_mean_25_folds = array([ 0.89059305, 0.90286299, 0.90149966, 0.90030675, 0.899591, 0.90184049,
0.89789658, 0.89187117, 0.88559418, 0.88486708, 0.88213423, 0.87585208,
0.87155891, 0.8624014, 0.85821404, 0.85090746, 0.84620474, 0.84003636,
0.8366699, 0.83195297, 0.8275392, 0.82459565, 0.81915177, 0.81799591])
normal_train_scores_std_25_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_25_folds = array([ 0.19529652, 0.19325153, 0.17586912, 0.15746421, 0.15132924, 0.14723926,
0.13701431, 0.13496933, 0.13190184, 0.13292434, 0.13087935, 0.1196319,
0.11758691, 0.11451943, 0.11349693, 0.11656442, 0.11042945, 0.11349693,
0.10736196, 0.10531697, 0.10736196, 0.10736196, 0.10531697, 0.10633947])
normal_test_scores_std_25_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.])
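# Note (assumption): the normal_*_50_folds data below appears to coincide
# with the first fold of the incremental 50-fold lists at the top of this
# section (spot-checks at sizes 23961, 978 and 489 match), again with a
# single score per size.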
normal_train_accuracy_per_size_50_folds = {23961: [0.81770376862401406], 23472: [0.81842194955691894], 22983: [0.81877909759387368], 22494: [0.82177469547434867], 22005: [0.82394910247670983], 21516: [0.82594348391894401], 21027: [0.8270319113520711], 20538: [0.82958418541240631], 20049: [0.8316624270537184], 19560: [0.83333333333333337], 19071: [0.83682030307797184], 18582: [0.83785383704660421], 18093: [0.84115403747305584], 17604: [0.84350147693705979], 17115: [0.84691790826760149], 16626: [0.84885119692048594], 16137: [0.85077771580839068], 15648: [0.8539749488752556], 15159: [0.85744442245530705], 14670: [0.8597818677573279], 14181: [0.86150483040688242], 13692: [0.86634531113058721], 13203: [0.87002953874119515], 12714: [0.8728173666823974], 12225: [0.87427402862985681], 11736: [0.87857873210633952], 11247: [0.88094603005245842], 10758: [0.87999628183677259], 10269: [0.88207225630538511], 9780: [0.88466257668711656], 9291: [0.884834786352384], 8802: [0.88604862531242901], 8313: [0.89125466137375198], 7824: [0.89391615541922287], 7335: [0.89747784594410362], 6846: [0.90037978381536665], 6357: [0.89948088721094854], 5868: [0.8979209270620313], 5379: [0.8947759806655512], 4890: [0.89427402862985683], 4401: [0.89502385821404229], 3912: [0.89570552147239269], 3423: [0.89862693543675143], 2934: [0.89604635310156777], 2445: [0.89775051124744376], 1956: [0.89570552147239269], 1467: [0.89229720518064082], 978: [0.91104294478527603], 489: [0.91820040899795496]}
normal_test_accuracy_per_size_50_folds = {23961: [0.096114519427402859], 23472: [0.096114519427402859], 22983: [0.096114519427402859], 22494: [0.096114519427402859], 22005: [0.09815950920245399], 21516: [0.096114519427402859], 21027: [0.09815950920245399], 20538: [0.096114519427402859], 20049: [0.096114519427402859], 19560: [0.09202453987730061], 19071: [0.096114519427402859], 18582: [0.094069529652351741], 18093: [0.096114519427402859], 17604: [0.09815950920245399], 17115: [0.096114519427402859], 16626: [0.09202453987730061], 16137: [0.094069529652351741], 15648: [0.09202453987730061], 15159: [0.096114519427402859], 14670: [0.094069529652351741], 14181: [0.096114519427402859], 13692: [0.10020449897750511], 13203: [0.09815950920245399], 12714: [0.10224948875255624], 12225: [0.10224948875255624], 11736: [0.10224948875255624], 11247: [0.10224948875255624], 10758: [0.10633946830265849], 10269: [0.10838445807770961], 9780: [0.10838445807770961], 9291: [0.11042944785276074], 8802: [0.11451942740286299], 8313: [0.1165644171779141], 7824: [0.1165644171779141], 7335: [0.12269938650306748], 6846: [0.12883435582822086], 6357: [0.1329243353783231], 5868: [0.1411042944785276], 5379: [0.14314928425357873], 4890: [0.15132924335378323], 4401: [0.15337423312883436], 3912: [0.17791411042944785], 3423: [0.19631901840490798], 2934: [0.19222903885480572], 2445: [0.19222903885480572], 1956: [0.20245398773006135], 1467: [0.21063394683026584], 978: [0.22085889570552147], 489: [0.21472392638036811]}
normal_train_sizes_50_folds = array([ 489, 978, 1467, 1956, 2445, 2934, 3423, 3912, 4401, 4890, 5379, 5868,
6357, 6846, 7335, 7824, 8313, 8802, 9291, 9780, 10269, 10758, 11247, 11736,
12225, 12714, 13203, 13692, 14181, 14670, 15159, 15648, 16137, 16626, 17115, 17604,
18093, 18582, 19071, 19560, 20049, 20538, 21027, 21516, 22005, 22494, 22983, 23472,
23961])
normal_train_scores_mean_50_folds = array([ 0.91820041, 0.91104294, 0.89229721, 0.89570552, 0.89775051, 0.89604635,
0.89862694, 0.89570552, 0.89502386, 0.89427403, 0.89477598, 0.89792093,
0.89948089, 0.90037978, 0.89747785, 0.89391616, 0.89125466, 0.88604863,
0.88483479, 0.88466258, 0.88207226, 0.87999628, 0.88094603, 0.87857873,
0.87427403, 0.87281737, 0.87002954, 0.86634531, 0.86150483, 0.85978187,
0.85744442, 0.85397495, 0.85077772, 0.8488512, 0.84691791, 0.84350148,
0.84115404, 0.83785384, 0.8368203, 0.83333333, 0.83166243, 0.82958419,
0.82703191, 0.82594348, 0.8239491, 0.8217747, 0.8187791, 0.81842195,
0.81770377])
normal_train_scores_std_50_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
normal_test_scores_mean_50_folds = array([ 0.21472393, 0.2208589, 0.21063395, 0.20245399, 0.19222904, 0.19222904,
0.19631902, 0.17791411, 0.15337423, 0.15132924, 0.14314928, 0.14110429,
0.13292434, 0.12883436, 0.12269939, 0.11656442, 0.11656442, 0.11451943,
0.11042945, 0.10838446, 0.10838446, 0.10633947, 0.10224949, 0.10224949,
0.10224949, 0.10224949, 0.09815951, 0.1002045, 0.09611452, 0.09406953,
0.09611452, 0.09202454, 0.09406953, 0.09202454, 0.09611452, 0.09815951,
0.09611452, 0.09406953, 0.09611452, 0.09202454, 0.09611452, 0.09611452,
0.09815951, 0.09611452, 0.09815951, 0.09611452, 0.09611452, 0.09611452,
0.09611452])
normal_test_scores_std_50_folds = array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
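# Note for readers of this excerpt: every *_scores_std array above is all
# zeros because each training size was measured exactly once per fold
# configuration (each accuracy dict maps a size to a single-element list),
# so there is no spread to report.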
data = {
"incremental": {
4: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_4_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_4_folds,
"train_sizes": incremental_train_sizes_4_folds,
"train_scores_mean": incremental_train_scores_mean_4_folds,
"train_scores_std": incremental_train_scores_std_4_folds,
"test_scores_mean": incremental_test_scores_mean_4_folds,
"test_scores_std": incremental_test_scores_std_4_folds
},
6: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_6_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_6_folds,
"train_sizes": incremental_train_sizes_6_folds,
"train_scores_mean": incremental_train_scores_mean_6_folds,
"train_scores_std": incremental_train_scores_std_6_folds,
"test_scores_mean": incremental_test_scores_mean_6_folds,
"test_scores_std": incremental_test_scores_std_6_folds
},
8: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_8_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_8_folds,
"train_sizes": incremental_train_sizes_8_folds,
"train_scores_mean": incremental_train_scores_mean_8_folds,
"train_scores_std": incremental_train_scores_std_8_folds,
"test_scores_mean": incremental_test_scores_mean_8_folds,
"test_scores_std": incremental_test_scores_std_8_folds
},
10: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_10_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_10_folds,
"train_sizes": incremental_train_sizes_10_folds,
"train_scores_mean": incremental_train_scores_mean_10_folds,
"train_scores_std": incremental_train_scores_std_10_folds,
"test_scores_mean": incremental_test_scores_mean_10_folds,
"test_scores_std": incremental_test_scores_std_10_folds
},
15: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_15_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_15_folds,
"train_sizes": incremental_train_sizes_15_folds,
"train_scores_mean": incremental_train_scores_mean_15_folds,
"train_scores_std": incremental_train_scores_std_15_folds,
"test_scores_mean": incremental_test_scores_mean_15_folds,
"test_scores_std": incremental_test_scores_std_15_folds
},
25: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_25_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_25_folds,
"train_sizes": incremental_train_sizes_25_folds,
"train_scores_mean": incremental_train_scores_mean_25_folds,
"train_scores_std": incremental_train_scores_std_25_folds,
"test_scores_mean": incremental_test_scores_mean_25_folds,
"test_scores_std": incremental_test_scores_std_25_folds
},
50: {
"train_accuracy_per_size": incremental_train_accuracy_per_size_50_folds,
"test_accuracy_per_size": incremental_test_accuracy_per_size_50_folds,
"train_sizes": incremental_train_sizes_50_folds,
"train_scores_mean": incremental_train_scores_mean_50_folds,
"train_scores_std": incremental_train_scores_std_50_folds,
"test_scores_mean": incremental_test_scores_mean_50_folds,
"test_scores_std": incremental_test_scores_std_50_folds
}
},
"normal": {
4: {
"train_accuracy_per_size": normal_train_accuracy_per_size_4_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_4_folds,
"train_sizes": normal_train_sizes_4_folds,
"train_scores_mean": normal_train_scores_mean_4_folds,
"train_scores_std": normal_train_scores_std_4_folds,
"test_scores_mean": normal_test_scores_mean_4_folds,
"test_scores_std": normal_test_scores_std_4_folds
},
6: {
"train_accuracy_per_size": normal_train_accuracy_per_size_6_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_6_folds,
"train_sizes": normal_train_sizes_6_folds,
"train_scores_mean": normal_train_scores_mean_6_folds,
"train_scores_std": normal_train_scores_std_6_folds,
"test_scores_mean": normal_test_scores_mean_6_folds,
"test_scores_std": normal_test_scores_std_6_folds
},
8: {
"train_accuracy_per_size": normal_train_accuracy_per_size_8_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_8_folds,
"train_sizes": normal_train_sizes_8_folds,
"train_scores_mean": normal_train_scores_mean_8_folds,
"train_scores_std": normal_train_scores_std_8_folds,
"test_scores_mean": normal_test_scores_mean_8_folds,
"test_scores_std": normal_test_scores_std_8_folds
},
10: {
"train_accuracy_per_size": normal_train_accuracy_per_size_10_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_10_folds,
"train_sizes": normal_train_sizes_10_folds,
"train_scores_mean": normal_train_scores_mean_10_folds,
"train_scores_std": normal_train_scores_std_10_folds,
"test_scores_mean": normal_test_scores_mean_10_folds,
"test_scores_std": normal_test_scores_std_10_folds
},
15: {
"train_accuracy_per_size": normal_train_accuracy_per_size_15_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_15_folds,
"train_sizes": normal_train_sizes_15_folds,
"train_scores_mean": normal_train_scores_mean_15_folds,
"train_scores_std": normal_train_scores_std_15_folds,
"test_scores_mean": normal_test_scores_mean_15_folds,
"test_scores_std": normal_test_scores_std_15_folds
},
25: {
"train_accuracy_per_size": normal_train_accuracy_per_size_25_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_25_folds,
"train_sizes": normal_train_sizes_25_folds,
"train_scores_mean": normal_train_scores_mean_25_folds,
"train_scores_std": normal_train_scores_std_25_folds,
"test_scores_mean": normal_test_scores_mean_25_folds,
"test_scores_std": normal_test_scores_std_25_folds
},
50: {
"train_accuracy_per_size": normal_train_accuracy_per_size_50_folds,
"test_accuracy_per_size": normal_test_accuracy_per_size_50_folds,
"train_sizes": normal_train_sizes_50_folds,
"train_scores_mean": normal_train_scores_mean_50_folds,
"train_scores_std": normal_train_scores_std_50_folds,
"test_scores_mean": normal_test_scores_mean_50_folds,
"test_scores_std": normal_test_scores_std_50_folds
}
}
}
for key, value in data.items():
# print("{}: {}".format(key, value)) # Debug
for subKey, subValue in value.items():
# print("{}: {}".format(subKey, subValue)) # Debug
# Then, we plot the aforementioned learning curves
title = "Learning Curves (Linear SVM without tuning, " + \
key + \
" approach, {} folds)".format(subKey)
fig = plot_learning_curve(title, "accuracy", \
subValue["train_sizes"], \
subValue["train_scores_mean"], \
subValue["train_scores_std"], \
subValue["test_scores_mean"], \
subValue["test_scores_std"])
name_file = "{}_learning_curves_{}_folds.png".format( \
key, subKey)
# save_file = None if not save_file \
# else os.path.join(current_dir, name_file)
save_file = os.path.join(current_dir, name_file)
if save_file:
plt.savefig(save_file, bbox_inches="tight")
plt.close(fig)
else:
plt.show()
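# For readers of this excerpt: `plot_learning_curve`, `current_dir`, and `plt`
# are defined earlier in this script and are not shown here. A minimal sketch
# of what such a helper might look like (an assumption for illustration, not
# the script's actual implementation):
#
#   def plot_learning_curve(title, ylabel, train_sizes, train_scores_mean,
#                           train_scores_std, test_scores_mean, test_scores_std):
#       fig = plt.figure()
#       plt.title(title)
#       plt.xlabel("Training examples")
#       plt.ylabel(ylabel)
#       # Shaded bands show +/- one standard deviation around each mean curve
#       plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
#                        train_scores_mean + train_scores_std, alpha=0.1, color="r")
#       plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
#                        test_scores_mean + test_scores_std, alpha=0.1, color="g")
#       plt.plot(train_sizes, train_scores_mean, "o-", color="r", label="Training score")
#       plt.plot(train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score")
#       plt.legend(loc="best")
#       return fig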
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 105,829
| 0
| -17
| 89
|
cf398266bba39f44a35399478e373fb7c8895c96
| 735
|
py
|
Python
|
pycopyql/cli.py
|
elazar/pycopyql
|
4c8384b847fcd9ef2811c12375fc5e9e63094b3e
|
[
"MIT"
] | 1
|
2018-08-02T18:42:34.000Z
|
2018-08-02T18:42:34.000Z
|
pycopyql/cli.py
|
elazar/pycopyql
|
4c8384b847fcd9ef2811c12375fc5e9e63094b3e
|
[
"MIT"
] | null | null | null |
pycopyql/cli.py
|
elazar/pycopyql
|
4c8384b847fcd9ef2811c12375fc5e9e63094b3e
|
[
"MIT"
] | null | null | null |
from .args import get_args
from .config import get_config, get_connection_config, get_engine, get_meta
from .query import query
from .export import get_exporter
def main():
"""
Provides a CLI entrypoint to access a database and export a subset of its
data in a specified format.
"""
args = get_args()
config = get_config(args.config)
connection_config = get_connection_config(config, args.connection)
engine = get_engine(connection_config)
export = get_exporter(args.format, config['exporters'])
connection = engine.connect()
meta = get_meta(engine)
resolver = connection_config['resolver']
data = query(connection, meta, resolver, args.query)
export(meta, data, args.output)
| 31.956522
| 77
| 0.726531
|
from .args import get_args
from .config import get_config, get_connection_config, get_engine, get_meta
from .query import query
from .export import get_exporter
def main():
"""
Provides a CLI entrypoint to access a database and export a subset of its
data in a specified format.
"""
args = get_args()
config = get_config(args.config)
connection_config = get_connection_config(config, args.connection)
engine = get_engine(connection_config)
export = get_exporter(args.format, config['exporters'])
connection = engine.connect()
meta = get_meta(engine)
resolver = connection_config['resolver']
data = query(connection, meta, resolver, args.query)
export(meta, data, args.output)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
aeb45e871f3a30dab24f8b244ee9dacf414f769a
| 258
|
py
|
Python
|
cride/circles/urls.py
|
daecazu/platziride
|
79782770d05e71823c7eb27fec76a3870c737689
|
[
"MIT"
] | null | null | null |
cride/circles/urls.py
|
daecazu/platziride
|
79782770d05e71823c7eb27fec76a3870c737689
|
[
"MIT"
] | 2
|
2020-03-03T20:29:18.000Z
|
2020-03-03T20:29:19.000Z
|
cride/circles/urls.py
|
daecazu/platziride
|
79782770d05e71823c7eb27fec76a3870c737689
|
[
"MIT"
] | null | null | null |
"""Circles URLs"""
# Django
from django.urls import path
# Views
from cride.circles.views import list_circles
from cride.circles.views import create_circle
urlpatterns = [
    path('circles/', list_circles),
    path('circles/create/', create_circle),
]
| 19.846154
| 45
| 0.732558
|
"""Circles URLs"""
# Django
from django.urls import path
# Views
from cride.circles.views import list_circles
from cride.circles.views import create_circle
urlpatterns = [
    path('circles/', list_circles),
    path('circles/create/', create_circle),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4cbc553ee05900bc18bb23ac67ed68a847770fd8
| 150
|
py
|
Python
|
billing/__init__.py
|
xprilion/django-customer-billing
|
82f8147d74ff62e84d9e57465b4d521434c48e49
|
[
"MIT"
] | null | null | null |
billing/__init__.py
|
xprilion/django-customer-billing
|
82f8147d74ff62e84d9e57465b4d521434c48e49
|
[
"MIT"
] | null | null | null |
billing/__init__.py
|
xprilion/django-customer-billing
|
82f8147d74ff62e84d9e57465b4d521434c48e49
|
[
"MIT"
] | 1
|
2020-06-25T22:55:48.000Z
|
2020-06-25T22:55:48.000Z
|
__version__ = '1.5.4'
__copyright__ = 'Copyright (c) 2018, Skioo SA'
__licence__ = 'MIT'
__URL__ = 'https://github.com/skioo/django-customer-billing'
| 30
| 60
| 0.726667
|
__version__ = '1.5.4'
__copyright__ = 'Copyright (c) 2018, Skioo SA'
__licence__ = 'MIT'
__URL__ = 'https://github.com/skioo/django-customer-billing'
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2a6cfbfefd1436bb7d9f94ae6539e54bf036f4d7
| 48,716
|
py
|
Python
|
ShotgunORM/SgFields.py
|
jonykalavera/python-shotgunorm
|
3b0a2b433030815631588ff709c8ffd3e9660476
|
[
"BSD-3-Clause"
] | null | null | null |
ShotgunORM/SgFields.py
|
jonykalavera/python-shotgunorm
|
3b0a2b433030815631588ff709c8ffd3e9660476
|
[
"BSD-3-Clause"
] | null | null | null |
ShotgunORM/SgFields.py
|
jonykalavera/python-shotgunorm
|
3b0a2b433030815631588ff709c8ffd3e9660476
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013, Nathan Dunsworth - NFXPlugins
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NFXPlugins nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NFXPLUGINS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__all__ = [
'SgFieldCheckbox',
'SgFieldColor',
'SgFieldColor2',
'SgFieldDate',
'SgFieldDateTime',
'SgFieldEntity',
'SgFieldEntityMulti',
'SgFieldFloat',
'SgFieldID',
'SgFieldImage',
'SgFieldInt',
'SgFieldSelectionList',
'SgFieldTagList',
'SgFieldText',
'SgFieldType',
'SgFieldUrl'
]
# Python imports
# This module imports
import ShotgunORM
# Register the fields.
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_CHECKBOX, SgFieldCheckbox)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR, SgFieldColor)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR2, SgFieldColor2)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE, SgFieldDate)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE_TIME, SgFieldDateTime)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_ENTITY, SgFieldEntity)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_FLOAT, SgFieldFloat)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_IMAGE, SgFieldImage)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_INT, SgFieldInt)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY, SgFieldEntityMulti)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SERIALIZABLE, SgFieldSerializable)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_STATUS_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SUMMARY, SgFieldSummary)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TAG_LIST, SgFieldTagList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TEXT, SgFieldText)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_URL, SgFieldUrl)
################################################################################
#
# Custom fields
#
################################################################################
| 24.956967
| 132
| 0.59812
|
# Copyright (c) 2013, Nathan Dunsworth - NFXPlugins
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NFXPlugins nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NFXPLUGINS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
__all__ = [
'SgFieldCheckbox',
'SgFieldColor',
'SgFieldColor2',
'SgFieldDate',
'SgFieldDateTime',
'SgFieldEntity',
'SgFieldEntityMulti',
'SgFieldFloat',
'SgFieldID',
'SgFieldImage',
'SgFieldInt',
'SgFieldSelectionList',
'SgFieldTagList',
'SgFieldText',
'SgFieldType',
'SgFieldUrl'
]
# Python imports
import calendar
import copy
import datetime
import os
import re
import threading
import urllib2
import webbrowser
# This module imports
import ShotgunORM
class SgFieldCheckbox(ShotgunORM.SgField):
'''
Entity field that stores a bool value for a checkbox.
'''
def _fromFieldData(self, sgData):
try:
sgData = bool(sgData)
except:
raise TypeError('%s invalid value type "%s", expected a bool' % (self, type(sgData).__name__))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_CHECKBOX
def _setValue(self, sgData):
try:
sgData = bool(sgData)
except:
raise TypeError('%s invalid value type "%s", expected a bool' % (self, type(sgData).__name__))
if self._value == sgData:
return False
self._value = sgData
return True
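# A minimal usage sketch of the field contract above (hypothetical entity and
# field names; assumes a connected ShotgunORM session):
#
#   # field = entity.field('sg_approved')  # an SgFieldCheckbox
#   # field.setValue(True)   # returns True only when the value actually changed
#   # field.setValue(True)   # second call returns False: nothing new to commit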
class SgFieldColor(ShotgunORM.SgField):
'''
Entity field that stores a list of 3 ints that represent a rgb color 0-255.
Example: [128, 128, 128]
'''
REGEXP_COLOR = re.compile(r'(\d+,\d+,\d+)')
def _fromFieldData(self, sgData):
if sgData == None:
result = self._value == sgData
if not result:
self._value = None
return result
try:
if not self.REGEXP_COLOR.match(sgData):
raise ValueError('invalid value %s' % sgData)
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise ValueError('%s invalid data from Shotgun "%s", expected a list of ints' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_COLOR
def _setValue(self, sgData):
if sgData == None:
result = self._value == sgData
if not result:
self._value = sgData
return result
try:
if isinstance(sgData, str):
if not self.REGEXP_COLOR.match(sgData):
raise ValueError('invalid value %s' % sgData)
else:
        if len(sgData) != 3:
raise ValueError('invalid value %s' % sgData)
sgData = '%d,%d,%d' % (sgData[0], sgData[1], sgData[2])
except:
raise TypeError('%s invalid value "%s", expected a list of three ints' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def _Value(self):
if self._value == None:
return None
result = []
for i in self._value.split(','):
result.append(int(i))
return result
class SgFieldColor2(ShotgunORM.SgField):
'''
Entity field that stores a list of 3 ints that represent a rgb color 0-255.
Fix the color return value for Task and Phase Entities color field.
Task and Phase Entities can have their color field set to a value that points
to the color field of the pipeline step or project they belong to.
Brilliant engineering to still call the return type "color" and not
differentiate the two I know right?
'''
REGEXP_COLOR = re.compile(r'(\d+,\d+,\d+)')
REGEXP_TASK_COLOR = re.compile(r'(\d+,\d+,\d+)|(pipeline_step)')
REGEXP_PHASE_COLOR = re.compile(r'(\d+,\d+,\d+)|(project)')
def __init__(self, name, label=None, sgFieldSchemaInfo=None):
super(SgFieldColor2, self).__init__(name, label=label, sgFieldSchemaInfo=sgFieldSchemaInfo)
self._regexp = self.REGEXP_COLOR
self._linkString = None
self._linkField = None
def _fromFieldData(self, sgData):
if sgData == None:
result = self._value == sgData
if not result:
self._value = None
return result
if not self._regexp.match(sgData):
raise ValueError('%s invalid color value "%s", expected format is "255,255,255" or "%s"' % (self, sgData, self._linkString))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_COLOR2
def _setValue(self, sgData):
if sgData == None:
result = self._value != None
self._value = None
return result
if isinstance(sgData, str):
if not self._regexp.match(sgData):
raise ValueError('%s invalid color value "%s", expected format is "255,255,255" or "%s"' % (self, sgData, self._linkString))
else:
if not isinstance(sgData, (tuple, list)):
raise TypeError('%s invalid value type "%s", expected a list' % (self, type(sgData).__name__))
if len(sgData) != 3:
raise ValueError('%s list len is not 3' % self)
try:
sgData = '%d,%d,%d' % tuple(sgData)
except:
raise ValueError('%s invalid color values %s' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def linkField(self):
'''
Returns the link field this color field can possibly link to.
'''
return self._linkField
def parentChanged(self):
'''
'''
parent = self.parentEntity()
if parent == None:
return
pType = parent.schemaInfo().name()
if pType == 'Task':
self._regexp = self.REGEXP_TASK_COLOR
self._linkString = 'pipeline_step'
self._linkField = 'step'
elif pType == 'Phase':
self._regexp = self.REGEXP_PHASE_COLOR
self._linkString = 'project'
self._linkField= 'project'
else:
self._regexp = self.REGEXP_COLOR
def value(self, linkEvaluate=True):
'''
Args:
* (bool) linkEvaluate:
When True and the color field is a link to another Entity's color field
the value of the linked color field will be returned.
If linkEvaluate is False a string may be returned instead of a list.
'''
result = super(SgFieldColor2, self).value()
if result == None:
return None
if not linkEvaluate and result == self._linkString:
return result
    parent = self.parentEntity()
    if parent == None:
      if result == self._linkString:
        return None
      newResult = []
      for i in result.split(','):
        newResult.append(int(i))
      return newResult
    if result == self._linkString:
      linkObj = self.parentEntity()[self._linkField]
      if linkObj == None:
        return None
      return linkObj['color']
    else:
      newResult = []
      for i in result.split(','):
        newResult.append(int(i))
      return newResult
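# Example of the link behavior implemented above (hypothetical Task whose
# color field is set to follow its pipeline step):
#   task.field('color').value()                    # -> e.g. [128, 128, 128] via step['color']
#   task.field('color').value(linkEvaluate=False)  # -> 'pipeline_step'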
class SgFieldDate(ShotgunORM.SgField):
'''
Entity field that stores a date string
Example: "1980-01-30".
'''
REGEXP = re.compile(r'^\d{4}-\d{2}-\d{2}')
def _fromFieldData(self, sgData):
if sgData != None:
sgData = str(sgData)
if not self.REGEXP.match(sgData):
raise ValueError('%s invalid date string from Shotgun "%s"' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_DATE
def _setValue(self, sgData):
if sgData != None:
if not isinstance(sgData, (str, unicode)):
raise TypeError('%s invalid type "%s", expected a string' % (self, type(sgData).__name__))
sgData = str(sgData)
if not self.REGEXP.match(sgData):
raise ValueError('%s invalid date string "%s"' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
class SgFieldDateTime(ShotgunORM.SgField):
'''
Entity field that stores a python datetime object.
'''
def _fromFieldData(self, sgData):
if sgData != None:
sgData = datetime.datetime(*sgData.timetuple()[:6], tzinfo=sgData.tzinfo)
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_DATE_TIME
def _setValue(self, sgData):
if sgData != None:
if not isinstance(sgData, datetime.datetime):
raise TypeError('%s invalid type "%s", expected a datetime obj' % (self, type(sgData).__name__))
sgData = datetime.datetime(*sgData.timetuple()[:6], tzinfo=sgData.tzinfo)
if self._value == sgData:
return False
self._value = sgData
return True
def _toFieldData(self):
result = self._value
if result == None:
return result
return datetime.datetime(*result.timetuple()[:6], tzinfo=result.tzinfo)
def _Value(self):
return self._toFieldData()
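# The timetuple()[:6] round-trip used above is a defensive copy that also
# truncates sub-second precision; e.g. (illustration only):
#   dt = datetime.datetime(2013, 1, 30, 12, 0, 0, 999)
#   datetime.datetime(*dt.timetuple()[:6], tzinfo=dt.tzinfo)
#   # -> datetime.datetime(2013, 1, 30, 12, 0) -- microseconds dropped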
class SgFieldEntity(ShotgunORM.SgField):
'''
Entity field that stores a link to another Entity.
'''
##############################################################################
#
# IMPORTANT!!!!
#
# Any changes to _fromFieldData, _setValue, _toFieldData, value functions
# should also be applied to the SgUserFieldAbstractEntity class.
#
##############################################################################
def _fromFieldData(self, sgData):
if sgData == None:
result = self._value != None
self._value = None
return result
try:
newValue = {
'type': sgData['type'],
'id': sgData['id']
}
      # This fixes the two Entities as their name field is only available when
      # returned as another Entity's field value.
if newValue['type'] in ['AppWelcome', 'Banner'] and sgData.has_key('name'):
newValue['name'] = sgData['name']
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise ValueError('%s invalid data from Shotgun "%s", expected a Shotgun formated Entity dict' % (self, sgData))
if newValue == self._value:
return False
parent = self.parentEntity()
self._value = newValue
return True
def returnType(self):
return self.RETURN_TYPE_ENTITY
def _setValue(self, sgData):
if sgData == None:
result = self._value != None
self._value = None
return result
if not isinstance(sgData, ShotgunORM.SgEntity):
raise TypeError('%s invalid value type "%s", expected a SgEntity' % (self, type(sgData).__name__))
valueTypes = self.valueTypes()
if valueTypes != None:
if len(valueTypes) > 0:
if not sgData.type in valueTypes:
raise ValueError('not a valid value Entiy type: %s, valid=%s' % (sgData.type, valueTypes))
if sgData['id'] == None:
raise RuntimeError('can not set field value to a Entity that has not been created in Shotgun yet')
parent = self.parentEntity()
if parent == None:
raise RuntimeError('field does not have a parent')
connection = parent.connection()
# Lord knows you shouldn't do this but if you build it people will try!
if connection.url() != sgData.connection().url():
raise ValueError('%s passed an Entity from another url' % self)
if self._value == sgData:
return False
self._value = sgData.toEntityFieldData()
return True
def _toFieldData(self):
if self._value == None:
return None
return dict(self._value)
def value(self, sgSyncFields=None):
'''
Returns the fields value as a Entity object.
Args:
* (list) sgSyncFields:
List of field names to populate the returned Entity with.
'''
value = super(SgFieldEntity, self).value()
parent = self.parentEntity()
if value == None or parent == None:
return None
connection = parent.connection()
if isinstance(sgSyncFields, dict):
sgSyncFields = sgSyncFields.get(parent.type, None)
elif isinstance(sgSyncFields, str):
sgSyncFields = [sgSyncFields]
if sgSyncFields == None:
sgSyncFields = connection.defaultEntityQueryFields(value['type'])
if len(sgSyncFields) <= 0:
sgSyncFields = None
else:
pullFields = set(sgSyncFields)
extraFields = []
if 'all' in pullFields:
pullFields.remove('all')
extraFields = parent.fieldNames()
if 'default' in pullFields:
pullFields.remove('default')
elif 'default' in pullFields:
pullFields.remove('default')
extraFields = connection.defaultEntityQueryFields(value['type'])
pullFields.update(extraFields)
if len(pullFields) >= 1:
sgSyncFields = list(pullFields)
else:
sgSyncFields = None
result = connection._createEntity(
value['type'],
value,
sgSyncFields=sgSyncFields
)
return result
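# Sketch of how sgSyncFields is consumed by value() above (hypothetical field
# names): a plain list pulls those fields for the linked Entity; a dict is
# first narrowed by the parent Entity's type; 'default' and 'all' expand to
# the connection defaults or every field.
#   # task.field('project').value(sgSyncFields=['name', 'sg_status'])
#   # task.field('project').value(sgSyncFields={'Task': ['default', 'name']})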
class SgFieldEntityMulti(ShotgunORM.SgField):
'''
Entity field that stores a list of links to other Entities.
Example: [Entity01, Entity02, ...]
'''
##############################################################################
#
# IMPORTANT!!!!
#
# Any changes to _fromFieldData, _setValue, _toFieldData, value functions
# should also be applied to the SgUserFieldAbstractMultiEntity class.
#
##############################################################################
def _fromFieldData(self, sgData):
if isinstance(sgData, (tuple, set)):
sgData = list(sgData)
if sgData in [None, []]:
result = self._value in [None, []]
if result:
self._value = self.defaultValue()
return result
newValue = []
try:
for i in sgData:
e = {
'type': i['type'],
'id': i['id']
}
if e in newValue:
continue
        # This fixes the two Entities as their name field is only available when
        # returned as another Entity's field value.
if e['type'] in ['AppWelcome', 'Banner'] and i.has_key('name'):
e['name'] = i['name']
newValue.append(e)
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
raise ValueError('%s invalid data from Shotgun "%s", expected a Shotgun formated Entity dict' % (self, sgData))
if self._value == newValue:
return False
self._value = newValue
return True
def returnType(self):
return self.RETURN_TYPE_MULTI_ENTITY
def _setValue(self, sgData):
if isinstance(sgData, (tuple, set)):
sgData = list(sgData)
if sgData in [None, []]:
result = self._value in [None, []]
if result:
self._value = self.defaultValue()
return result
if isinstance(sgData, ShotgunORM.SgEntity):
sgData = [sgData]
elif not isinstance(sgData, list):
raise TypeError('%s invalid value type "%s", expected a SgEntity or list' % (self, type(sgData).__name__))
else:
for i in sgData:
if not isinstance(i, ShotgunORM.SgEntity):
raise TypeError('%s invalid value type "%s", expected a SgEntity' % (self, type(i).__name__))
valueTypes = self.valueTypes()
if valueTypes != None:
if len(valueTypes) > 0:
for i in sgData:
if not i.type in valueTypes:
raise ValueError('not a valid value type: %s, valid=%s' % (i.type, valueTypes))
parent = self.parentEntity()
newValue = []
if parent == None:
for i in sgData:
if i['id'] == None:
raise RuntimeError('can not set field value to a SgEntity that has not been created in Shotgun yet')
edata = i.toEntityFieldData()
if edata in newValue:
continue
newValue.append(edata)
else:
connection = parent.connection()
for i in sgData:
if i['id'] == None:
raise RuntimeError('can not set field value to a SgEntity that has not been created in Shotgun yet')
# Lord knows you shouldn't do this but if you build it people will try!
if connection.url() != i.connection().url():
raise ValueError('%s passed an Entity from another url' % self)
edata = i.toEntityFieldData()
if edata in newValue:
continue
newValue.append(edata)
if self._value == newValue:
return False
self._value = newValue
return True
def _toFieldData(self):
if self._value == None:
return None
result = []
for i in self._value:
result.append(dict(i))
return result
def value(self, sgSyncFields=None):
'''
Returns the fields value as a list of Entity objects.
Args:
* (dict) sgSyncFields:
Dict of entity types and field names to populate the returned Entities
with.
'''
result = super(SgFieldEntityMulti, self).value()
if result in [None, []]:
return result
parent = self.parentEntity()
if parent == None:
return copy.deepcopy(result)
connection = parent.connection()
schema = connection.schema()
tmp = []
qEng = connection.queryEngine()
qEng.block()
try:
for i in result:
t = i['type']
iSyncFields = None
if sgSyncFields != None:
if sgSyncFields.has_key(t):
iFields = sgSyncFields[t]
if iFields == None:
iSyncFields = connection.defaultEntityQueryFields(t)
if len(iSyncFields) <= 0:
iSyncFields = None
else:
pullFields = []
if isinstance(iFields, str):
pullFields = set([iFields])
else:
pullFields = set(iFields)
extraFields = []
if 'all' in pullFields:
pullFields.remove('all')
extraFields = schema.entityInfo(t).fieldNames()
if 'default' in pullFields:
pullFields.remove('default')
elif 'default' in pullFields:
pullFields.remove('default')
extraFields = connection.defaultEntityQueryFields(t)
pullFields.update(extraFields)
if len(pullFields) >= 1:
iSyncFields = list(pullFields)
else:
iSyncFields = None
else:
iSyncFields = connection.defaultEntityQueryFields(t)
if len(iSyncFields) <= 0:
iSyncFields = None
else:
iSyncFields = connection.defaultEntityQueryFields(t)
entity = connection._createEntity(t, i, sgSyncFields=iSyncFields)
tmp.append(entity)
finally:
qEng.unblock()
return tmp
class SgFieldFloat(ShotgunORM.SgField):
'''
Entity field that stores a float.
'''
def _fromFieldData(self, sgData):
if sgData != None:
try:
sgData = float(sgData)
except:
raise ValueError('%s invalid data from Shotgun "%s", expected a float' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_FLOAT
def _setValue(self, sgData):
if sgData != None:
try:
sgData = float(sgData)
except:
raise TypeError('%s invalid value type "%s", expected a float' % (self, type(sgData).__name__))
if self._value == sgData:
return False
self._value = sgData
return True
class SgFieldInt(ShotgunORM.SgField):
'''
Entity field that stores an integer.
'''
def _fromFieldData(self, sgData):
if sgData != None:
try:
sgData = int(sgData)
except:
raise ValueError('%s invalid data from Shotgun "%s", expected a int' % (self, sgData))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_INT
def _setValue(self, sgData):
if sgData != None:
try:
sgData = int(sgData)
except:
raise TypeError('%s invalid value type "%s", expected a int' % (self, type(sgData).__name__))
if self._value == sgData:
return False
self._value = sgData
return True
class SgFieldSelectionList(ShotgunORM.SgField):
'''
Entity field that stores a text string that is from a list selection.
The field may contain a list of valid values which when the field is set are
compared and an Exception thrown when the value is not a valid one.
'''
def _fromFieldData(self, sgData):
if sgData == None:
result = self._value == sgData
if not result:
self._value = None
return result
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_LIST
def _setValue(self, sgData):
if sgData == None:
result = self._value == sgData
if result:
self._value = None
return result
if not isinstance(sgData, (str, unicode)):
raise TypeError('%s invalid type "%s", expected a string' % (self, type(sgData).__name__))
sgData = str(sgData)
if self._value == sgData:
return False
validValues = self.validValues()
if len(validValues) > 0:
if not sgData in validValues:
raise ValueError('%s invalid value "%s"' % (self, sgData))
self._value = sgData
return True
class SgFieldSerializable(ShotgunORM.SgField):
'''
Entity field that stores serializable data.
'''
def _fromFieldData(self, sgData):
if sgData in [None, {}]:
result = self._value in [None, {}]
if result:
self._value = None
return result
if not isinstance(sgData, dict):
raise ValueError('%s invalid data from Shotgun "%s", expected a dict' % (self, sgData))
if self._value == sgData:
return False
sgData = copy.deepcopy(sgData)
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_SERIALIZABLE
def _setValue(self, sgData):
if sgData == None:
result = self._value == sgData
if result:
self._value = None
return result
if not isinstance(sgData, dict):
raise TypeError('%s invalid value type "%s", expected a dict' % (self, type(sgData).__name__))
if self._value == sgData:
return False
sgData = copy.deepcopy(sgData)
self._value = sgData
return True
def _toFieldData(self):
if self._value == None:
return None
return copy.deepcopy(self._value)
def _Value(self):
return self._toFieldData()
class SgFieldSummary(ShotgunORM.SgField):
'''
Entity field that returns an Entity or list of Entities based on a search
expression.
Summary fields.
'''
DATE_REGEXP = re.compile(r'(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) UTC')
def __init__(self, name, label=None, sgFieldSchemaInfo=None):
super(SgFieldSummary, self).__init__(name, label=label, sgFieldSchemaInfo=sgFieldSchemaInfo)
self.__buildLock = threading.Lock()
summaryInfo = self.schemaInfo().summaryInfo()
if summaryInfo == None:
raise RuntimeError('invalid field schema info for summary info')
self._entityType = summaryInfo['entity_type']
self._filtersRaw = summaryInfo['filters']
self._summaryType = summaryInfo['summary_type']
self._summaryField = summaryInfo['summary_field']
self._summaryValue = summaryInfo['summary_value']
self._searchFilter = None
def _buildLogicalOp(self, conditions, info):
'''
Builds the logical operator search pattern and returns it.
'''
result = []
parent = self.parentEntity()
connection = parent.connection()
for c in conditions:
if c.has_key('logical_operator'):
logicalOp = {
'conditions': self._buildLogicalOp(c['conditions'], info),
'logical_operator': c['logical_operator']
}
result.append(logicalOp)
else:
newValues = []
cInfo = info.fieldInfo(c['path'])
cType = cInfo.returnType()
########################################################################
#
# Date and Date Time fields
#
########################################################################
if cType in [ShotgunORM.SgField.RETURN_TYPE_DATE, ShotgunORM.SgField.RETURN_TYPE_DATE_TIME]:
# http://stackoverflow.com/a/13287083
          def utc_to_local(utc_dt):
            # get integer timestamp to avoid precision loss
            timestamp = calendar.timegm(utc_dt.timetuple())
            local_dt = datetime.datetime.fromtimestamp(timestamp)
            assert utc_dt.resolution >= datetime.timedelta(microseconds=1)
            return local_dt.replace(microsecond=utc_dt.microsecond)
for v in c['values']:
if isinstance(v, dict):
if v.has_key('relative_day'):
time = datetime.time(*v['time'])
date = datetime.date.today()
rd = v['relative_day']
                if rd == 'tomorrow':
                  date = date + datetime.timedelta(days=1)
                elif rd == 'yesterday':
                  date = date - datetime.timedelta(days=1)
dt = datetime.datetime.combine(date, time)
# Relative day calcs use utc time!
                dt = dt.replace(tzinfo=None)
newValues.append(dt)
else:
newValues.append(v)
elif isinstance(v, str):
              search = self.DATE_REGEXP.match(v)
              if search:
                time = datetime.time(int(search.group(4)), int(search.group(5)), int(search.group(6)))
                date = datetime.date(int(search.group(1)), int(search.group(2)), int(search.group(3)))
                dt = datetime.datetime.combine(date, time)
                dt = dt.replace(tzinfo=None)
                newValues.append(utc_to_local(dt))
else:
newValues.append(v)
########################################################################
#
# Entity and Multi-Entity fields
#
########################################################################
elif cType in [ShotgunORM.SgField.RETURN_TYPE_ENTITY, ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY]:
for v in c['values']:
if v['name'] == 'Current %s' % parent.type:
newValues.append(parent.toEntityFieldData())
elif v['name'] == 'Me':
login = os.getenv('USERNAME')
user = connection.findOne('HumanUser', [['login', 'is', login]], ['login'])
if user == None:
                raise RuntimeError('summary field unable to find user "%s" in Shotgun' % login)
newValues.append(user.toEntityFieldData())
else:
newValues.append(v)
else:
# Do nothing
newValues = c['values']
c['values'] = newValues
del c['active']
result.append(c)
return result
def _buildSearchFilter(self):
'''
'''
opsRaw = copy.deepcopy(self._filtersRaw)
logicalOps = {
'conditions': self._buildLogicalOp(
opsRaw['conditions'],
self.parentEntity().connection().schema().entityInfo(self.entityType())
),
'logical_operator': opsRaw['logical_operator']
}
self._searchFilter = logicalOps
def _fromFieldData(self, sgData):
'''
Always return False for summary fields, they can not be set.
'''
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_SUMMARY
def _toFieldData(self):
result = self._value
if result == None:
return None
if isinstance(result, dict):
return copy.deepcopy(result)
return result
def entityType(self):
'''
Returns the type of Entity the summary field will return.
'''
return self._entityType
def hasCommit(self):
'''
Always returns False for summary fields.
'''
return False
def _invalidate(self):
'''
Deletes the search filter so its built again.
'''
self._searchFilter = None
def isEditable(self):
'''
Always return False for summary fields.
'''
return False
def isQueryable(self):
'''
Even though summary fields can be queried from Shotgun return False.
'''
return False
def setHasCommit(self, valid):
'''
Summary fields can't be committed, always returns False.
'''
return False
def setHasSyncUpdate(self, valid):
'''
    Summary fields can't be queried and thus can not be background pulled.
Always returns False.
'''
return False
def _setValue(self, value):
'''
Always return False for summary fields, they can not be set.
'''
return False
def _valueSg(self):
parent = self.parentEntity()
if parent == None or not parent.exists():
return None
connection = parent.connection()
with self.__buildLock:
if self._searchFilter == None:
self._buildSearchFilter()
searchExp = self._searchFilter
result = None
############################################################################
#
# Single record
#
############################################################################
if self._summaryType == 'single_record':
order = [
{
'field_name': self._summaryValue['column'],
'direction': self._summaryValue['direction']
}
]
result = connection._sg_find_one(self.entityType(), searchExp, order=order)
############################################################################
#
# Status percentage and list
#
############################################################################
elif self._summaryType.startswith('status_'):
sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])
if self._summaryType == 'status_percentage':
if len(sgSearch) <= 0:
result = 0
else:
validCount = 0
for e in sgSearch:
value = e.field(self._summaryField).value()
if value == self._summaryValue:
validCount += 1
if validCount <= 0:
result = 0.0
else:
result = float(validCount) / len(sgSearch)
elif self._summaryType == 'status_list':
if len(sgSearch) <= 0:
result = 'ip'
else:
value = sgSearch[0].field(self._summaryField).value()
for e in sgSearch[1:]:
v = e.field(self._summaryField).value()
if v != value:
# I have no clue why Shotgun always defaults this result to ip
# but whatevs yo.
value = 'ip'
break
result = value
############################################################################
#
# Record count
#
############################################################################
elif self._summaryType == 'record_count':
      # Don't use the ORM for this search; it's wasteful to build the classes
      # when all we are doing is taking the len of the search result.
sgSearch = connection._sg_find(self.entityType(), searchExp)
result = len(sgSearch)
elif self._summaryType == 'count':
searchExp = {
'conditions': [
searchExp,
{
#'active': 'true',
'path': self._summaryField,
'relation': 'is_not',
'values': [None]
}
],
'logical_operator': 'and'
}
      # Don't use the ORM for this search; it's wasteful to build the classes
      # when all we are doing is taking the len of the search result.
sgSearch = connection._sg_find(self.entityType(), searchExp, fields=[])
result = len(sgSearch)
############################################################################
#
# Sum
#
############################################################################
elif self._summaryType == 'sum':
sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])
if len(sgSearch) <= 0:
result = 0
else:
value = 0
for e in sgSearch:
v = e.field(self._summaryField).value()
if v != None:
value += v
result = value
############################################################################
#
# Min
#
############################################################################
elif self._summaryType == 'min':
sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])
if len(sgSearch) <= 0:
result = None
else:
value = sgSearch[0].field(self._summaryField).value()
for e in sgSearch[1:]:
v = e.field(self._summaryField).value()
if v != None:
value = min(v, value)
result = value
############################################################################
#
# Max
#
############################################################################
elif self._summaryType == 'max':
sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])
if len(sgSearch) <= 0:
result = None
else:
value = sgSearch[0].field(self._summaryField).value()
for e in sgSearch[1:]:
v = e.field(self._summaryField).value()
if v != None:
value = max(v, value)
result = value
############################################################################
#
# Average
#
############################################################################
elif self._summaryType == 'avg':
sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])
if len(sgSearch) <= 0:
result = 0
else:
value = sgSearch[0].field(self._summaryField).value()
for e in sgSearch[1:]:
v = e.field(self._summaryField).value()
if v != None:
value += v
value = float(value) / len(sgSearch)
result = value
############################################################################
#
# Percentage
#
############################################################################
elif self._summaryType == 'percentage':
sgSearch = connection.find(self.entityType(), searchExp, fields=[self._summaryField])
if len(sgSearch) <= 0:
result = 0
else:
value = 0
for e in sgSearch:
if e.field(self._summaryField).value() == self._summaryValue:
value += 1
if value >= 1:
value = float(value) / len(sgSearch)
result = value
return result
def _Value(self):
if self._value == None:
return None
if self._summaryType == 'single_record':
parent = self.parentEntity()
if parent == None:
return copy.deepcopy(self._value)
connection = parent.connection()
return connection._createEntity(self._value['type'], self._value)
return copy.deepcopy(self._value)
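# Standalone illustration of the 'percentage' summary math implemented in
# _valueSg() above (not part of the class):
#   values = ['fin', 'ip', 'fin', 'wtg']              # hypothetical statuses
#   pct = float(values.count('fin')) / len(values)    # -> 0.5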
class SgFieldTagList(ShotgunORM.SgField):
'''
Entity field that stores a list of strings.
The field may contain a list of valid values which when the field is set are
compared and an Exception thrown when the value is not a valid one.
'''
def _fromFieldData(self, sgData):
if isinstance(sgData, (tuple, set)):
sgData = list(sgData)
if sgData in [None, []]:
result = self._value in [None, []]
if result:
self._value = self.defaultValue()
return result
for i in sgData:
if not isinstance(i, str):
raise TypeError('%s invalid type "%s" in value "%s", expected a string' % (self, type(i).__name__, sgData))
sgData = list(set(sgData))
validValues = self.validValues()
if len(validValues) > 0:
for i in sgData:
if not i in validValues:
          raise ValueError('%s invalid value "%s", valid %s' % (self, i, validValues))
if self._value == sgData:
return False
self._value = sgData
return True
def returnType(self):
return self.RETURN_TYPE_TAG_LIST
def _setValue(self, sgData):
if isinstance(sgData, (tuple, set)):
sgData = list(sgData)
if sgData in [None, []]:
result = self._value in [None, []]
if result:
self._value = self.defaultValue()
return result
for i in sgData:
if not isinstance(i, str):
raise TypeError('%s invalid type "%s" in value "%s", expected a string' % (self, type(i).__name__, sgData))
sgData = list(set(sgData))
validValues = self.validValues()
if len(validValues) > 0:
for i in sgData:
if not i in validValues:
          raise ValueError('%s invalid value "%s", valid %s' % (self, i, validValues))
if self._value == sgData:
return False
self._value = sgData
return True
def _toFieldData(self):
result = self._value
if result == None:
return None
return list(result)
def _Value(self):
return self._toFieldData()
class SgFieldText(ShotgunORM.SgField):
'''
Entity field that stores a str.
'''
def _fromFieldData(self, sgData):
if self._value == sgData:
return False
self._value = str(sgData)
return True
def returnType(self):
return self.RETURN_TYPE_TEXT
def _setValue(self, sgData):
if sgData != None:
if not isinstance(sgData, (str, unicode)):
raise TypeError('%s invalid value type "%s", expected a str' % (self, type(sgData).__name__))
sgData = str(sgData)
if self._value == sgData:
return False
self._value = sgData
return True
class SgFieldImage(SgFieldText):
'''
See SgFieldText.
'''
def downloadThumbnail(self, path):
'''
Downloads the image to the specified path.
'''
url = self.value()
if url == None or url == '':
raise ValueError('%s value is empty' % self)
if os.path.exists(path) and os.path.isdir(path):
raise OSError('output path "%s" is a directory' % path)
try:
data = urllib2.urlopen(url)
      f = open(path, 'wb')
f.write(data.read())
f.close()
except Exception, e:
ShotgunORM.LoggerField.error('%(field)s: %(error)s', {
'field': self,
'error': e
})
      raise RuntimeError('%s an error occurred while downloading the file' % self)
return True
def openInBrowser(self):
'''
Opens the image in a web-browser
'''
url = self.value()
if url == None:
url = ''
webbrowser.open(url)
def returnType(self):
return self.RETURN_TYPE_IMAGE
def uploadThumbnail(self, path):
'''
    Uploads the specified image file and sets it as the Entity's thumbnail.
Returns the Attachment id.
'''
parent = self.parentEntity()
if not parent.exists():
raise RuntimeError('parent entity does not exist')
with self:
if self.hasCommit():
raise RuntimeError('can not upload a new thumbnail while the image field has an un-commited update')
parent = self.parentEntity()
      if parent == None or not parent.exists():
        raise RuntimeError('parent entity does not exist')
sgconnection = parent.connection().connection()
with ShotgunORM.SHOTGUN_API_LOCK:
sgResult = sgconnection.upload_thumbnail(parent.type, parent['id'], path)
parent.sync([self.name()])
return sgResult
def uploadFilmstripThumbnail(self, path):
'''
    Uploads the specified image file and sets it as the Entity's filmstrip
    thumbnail.
Returns the Attachment id.
Note:
This function is only valid for Version Entities.
'''
with self:
if self.hasCommit():
raise RuntimeError('can not upload a new thumbnail while the image field has an un-commited update')
parent = self.parentEntity()
if not parent.type == 'Version':
raise RuntimeError('only valid on Version Entities')
      if parent == None or not parent.exists():
        raise RuntimeError('parent entity does not exist')
sgconnection = parent.connection().connection()
sgResult = sgconnection.upload_filmstrip_thumbnail(parent.type, parent['id'], path)
parent.sync([self.name()])
return sgResult
class SgFieldUrl(ShotgunORM.SgField):
'''
Entity field that stores a url.
Example URL: {
'content_type': 'image/jpeg',
'link_type': 'upload',
'name': 'bob.jpg',
'url': 'http://www.owned.com/bob.jpg'
}
Example Local: {
'content_type': 'image/jpeg',
'link_type': 'local',
'name': 'bob.jpg',
'local_storage': 'c:/temp/bob.jpg'
}
'''
def _fromFieldData(self, sgData):
result = {}
if sgData == None:
result = self._value == None
if not result:
self._value = None
return result
if not isinstance(sgData, dict):
raise TypeError('%s invalid sgData "%s", expected a dict or string' % (self, sgData))
try:
result['link_type'] = sgData['link_type'].lower()
if result['link_type'] in ['upload', 'web']:
result['url'] = sgData['url']
else:
result['local_storage'] = sgData['local_storage']
result['name'] = sgData['name']
result['content_type'] = sgData.get('content_type', None)
except Exception, e:
ShotgunORM.LoggerField.warn(e)
raise TypeError('%s invalid sgData dict "%s"' % (self, sgData))
if not result['link_type'] in ['local', 'upload', 'web']:
raise ValueError('%s invalid link_type "%s"' % (self, result['link_type']))
if self._value == result:
return False
self._value = result
return True
def returnType(self):
return self.RETURN_TYPE_URL
def setValue(self, sgData):
return self.fromFieldData(sgData)
def _toFieldData(self):
if self._value == None:
return None
return copy.deepcopy(self._value)
def _Value(self):
return self._toFieldData()
def url(self, openInBrowser=False):
'''
Returns the url value.
When the arg "openInBrowser" is set to True then the returned URL will
also be opened in the operating systems default web-browser.
'''
data = self.value()
result = ''
if data == None:
result = ''
else:
try:
result = data['url']
except:
pass
if openInBrowser:
      webbrowser.open(result)
return result
# Register the fields.
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_CHECKBOX, SgFieldCheckbox)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR, SgFieldColor)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_COLOR2, SgFieldColor2)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE, SgFieldDate)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_DATE_TIME, SgFieldDateTime)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_ENTITY, SgFieldEntity)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_FLOAT, SgFieldFloat)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_IMAGE, SgFieldImage)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_INT, SgFieldInt)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_MULTI_ENTITY, SgFieldEntityMulti)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SERIALIZABLE, SgFieldSerializable)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_STATUS_LIST, SgFieldSelectionList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_SUMMARY, SgFieldSummary)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TAG_LIST, SgFieldTagList)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_TEXT, SgFieldText)
ShotgunORM.SgField.registerFieldClass(ShotgunORM.SgField.RETURN_TYPE_URL, SgFieldUrl)
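# A third-party field class could be hooked in through the same registry
# (hypothetical example; the subclass body is elided):
#
#   # class MyTextField(SgFieldText):
#   #   ...
#   # ShotgunORM.SgField.registerFieldClass(
#   #   ShotgunORM.SgField.RETURN_TYPE_TEXT, MyTextField)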
################################################################################
#
# Custom fields
#
################################################################################
class SgFieldID(SgFieldInt):
'''
  Field that returns the parent Entity's id.
'''
# Do not allow the field to lock, no point in it.
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
return False
def __init__(self, parentEntity, sgFieldSchemaInfo):
super(SgFieldID, self).__init__(None, None, sgFieldSchemaInfo)
self._SgField__setParentEntity(parentEntity)
self._SgField__valid = True
def invalidate(self):
'''
Does nothing for ID fields.
'''
return False
def isCacheable(self):
'''
Always returns False for ID fields.
'''
return False
def setHasSyncUpdate(self, valid):
'''
Always returns False for ID fields.
'''
return False
def setValid(self, valid):
'''
Always returns False for ID fields.
'''
return False
def setValueFromShotgun(self):
'''
Always returns False for ID fields.
'''
return False
def validate(self, forReal=False, force=False):
'''
Always returns False for ID fields.
'''
return False
def value(self):
'''
Returns the value of the ID field.
'''
return self._value
def _valueSg(self):
'''
Returns the value of the ID field.
For ID fields this will never query Shotgun.
'''
return self._value
class SgFieldType(SgFieldText):
'''
Field that returns the parent Entity's type.
'''
# Do not allow the field to lock, no point in it.
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
return False
def __init__(self, parentEntity, sgFieldSchemaInfo):
super(SgFieldType, self).__init__(None, None, sgFieldSchemaInfo)
self._SgField__setParentEntity(parentEntity)
self._SgField__valid = True
def invalidate(self):
'''
Always returns False for Type fields.
'''
return False
def isCacheable(self):
'''
Always returns False for Type fields.
'''
return False
def setHasSyncUpdate(self, valid):
'''
Always returns False for Type fields.
'''
return False
def setValid(self, valid):
'''
Always returns False for Type fields.
'''
return False
def setValueFromShotgun(self):
'''
Always returns False for Type fields.
'''
return False
def validate(self, forReal=False, force=False):
'''
Always returns False for Type fields.
'''
return False
def value(self):
'''
Returns the Entity type the field belongs to.
'''
return self._value
def _valueSg(self):
'''
Returns the Entity type the field belongs to.
For Type fields this will never query Shotgun.
'''
return self._value
| 0
| 0
| 0
| 44,478
| 0
| 0
| 0
| -56
| 568
|
b5961ce174a5e07a6544a64153caec0e5af3facd
| 3,763
|
py
|
Python
|
src/day16.py
|
blu3r4y/AdventOfCode2018
|
5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5
|
[
"MIT"
] | 2
|
2019-01-02T22:57:13.000Z
|
2019-05-07T23:13:25.000Z
|
src/day16.py
|
blu3r4y/AdventOfCode2018
|
5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5
|
[
"MIT"
] | null | null | null |
src/day16.py
|
blu3r4y/AdventOfCode2018
|
5ef6ee251f9184e0f66657d0eb8b5b129a6f93e5
|
[
"MIT"
] | 1
|
2021-12-06T12:38:26.000Z
|
2021-12-06T12:38:26.000Z
|
# Advent of Code 2018, Day 16
# (c) blu3r4y
from collections import namedtuple
OPERATIONS = ['addr', 'addi', 'mulr', 'muli', 'banr', 'bani', 'borr', 'bori',
'setr', 'seti', 'gtir', 'gtri', 'gtrr', 'eqir', 'eqri', 'eqrr']
Observation = namedtuple("Observation", ["instruction", "before", "after"])
if __name__ == "__main__":
print(part1(_parse(open(r"../assets/day16.txt").readlines())[0]))
print(part2(*_parse(open(r"../assets/day16.txt").readlines())))
| 28.08209
| 79
| 0.549296
|
# Advent of Code 2018, Day 16
# (c) blu3r4y
from collections import namedtuple
from parse import parse
OPERATIONS = ['addr', 'addi', 'mulr', 'muli', 'banr', 'bani', 'borr', 'bori',
'setr', 'seti', 'gtir', 'gtri', 'gtrr', 'eqir', 'eqri', 'eqrr']
Observation = namedtuple("Observation", ["instruction", "before", "after"])
def part1(observations):
three_or_more = 0
for obsv in observations:
# execute all possible candidates
num_matches = 0
for op in OPERATIONS:
if obsv.after == execute(obsv.instruction, obsv.before, op):
num_matches += 1
# count observations with three or more possible operations
if num_matches >= 3:
three_or_more += 1
return three_or_more
def part2(observations, program):
# store possible candidates for every opcode
operations = {i: set(OPERATIONS) for i in range(len(OPERATIONS))}
for obsv in observations:
matching_operations = set()
opcode = obsv.instruction[0]
# execute all possible candidates
for op in operations[opcode]:
if obsv.after == execute(obsv.instruction, obsv.before, op):
matching_operations.add(op)
# keep only the matching operations
operations[opcode] = matching_operations
# if we uniquely identified an operation ...
if len(matching_operations) == 1:
unique_op = next(iter(matching_operations))
# ... remove it from the other mappings
for key in set(operations.keys()) - {opcode}:
operations[key].discard(unique_op)
# map set values to scalar
operations = {i: ops.pop() for i, ops in operations.items()}
# interpret the program
reg = [0, 0, 0, 0]
for instruction in program:
reg = execute(instruction, reg, operations[instruction[0]])
return reg[0]
def execute(instruction, reg, op):
_, a, b, c = instruction
reg = list(reg) # copy register
if op == 'addr':
reg[c] = reg[a] + reg[b]
elif op == 'addi':
reg[c] = reg[a] + b
elif op == 'mulr':
reg[c] = reg[a] * reg[b]
elif op == 'muli':
reg[c] = reg[a] * b
elif op == 'banr':
reg[c] = reg[a] & reg[b]
elif op == 'bani':
reg[c] = reg[a] & b
elif op == 'borr':
reg[c] = reg[a] | reg[b]
elif op == 'bori':
reg[c] = reg[a] | b
elif op == 'setr':
reg[c] = reg[a]
elif op == 'seti':
reg[c] = a
elif op == 'gtir':
reg[c] = 1 if a > reg[b] else 0
elif op == 'gtri':
reg[c] = 1 if reg[a] > b else 0
elif op == 'gtrr':
reg[c] = 1 if reg[a] > reg[b] else 0
elif op == 'eqir':
reg[c] = 1 if a == reg[b] else 0
elif op == 'eqri':
reg[c] = 1 if reg[a] == b else 0
elif op == 'eqrr':
reg[c] = 1 if reg[a] == reg[b] else 0
return reg
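# Sanity check (sketch) using the worked example from the puzzle statement:
# with registers [3, 2, 1, 1], the instruction (9, 2, 1, 2) produces
# [3, 2, 2, 1] under exactly three operations -- mulr, addi and seti.
assert execute([9, 2, 1, 2], [3, 2, 1, 1], 'addi') == [3, 2, 2, 1]
assert execute([9, 2, 1, 2], [3, 2, 1, 1], 'mulr') == [3, 2, 2, 1]
assert execute([9, 2, 1, 2], [3, 2, 1, 1], 'seti') == [3, 2, 2, 1]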
def _parse(lines):
observations, program, i = [], [], 0
# parse observations
while i < len(lines):
before = parse("Before: [{:d}, {:d}, {:d}, {:d}]", lines[i].strip())
instruction = parse("{:d} {:d} {:d} {:d}", lines[i + 1].strip())
after = parse("After: [{:d}, {:d}, {:d}, {:d}]", lines[i + 2].strip())
i += 4
if not (before and after and instruction):
break
observations.append(Observation([*instruction], [*before], [*after]))
# parse program
for line in lines[i - 2:]:
program.append(list(map(int, line.strip().split(' '))))
return observations, program
if __name__ == "__main__":
print(part1(_parse(open(r"../assets/day16.txt").readlines())[0]))
print(part2(*_parse(open(r"../assets/day16.txt").readlines())))
| 0
| 0
| 0
| 0
| 0
| 3,161
| 0
| 2
| 115
|
d31ca228a7c49cb870496d2465087e6fff372030
| 2,685
|
py
|
Python
|
programming_languages_classification/test.py
|
contimatteo/Programming-Languages-Classification
|
34ccf1bd403f55226ed5131d57265df45d314b6f
|
[
"MIT"
] | 1
|
2022-03-18T12:54:52.000Z
|
2022-03-18T12:54:52.000Z
|
programming_languages_classification/test.py
|
contimatteo/Programming-Languages-Classification
|
34ccf1bd403f55226ed5131d57265df45d314b6f
|
[
"MIT"
] | 5
|
2021-11-10T19:58:25.000Z
|
2022-03-19T18:17:41.000Z
|
programming_languages_classification/test.py
|
contimatteo/programming-language-classifier
|
60847ab91cff4dc20ded1a024d272c75956194a0
|
[
"MIT"
] | null | null | null |
from keras.models import load_model
import os
import json
from utils import FileManager
##
global dictionary
global model
dictionaryUrl = os.path.join(FileManager.getRootUrl(), 'tmp/wordindex.json')
dictionary = json.loads(FileManager.readFile(dictionaryUrl))
modelUrl = os.path.join(FileManager.getRootUrl(), 'tmp/code_model.h5')
model = load_model(modelUrl)
##
##
if __name__ == "__main__":
main()
| 29.833333
| 106
| 0.633147
|
from keras.models import load_model
import keras.preprocessing.text as kpt
from keras.preprocessing.sequence import pad_sequences
import sys
import os
import json
import numpy as np
from utils import ConfigurationManager, FileManager
##
global dictionary
global model
dictionaryUrl = os.path.join(FileManager.getRootUrl(), 'tmp/wordindex.json')
dictionary = json.loads(FileManager.readFile(dictionaryUrl))
modelUrl = os.path.join(FileManager.getRootUrl(), 'tmp/code_model.h5')
model = load_model(modelUrl)
def convert_text_to_index_array(text):
# `text_to_word_sequence` tokenizes the text into words; each word found in
# the dictionary (within the 100000-word vocabulary cap) maps to its index,
# and unknown words map to 0. Padding the sequences to a common length
# happens later via `pad_sequences`.
wordvec = []
for word in kpt.text_to_word_sequence(text):
if word in dictionary:
if dictionary[word] <= 100000:
wordvec.append([dictionary[word]])
else:
wordvec.append([0])
else:
wordvec.append([0])
return wordvec
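# Example (sketch; the indices are illustrative, since they depend on the
# trained word index): a token present in the dictionary maps to [index],
# anything unknown maps to [0]:
#
#   convert_text_to_index_array("import foobarbaz")  # -> [[42], [0]]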
##
def main():
data = {"success": False}
languages = ConfigurationManager.getLanguages()
matched = 0
totalExamples = 0
for languageFolder in FileManager.getLanguagesFolders(FileManager.datasets['testing']['url']):
language = str(languageFolder.name).lower()
for exampleFolder in FileManager.getExamplesFolders(languageFolder.path):
totalExamples += 1
X_test = []
originalFileContent = FileManager.readFile(FileManager.getOriginalFileUrl(exampleFolder.path))
code_snip = originalFileContent
# print(code_snip, file=sys.stdout)
word_vec = convert_text_to_index_array(code_snip)
X_test.append(word_vec)
X_test = pad_sequences(X_test, maxlen=100)
# print(X_test[0].reshape(1,X_test.shape[1]), file=sys.stdout)
y_prob = model.predict(X_test[0].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]
a = np.array(y_prob)
idx = np.argmax(a)
if str(languages[idx]) == language:
matched += 1
# data["predictions"] = []
# for i in range(len(languages)):
# # print(languages[i], file=sys.stdout)
# r = {"label": languages[i], "probability": format(y_prob[i] * 100, '.2f')}
# data["predictions"].append(r)
print('')
print('')
print('totalExamples = ' + str(totalExamples))
print('matched = ' + str(matched))
print('matched / totalExamples = ' + str(matched / totalExamples))
print('')
print('')
##
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 2,077
| 0
| 58
| 134
|
2e93f597b4ad68b69e4599e8fc30321be3c05d7a
| 31,071
|
py
|
Python
|
src/Animate/Scripts.py
|
henkjannl/py-animate
|
dbc93c8a264ef008954901ea76286331ad1737ee
|
[
"MIT"
] | null | null | null |
src/Animate/Scripts.py
|
henkjannl/py-animate
|
dbc93c8a264ef008954901ea76286331ad1737ee
|
[
"MIT"
] | null | null | null |
src/Animate/Scripts.py
|
henkjannl/py-animate
|
dbc93c8a264ef008954901ea76286331ad1737ee
|
[
"MIT"
] | null | null | null |
#import time
import logging
LOG_FILENAME = '__logfile.txt'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
| 41.931174
| 186
| 0.491101
|
import pandas as pd
from PIL import Image # www.pythonware.com/library/pil/handbook
from PIL import ImageFont, ImageDraw, ImageEnhance
from PIL import ImageFilter
import os
#import time
import logging
from Animate.Items import *
from Animate.Properties import *
from Animate.Constants import *
LOG_FILENAME = '__logfile.txt'
logging.basicConfig(filename=LOG_FILENAME,level=logging.DEBUG)
def SelectFont(Directories, Fonts):
    for Font in Fonts:
        for Path in Directories:
            try:
                FontName = os.path.join(Path, Font)
                ImageFont.truetype(FontName, 20)  # probe that the font file loads
                return FontName
            except:
                logging.debug('%s not successful' % FontName)
    print('All attempts to load fonts failed')
def isNumber(somePandasValue):
if pd.isnull(somePandasValue):
return False
elif isinstance(somePandasValue, int):
return True
elif isinstance(somePandasValue, float):
return True
else:
return False
def isString(somePandasValue):
if pd.isnull(somePandasValue):
return False
elif isinstance(somePandasValue, str):
return True
else:
return False
class Script():
def __init__(self, FileName, SheetName, ScriptList):
logging.debug(' Script.__init__(%s, %s)' % (FileName, SheetName) )
self.FileName = FileName
self.SheetName = SheetName
self.ScriptList = ScriptList
self.IsCanvas = False
self.FirstImage = True
self.ImageDir = 'Pictures'
self.FirstFrame = 0 # Allows the processing of a subset of frames
self.LastFrame = -1
self.FramesPerSecond = 10
self.ShowTime = False # Display the time in each frame
self.Movie = False # Can be overridden by filename of movie
self.AnimatedGIF = False # Can be overridden by filename of animated gif
self.MaxTime = 0 # Largest time, retrieved from the parser
self.TimeOffset = 0.0 # Script, assembly or canvas can be run with an offset to the global time
self.Width = 800 # Width of the output image
self.Height = 600 # Height of the output image
self.Items = ItemDict() # Dictionary of items
# List of (time, item, back/front) tuples
self.Zbuffer = []
self.ZbufferIndex = 0
# List of Items
self.Zorder = []
# Picture that was processed last
self.Picture = False
self.PictureFrame = -1
def ParseScript(self, FileName, SheetName):
logging.debug(' Script.ParseScript(%s, %s)' % (FileName, SheetName))
# Open excel file with frame data
df = pd.read_excel(FileName, sheet_name=SheetName, header=None)
print(' - parsing script %s' % SheetName)
for Row in range(df.shape[0]):
# A row contains valid data if the first cell contains a number
if isNumber(df.loc[Row,0]):
time = df.loc[Row,0]
command = df.loc[Row,1].upper().strip()
if self.MaxTime<time: self.MaxTime=time
if command == 'WIDTH':
# Determine the width of the output frames
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number" % (command, Row+1, SheetName)
self.Width = int(df.loc[Row,2])
elif command == 'HEIGHT':
# Determine the height of the output frames
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number" % (command, Row+1, SheetName)
self.Height = int(df.loc[Row,2])
elif command == 'FRAMESPERSECOND':
# Sets the number of frames per second for the whole movie
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
assert (df.loc[Row,2] >0 ), \
"Frames per second in sheet %s at row %d should be larger than 0" % (SheetName, Row+1)
self.FramesPerSecond = df.loc[Row,2]
elif command == 'FIRSTFRAME':
# Determine the first frame to be processed,
# if not all frames must be processed. For debugging
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
self.FirstFrame = int(df.loc[Row,2])
elif command == 'LASTFRAME':
# Determine the last frame to be processed,
# if not all frames must be processed. For debugging
assert isNumber(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a number in column C" % (command, Row+1, SheetName)
self.LastFrame = int(df.loc[Row,2])
elif command == 'SHOWTIME':
# Write the time in the lower left corner of the frames, for debug purposes
self.ShowTime = True
elif command == 'HIDETIME':
# Do not write the time
self.ShowTime = False
elif command == 'MOVIE':
# Sets the number of frames per second for the whole movie
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a filename for the movie" % (command, Row+1, SheetName)
self.Movie= df.loc[Row,2]
print(" - movie {movie} will be created after generating the frames".format(movie=self.Movie))
elif command == 'ANIMATEDGIF':
# Sets the number of frames per second for the whole movie
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a filename for the animated gif" % (command, Row+1, SheetName)
self.AnimatedGIF= df.loc[Row,2]
print("- animated GIF {gif} will be created after generating the frames".format(gif=self.AnimatedGIF))
elif command == 'TABLE':
# Do not create a new script object, but import the commands in the current script
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the table name" % (command, Row+1, SheetName)
sheetname = df.loc[Row,2].strip()
self.ParseTable(self.FileName, sheetname)
elif command == 'SCRIPT':
# Do not create a new script object, but import the commands in the current script
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the script name" % (command, Row+1, SheetName)
sheetname = df.loc[Row,2].strip()
self.ParseScript(self.FileName, sheetname)
elif command == 'ASSEMBLY':
# Create a new script object and use the image created by this
# script as feed for this item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the assembly name" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the sheet name" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
sheetname = df.loc[Row,3]
# If the script is not yet in the list, create it
if not sheetname in self.ScriptList:
NewScript = Script(FileName, sheetname, self.ScriptList)
self.ScriptList[sheetname] = NewScript
NewScript.ParseScript(FileName, sheetname)
# Assign the script to the item
# ToDo: Implement item type directly
# ToDo: Implement change of script as function of time
self.Items[itemname].AddScript( time, sheetname )
elif command == 'CANVAS':
# A canvas is an assembly of which the background is not reset for a new frame
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the item tag" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the sheet name" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
sheetname = df.loc[Row,3]
# If the script is not yet in the list, create it
if not sheetname in self.ScriptList:
NewScript = Script(FileName, sheetname, self.ScriptList)
NewScript.IsCanvas = True
self.ScriptList[sheetname] = NewScript
NewScript.ParseScript(FileName, sheetname)
# Assign the canvas script to the item
# ToDo: Implement item type directly
# ToDo: Implement change of script as function of time
self.Items[itemname].AddCanvas( time, sheetname )
elif command == 'IMAGE':
# Assign a new filename for an image item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the item name" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the filename" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
filename = os.path.join(self.ImageDir, df.loc[Row,3])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddImage( time, filename )
elif command == 'MASK':
# Assign a new filename for a mask item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string for the item tag" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string for the filename" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
filename = os.path.join(self.ImageDir, df.loc[Row,3])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddMask( time, filename )
elif command == 'TEXT':
# Assign a new title for a text item
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
title = df.loc[Row,3]
self.Items[itemname].AddText( time, title )
elif command in ['XPOS', 'YPOS', 'XPOLE', 'YPOLE', 'XSCALE', 'YSCALE', 'ROTATION',
'TIMEOFFSET', 'TEXTSIZE', 'OPACITY']:
# Set a new numeric property value
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects an item name in column C" % (command, Row+1, SheetName)
assert isNumber(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a number in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
value = df.loc[Row,3]
self.Items[itemname].Properties[command].Append(time, value)
elif command in ['XMOVE', 'YMOVE', 'SXMOVE', 'SYMOVE', 'RMOVE', 'OMOVE']:
# Determine linear or cycloid movement
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
move = df.loc[Row,3].strip().upper()
if move in CheckMove:
self.Items[itemname].Properties[command].Append(time, CheckMove[move])
else:
print("Did not recognize type of movement on row %d." % (Row+1))
elif command in ['TEXTCOLOR', 'FONT']:
# Set a new text color or font
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
assert isString(df.loc[Row,3]), \
"%s at row %d of sheet %s expects a string in column D" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
textcolor = df.loc[Row,3].strip()
self.Items[itemname].Properties[command].Append(time, textcolor)
elif command == 'BRINGTOFRONT':
# Bring the item to front at this time position
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
self.Zbuffer.append( ( time, itemname, FRONT) )
elif command == 'SENDTOBACK':
# Send the item to back at this time position
assert isString(df.loc[Row,2]), \
"%s at row %d of sheet %s expects a string in column C" % (command, Row+1, SheetName)
itemname = df.loc[Row,2].upper().strip()
self.Zbuffer.append( ( time, itemname, BACK) )
else:
print("Command %s not recognized on row %d." % (command, Row+1))
def ParseTable(self, FileName, SheetName):
logging.debug(' Script.ParseTable(%s, %s)' % (FileName, SheetName))
# Open excel file with frame data
df = pd.read_excel(FileName, sheet_name=SheetName, header=None)
# Investigate which data each column contains
print(' - parsing table %s' % SheetName)
for Row in range(2, df.shape[0]):
# Only process rows with a time in the first column
if isNumber(df.loc[Row,0]):
time = df.loc[Row,0]
# Increase time if the table exceeds the maximum
if self.MaxTime<time: self.MaxTime=time
for Col in range(1, df.shape[1]):
# Only process columns with an existing object in the first row and a command in the second row
if isString(df.loc[0,Col]) and isString(df.loc[1,Col]) and\
len(df.loc[0,Col])>0 and len(df.loc[1,Col])>0:
itemname = df.loc[0,Col].upper().strip()
command = df.loc[1,Col].upper().strip()
# Only process items that have already been created in another script
if itemname in self.Items:
item = self.Items[itemname]
if command == 'IMAGE':
if item.ItemType == IT_IMAGE:
# Assign a new filename for an image item
if isString(df.loc[Row,Col]):
filename = os.path.join(self.ImageDir, df.loc[Row,Col])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddImage( time, filename )
elif command == 'MASK':
if item.ItemType == IT_MASK:
# Assign a new filename for an image item
if isString(df.loc[Row,Col]):
filename = os.path.join(self.ImageDir, df.loc[Row,Col])
assert os.path.isfile(filename), \
"%s at row %d could not find file %s" % (command, Row+1, filename)
self.Items[itemname].AddMask( time, filename )
elif command == 'TEXT':
if item.ItemType == IT_TEXT:
# Assign a new title for a text item
if isString(df.loc[Row,Col]):
text = df.loc[Row,Col]
self.Items[itemname].AddText( time, text )
elif command in ['XPOS', 'YPOS', 'XPOLE', 'YPOLE', 'XSCALE', 'YSCALE', 'ROTATION',
'TIMEOFFSET', 'TEXTSIZE', 'OPACITY']:
# Set a new float property
if isNumber(df.loc[Row,Col]):
val = df.loc[Row,Col]
self.Items[itemname].Properties[command].Append(time, val)
elif command in ['XMOVE', 'YMOVE', 'SXMOVE', 'SYMOVE', 'RMOVE', 'OMOVE']:
# Determine type of movement
if isString(df.loc[Row,Col]):
move = df.loc[Row,Col].strip().upper()
if move in CheckMove:
self.Items[itemname].Properties[command].Append(time, CheckMove[move])
else:
print("Did not recognize type of movement on row %d." % (Row+1))
elif command in ['TEXTCOLOR', 'FONT']:
if isString(df.loc[Row,Col]):
textcolor = df.loc[Row,Col].strip()
self.Items[itemname].Properties[command].Append(time, textcolor)
else:
print('Command: ', command)
print('Column: ', Col+1)
print("Command %s not recognized on col %d." % (command, Col+1))
def StandardChecks(self):
print(' - checking script %s which has %d items' % (self.SheetName, len(self.Items) ))
# Do some standard checks after parsing
OK = True
self.TimeOffsetUsed=False
for i in self.Items.values():
i.StandardChecks()
if i.TimeOffsetUsed:
self.TimeOffsetUsed=True
if (i.ItemType == IT_IMAGE):
if len(i.Properties['IMAGE'].Sequence)==0:
print('ERROR: %s has NO images' % i.ItemName)
OK=False
else:
for time, filename in i.Properties['IMAGE'].Sequence:
if not os.path.isfile(filename):
print('Image not found: %s at time %.3f' % (filename, time))
OK = False
if (i.ItemType == IT_MASK):
if len(i.Properties['MASK'].Sequence)==0:
print('ERROR: %s has NO mask' % i.ItemName)
OK=False
else:
for time, filename in i.Properties['MASK'].Sequence:
if not os.path.isfile(filename):
print('Mask not found: %s at time %.3f' % (filename, time))
OK = False
if (i.ItemType == IT_TEXT):
if len(i.Properties['TEXT'].Sequence)==0:
print('ERROR: %s has NO lines of text' % i.ItemName)
OK=False
return OK
def Deploy(self, MaxTime):
logging.debug('')
logging.debug('* DEPLOYING SCRIPT %s' % self.SheetName)
for item in self.Items.values():
item.Deploy(MaxTime)
if not self.Zbuffer:
# The Zbuffer has no items because the user did not specify
# any BRINGTOFRONT or SENDTOBACK commands
# Get the name of a random item
itemname = list(self.Items.keys())[0]
self.Zbuffer.append( ( 0, itemname, FRONT) )
self.Zbuffer.sort()
time, item, direction = self.Zbuffer[-1]
self.Zbuffer.append( (MaxTime, item, direction) )
self.Zbuffer.sort()
# Determine the order of the items at time = 0
self.ZbufferIndex = 0
# list() means we create a copy
self.Zorder = list(self.Items.keys())
def GetPicture(self, Time, Frame):
# If exactly the same image was calculated before,
# use that image
# frame caching is disabled; the original cached-frame check is kept below
# for reference
#if Frame != self.PictureFrame and not self.TimeOffsetUsed:
if True:
logging.debug('')
logging.debug('* SCRIPT %s IS GENERATING FRAME %.5d at time %.2f' % (self.SheetName, Frame, Time ))
# Start with a transparent image
if (not self.IsCanvas) or self.FirstImage:
self.Picture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
self.FirstImage=False
# Determine the Z-order at the desired time
while True:
time, item, direction = self.Zbuffer[self.ZbufferIndex]
if item not in self.Zorder:
    print('Z-order failure: item %s not in script %s' % (item, self.SheetName))
else:
    self.Zorder.remove(item)
if direction == FRONT:
self.Zorder.append(item)
else:
self.Zorder.insert(0, item)
if (self.Zbuffer[self.ZbufferIndex+1][0])>Time:
break
else:
self.ZbufferIndex+=1
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
# Draw each item
for itemname in self.Zorder:
Item = self.Items[itemname]
move = Item.Properties['OMOVE' ].Value(Time)
opacity = Item.Properties['OPACITY' ].Value(Time, move)
move = Item.Properties['XMOVE' ].Value(Time)
xpos = Item.Properties['XPOS' ].Value(Time, move)
move = Item.Properties['YMOVE' ].Value(Time)
ypos = Item.Properties['YPOS' ].Value(Time, move)
move = Item.Properties['SXMOVE' ].Value(Time)
sx = Item.Properties['XSCALE' ].Value(Time, move)
move = Item.Properties['SYMOVE' ].Value(Time)
sy = Item.Properties['YSCALE' ].Value(Time, move)
move = Item.Properties['RMOVE' ].Value(Time)
rot = Item.Properties['ROTATION'].Value(Time, move)
try:
logging.debug(' - Item %s:%s xpos= %.2f ypos= %.2f xscale= %.3f yscale= %.3f rot= %.3f opacity= %.3f' % (self.SheetName, itemname, xpos, ypos, sx, sy, rot, opacity))
except:
print('opacity', opacity)
print('xpos', xpos)
print('ypos', ypos)
print('sx', sx)
print('sy', sy)
print('rot', rot)
if opacity>0:
if Item.ItemType == IT_ASSY:
script = Item.Properties['SCRIPT'].Value(Time)
logging.debug(' - Assembly %s:%s requests an image from script %s' % (self.SheetName, itemname, script))
if script in self.ScriptList:
dt=Item.Properties['TIMEOFFSET'].Value(Time, LINEAR)
ItemPicture = self.ScriptList[script].GetPicture(Time-dt, Frame)
else:
logging.debug(' Script %s not in scriptlist!!:'% (script))
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
logging.debug(' Assembly %s continues:'% (self.SheetName))
if Item.ItemType == IT_CANVAS:
script = Item.Properties['SCRIPT'].Value(Time)
logging.debug(' - Canvas %s:%s requests an image from script %s' % (self.SheetName, itemname, script))
if script in self.ScriptList:
dt=Item.Properties['TIMEOFFSET'].Value(Time, LINEAR)
ItemPicture = self.ScriptList[script].GetPicture(Time-dt, Frame)
else:
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
elif Item.ItemType == IT_IMAGE:
image = Item.Properties['IMAGE'].Value(Time)
if Item.PrevImageName != image:
Item.LoadedImage = Image.open(image).convert("RGBA")
Item.PrevImageName = image
ItemPicture = Item.LoadedImage
elif Item.ItemType == IT_MASK:
image = Item.Properties['MASK'].Value(Time)
logging.debug('Line 585 mask is %s' % image)
if Item.PrevImageName != image:
Item.LoadedImage = Image.open(image).convert("RGBA")
Item.PrevImageName = image
ItemPicture = Item.LoadedImage
elif Item.ItemType == IT_TEXT:
ItemPicture = Image.new("RGBA", (self.Width, self.Height), (255,0,0,0) )
text = Item.Properties['TEXT' ].Value(Time)
textsize = int(Item.Properties['TEXTSIZE' ].Value(Time, LINEAR))
textcolor = Item.Properties['TEXTCOLOR'].Value(Time)
fontname = Item.Properties['FONT' ].Value(Time)
Directories = [ 'C:\\WINDOWS\\Fonts\\' ]
Fonts = [fontname, 'calibri.ttf', 'YanoneKaffeesatz-Regular.ttf', 'ARIALN.TTF', 'verdana.ttf', 'YanoneKaffeesatz-Light.ttf']
Face = ImageFont.truetype(SelectFont(Directories, Fonts), textsize)
Draw = ImageDraw.Draw(ItemPicture)
Draw.text( (0,0), text, fill=textcolor, font=Face)
# Retrieve the general properties
move = Item.Properties['XMOVE' ].Value(Time)
xpos = Item.Properties['XPOS' ].Value(Time, move)
move = Item.Properties['YMOVE' ].Value(Time)
ypos = Item.Properties['YPOS' ].Value(Time, move)
move = Item.Properties['SXMOVE' ].Value(Time)
sx = Item.Properties['XSCALE' ].Value(Time, move)
xpole = Item.Properties['XPOLE' ].Value(Time, move)
move = Item.Properties['SYMOVE' ].Value(Time)
sy = Item.Properties['YSCALE' ].Value(Time, move)
ypole = Item.Properties['YPOLE' ].Value(Time, move)
move = Item.Properties['RMOVE' ].Value(Time)
rot = Item.Properties['ROTATION'].Value(Time, move)
fi = math.pi/180*rot
sinfi = math.sin(fi)
cosfi = math.cos(fi)
w,h = ItemPicture.size
# Resize and rotate the ItemPicture
try:
ItemPicture=ItemPicture.resize( (int(sx*w+0.5), int(sy*h+0.5) ), Image.ANTIALIAS)
ItemPicture=ItemPicture.rotate(rot, expand=1)
except:
print('ERROR Script 663: Item %s:%s sx= %.2f sy= %.2f' % (self.SheetName, itemname, sx, sy))
break
wr,hr = ItemPicture.size
xt = xpos + xpole - ypole*sy*sinfi - xpole*sx*cosfi +0.5*w*sx*cosfi +0.5*h*sy*sinfi -0.5*wr
yt = ypos + ypole - ypole*sy*cosfi + xpole*sx*sinfi -0.5*w*sx*sinfi +0.5*h*sy*cosfi -0.5*hr
Mask = ItemPicture.convert("RGBA")
Mask = Image.blend(Image.new(ItemPicture.mode, ItemPicture.size, 0), ItemPicture, opacity)
if Item.ItemType != IT_MASK:
# Item is picture, assembly or canvas
self.Picture.paste( ItemPicture, (int(xt),int(yt)), Mask )
else:
# Item is mask
logging.debug(' - Applying mask for %s' % itemname)
# Start with a clean image with transparent background
CleanImage = Image.new("RGBA", (self.Width, self.Height), (0,0,0,0) )
# Use the mask rotated and translated
Mask = Image.new("L", (self.Width, self.Height), 0 )
Mask.paste( ItemPicture, (int(xt),int(yt)))
# Copy the image as-is with rotation and translation set to zero
CleanImage.paste( self.Picture, (0,0), Mask )
self.Picture = CleanImage.copy()
self.PictureFrame = Frame
return self.Picture.copy()
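# Usage sketch (the file and sheet names are hypothetical): parse a script,
# validate it, deploy the items and render a single frame.
#
#   scripts = {}
#   main = Script('animation.xlsx', 'Main', scripts)
#   main.ParseScript('animation.xlsx', 'Main')
#   if main.StandardChecks():
#       main.Deploy(main.MaxTime)
#       frame = main.GetPicture(0.0, 0)  # PIL image for t=0, frame 0
#       frame.save('frame_00000.png')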
| 0
| 0
| 0
| 29,838
| 0
| 747
| 0
| 48
| 313
|
da91ac6418297ed4e02987a76d7e459a1c8dc944
| 2,510
|
py
|
Python
|
DatasetHandler/FileHelperFunc.py
|
previtus/MGR-Project-Code
|
1126215059eb3f731dcf78ec24d9a480e73abce6
|
[
"MIT"
] | null | null | null |
DatasetHandler/FileHelperFunc.py
|
previtus/MGR-Project-Code
|
1126215059eb3f731dcf78ec24d9a480e73abce6
|
[
"MIT"
] | null | null | null |
DatasetHandler/FileHelperFunc.py
|
previtus/MGR-Project-Code
|
1126215059eb3f731dcf78ec24d9a480e73abce6
|
[
"MIT"
] | null | null | null |
import os
def get_project_folder():
'''
Gives us the path to MGR-Project-Code from a list of allowed folders.
:return:
'''
PATH_ALTERNATIVES = ['/home/ekmek/Project II/MGR-Project-Code/', '/storage/brno2/home/previtus/MGR-Project-Code/', '/home/ekmek/Vitek/MGR-Project-Code/']
ABS_PATH_TO_PRJ = use_path_which_exists(PATH_ALTERNATIVES)
return ABS_PATH_TO_PRJ
def get_geojson_path():
'''
Gives us the path directly to attractivity_previtus_data_1_edges.geojson from a list of allowed paths
:return:
'''
folders = ['/home/ekmek/Desktop/Project II/graph_new_data/',
'/home/ekmek/Vitek/graph_new_data/',
'/storage/brno2/home/previtus/important_files/']
folder = use_path_which_exists(folders)
return folder+'attractivity_previtus_data_1_edges.geojson'
def use_path_which_exists(list_of_possible_paths):
'''
From a list of possible paths choose the one which exists.
:param list_of_possible_paths: possible paths
:return: working path
'''
used_path = ''
for path in list_of_possible_paths:
if os.path.exists(path):
used_path = path
if used_path == '':
print "Error, cannot locate the path of project, will likely fail!"
return used_path
def file_exists(fname):
''' Does file exist, returns boolean.'''
return os.path.isfile(fname)
def get_folder_from_file(fname):
''' Get folder name from path to a file.'''
return os.path.dirname(fname) + '/'
def folder_exists(directory):
''' Does folder with this name exist, returns boolean'''
return os.path.exists(directory)
def make_folder_ifItDoesntExist(directory):
''' Make a new directory, if it didn't previously exist.'''
if not os.path.exists(directory):
os.makedirs(directory)
import shutil, errno
def copy_folder(src, dst):
''' Copy and paste folders. Used for dataset augmentation.'''
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def copy_file(src, dst):
''' Copy and paste file.'''
try:
shutil.copy(src, dst)
except OSError as exc:
raise
import hashlib
def md5(fname):
''' Get md5 hash of a file.'''
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
| 30.240964
| 157
| 0.661753
|
import os
def get_project_folder():
'''
Gives us the path to MGR-Project-Code from a list of allowed folders.
:return:
'''
PATH_ALTERNATIVES = ['/home/ekmek/Project II/MGR-Project-Code/', '/storage/brno2/home/previtus/MGR-Project-Code/', '/home/ekmek/Vitek/MGR-Project-Code/']
ABS_PATH_TO_PRJ = use_path_which_exists(PATH_ALTERNATIVES)
return ABS_PATH_TO_PRJ
def get_geojson_path():
'''
Gives us the path directly to attractivity_previtus_data_1_edges.geojson from a list of allowed paths
:return:
'''
folders = ['/home/ekmek/Desktop/Project II/graph_new_data/',
'/home/ekmek/Vitek/graph_new_data/',
'/storage/brno2/home/previtus/important_files/']
folder = use_path_which_exists(folders)
return folder+'attractivity_previtus_data_1_edges.geojson'
def use_path_which_exists(list_of_possible_paths):
'''
From a list of possible paths choose the one which exists.
:param list_of_possible_paths: possible paths
:return: working path
'''
used_path = ''
for path in list_of_possible_paths:
if os.path.exists(path):
used_path = path
if used_path == '':
print "Error, cannot locate the path of project, will likely fail!"
return used_path
def file_exists(fname):
''' Does file exist, returns boolean.'''
return os.path.isfile(fname)
def get_folder_from_file(fname):
''' Get folder name from path to a file.'''
return os.path.dirname(fname) + '/'
def folder_exists(directory):
''' Does folder with this name exist, returns boolean'''
return os.path.exists(directory)
def make_folder_ifItDoesntExist(directory):
''' Make a new directory, if it didn't previously exist.'''
if not os.path.exists(directory):
os.makedirs(directory)
import shutil, errno
def copy_folder(src, dst):
''' Copy and paste folders. Used for dataset augmentation.'''
try:
shutil.copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def copy_file(src, dst):
''' Copy and paste file.'''
try:
shutil.copy(src, dst)
except OSError as exc:
raise
import hashlib
def md5(fname):
''' Get md5 hash of a file.'''
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
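# Usage sketch (the paths are hypothetical):
#
#   project = get_project_folder()
#   make_folder_ifItDoesntExist(project + 'output/')
#   if file_exists(project + 'data.bin'):
#       print(md5(project + 'data.bin'))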
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ca53a879e6d706ed4d4875884504a13f91f27b99
| 1,575
|
py
|
Python
|
trajectory_executor/Parameters.py
|
WPI-MMR/trajectory_generator
|
61a3c61d37e674cfa81ec4e67fd56bb825e56ab8
|
[
"MIT"
] | null | null | null |
trajectory_executor/Parameters.py
|
WPI-MMR/trajectory_generator
|
61a3c61d37e674cfa81ec4e67fd56bb825e56ab8
|
[
"MIT"
] | null | null | null |
trajectory_executor/Parameters.py
|
WPI-MMR/trajectory_generator
|
61a3c61d37e674cfa81ec4e67fd56bb825e56ab8
|
[
"MIT"
] | null | null | null |
# from sympy import *
# Robot Chassis Parameters
l = 370 #hip to hip length of the robot
b = 210.1 #hip to hip breadth of the robot
h = 44 #height of the robot
## Leg Type 1: Rear
'''
Variable name convention as follows:
The first number represents the length and the second number represents the Leg Type
l11 is the hip to knee length of Leg Type 1
l21 is the knee to ankle length of Leg Type 1
l22 is the knee to ankle length of Leg Type 2
and so on...
'''
# Defining lengths and offsets
l11 = 160 #hip to knee length
l21 = 160 #knee to ankle length
l3 = 39 #ankle to toe length
d1 = 37 #hip offset
d2 = 12.95 #knee offset
'''
Variable name convention as follows:
The first number represents the angle and the second number represents the Leg #
theta11 is the hip rotation angle of Leg 1
theta21 is the knee roation angle of Leg 1
theta31 is the ankle roation angle of Leg 1
theta14 is the hip rotation angle of Leg 4
and so on...
'''
# theta11, alpha11, theta21, alpha21, theta31, alpha31 = symbols("theta11 alpha11 theta21 alpha21 theta31 alpha31")
# theta14, alpha14, theta24, alpha24, theta34, alpha34 = symbols("theta14 alpha14 theta24 alpha24 theta34 alpha34")
## Leg Type 2: Front
# Defining lengths and offsets
l12 = 160 #hip to knee length
l22 = 173.5 #knee to ankle length
# theta12, alpha12, theta22, alpha22 = symbols("theta12 alpha12 theta22 alpha22")
# theta13, alpha13, theta23, alpha23 = symbols("theta13 alpha13 theta23 alpha23")
| 28.125
| 116
| 0.690794
|
# from sympy import *
# Robot Chassis Parameters
l = 370 #hip to hip length of the robot
b = 210.1 #hip to hip breadth of the robot
h = 44 #height of the robot
## Leg Type 1: Rear
'''
Variable name convention as follows:
The first number represents the length and the second number represents the Leg Type
l11 is the hip to knee length of Leg Type 1
l21 is the knee to ankle length of Leg Type 1
l22 is the knee to ankle length of Leg Type 2
and so on...
'''
# Defining lengths and offsets
l11 = 160 #hip to knee length
l21 = 160 #knee to ankle length
l3 = 39 #ankle to toe length
d1 = 37 #hip offset
d2 = 12.95 #knee offset
'''
Variable name convention as follows:
The first number represents the angle and the second number represents the Leg #
theta11 is the hip rotation angle of Leg 1
theta21 is the knee roation angle of Leg 1
theta31 is the ankle roation angle of Leg 1
theta14 is the hip rotation angle of Leg 4
and so on...
'''
# theta11, alpha11, theta21, alpha21, theta31, alpha31 = symbols("theta11 alpha11 theta21 alpha21 theta31 alpha31")
# theta14, alpha14, theta24, alpha24, theta34, alpha34 = symbols("theta14 alpha14 theta24 alpha24 theta34 alpha34")
## Leg Type 2: Front
# Defining lengths and offsets
l12 = 160 #hip to knee length
l22 = 173.5 #knee to ankle length
# theta12, alpha12, theta22, alpha22 = symbols("theta12 alpha12 theta22 alpha22")
# theta13, alpha13, theta23, alpha23 = symbols("theta13 alpha13 theta23 alpha23")
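# Worked example (sketch): the maximum planar reach of the rear leg when
# fully extended, ignoring the offsets d1 and d2, is
#   l11 + l21 + l3 = 160 + 160 + 39 = 359 mm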
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
41e9c7b6bd0603988abf0a0263e4f16fab3ff22b
| 716
|
py
|
Python
|
plbmng/lib/ssh_map.py
|
xxMAKMAKxx/plbmng
|
64bbe70424801092c7429d5e73ecaf5466b6c437
|
[
"MIT"
] | null | null | null |
plbmng/lib/ssh_map.py
|
xxMAKMAKxx/plbmng
|
64bbe70424801092c7429d5e73ecaf5466b6c437
|
[
"MIT"
] | null | null | null |
plbmng/lib/ssh_map.py
|
xxMAKMAKxx/plbmng
|
64bbe70424801092c7429d5e73ecaf5466b6c437
|
[
"MIT"
] | null | null | null |
import folium
import csv
def main():
"""
Creates a map of nodes with available SSH connection.\n
:return: map_ssh.html file
"""
map_ssh = folium.Map(location=[45.523, -122.675],
zoom_start=2)
with open('lib/base_data.txt') as tsv:
for row in csv.reader(tsv, delimiter='\t'):
name = row[0]
try:
x = float(row[1])
y = float(row[2])
print(" %s " % name)
folium.Marker([x, y], popup=name).add_to(map_ssh)
except ValueError:
pass
map_ssh.save('map_ssh.html')
if __name__ == "__main__":
main()
| 23.866667
| 65
| 0.523743
|
import folium
import csv
from folium.plugins import MarkerCluster
def main():
"""
Creates a map of nodes with available SSH connection.\n
:return: map_ssh.html file
"""
map_ssh = folium.Map(location=[45.523, -122.675],
zoom_start=2)
with open('lib/base_data.txt') as tsv:
for row in csv.reader(tsv, delimiter='\t'):
name = row[0]
try:
x = float(row[1])
y = float(row[2])
print(" %s " % name)
folium.Marker([x, y], popup=name).add_to(map_ssh)
except ValueError:
pass
map_ssh.save('map_ssh.html')
if __name__ == "__main__":
main()
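# Expected format of lib/base_data.txt (sketch; the hostname and coordinates
# are hypothetical): one tab-separated row per node -- name, latitude,
# longitude, e.g.
#
#   planetlab1.example.org    45.523    -122.675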
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 22
|
2bb73ddf9d4ab92638002a38558adbe2794f6be1
| 146
|
py
|
Python
|
day08/part2.py
|
mtn/advent15
|
b23bcf5761363596336d5361218c52db0b078793
|
[
"MIT"
] | null | null | null |
day08/part2.py
|
mtn/advent15
|
b23bcf5761363596336d5361218c52db0b078793
|
[
"MIT"
] | null | null | null |
day08/part2.py
|
mtn/advent15
|
b23bcf5761363596336d5361218c52db0b078793
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
ans = 0
with open("input.txt") as f:
for line in f:
ans += 2 + line.count("\\") + line.count("\"")
print(ans)
| 18.25
| 54
| 0.541096
|
#!/usr/bin/env python3
ans = 0
with open("input.txt") as f:
for line in f:
ans += 2 + line.count("\\") + line.count("\"")
print(ans)
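# Worked example (sketch): re-encoding the 5-character literal "abc" yields
# "\"abc\"" (9 characters) -- 2 new surrounding quotes plus one escape for
# each original quote: 2 + 0 backslashes + 2 quotes = 4 extra characters.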
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b39083aacc0b8fea019f95a80c7e48ff65c4cb4a
| 818
|
py
|
Python
|
opttrack/lib/ui/obs_menu.py
|
aisthesis/opttrack
|
17e0c7740ea43e0f07166e30d689b106d0319d0b
|
[
"MIT"
] | null | null | null |
opttrack/lib/ui/obs_menu.py
|
aisthesis/opttrack
|
17e0c7740ea43e0f07166e30d689b106d0319d0b
|
[
"MIT"
] | 2
|
2016-03-30T02:50:31.000Z
|
2016-03-30T16:18:23.000Z
|
opttrack/lib/ui/obs_menu.py
|
aisthesis/opttrack
|
17e0c7740ea43e0f07166e30d689b106d0319d0b
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/edit_menu.py
Content for interactive editor
"""
| 24.787879
| 67
| 0.665037
|
"""
Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/edit_menu.py
Content for interactive editor
"""
from functools import partial
from .obs_handlers import ObsHandlers
from .menu import Menu
from .spread_selector import SpreadSelector
class ObsMenu(Menu):
def __init__(self, logger, tz):
super(ObsMenu, self).__init__(logger, tz=tz)
self.spread_sel = SpreadSelector()
self._handlers = ObsHandlers(self.logger, self.tz)
overrides = {'main': {'desc': 'Quit', 'do': lambda: False}}
self._menus = {'main': self.spread_sel.get(overrides,
dgb=partial(self.handlers.obs, 'dgb'))}
@property
def menus(self):
return self._menus
@property
def handlers(self):
return self._handlers
| 0
| 78
| 0
| 440
| 0
| 0
| 0
| 47
| 113
|
c2dee6d1aa40a7f38a96156bda231d31db4d330b
| 1,307
|
py
|
Python
|
nsr/utils/majority_vote.py
|
GaoSida/Neural-SampleRank
|
8b4a7a40cc34bff608f19d3f7eb64bda76669c5b
|
[
"MIT"
] | 2
|
2020-11-17T18:41:05.000Z
|
2021-08-12T14:40:56.000Z
|
nsr/utils/majority_vote.py
|
GaoSida/Neural-SampleRank
|
8b4a7a40cc34bff608f19d3f7eb64bda76669c5b
|
[
"MIT"
] | null | null | null |
nsr/utils/majority_vote.py
|
GaoSida/Neural-SampleRank
|
8b4a7a40cc34bff608f19d3f7eb64bda76669c5b
|
[
"MIT"
] | null | null | null |
"""A helper for running inference callable multiple times, and ensemble the
predictions with a simple majority vote.
"""
from typing import Callable
def majority_vote_ensemble(eval_func: Callable, num_runs: int):
"""
Args:
eval_func: call without argument to get a prediction or
a list of predictions.
num_runs: how many times to run the eval_func to get the predictions
Returns:
a prediction or a list of predictions after majority vote.
"""
if num_runs == 1:
return eval_func()
all_predictions = [eval_func() for _ in range(num_runs)]
if not isinstance(all_predictions[0][0], list):
# eval func gives single prediction
return _vote(all_predictions)
else:
# eval func gives a list of predictions
results = list()
for i in range(len(all_predictions[0])):
results.append(_vote([pred_list[i]
for pred_list in all_predictions]))
return results
| 33.512821
| 76
| 0.638868
|
"""A helper for running inference callable multiple times, and ensemble the
predictions with a simple majority vote.
"""
from typing import Callable
from collections import Counter
def majority_vote_ensemble(eval_func: Callable, num_runs: int):
"""
Args:
eval_func: call without argument to get a prediction or
a list of predictions.
num_runs: how many times to run the eval_func to get the predictions
Returns:
a prediction or a list of predictions after majority vote.
"""
if num_runs == 1:
return eval_func()
def _vote(prediction_list):
results = list()
for i in range(len(prediction_list[0])):
votes = Counter([pred[i] for pred in prediction_list])
results.append(votes.most_common(1)[0][0])
return results
all_predictions = [eval_func() for _ in range(num_runs)]
if not isinstance(all_predictions[0][0], list):
# eval func gives single prediction
return _vote(all_predictions)
else:
# eval func gives a list of predictions
results = list()
for i in range(len(all_predictions[0])):
results.append(_vote([pred_list[i]
for pred_list in all_predictions]))
return results
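# Usage sketch (eval_func is a stand-in for a real inference callable that
# returns one label per token):
#
#   def eval_func():
#       return ['B-PER', 'O', 'O']
#   labels = majority_vote_ensemble(eval_func, num_runs=5)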
| 0
| 0
| 0
| 0
| 0
| 225
| 0
| 10
| 53
|
5ded5a7640fb3ff4dbd2ec961b2d0176566b38b3
| 12,872
|
py
|
Python
|
postgres-appliance/major_upgrade/pg_upgrade.py
|
OlleLarsson/spilo
|
a347a9453a2deef33d968261096c1a328d9a2d87
|
[
"Apache-2.0"
] | null | null | null |
postgres-appliance/major_upgrade/pg_upgrade.py
|
OlleLarsson/spilo
|
a347a9453a2deef33d968261096c1a328d9a2d87
|
[
"Apache-2.0"
] | null | null | null |
postgres-appliance/major_upgrade/pg_upgrade.py
|
OlleLarsson/spilo
|
a347a9453a2deef33d968261096c1a328d9a2d87
|
[
"Apache-2.0"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
| 43.931741
| 118
| 0.64582
|
import logging
import os
import shutil
import subprocess
import psutil
from patroni.postgresql import Postgresql
logger = logging.getLogger(__name__)
class _PostgresqlUpgrade(Postgresql):
_INCOMPATIBLE_EXTENSIONS = ('amcheck_next',)
def adjust_shared_preload_libraries(self, version):
from spilo_commons import adjust_extensions
shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries')
self._old_config_values['shared_preload_libraries'] = shared_preload_libraries
if shared_preload_libraries:
self.config.get('parameters')['shared_preload_libraries'] =\
adjust_extensions(shared_preload_libraries, version)
def no_bg_mon(self):
shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries')
if shared_preload_libraries:
tmp = filter(lambda a: a != "bg_mon", map(lambda a: a.strip(), shared_preload_libraries.split(",")))
self.config.get('parameters')['shared_preload_libraries'] = ",".join(tmp)
def restore_shared_preload_libraries(self):
if getattr(self, '_old_shared_preload_libraries', None):
self.config.get('parameters')['shared_preload_libraries'] = self._old_shared_preload_libraries
return True
def start_old_cluster(self, config, version):
self.set_bin_dir(version)
version = float(version)
config[config['method']]['command'] = 'true'
if version < 9.5: # 9.4 and older don't have recovery_target_action
action = config[config['method']].get('recovery_target_action')
config[config['method']]['pause_at_recovery_target'] = str(action == 'pause').lower()
# make sure we don't archive wals from the old version
self._old_config_values = {'archive_mode': self.config.get('parameters').get('archive_mode')}
self.config.get('parameters')['archive_mode'] = 'off'
# and don't load shared_preload_libraries which don't exist in the old version
self.adjust_shared_preload_libraries(version)
return self.bootstrap.bootstrap(config)
def get_cluster_version(self):
with open(self._version_file) as f:
return f.read().strip()
def set_bin_dir(self, version):
from spilo_commons import get_bin_dir
self._old_bin_dir = self._bin_dir
self._bin_dir = get_bin_dir(version)
@property
def local_conn_kwargs(self):
conn_kwargs = self.config.local_connect_kwargs
conn_kwargs['options'] = '-c synchronous_commit=local -c statement_timeout=0 -c search_path='
conn_kwargs.pop('connect_timeout', None)
return conn_kwargs
def _get_all_databases(self):
return [d[0] for d in self.query('SELECT datname FROM pg_catalog.pg_database WHERE datallowconn')]
def drop_possibly_incompatible_extensions(self):
from patroni.postgresql.connection import get_connection_cursor
logger.info('Dropping extensions from the cluster which could be incompatible')
conn_kwargs = self.local_conn_kwargs
for d in self._get_all_databases():
conn_kwargs['database'] = d
with get_connection_cursor(**conn_kwargs) as cur:
for ext in self._INCOMPATIBLE_EXTENSIONS:
logger.info('Executing "DROP EXTENSION IF EXISTS %s" in the database="%s"', ext, d)
cur.execute("DROP EXTENSION IF EXISTS {0}".format(ext))
def drop_possibly_incompatible_objects(self):
from patroni.postgresql.connection import get_connection_cursor
logger.info('Dropping objects from the cluster which could be incompatible')
conn_kwargs = self.local_conn_kwargs
for d in self._get_all_databases():
conn_kwargs['database'] = d
with get_connection_cursor(**conn_kwargs) as cur:
logger.info('Executing "DROP FUNCTION metric_helpers.pg_stat_statements" in the database="%s"', d)
cur.execute("DROP FUNCTION IF EXISTS metric_helpers.pg_stat_statements(boolean) CASCADE")
for ext in ('pg_stat_kcache', 'pg_stat_statements') + self._INCOMPATIBLE_EXTENSIONS:
logger.info('Executing "DROP EXTENSION IF EXISTS %s" in the database="%s"', ext, d)
cur.execute("DROP EXTENSION IF EXISTS {0}".format(ext))
if d == 'postgres':
logger.info('Executing "DROP TABLE postgres_log CASCADE" in the database=postgres')
cur.execute('DROP TABLE IF EXISTS public.postgres_log CASCADE')
cur.execute("SELECT oid::regclass FROM pg_catalog.pg_class WHERE relpersistence = 'u'")
for unlogged in cur.fetchall():
logger.info('Truncating unlogged table %s', unlogged[0])
try:
cur.execute('TRUNCATE {0}'.format(unlogged[0]))
except Exception as e:
logger.error('Failed: %r', e)
def update_extensions(self):
from patroni.postgresql.connection import get_connection_cursor
conn_kwargs = self.local_conn_kwargs
for d in self._get_all_databases():
conn_kwargs['database'] = d
with get_connection_cursor(**conn_kwargs) as cur:
cur.execute('SELECT quote_ident(extname) FROM pg_catalog.pg_extension')
for extname in cur.fetchall():
query = 'ALTER EXTENSION {0} UPDATE'.format(extname[0])
logger.info("Executing '%s' in the database=%s", query, d)
try:
cur.execute(query)
except Exception as e:
logger.error('Failed: %r', e)
@staticmethod
def remove_new_data(d):
if d.endswith('_new') and os.path.isdir(d):
shutil.rmtree(d)
def cleanup_new_pgdata(self):
if getattr(self, '_new_data_dir', None):
self.remove_new_data(self._new_data_dir)
def cleanup_old_pgdata(self):
if os.path.exists(self._old_data_dir):
logger.info('Removing %s', self._old_data_dir)
shutil.rmtree(self._old_data_dir)
return True
def switch_pgdata(self):
self._old_data_dir = self._data_dir + '_old'
self.cleanup_old_pgdata()
os.rename(self._data_dir, self._old_data_dir)
if getattr(self, '_new_data_dir', None):
os.rename(self._new_data_dir, self._data_dir)
self.configure_server_parameters()
return True
def switch_back_pgdata(self):
if os.path.exists(self._data_dir):
self._new_data_dir = self._data_dir + '_new'
self.cleanup_new_pgdata()
os.rename(self._data_dir, self._new_data_dir)
os.rename(self._old_data_dir, self._data_dir)
def pg_upgrade(self, check=False):
upgrade_dir = self._data_dir + '_upgrade'
if os.path.exists(upgrade_dir) and os.path.isdir(upgrade_dir):
shutil.rmtree(upgrade_dir)
os.makedirs(upgrade_dir)
old_cwd = os.getcwd()
os.chdir(upgrade_dir)
pg_upgrade_args = ['-k', '-j', str(psutil.cpu_count()),
'-b', self._old_bin_dir, '-B', self._bin_dir,
'-d', self._data_dir, '-D', self._new_data_dir,
'-O', "-c timescaledb.restoring='on'"]
if 'username' in self.config.superuser:
pg_upgrade_args += ['-U', self.config.superuser['username']]
if check:
pg_upgrade_args += ['--check']
else:
self.config.write_postgresql_conf()
logger.info('Executing pg_upgrade%s', (' --check' if check else ''))
if subprocess.call([self.pgcommand('pg_upgrade')] + pg_upgrade_args) == 0:
os.chdir(old_cwd)
shutil.rmtree(upgrade_dir)
return True
def prepare_new_pgdata(self, version):
from spilo_commons import append_extentions
locale = self.query('SHOW lc_collate').fetchone()[0]
encoding = self.query('SHOW server_encoding').fetchone()[0]
initdb_config = [{'locale': locale}, {'encoding': encoding}]
if self.query("SELECT current_setting('data_checksums')::bool").fetchone()[0]:
initdb_config.append('data-checksums')
logger.info('initdb config: %s', initdb_config)
self._new_data_dir = os.path.abspath(self._data_dir)
self._old_data_dir = self._new_data_dir + '_old'
self._data_dir = self._new_data_dir + '_new'
self.remove_new_data(self._data_dir)
old_postgresql_conf = self.config._postgresql_conf
self.config._postgresql_conf = os.path.join(self._data_dir, 'postgresql.conf')
old_version_file = self._version_file
self._version_file = os.path.join(self._data_dir, 'PG_VERSION')
self.set_bin_dir(version)
# shared_preload_libraries for the old cluster, cleaned from incompatible/missing libs
old_shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries')
# restore original values of archive_mode and shared_preload_libraries
if getattr(self, '_old_config_values', None):
for name, value in self._old_config_values.items():
if value is None:
self.config.get('parameters').pop(name)
else:
self.config.get('parameters')[name] = value
# for the new version we may need to add some libs to the shared_preload_libraries
shared_preload_libraries = self.config.get('parameters').get('shared_preload_libraries')
if shared_preload_libraries:
self._old_shared_preload_libraries = self.config.get('parameters')['shared_preload_libraries'] =\
append_extentions(shared_preload_libraries, float(version))
self.no_bg_mon()
if not self.bootstrap._initdb(initdb_config):
return False
self.bootstrap._running_custom_bootstrap = False
# Copy old configs. XXX: some parameters might be incompatible!
for f in os.listdir(self._new_data_dir):
if f.startswith('postgresql.') or f.startswith('pg_hba.conf') or f == 'patroni.dynamic.json':
shutil.copy(os.path.join(self._new_data_dir, f), os.path.join(self._data_dir, f))
self.config.write_postgresql_conf()
self._new_data_dir, self._data_dir = self._data_dir, self._new_data_dir
self.config._postgresql_conf = old_postgresql_conf
self._version_file = old_version_file
if old_shared_preload_libraries:
self.config.get('parameters')['shared_preload_libraries'] = old_shared_preload_libraries
self.no_bg_mon()
self.configure_server_parameters()
return True
def do_upgrade(self):
return self.pg_upgrade() and self.restore_shared_preload_libraries()\
and self.switch_pgdata() and self.cleanup_old_pgdata()
def analyze(self, in_stages=False):
vacuumdb_args = ['--analyze-in-stages'] if in_stages else []
logger.info('Rebuilding statistics (vacuumdb%s)', (' ' + vacuumdb_args[0] if in_stages else ''))
if 'username' in self.config.superuser:
vacuumdb_args += ['-U', self.config.superuser['username']]
vacuumdb_args += ['-Z', '-j']
# vacuumdb processes databases sequentially, but running several instances
# in parallel helps when the cluster holds multiple databases.
single_worker_dbs = ('postgres', 'template1')
databases = self._get_all_databases()
db_count = len([d for d in databases if d not in single_worker_dbs])
# calculate the concurrency per database; the always-present "single_worker_dbs" each get a single worker
concurrency = str(max(1, int(psutil.cpu_count()/max(1, db_count))))
procs = []
for d in databases:
j = '1' if d in single_worker_dbs else concurrency
try:
procs.append(subprocess.Popen([self.pgcommand('vacuumdb')] + vacuumdb_args + [j, '-d', d]))
except Exception:
pass
for proc in procs:
try:
proc.wait()
except Exception:
pass
def PostgresqlUpgrade(config):
config['postgresql'].update({'callbacks': {}, 'pg_ctl_timeout': 3600*24*7})
# avoid unnecessary interactions with PGDATA and postgres
is_running = _PostgresqlUpgrade.is_running
_PostgresqlUpgrade.is_running = lambda s: False
try:
return _PostgresqlUpgrade(config['postgresql'])
finally:
_PostgresqlUpgrade.is_running = is_running
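A minimal standalone sketch of the per-database worker allocation performed by analyze() above; the database list and CPU count below are hypothetical stand-ins for self._get_all_databases() and psutil.cpu_count().
# Hedged sketch: distribute CPU workers across databases for vacuumdb.
single_worker_dbs = ('postgres', 'template1')
databases = ['postgres', 'template1', 'app1', 'app2']  # hypothetical
cpu_count = 8                                          # hypothetical
db_count = len([d for d in databases if d not in single_worker_dbs])
concurrency = max(1, cpu_count // max(1, db_count))
for d in databases:
    workers = 1 if d in single_worker_dbs else concurrency
    print('vacuumdb -Z -j %s -d %s' % (workers, d))    # app1/app2 each get 4 workers here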
| 0
| 355
| 0
| 11,937
| 0
| 380
| 0
| -12
| 157
|
efecd1c0949806cb18bbe2451a6a6c7bb21dc209
| 46
|
py
|
Python
|
code/lib/tosca/__init__.py
|
superfluidity/RDCL3D
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
[
"Apache-2.0"
] | 8
|
2017-03-13T16:34:28.000Z
|
2021-11-16T11:35:56.000Z
|
code/lib/tosca/__init__.py
|
superfluidity/RDCL3D
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
[
"Apache-2.0"
] | null | null | null |
code/lib/tosca/__init__.py
|
superfluidity/RDCL3D
|
3c5717941bd4046aa1be178e9004db1dc1c469a0
|
[
"Apache-2.0"
] | 3
|
2017-03-28T09:26:40.000Z
|
2020-12-08T14:16:12.000Z
|
__all__ = ["tosca_parser", "tosca_rdcl_graph"]
| 46
| 46
| 0.76087
|
__all__ = ["tosca_parser", "tosca_rdcl_graph"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
68097eba15392e818a32df460c2104fd3ca64819
| 4,729
|
py
|
Python
|
examples/resources/aws/subnet.py
|
cfeenstra67/statey
|
6d127ed48265e2e072fbb26486458a4b28a333ec
|
[
"MIT"
] | 4
|
2021-02-16T19:34:38.000Z
|
2022-01-31T16:44:14.000Z
|
examples/resources/aws/subnet.py
|
cfeenstra67/statey
|
6d127ed48265e2e072fbb26486458a4b28a333ec
|
[
"MIT"
] | null | null | null |
examples/resources/aws/subnet.py
|
cfeenstra67/statey
|
6d127ed48265e2e072fbb26486458a4b28a333ec
|
[
"MIT"
] | null | null | null |
from typing import Optional
import statey as st
SubnetConfigType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
# Optional args
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean(default=False),
"assign_ipv6_address_on_creation" : st.Boolean(default=False),
# Missing: tags
]
SubnetType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
"ipv6_association_id" : ~st.String,
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean,
"assign_ipv6_address_on_creation" : st.Boolean,
# Missing: tags
"id" : st.String,
"owner_id" : st.Integer,
]
subnet_resource = st.MachineResource("aws_subnet", SubnetMachine)
Subnet = subnet_resource.s
RESOURCES = [subnet_resource]
def register(registry: Optional["Registry"] = None) -> None:
"""
Register resources in this module
"""
if registry is None:
registry = st.registry
for resource in RESOURCES:
registry.register(resource)
| 31.317881
| 85
| 0.60055
|
import asyncio
import contextlib
from typing import Dict, Any, Optional
import aioboto3
import botocore
import statey as st
SubnetConfigType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
# Optional args
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean(default=False),
"assign_ipv6_address_on_creation" : st.Boolean(default=False),
# Missing: tags
]
SubnetType = st.Struct[
"vpc_id" : st.String,
"cidr_block" : st.String,
"ipv6_association_id" : ~st.String,
"ipv6_cidr_block" : ~st.String,
"map_public_ip_on_launch" : st.Boolean,
"assign_ipv6_address_on_creation" : st.Boolean,
# Missing: tags
"id" : st.String,
"owner_id" : st.Integer,
]
class SubnetMachine(st.SimpleMachine):
"""
Maching representing an AWS subnet
"""
UP = st.State("UP", SubnetConfigType, SubnetType)
@contextlib.asynccontextmanager
async def resource_ctx(self):
async with aioboto3.resource("ec2") as ec2:
yield ec2
@contextlib.asynccontextmanager
async def client_ctx(self):
async with aioboto3.client("ec2") as client:
yield client
@staticmethod
async def convert_instance(subnet: "Subnet") -> Dict[str, Any]:
out = {"id": subnet.id}
ipv6_associations = []
(
out["owner_id"],
out["cidr_block"],
# ipv6_associations,
out["map_public_ip_on_launch"],
out["assign_ipv6_address_on_creation"],
out["vpc_id"],
) = await asyncio.gather(
subnet.owner_id,
subnet.cidr_block,
# subnet.ipv6_cidr_block_association_set,
subnet.map_public_ip_on_launch,
subnet.assign_ipv6_address_on_creation,
subnet.vpc_id,
)
if ipv6_associations:
association = ipv6_associations[0]
out["ipv6_association_id"] = association["AssociationId"]
out["ipv6_cidr_block"] = association["Ipv6CidrBlock"]
else:
out["ipv6_association_id"] = None
out["ipv6_cidr_block"] = None
return out
async def refresh_state(self, data: Any) -> Optional[Any]:
async with self.resource_ctx() as ec2:
instance = await ec2.Subnet(data["id"])
try:
await instance.load()
except botocore.exceptions.ClientError:
return None
return await self.convert_instance(instance)
async def create_task(self, config: SubnetConfigType) -> SubnetType:
"""
Create a new subnet
"""
async with self.resource_ctx() as ec2, self.client_ctx() as client:
kws = {"CidrBlock": config["cidr_block"], "VpcId": config["vpc_id"]}
if config["ipv6_cidr_block"] is not None:
kws["Ipv6CidrBlock"] = config["ipv6_cidr_block"]
subnet = await ec2.create_subnet(**kws)
yield await self.convert_instance(subnet)
map_public_ip_on_launch = await subnet.map_public_ip_on_launch
if map_public_ip_on_launch != config["map_public_ip_on_launch"]:
await client.modify_subnet_attribute(
MapPublicIpOnLaunch={"Value": config["map_public_ip_on_launch"]},
SubnetId=subnet.id,
)
await subnet.load()
yield await self.convert_instance(subnet)
assign_ipv6_address_on_creation = (
await subnet.assign_ipv6_address_on_creation
)
if (
assign_ipv6_address_on_creation
!= config["assign_ipv6_address_on_creation"]
):
await client.modify_subnet_attribute(
AssignIpv6AddressOnCreation={
"Value": config["assign_ipv6_address_on_creation"]
},
SubnetId=subnet.id,
)
await subnet.load()
yield await self.convert_instance(subnet)
async def delete_task(self, current: SubnetType) -> st.EmptyType:
"""
Delete the subnet
"""
async with self.resource_ctx() as ec2:
subnet = await ec2.Subnet(current["id"])
await subnet.delete()
subnet_resource = st.MachineResource("aws_subnet", SubnetMachine)
Subnet = subnet_resource.s
RESOURCES = [subnet_resource]
def register(registry: Optional["Registry"] = None) -> None:
"""
Register resources in this module
"""
if registry is None:
registry = st.registry
for resource in RESOURCES:
registry.register(resource)
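A hedged usage sketch for the resource above; it assumes this file is importable as a module named subnet (its real path is examples/resources/aws/subnet.py) and that the statey session plumbing is configured elsewhere.
import statey as st
import subnet  # hypothetical module name for this file

subnet.register()  # registers the aws_subnet resource into st.registry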
| 0
| 1,201
| 2,102
| 289
| 0
| 0
| 0
| -12
| 112
|
194ab0dc74cd18f13ee2868097d1372c6db981b3
| 7,181
|
py
|
Python
|
archive/_s3.py
|
zpz/upathlib
|
5bf7013be244c5f1b276e0b0ac1b9d7637666ceb
|
[
"MIT"
] | null | null | null |
archive/_s3.py
|
zpz/upathlib
|
5bf7013be244c5f1b276e0b0ac1b9d7637666ceb
|
[
"MIT"
] | 19
|
2021-07-08T06:42:31.000Z
|
2021-10-15T09:07:17.000Z
|
archive/_s3.py
|
zpz/upathlib
|
5bf7013be244c5f1b276e0b0ac1b9d7637666ceb
|
[
"MIT"
] | null | null | null |
import logging
# This module requires a directory `.aws/` containing credentials in the home directory,
# or environment variables `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`.
logger = logging.getLogger(__name__)
| 32.640909
| 98
| 0.569002
|
import logging
from pathlib import Path
import time
import boto3
# This module requires a directory `.aws/` containing credentials in the home directory,
# or environment variables `AWS_ACCESS_KEY_ID`, and `AWS_SECRET_ACCESS_KEY`.
logger = logging.getLogger(__name__)
def _get_client():
return boto3.session.Session().client('s3')
def _has_key(s3_client, bucket: str, key: str) -> bool:
response = s3_client.list_objects_v2(Bucket=bucket, Prefix=key)
for obj in response.get('Contents', []):
if obj['Key'] == key:
return True
return False
def _delete_key(s3_client, bucket: str, key: str) -> None:
s3_client.delete_object(Bucket=bucket, Key=key)
def has_key(bucket: str, key: str) -> bool:
return _has_key(_get_client(), bucket, key)
def delete_key(bucket: str, key: str) -> None:
return _delete_key(_get_client(), bucket, key)
class Bucket:
def __init__(self, bucket):
for header in ('s3://', 's3n://'):
if bucket.startswith(header):
bucket = bucket[len(header):]
break
if '/' in bucket:
bucket = bucket[: bucket.find('/')]
self._bucket = boto3.resource('s3').Bucket(bucket)
@property
def name(self):
return self._bucket.name
def _remove_bucket_key(self, key):
for header in ('s3://', 's3n://'):
if key.startswith(header):
assert key.startswith(header + self.name + '/')
key = key[(len(header) + len(self.name) + 1):]
return key
def upload(self, local_file: str, s3_key: str) -> None:
'''
Upload a single file to S3.
`local_file`: path to local file.
`s3_key`: S3 'key'.
Example: suppose current bucket is s3://my-org, with
local_file: /home/zepu/work/data/xyz/memo.txt
s3_key: mysurvey/memo
--> remote file: s3://my-org/mysurvey/memo
An existing file with the same name will be overwritten.
'''
local_file = Path(local_file)
if not local_file.is_file():
raise Exception('a file name is expected')
data = open(local_file, 'rb')
s3_key = self._remove_bucket_key(s3_key)
self._bucket.put_object(Key=s3_key, Body=data)
def upload_tree(self, local_path: str, s3_path: str,
pattern: str = '**/*') -> None:
'''
`local_path`: directory whose content will be uploaded.
If `local_path` contains a trailing `/`, then no part of this path name
becomes part of the remote name; otherwise, the final node in this path name
becomes the leading segment of the remote name.
`pattern`:
'*' (everything directly under `local_path`),
'**/*' (everything recursively under `local_path`),
'*.py' (every Python module directly under `local_path`),
'**/*.py' (every Python module recursively under `local_path`),
etc.
Example: suppose current bucket is s3://my-org, with
local_path: /home/me/work/data/xyz, containing
.../xyz/a.txt,
.../xyz/b.txt,
.../xyz/zyx/aa.txt
s3_path: dataset1
pattern: '**/*'
--> remote files:
s3://my-org/dataset1/xyz/a.txt
s3://my-org/dataset1/xyz/b.txt
s3://my-org/dataset1/xyz/zyx/aa.txt
local_path: /home/me/work/data/xyz/ (note the trailing '/')
--> remote files:
s3://my-org/dataset1/a.txt
s3://my-org/dataset1/b.txt
s3://my-org/dataset1/zyx/aa.txt
'''
with_root = not local_path.endswith('/')
local_path = Path(local_path)
if not local_path.is_dir():
raise Exception('a directory name is expected')
nodes = [v for v in local_path.glob(pattern) if v.is_file()]
s3_path = self._remove_bucket_key(s3_path)
for node in nodes:
key = node.relative_to(local_path)
if with_root:
key = local_path.name / key
key = s3_path / key
self.upload(node, str(key))
def download(self, s3_key: str, local_file: str = None) -> None:
s3_key = self._remove_bucket_key(s3_key)
if local_file is None:
local_file = str(Path(s3_key).name)
self._bucket.download_file(s3_key, local_file)
def download_tree(self, s3_path: str, local_path: str = None) -> None:
s3_path = self._remove_bucket_key(s3_path)
raise NotImplementedError
def ls(self, key, recursive: bool = False):
# List object names directly or recursively named like `key*`.
# If `key` is `abc/def/`,
# then `abc/def/123/45` will return as `123/45`
#
# If `key` is `abc/def`,
# then `abc/defgh/45` will return as `defgh/45`;
# `abc/def/gh` will return as `/gh`.
#
# So if you know `key` is a `directory`, then it's a good idea to
# include the trailing `/` in `key`.
key = self._remove_bucket_key(key)
z = self._bucket.objects.filter(Prefix=key)
if key.endswith('/'):
key_len = len(key)
else:
key_len = key.rfind('/') + 1
if recursive:
return (v.key[key_len:] for v in z)
# this is a generator, b/c there can be many, many elements
else:
keys = set()
for v in z:
vv = v.key[key_len:]
idx = vv.find('/')
if idx >= 0:
vv = vv[: idx]
keys.add(vv)
return sorted(list(keys))
def has(self, key: str) -> bool:
key = self._remove_bucket_key(key)
if not hasattr(self, '_s3'):
self._s3 = _get_client()
return _has_key(self._s3, self._bucket.name, key)
def delete(self, key: str) -> None:
key = self._remove_bucket_key(key)
if not hasattr(self, '_s3'):
self._s3 = _get_client()
_delete_key(self._s3, self._bucket.name, key)
def delete_tree(self, s3_path: str) -> int:
s3_path = self._remove_bucket_key(s3_path)
n = 0
while True:
nn = self._delete_tree(s3_path)
if nn == 0:
break
n = max(n, nn)
time.sleep(0.5)
return n
def _delete_tree(self, s3_path: str) -> int:
'''
Return the number of objects deleted.
After this operation, the 'folder' `s3_path` is also gone.
TODO: this is not the fastest way to do it.
'''
assert s3_path.endswith('/')
n = 0
for k in self.ls(s3_path, recursive=True):
kk = s3_path + k
self.delete(kk)
n += 1
return n
def reduce_boto_logging():
import boto3.s3.transfer
assert boto3.s3.transfer # silence pyflakes
for name in logging.Logger.manager.loggerDict.keys():
if name.startswith('boto') or name.startswith('urllib3') or name.startswith('s3transfer'):
logging.getLogger(name).setLevel(logging.ERROR)
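A hedged usage sketch of the Bucket helper defined above; the bucket and key names are hypothetical, and AWS credentials must be configured as noted in the module comment.
b = Bucket('s3://my-org')
b.upload('/tmp/memo.txt', 'mysurvey/memo')  # single-file upload
print(b.has('mysurvey/memo'))               # True
print(b.ls('mysurvey/'))                    # ['memo']
b.delete('mysurvey/memo')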
| 0
| 41
| 0
| 5,902
| 0
| 798
| 0
| -16
| 228
|
75f81d84c2a063746a49d48076491405182c7fc8
| 10,799
|
py
|
Python
|
storitch/handlers/store.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | null | null | null |
storitch/handlers/store.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | 1
|
2022-03-03T00:35:08.000Z
|
2022-03-03T00:35:08.000Z
|
storitch/handlers/store.py
|
thomaserlang/storitch
|
dbcf97af547d9cb1ae5c3994654e8db03e43a253
|
[
"MIT"
] | null | null | null |
import os
from wand import image
def thumbnail(path: str) -> bool:
'''
Specify the path and add a "@" followed by the arguments.
This allows us to easily get the original file, make the changes,
save the file with the full path, so the server never has to do
the operation again, as long as the arguments are precisely the same.
Arguments can be specified as follows:
SXx - Width, keeps aspect ratio
SYx - Height, keeps aspect ratio.
Ignored if SX is specified.
ROTATEx - Number of degrees you wish to
rotate the image. Supports
negative numbers.
RESx - Resolution, used for PDF
files, the higher the number,
the better the quality.
PAGEx - Page index in the PDF document.
The file format can be specified by ending the path with
an extension, e.g. .jpg, .png, .tiff, etc.
The arguments can be separated with _ or just
don't separate them. Works either way.
Example:
/foo/14bc...@SX1024_ROTATE90.png
Resizes the image to a width of 1024, rotates it 90 degrees and converts
it to a PNG file.
:param path: str
'''
p = path.split('@')
if len(p) != 2:
return False
if os.path.exists(path):
return True
size_match, rotate_match, resolution_match, \
page_match, format_match = __parse_arguments(p[1])
# a specific page in a PDF document
if page_match and page_match.group(1) != None:
page = '[{}]'.format(page_match.group(1))
else:
# Prevent a dicom file or pdf file from extracting multiple images
page = '[0]'
o = {
'filename': p[0]+page
}
if resolution_match and resolution_match.group(1) != None:
o['resolution'] = int(resolution_match.group(1))
with image.Image(**o) as img:
if size_match:
# resize, keep aspect ratio
if size_match.group(1) != None:# width
img.transform(resize=size_match.group(1))
elif size_match.group(2) != None:# height
img.transform(resize='x'+size_match.group(2))
if rotate_match:
if rotate_match.group(1) != None:
img.rotate(int(rotate_match.group(1)))
if format_match:
img.format = format_match.group(1)
img.save(filename=path)
return True
| 32.429429
| 96
| 0.567738
|
from typing import Union, Dict, List, Any, Tuple, Optional
import json, tempfile, os, logging, re, shutil, mimetypes, good
from tornado import httpclient, web, queues
from storitch import utils, config
from storitch.decorators import run_on_executor
from wand import image, exceptions
class Base_handler(web.RequestHandler):
def write_object(self, data: Union[Dict, List]) -> None:
self.set_json_headers()
self.write(json.dumps(data))
def set_json_headers(self) -> None:
self.set_header('Cache-Control', 'no-cache, must-revalidate')
self.set_header('Expires', 'Sat, 26 Jul 1997 05:00:00 GMT')
self.set_header('Content-Type', 'application/json')
def write_error(self, status_code: int, **kwargs) -> None:
self.set_json_headers()
error = {'error': 'Unknown error'}
if 'exc_info' in kwargs:
error['error'] = str(kwargs['exc_info'][1])
self.set_status(status_code)
self.write_object(error)
@run_on_executor
def move_to_permanent_store(self, temp_path: str, filename: str) -> Dict[str, Any]:
return move_to_permanent_store(temp_path, filename)
def get_content_type(self, path: str) -> str:
# From: https://www.tornadoweb.org/en/stable/_modules/tornado/web.html#StaticFileHandler
mime_type, encoding = mimetypes.guess_type(path)
# per RFC 6713, use the appropriate type for a gzip compressed file
if encoding == "gzip":
return "application/gzip"
# As of 2015-07-21 there is no bzip2 encoding defined at
# http://www.iana.org/assignments/media-types/media-types.xhtml
# So for that (and any other encoding), use octet-stream.
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
class Multipart_handler(Base_handler):
async def post(self) -> None:
if 'multipart/form-data' not in self.request.headers.get('Content-Type').lower():
raise web.HTTPError(400,
'Content-Type must be multipart/form-data, was: {}'.format(
self.request.headers.get('Content-Type')
)
)
if not self.request.files:
raise web.HTTPError(400, 'No files uploaded')
self.set_status(201)
results = []
for n in self.request.files:
for f in self.request.files[n]:
temp_path = await self.save_body(f['body'])
f['body'] = None
r = await self.move_to_permanent_store(temp_path, f['filename'])
results.append(r)
self.write_object(results)
@run_on_executor
def save_body(self, body: bytes) -> str:
with tempfile.NamedTemporaryFile(delete=False, prefix='storitch-') as t:
t.write(body)
return t.name
@web.stream_request_body
class Session_handler(Base_handler):
__schema__ = good.Schema({
'finished': good.Boolean(),
'filename': good.All(str, good.Length(min=1, max=255)),
good.Optional('session'): str,
})
def prepare(self) -> None:
if 'application/octet-stream' not in self.request.headers.get('Content-Type').lower():
raise web.HTTPError(400,
'Content-Type must be application/octet-stream, was: {}'.format(
self.request.headers.get('Content-Type')
)
)
j = self.request.headers.get('storitch-json', None)
if not j:
raise web.HTTPError(400, 'Header: storitch-json must be set')
data = json.loads(j)
self.h_finished = data['finished']
self.h_filename = data['filename']
self.h_session = data.get('session')
if not self.h_session:
self.h_session = self.new_session()
self.temp_path = os.path.join(
tempfile.gettempdir(),
self.h_session
)
if not os.path.isfile(self.temp_path):
raise web.HTTPError(400, 'Session unknown')
self.file = open(self.temp_path, 'ab')
def validate_json(self, data: Dict[str, Any]) -> Union[Dict[str, Any], List]:
try:
return self.__schema__(data)
except good.MultipleInvalid as ee:
data = []
for e in ee:
data.append(
'{}: {}'.format(
'.'.join(str(x) for x in e.path),
e.message,
)
)
raise web.HTTPError(400,' - '.join(d for d in data))
except good.Invalid as e:
raise web.HTTPError(400, '{}: {}'.format(
'.'.join(str(x) for x in e.path),
e.message,
))
async def data_received(self, chunk: bytes) -> None:
self.file.write(chunk)
async def put(self) -> None:
self.file.close()
if self.h_finished:
r = await self.move_to_permanent_store(self.temp_path, self.h_filename)
self.write_object(r)
else:
self.write_object({
'session': self.h_session,
})
def new_session(self) -> str:
with tempfile.NamedTemporaryFile(delete=False, prefix='storitch-') as t:
return os.path.basename(t.name)
class Thumbnail_handler(Base_handler):
async def get(self, hash_: Optional[str] = None) -> None:
if not hash_ or len(hash_) < 64:
raise web.HTTPError(404, 'Please specify a file hash')
path = os.path.abspath(os.path.join(
os.path.realpath(config['store_path']),
utils.path_from_hash(hash_),
hash_
))
if '@' in hash_:
path = await self.thumbnail(path)
if not path:
self.write('Failed to create the thumbnail')
self.set_header('Content-Type', self.get_content_type(path))
with open(path, 'rb') as f:
while True:
d = f.read(16384)
if not d:
break
self.write(d)
@run_on_executor
def thumbnail(self, path: str) -> str:
if thumbnail(path):
return path
def move_to_permanent_store(temp_path: str, filename: str) -> Dict[str, Any]:
hash_ = utils.file_sha256(temp_path)
path = os.path.abspath(os.path.join(
os.path.realpath(config['store_path']),
utils.path_from_hash(hash_),
))
if not os.path.exists(path):
os.makedirs(path, mode=0o755)
path = os.path.join(path, hash_)
if not os.path.exists(path):
shutil.move(temp_path, path)
os.chmod(path, 0o755)
else:
os.remove(temp_path)
extra = {
'type': 'file',
}
d = os.path.splitext(filename)
if len(d) == 2:
ext = d[1]
if ext.lower() in config['image_exts']:
wh = image_width_high(path)
if wh:
wh['type'] = 'image'
if wh:
extra.update(wh)
return {
'stored': True,
'filesize': os.stat(path).st_size,
'hash': hash_,
'filename': filename,
**extra
}
def image_width_high(path) -> Optional[Dict[str, int]]:
try:
with image.Image(filename=path) as img:
return {
'width': img.width,
'height': img.height,
}
except (ValueError, exceptions.MissingDelegateError):
return None
def thumbnail(path: str) -> bool:
'''
Specify the path and add a "@" followed by the arguments.
This allows us to easily get the original file, make the changes,
save the file with the full path, so the server never has to do
the operation again, as long as the arguments are precisely the same.
Arguments can be specified as follows:
SXx - Width, keeps aspect ratio
SYx - Height, keeps aspect ratio.
Ignored if SX is specified.
ROTATEx - Number of degrees you wish to
rotate the image. Supports
negative numbers.
RESx - Resolution, used for PDF
files, the higher the number,
the better the quality.
PAGEx - Page index in the PDF document.
The file format can be specified by ending the path with
an extension, e.g. .jpg, .png, .tiff, etc.
The arguments can be separated with _ or just
don't separate them. Works either way.
Example:
/foo/14bc...@SX1024_ROTATE90.png
Resizes the image to a width of 1024, rotates it 90 degrees and converts
it to a PNG file.
:param path: str
'''
p = path.split('@')
if len(p) != 2:
return False
if os.path.exists(path):
return True
size_match, rotate_match, resolution_match, \
page_match, format_match = __parse_arguments(p[1])
# a specific page in a PDF document
if page_match and page_match.group(1) != None:
page = '[{}]'.format(page_match.group(1))
else:
# Prevent a dicom file or pdf file from extracting multiple images
page = '[0]'
o = {
'filename': p[0]+page
}
if resolution_match and resolution_match.group(1) != None:
o['resolution'] = int(resolution_match.group(1))
with image.Image(**o) as img:
if size_match:
# resize, keep aspect ratio
if size_match.group(1) != None:# width
img.transform(resize=size_match.group(1))
elif size_match.group(2) != None:# height
img.transform(resize='x'+size_match.group(2))
if rotate_match:
if rotate_match.group(1) != None:
img.rotate(int(rotate_match.group(1)))
if format_match:
img.format = format_match.group(1)
img.save(filename=path)
return True
def __parse_arguments(arguments: str) -> Tuple[str, str, str, str, str]:
size_match = re.search(
'SX(\d+)|SY(\d+)',
arguments,
re.I
)
rotate_match = re.search(
'ROTATE(-?\d+)',
arguments,
re.I
)
resolution_match = re.search(
'RES(\d+)',
arguments,
re.I
)
page_match = re.search(
'PAGE(\d+)',
arguments,
re.I
)
format_match = re.search(
'\.([a-z0-9]{2,5})',
arguments,
re.I
)
return (
size_match,
rotate_match,
resolution_match,
page_match,
format_match,
)
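A small sketch of the path grammar that thumbnail() and __parse_arguments decode; the hash prefix below is hypothetical, mirroring the docstring example.
path = '/foo/14bc...@SX1024_ROTATE90_RES200_PAGE2.png'
base, args = path.split('@')  # base: stored file, args: operations to apply
# SX1024   -> resize to width 1024 (aspect ratio kept; SY sets height instead)
# ROTATE90 -> rotate 90 degrees (negative values allowed)
# RES200   -> render PDFs at resolution 200
# PAGE2    -> take page index 2 of a PDF
# .png     -> convert the result to PNG before saving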
| 0
| 2,866
| 1,489
| 1,572
| 0
| 1,877
| 0
| 164
| 357
|
b0fbc439e2b9764f97c049f14ced20df3b6321a9
| 7,052
|
py
|
Python
|
ddf_library/functions/graph_lib/page_rank.py
|
eubr-bigsea/Compss-Python
|
09ab7c474c8badc9932de3e1148f62ffba16b0b2
|
[
"Apache-2.0"
] | 3
|
2017-08-22T11:32:02.000Z
|
2021-08-09T09:35:51.000Z
|
ddf_library/functions/graph_lib/page_rank.py
|
eubr-bigsea/Compss-Python
|
09ab7c474c8badc9932de3e1148f62ffba16b0b2
|
[
"Apache-2.0"
] | null | null | null |
ddf_library/functions/graph_lib/page_rank.py
|
eubr-bigsea/Compss-Python
|
09ab7c474c8badc9932de3e1148f62ffba16b0b2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Lucas Miguel S Ponce"
__email__ = "[email protected]"
from ddf_library.utils import generate_info, create_stage_files, save_stage_file
from pycompss.api.api import compss_wait_on
import numpy as np
__all__ = ['PageRank']
# TODO: this algorithm can be optimized
def _pagerank_split(result, nfrag):
"""Split the list of vertex into nfrag parts.
Note: the list of unique vertex and their ranks must be fit in memory.
"""
result = compss_wait_on(result)
result = np.array_split(result, nfrag)
outfiles = create_stage_files(nfrag)
info = [0] * nfrag
for f, table in enumerate(result):
save_stage_file(outfiles[f], table)
info[f] = generate_info(table, f)
return outfiles, info
| 29.630252
| 78
| 0.620108
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Lucas Miguel S Ponce"
__email__ = "[email protected]"
from ddf_library.bases.metadata import Status, OPTGroup
from ddf_library.bases.context_base import ContextBase
from ddf_library.ddf import DDF
from ddf_library.bases.ddf_model import ModelDDF
from ddf_library.utils import generate_info, read_stage_file, \
create_stage_files, save_stage_file
from pycompss.api.api import compss_wait_on, compss_delete_object
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
from pycompss.api.parameter import FILE_IN, COLLECTION_IN
import pandas as pd
import numpy as np
__all__ = ['PageRank']
# TODO: this algorithm can be optimized
class PageRank(ModelDDF):
# noinspection PyUnresolvedReferences
"""
PageRank is one of the methods Google uses to determine a page's
relevance or importance. The idea that Page Rank brought up was that, the
importance of any web page can be judged by looking at the pages that link
to it.
PageRank can be utilized in others domains. For example, may also be used
as a methodology to measure the apparent impact of a community.
.. note: This parallel implementation assumes that the list of unique
vertex can be fit in memory.
:Example:
>>> pr = PageRank(damping_factor=0.85)
>>> ddf2 = pr.transform(ddf1, inlink_col='col1', outlink_col='col2')
"""
def __init__(self, damping_factor=0.85, max_iters=100):
"""
:param damping_factor: Default damping factor is 0.85;
:param max_iters: Maximum number of iterations (default is 100).
"""
super(PageRank, self).__init__()
self.inlink_col = None
self.outlink_col = None
self.max_iters = max_iters
self.damping_factor = damping_factor
def transform(self, data, outlink_col, inlink_col):
"""
Generates the PageRank's result.
:param data: DDF
:param outlink_col: Out-link vertex;
:param inlink_col: In-link vertex;
:return: DDF with Vertex and Rank columns
"""
df, nfrag, tmp = self._ddf_initial_setup(data)
self.inlink_col = inlink_col
self.outlink_col = outlink_col
col1 = 'Vertex'
col2 = 'Rank'
"""
Load all URL's from the data and initialize their neighbors.
Initialize each page’s rank to 1.0.
"""
adj_list = [{} for _ in range(nfrag)]
rank_list = [{} for _ in range(nfrag)]
counts_in = [{} for _ in range(nfrag)]
for i in range(nfrag):
adj_list[i], rank_list[i], counts_in[i] = \
_pr_create_adjlist(df[i], inlink_col, outlink_col)
counts_in = merge_reduce(_merge_counts, counts_in)
for i in range(nfrag):
adj_list[i] = _pr_update_adjlist(adj_list[i], counts_in)
compss_delete_object(counts_in)
for iteration in range(self.max_iters):
"""Calculate the partial contribution of each vertex."""
contributions = [_calc_contribuitions(adj_list[i], rank_list[i])
for i in range(nfrag)]
merged_c = merge_reduce(_merge_counts, contributions)
"""Update each vertex rank in the fragment."""
rank_list = [_update_rank(rank_list[i], merged_c,
self.damping_factor)
for i in range(nfrag)]
merged_table = merge_ranks(rank_list, col1, col2)
result, info = _pagerank_split(merged_table, nfrag)
new_state_uuid = ContextBase\
.ddf_add_task(self.name,
status=Status.STATUS_COMPLETED,
opt=OPTGroup.OPT_OTHER,
info_data=info,
parent=[tmp.last_uuid],
result=result,
function=self.transform,
parameters=data)
return DDF(last_uuid=new_state_uuid)
@task(returns=3, data=FILE_IN)
def _pr_create_adjlist(data, inlink, outlink):
cols = [outlink, inlink]
adj = {}
ranks = {}
data = read_stage_file(data, cols=cols)
for link in data[cols].to_numpy():
v_out, v_in = link
# Generate a partial adjacency list.
if v_out in adj:
adj[v_out][0].append(v_in)
adj[v_out][1] += 1
else:
adj[v_out] = [[v_in], 1]
# Generate a partial rank list of each vertex.
if v_out not in ranks:
ranks[v_out] = 1.0 # Rank, contributions, main
if v_in not in ranks:
ranks[v_in] = 1.0
# Generate a partial list of frequency of each vertex.
counts_in = {}
for v_out in adj:
counts_in[v_out] = adj[v_out][1]
return adj, ranks, counts_in
@task(returns=1)
def _merge_counts(counts1, counts2):
"""
Merge the frequency of each vertex.
.. note:: It assumes that the frequency list can fit in memory.
"""
for v_out in counts2:
if v_out in counts1:
counts1[v_out] += counts2[v_out]
else:
counts1[v_out] = counts2[v_out]
return counts1
@task(returns=1)
def _pr_update_adjlist(adj1, counts_in):
"""Update the frequency of vertex in each fragment."""
for key in adj1:
adj1[key][1] = counts_in[key]
return adj1
@task(returns=1)
def _calc_contribuitions(adj, ranks):
"""Calculate the partial contribution of each vertex."""
contrib = {}
for key in adj:
urls = adj[key][0]
num_neighbors = adj[key][1]
rank = ranks[key]
for url in urls:
if url not in contrib:
# out = contrib
contrib[url] = rank/num_neighbors
else:
contrib[url] += rank/num_neighbors
return contrib
@task(returns=1)
def _update_rank(ranks, contrib, factor):
"""Update the rank of each vertex in the fragment."""
bo = 1.0 - factor
for key in contrib:
if key in ranks:
ranks[key] = bo + factor*contrib[key]
return ranks
@task(returns=1, dfs=COLLECTION_IN)
def merge_ranks(dfs, c1, c2):
"""Create the final result. Merge and remove duplicates vertex."""
dfs = [pd.DataFrame(ranks.items(), columns=[c1, c2]) for ranks in dfs]
dfs = pd.concat(dfs, ignore_index=True)\
.drop_duplicates(ignore_index=True)\
.sort_values(['Rank'], ascending=False, ignore_index=True)
return dfs
def _pagerank_split(result, nfrag):
"""Split the list of vertex into nfrag parts.
Note: the list of unique vertex and their ranks must be fit in memory.
"""
result = compss_wait_on(result)
result = np.array_split(result, nfrag)
outfiles = create_stage_files(nfrag)
info = [0] * nfrag
for f, table in enumerate(result):
save_stage_file(outfiles[f], table)
info[f] = generate_info(table, f)
return outfiles, info
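A standalone sketch of the rank update applied by _update_rank above, i.e. rank(v) = (1 - d) + d * contribution(v); the contribution values are hypothetical.
d = 0.85
contrib = {'A': 0.5, 'B': 1.5}  # hypothetical merged contributions
ranks = {'A': 1.0, 'B': 1.0}
for v in contrib:
    ranks[v] = (1.0 - d) + d * contrib[v]
print(ranks)  # {'A': 0.575, 'B': 1.425}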
| 3
| 2,368
| 0
| 3,314
| 0
| 0
| 0
| 225
| 339
|
faeba7f3ca3382662de94211ec44a097d3c7ac9f
| 1,390
|
py
|
Python
|
UART_py/tela_serial.py
|
Rodrigo98Matos/Interface-Grafca-Serial
|
b996655e7376229856116ec3d150f3210a82cb4d
|
[
"MIT"
] | null | null | null |
UART_py/tela_serial.py
|
Rodrigo98Matos/Interface-Grafca-Serial
|
b996655e7376229856116ec3d150f3210a82cb4d
|
[
"MIT"
] | null | null | null |
UART_py/tela_serial.py
|
Rodrigo98Matos/Interface-Grafca-Serial
|
b996655e7376229856116ec3d150f3210a82cb4d
|
[
"MIT"
] | null | null | null |
from uart_serial import uart
arduino = uart()
| 37.567568
| 117
| 0.538129
|
import PySimpleGUI as sg
from uart_serial import uart
arduino = uart()
class tela:
def __init__(self, portas):
#Layout
sg.theme('Black')
layout = [
[sg.Text('Porta:',size=(7,0)),sg.Combo(values=(portas),key='porta')],
[sg.Text('Baudrate:',size=(7,0)),sg.Combo(values=([9600,115200]),key='baudrate')],
[sg.Checkbox('Dados da Missão',key='dados_missao'),sg.Checkbox('Beacon',key='beacon')],
[sg.Button('Continuar')],
]
"""layout = [
[sg.Text('Nome',size=(5,0)),sg.Input(size=(15,0),key='nome')],
[sg.Text('Idade',size=(5,0)),sg.Input(size=(5,0),key='idade')],
[sg.Text('Email:')],
[sg.Checkbox('Gmail',key='gmail'),sg.Checkbox('Hotmail',key='hotmail'),sg.Checkbox('Yahoo',key='yahoo')],
[sg.Radio('Sim','email',key='email_sim'),sg.Radio('Não','email',key='email_nao')],
[sg.Button('Enviar dados')],
[sg.Output(size=(30,20))]
]"""
# Window
self.janela = sg.Window("dados do Usuário").layout(layout)
# Extract the data entered on the window
self.button, self.values = self.janela.Read()
def iniciar(self):
while True:
self.button, self.values = self.janela.Read()
if self.button == sg.WIN_CLOSED:
break
print(self.values)
| 6
| 0
| 0
| 1,291
| 0
| 0
| 0
| 3
| 45
|
a747752e784483f13e0672fa7ef44261d743dd9f
| 403
|
py
|
Python
|
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-11-27 20:28
| 21.210526
| 49
| 0.615385
|
# Generated by Django 2.2.6 on 2019-11-27 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0016_auto_20191127_1424'),
]
operations = [
migrations.AddField(
model_name='promocode',
name='max_usage_per_account',
field=models.IntegerField(default=1),
),
]
| 0
| 0
| 0
| 289
| 0
| 0
| 0
| 19
| 46
|
838a777e045278ea92893c031457352439926ec4
| 1,897
|
py
|
Python
|
transact/models.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
transact/models.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
transact/models.py
|
BrilliantGrant/Transaction-application
|
988fdbd6ed8a1fea9ca8366eeb2b30275b727836
|
[
"MIT",
"Unlicense"
] | null | null | null |
# Create your models here.
| 25.293333
| 72
| 0.710596
|
from django.contrib.auth.models import User
from tinymce.models import HTMLField
from django.db import models
# Create your models here.
class Pic(models.Model):
pic = models.ImageField(upload_to = "pics/",null = True)
user = models.ForeignKey(User,null=True)
pic_name = models.CharField(max_length = 30,null = True)
likes = models.IntegerField(default=0)
pic_caption = models.TextField(null = True)
pub_date = models.DateTimeField(auto_now_add=True,null=True)
# profile = models.ForeignKey(Profile, null=True)
comments = models.IntegerField(default=0)
def __str__(self):
return self.pic_name
def delete_pic(self):
self.delete()
def save_pic(self):
self.save()
def update_caption(self,new_caption):
self.pic_caption = new_caption
self.save()
@classmethod
def get_pics_by_user(cls,id):
sent_pics = Pic.objects.filter(user_id=id)
return sent_pics
@classmethod
def get_pics_by_id(cls,id):
fetched_pic = Pic.objects.get(id = id)
return fetched_pic
class Meta:
ordering = ['-pub_date']
def __str__(self):
return self.user.username
def save_profile(self):
self.save()
class Profile(models.Model):
username = models.CharField(default='User',max_length=30)
email = models.CharField(default='email',max_length=30)
profile_pic = models.ImageField(upload_to = "profile/",null=True)
Phone_number = models.CharField(max_length =30)
Amount = models.CharField(max_length =30)
withdraw = models.CharField(max_length =30)
Balance = models.CharField(max_length =30)
def __str__(self):
return self.username
def delete_profile(self):
self.delete()
def save_profile(self):
self.save()
@classmethod
def search_profile(cls,search_term):
got_profiles = cls.objects.filter(first_name__icontains = search_term)
return got_profiles
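Note that on Django 2.0+ ForeignKey requires an explicit on_delete argument, so the Pic.user field above only works on older Django versions. A minimal sketch of the modern declaration (the CASCADE choice is an assumption, and the class name is hypothetical to avoid clashing with Pic above):
from django.contrib.auth.models import User
from django.db import models

class PicModern(models.Model):
    # Django >= 2.0 makes on_delete mandatory on ForeignKey.
    user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)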
| 0
| 323
| 0
| 1,390
| 0
| 0
| 0
| 44
| 111
|
4a30bc154d6f294fba0d9fd2e54096f76bfb7a5f
| 553
|
py
|
Python
|
migrations/versions/6564c80d1598_.py
|
realtimclemans/SafetyHealthDotCloud
|
c7eca52f3e6519de34b05ba573a5778423c2dae2
|
[
"MIT"
] | null | null | null |
migrations/versions/6564c80d1598_.py
|
realtimclemans/SafetyHealthDotCloud
|
c7eca52f3e6519de34b05ba573a5778423c2dae2
|
[
"MIT"
] | 1
|
2021-02-15T15:58:54.000Z
|
2021-02-15T15:58:54.000Z
|
migrations/versions/6564c80d1598_.py
|
realtimclemans/SafetyHealthDotCloud
|
c7eca52f3e6519de34b05ba573a5778423c2dae2
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 6564c80d1598
Revises: c3c2dc9000d3
Create Date: 2021-06-19 17:03:45.811885
"""
# revision identifiers, used by Alembic.
revision = '6564c80d1598'
down_revision = 'c3c2dc9000d3'
branch_labels = None
depends_on = None
| 19.068966
| 65
| 0.687161
|
"""empty message
Revision ID: 6564c80d1598
Revises: c3c2dc9000d3
Create Date: 2021-06-19 17:03:45.811885
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6564c80d1598'
down_revision = 'c3c2dc9000d3'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
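For reference, a hedged sketch of what a non-empty revision body typically looks like; the table and column names are hypothetical, and this particular revision is intentionally empty.
from alembic import op
import sqlalchemy as sa

def upgrade():
    # add a nullable column so existing rows remain valid
    op.add_column('users', sa.Column('last_seen', sa.DateTime(), nullable=True))

def downgrade():
    op.drop_column('users', 'last_seen')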
| 0
| 0
| 0
| 0
| 0
| 208
| 0
| 3
| 90
|
5eb6460889a29c993a99192a3b46f1a9dae54de9
| 1,181
|
py
|
Python
|
setup.py
|
tizz98/xl
|
4534a1792f878964fedd87432c438ab6364ece49
|
[
"MIT"
] | 1
|
2018-03-30T17:36:41.000Z
|
2018-03-30T17:36:41.000Z
|
setup.py
|
tizz98/xl
|
4534a1792f878964fedd87432c438ab6364ece49
|
[
"MIT"
] | null | null | null |
setup.py
|
tizz98/xl
|
4534a1792f878964fedd87432c438ab6364ece49
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
with open('README.md') as f:
long_description = f.read()
from xl import __str_version__, __author__
setup(
name='xl',
version=__str_version__,
description='A nice way of generating excel formulas in python.',
long_description=long_description,
url='https://github.com/tizz98/xl',
download_url='https://github.com/tizz98/xl/tarball/%s' % (
__str_version__
),
author=__author__,
author_email='[email protected]',
license='MIT',
packages=['xl'],
keywords='xl excel formulas formula formulae',
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| 27.465116
| 69
| 0.647756
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
with open('README.md') as f:
long_description = f.read()
from xl import __str_version__, __author__
setup(
name='xl',
version=__str_version__,
description='A nice way of generating excel formulas in python.',
long_description=long_description,
url='https://github.com/tizz98/xl',
download_url='https://github.com/tizz98/xl/tarball/%s' % (
__str_version__
),
author=__author__,
author_email='[email protected]',
license='MIT',
packages=['xl'],
keywords='xl excel formulas formula formulae',
zip_safe=True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
91c554ac6c77dd73935b5f3788cc38b6a16bd729
| 420
|
py
|
Python
|
GroupCondition.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | 4
|
2020-08-18T05:29:38.000Z
|
2021-03-13T19:01:10.000Z
|
GroupCondition.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | null | null | null |
GroupCondition.py
|
simplymanas/python-learning
|
75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0
|
[
"Apache-2.0"
] | 1
|
2020-08-29T12:57:17.000Z
|
2020-08-29T12:57:17.000Z
|
# all, any for group condition check
# Manas Dash
# 22nd July 2020
# think of any (or) and all (and) as a series of logical `or` and `and` operators
healthy_percentage = 100
have_money = 0
no_of_friends = 5
mental_happiness = [
healthy_percentage > 50,
have_money > 0,
no_of_friends >= 1
]
if all(mental_happiness):
print('happiness inside')
if any(mental_happiness):
print('happiness outside')
# happiness outside
| 17.5
| 75
| 0.738095
|
# all, any for group condition check
# Manas Dash
# 22nd July 2020
# think of any (or) and all (and) as a series of logical `or` and `and` operators
healthy_percentage = 100
have_money = 0
no_of_friends = 5
mental_happiness = [
healthy_percentage > 50,
have_money > 0,
no_of_friends >= 1
]
if all(mental_happiness):
print('happiness inside')
if any(mental_happiness):
print('happiness outside')
# happiness outside
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
41559822f3cd5754bbcded35318328ac3c23e9ab
| 12,012
|
py
|
Python
|
zx64c/ast.py
|
khrynczenko/zx64c
|
5a95bef1dff281266ea3f0d0bfd63d27ab5e9965
|
[
"Apache-2.0"
] | null | null | null |
zx64c/ast.py
|
khrynczenko/zx64c
|
5a95bef1dff281266ea3f0d0bfd63d27ab5e9965
|
[
"Apache-2.0"
] | null | null | null |
zx64c/ast.py
|
khrynczenko/zx64c
|
5a95bef1dff281266ea3f0d0bfd63d27ab5e9965
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from typing import TypeVar
T = TypeVar("T")
| 24.217742
| 88
| 0.635614
|
from __future__ import annotations
import abc
from typing import List, Text, TypeVar, Generic
from abc import ABC
from functools import singledispatchmethod
from dataclasses import dataclass
from zx64c.types import Type, Callable
T = TypeVar("T")
class AstVisitor(ABC, Generic[T]):
@abc.abstractmethod
def visit_program(self, node: Program) -> T:
pass
@abc.abstractmethod
def visit_function(self, node: Function) -> T:
pass
@abc.abstractmethod
def visit_block(self, node: Block) -> T:
pass
@abc.abstractmethod
def visit_if(self, node: If) -> T:
pass
@abc.abstractmethod
def visit_print(self, node: Print) -> T:
pass
@abc.abstractmethod
def visit_let(self, node: Let) -> T:
pass
@abc.abstractmethod
def visit_return(self, node: Return) -> T:
pass
@abc.abstractmethod
def visit_assignment(self, node: Assignment) -> T:
pass
@abc.abstractmethod
def visit_equal(self, node: Equal) -> T:
pass
@abc.abstractmethod
def visit_not_equal(self, node: NotEqual) -> T:
pass
@abc.abstractmethod
def visit_addition(self, node: Addition) -> T:
pass
@abc.abstractmethod
def visit_subtraction(self, node: Subtraction) -> T:
pass
@abc.abstractmethod
def visit_negation(self, node: Negation) -> T:
pass
@abc.abstractmethod
def visit_function_call(self, node: FunctionCall) -> T:
pass
@abc.abstractmethod
def visit_identifier(self, node: Identifier) -> T:
pass
@abc.abstractmethod
def visit_unsignedint(self, node: Unsignedint) -> T:
pass
@abc.abstractmethod
def visit_bool(self, node: Bool) -> T:
pass
class SourceContext:
def __init__(self, line: int, column: int):
self._line = line
self._column = column
def __eq__(self, rhs: SourceContext) -> bool:
return self._line == rhs._line and self._column == rhs._column
@property
def line(self) -> int:
return self._line
@property
def column(self) -> int:
return self._column
class Ast(ABC):
def __init__(self, context: SourceContext):
self._context = context
@property
def context(self) -> SourceContext:
return self._context
@abc.abstractmethod
def visit(self, v: AstVisitor[T]) -> T:
pass
@abc.abstractmethod
def __eq__(self, rhs: Ast) -> bool:
pass
class SjasmplusSnapshotProgram(Ast):
def __init__(self, program: Program, source_name: Text):
super().__init__(program.context)
self.program = program
self.source_name = source_name
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_program(self)
@SjasmplusSnapshotProgram.__eq__.register
def __eq__(self, rhs: SjasmplusSnapshotProgram) -> bool:
return self.program == rhs.program and self.source_name == rhs.source_name
class Program(Ast):
def __init__(self, functions: List[Function], context: SourceContext):
super().__init__(context)
self.functions = functions
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_program(self)
@Program.__eq__.register
def _(self, rhs: Program) -> bool:
return self.functions == rhs.functions and self.context == rhs.context
@dataclass
class Parameter:
name: str
type_id: Type
class Function(Ast):
def __init__(
self,
name: str,
parameters: List[Parameter],
return_type: Type,
code_block: Block,
context: SourceContext,
):
super().__init__(context)
self.name = name
self.parameters = parameters
self.return_type = return_type
self.code_block = code_block
self.type = Callable(return_type, [p.type_id for p in parameters])
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_function(self)
@Function.__eq__.register
def _(self, rhs: Function) -> bool:
return (
self.name == rhs.name
and self.parameters == rhs.parameters
and self.return_type == rhs.return_type
and self.code_block == rhs.code_block
)
class Block(Ast):
def __init__(self, statements: [Ast], context: SourceContext):
super().__init__(context)
self.statements = statements
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_block(self)
@Block.__eq__.register
def _(self, rhs: Block) -> bool:
return self.statements == rhs.statements
class If(Ast):
def __init__(self, condition: Ast, consequence: Ast, context: SourceContext):
super().__init__(context)
self.condition = condition
self.consequence = consequence
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_if(self)
@If.__eq__.register
def _(self, rhs: If) -> bool:
return self.condition == rhs.condition and self.consequence == rhs.consequence
class Print(Ast):
def __init__(self, expression: Ast, context: SourceContext):
super().__init__(context)
self.expression = expression
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_print(self)
@Print.__eq__.register
def _(self, rhs: Print) -> bool:
return self.expression == rhs.expression and self.context == rhs.context
class Let(Ast):
def __init__(self, name: str, var_type: Type, rhs: Ast, context: SourceContext):
super().__init__(context)
self.name = name
self.var_type = var_type
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_let(self)
@Let.__eq__.register
def _(self, rhs: Let) -> bool:
return (
self.name == rhs.name
and self.var_type == rhs.var_type
and self.rhs == rhs.rhs
and self.context == rhs.context
)
class Assignment(Ast):
def __init__(self, name: str, rhs: Ast, context: SourceContext):
super().__init__(context)
self.name = name
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_assignment(self)
@Assignment.__eq__.register
def _(self, rhs: Assignment) -> bool:
return self.name == rhs.name and self.rhs == rhs.rhs and self.context == rhs.context
class Return(Ast):
def __init__(self, expr: Ast, context: SourceContext):
super().__init__(context)
self.expr = expr
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_return(self)
@Return.__eq__.register
def _(self, rhs: Return) -> bool:
return self.expr == rhs.expr
class Equal(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_equal(self)
@Equal.__eq__.register
def _(self, rhs: Equal) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class NotEqual(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_not_equal(self)
@NotEqual.__eq__.register
def _(self, rhs: NotEqual) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class Addition(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_addition(self)
@Addition.__eq__.register
def _(self, rhs: Addition) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class Subtraction(Ast):
def __init__(self, lhs: Ast, rhs: Ast, context: SourceContext):
super().__init__(context)
self.lhs = lhs
self.rhs = rhs
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_subtraction(self)
@Subtraction.__eq__.register
def _(self, rhs: Subtraction) -> bool:
return self.lhs == rhs.lhs and self.rhs == rhs.rhs and self.context == rhs.context
class Negation(Ast):
def __init__(self, expression: Ast, context: SourceContext):
super().__init__(context)
self.expression = expression
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_negation(self)
@Negation.__eq__.register
def _(self, rhs: Negation) -> bool:
return self.expression == rhs.expression and self.context == rhs.context
class FunctionCall(Ast):
def __init__(
self, function_name: str, arguments: List[Ast], context: SourceContext
):
super().__init__(context)
self.function_name = function_name
self.arguments = arguments
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_function_call(self)
@FunctionCall.__eq__.register
def _(self, rhs: FunctionCall) -> bool:
return (
self.function_name == rhs.function_name
and self.arguments == rhs.arguments
and self.context == rhs.context
)
class Identifier(Ast):
def __init__(self, value: int, context: SourceContext):
super().__init__(context)
self.value = value
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_identifier(self)
@Identifier.__eq__.register
def _(self, rhs: Identifier) -> bool:
return (
isinstance(rhs, Identifier)
and self.value == rhs.value
and self.context == rhs.context
)
class Unsignedint(Ast):
def __init__(self, value: int, context: SourceContext):
super().__init__(context)
self.value = value
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_unsignedint(self)
@Unsignedint.__eq__.register
def _(self, rhs: Unsignedint) -> bool:
return (
isinstance(rhs, Unsignedint)
and self.value == rhs.value
and self.context == rhs.context
)
class Bool(Ast):
def __init__(self, value: bool, context: SourceContext):
super().__init__(context)
self.value = value
@singledispatchmethod
def __eq__(self, rhs: Ast) -> bool:
return False
def visit(self, v: AstVisitor[T]) -> T:
return v.visit_bool(self)
@Bool.__eq__.register
def _(self, rhs: Bool) -> bool:
return (
isinstance(rhs, Bool)
and self.value == rhs.value
and self.context == rhs.context
)
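A hedged sketch of a concrete visitor over the AST classes above, counting unsigned-integer literals; only the two methods exercised by this tiny tree are implemented, whereas a real AstVisitor subclass must implement them all.
ctx = SourceContext(1, 1)
tree = Addition(Unsignedint(1, ctx), Unsignedint(2, ctx), ctx)

class IntCounter:
    # double dispatch: each node's visit() calls back into the matching method
    def visit_addition(self, node):
        return node.lhs.visit(self) + node.rhs.visit(self)
    def visit_unsignedint(self, node):
        return 1

print(tree.visit(IntCounter()))  # -> 2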
| 0
| 4,957
| 0
| 5,386
| 0
| 0
| 0
| 58
| 1,490
|
12d6ccc9bc22866f30ca1c766583f034776a1025
| 4,946
|
py
|
Python
|
src/lib/recorder.py
|
l-maia/viseron
|
d762be93db74f780db13ac332bf8673c41592aa9
|
[
"MIT"
] | null | null | null |
src/lib/recorder.py
|
l-maia/viseron
|
d762be93db74f780db13ac332bf8673c41592aa9
|
[
"MIT"
] | null | null | null |
src/lib/recorder.py
|
l-maia/viseron
|
d762be93db74f780db13ac332bf8673c41592aa9
|
[
"MIT"
] | null | null | null |
import logging
LOGGER = logging.getLogger(__name__)
| 35.84058
| 88
| 0.641326
|
import datetime
import logging
import os
from threading import Thread
import cv2
from lib.cleanup import SegmentCleanup
from lib.helpers import draw_objects
from lib.mqtt.camera import MQTTCamera
from lib.segments import Segments
LOGGER = logging.getLogger(__name__)
class FFMPEGRecorder:
def __init__(self, config, detection_lock, mqtt_queue):
self._logger = logging.getLogger(__name__ + "." + config.camera.name_slug)
if getattr(config.recorder.logging, "level", None):
self._logger.setLevel(config.recorder.logging.level)
elif getattr(config.camera.logging, "level", None):
self._logger.setLevel(config.camera.logging.level)
self._logger.debug("Initializing ffmpeg recorder")
self.config = config
self._mqtt_queue = mqtt_queue
self.is_recording = False
self.last_recording_start = None
self.last_recording_end = None
self._event_start = None
self._event_end = None
self._recording_name = None
segments_folder = os.path.join(
config.recorder.segments_folder, config.camera.name
)
self.create_directory(segments_folder)
self._segmenter = Segments(
self._logger, config, segments_folder, detection_lock
)
self._segment_cleanup = SegmentCleanup(config)
self._mqtt_devices = {}
if self.config.recorder.thumbnail.send_to_mqtt:
self._mqtt_devices["latest_thumbnail"] = MQTTCamera(
config, mqtt_queue, object_id="latest_thumbnail"
)
def on_connect(self, client):
for device in self._mqtt_devices.values():
device.on_connect(client)
def subfolder_name(self, today):
return (
f"{today.year:04}-{today.month:02}-{today.day:02}/{self.config.camera.name}"
)
def create_thumbnail(self, file_name, frame, objects, resolution):
draw_objects(
frame.decoded_frame_umat_rgb, objects, resolution,
)
cv2.imwrite(file_name, frame.decoded_frame_umat_rgb)
if self.config.recorder.thumbnail.save_to_disk:
thumbnail_folder = os.path.join(
self.config.recorder.folder, "thumbnails", self.config.camera.name
)
self.create_directory(thumbnail_folder)
self._logger.debug(f"Saving thumbnail in {thumbnail_folder}")
if not cv2.imwrite(
os.path.join(thumbnail_folder, "latest_thumbnail.jpg"),
frame.decoded_frame_umat_rgb,
):
self._logger.error("Failed saving thumbnail to disk")
if self.config.recorder.thumbnail.send_to_mqtt and self._mqtt_devices:
ret, jpg = cv2.imencode(".jpg", frame.decoded_frame_umat_rgb)
if ret:
self._mqtt_devices["latest_thumbnail"].publish(jpg.tobytes())
def create_directory(self, path):
try:
if not os.path.isdir(path):
self._logger.debug(f"Creating folder {path}")
os.makedirs(path)
except FileExistsError:
pass
def start_recording(self, frame, objects, resolution):
self._logger.info("Starting recorder")
self.is_recording = True
self._segment_cleanup.pause()
now = datetime.datetime.now()
self.last_recording_start = now.isoformat()
self.last_recording_end = None
self._event_start = int(now.timestamp())
if self.config.recorder.folder is None:
self._logger.error("Output directory is not specified")
return
# Create filename
now = datetime.datetime.now()
video_name = f"{now.strftime('%H:%M:%S')}.{self.config.recorder.extension}"
thumbnail_name = f"{now.strftime('%H:%M:%S')}.jpg"
# Create foldername
subfolder = self.subfolder_name(now)
full_path = os.path.join(self.config.recorder.folder, subfolder)
self.create_directory(full_path)
if frame:
self.create_thumbnail(
os.path.join(full_path, thumbnail_name), frame, objects, resolution
)
self._recording_name = os.path.join(full_path, video_name)
def concat_segments(self):
self._segmenter.concat_segments(
self._event_start - self.config.recorder.lookback,
self._event_end,
self._recording_name,
)
# Don't resume cleanup if a new recording started during encoding
if not self.is_recording:
self._segment_cleanup.resume()
def stop_recording(self):
self._logger.info("Stopping recorder")
self.is_recording = False
now = datetime.datetime.now()
self.last_recording_end = now.isoformat()
self._event_end = int(now.timestamp())
concat_thread = Thread(target=self.concat_segments)
concat_thread.start()
| 0
| 0
| 0
| 4,652
| 0
| 0
| 0
| 39
| 201
|
6ad5c56d611d041bd4e20428bfb9dda30e760ae2
| 810
|
py
|
Python
|
src/USEFUL/basic_examples/example_setdefault.py
|
binxiangni/Python-and-Algorithms-and-Data-Structures
|
d2c082d261a68b06f533703867ae8a90ac7f4df1
|
[
"MIT"
] | 5
|
2017-08-03T06:33:49.000Z
|
2021-08-06T13:20:57.000Z
|
src/USEFUL/basic_examples/example_setdefault.py
|
ritahu/Python-and-Algorithms-and-Data-Structures
|
d2c082d261a68b06f533703867ae8a90ac7f4df1
|
[
"MIT"
] | null | null | null |
src/USEFUL/basic_examples/example_setdefault.py
|
ritahu/Python-and-Algorithms-and-Data-Structures
|
d2c082d261a68b06f533703867ae8a90ac7f4df1
|
[
"MIT"
] | 6
|
2017-04-27T13:30:49.000Z
|
2020-11-01T20:28:55.000Z
|
#!/usr/bin/env python
__author__ = "bt3"
if __name__ == '__main__':
test_setdef()
| 20.769231
| 51
| 0.549383
|
#!/usr/bin/env python
__author__ = "bt3"
def usual_dict(dict_data):
newdata = {}
for k, v in dict_data:
if k in newdata:
newdata[k].append(v)
else:
newdata[k] = [v]
return newdata
def setdefault_dict(dict_data):
newdata = {}
for k, v in dict_data:
newdata.setdefault(k, []).append(v)
return newdata
def test_setdef(module_name='this module'):
dict_data = (('key1', 'value1'),
('key1', 'value2'),
('key2', 'value3'),
('key2', 'value4'),
('key2', 'value5'),)
print(usual_dict(dict_data))
print(setdefault_dict(dict_data))
s = 'Tests in {name} have {con}!'
print(s.format(name=module_name, con='passed'))
if __name__ == '__main__':
test_setdef()
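# A hedged aside, not part of the original example: collections.defaultdict
# gives the same grouping without a setdefault call per key.
from collections import defaultdict

def defaultdict_dict(dict_data):
    newdata = defaultdict(list)
    for k, v in dict_data:
        newdata[k].append(v)  # missing keys start as empty lists automatically
    return dict(newdata)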
| 0
| 0
| 0
| 0
| 0
| 648
| 0
| 0
| 69
|
aa3156cdaa41c2efd05f58391fef51ddc65a1c89
| 423
|
py
|
Python
|
examples/PyObjC/pbplugin/PyTestPlugin.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 81
|
2015-11-29T12:17:39.000Z
|
2021-08-02T07:06:51.000Z
|
examples/PyObjC/pbplugin/PyTestPlugin.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 11
|
2016-10-23T16:34:10.000Z
|
2022-01-30T05:45:54.000Z
|
examples/PyObjC/pbplugin/PyTestPlugin.py
|
flupke/py2app
|
8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d
|
[
"MIT"
] | 21
|
2016-01-25T18:46:31.000Z
|
2021-01-08T17:38:03.000Z
|
import objc
import sys
print "PyTestPlugin", __name__
print u"[inside] currentBundle %r" % (objc.currentBundle(),)
| 23.5
| 60
| 0.664303
|
from Foundation import *
import objc
import sys
class PyTestPlugin(NSObject):
def init(self):
self = super(PyTestPlugin, self).init()
print 'class load!!'
print "Hello from py2app"
print "frozen", repr(getattr(sys, "frozen", None))
return self
class PyTestPlugin2(NSObject):
pass
print "PyTestPlugin", __name__
print u"[inside] currentBundle %r" % (objc.currentBundle(),)
| 0
| 0
| 0
| 236
| 0
| 0
| 0
| 3
| 68
|
4daa46c2152e35f2d6fed9c1e7f117f7a7694955
| 439
|
py
|
Python
|
tests/grammar/grammars/simple.py
|
AlexandreH/securify2
|
2d2ba0e1c20cdda550120ecdc1a7164db9b90e3c
|
[
"Apache-2.0"
] | 258
|
2020-01-23T16:58:38.000Z
|
2022-03-31T17:29:25.000Z
|
tests/grammar/grammars/simple.py
|
sirhashalot/securify2
|
6852707449577add14bafce8e304946b3490a977
|
[
"Apache-2.0"
] | 34
|
2020-01-30T06:11:58.000Z
|
2022-02-27T07:53:17.000Z
|
tests/grammar/grammars/simple.py
|
sirhashalot/securify2
|
6852707449577add14bafce8e304946b3490a977
|
[
"Apache-2.0"
] | 66
|
2020-01-28T09:23:05.000Z
|
2022-03-22T09:01:43.000Z
|
from __future__ import annotations
| 12.911765
| 60
| 0.738041
|
from __future__ import annotations
from typing import Sequence, Union, Optional
from securify.grammar import abstract_production, production
@abstract_production
class Base:
pass
@abstract_production
class AOrC(Base):
pass
@production
class A(AOrC, Base):
optional: Optional[Base]
@production
class B(Base):
seq: Sequence[AOrC]
@production
class C(AOrC, Base):
single: B
@production
class E(Base):
pass
| 0
| 154
| 0
| 0
| 0
| 0
| 0
| 62
| 184
|
ab7e4c8ae6107856ac778d397edffb130f2bed1a
| 2,490
|
py
|
Python
|
includes/vars.py
|
jerseyshawn/cf-vcap-vars
|
26effac112b500271e2f5ed298f0e6ab50bd7c4e
|
[
"MIT"
] | null | null | null |
includes/vars.py
|
jerseyshawn/cf-vcap-vars
|
26effac112b500271e2f5ed298f0e6ab50bd7c4e
|
[
"MIT"
] | null | null | null |
includes/vars.py
|
jerseyshawn/cf-vcap-vars
|
26effac112b500271e2f5ed298f0e6ab50bd7c4e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
| 35.571429
| 83
| 0.485944
|
#!/usr/bin/env python3
from os import environ
from json import dumps
class CloudFoundry:
def __init__(self, **kwargs):
self.__CF_VARIABLES__ = {'CF_INSTANCE_ADDR': '',
'CF_INSTANCE_GUID': '',
'CF_INSTANCE_INDEX': '',
'CF_INSTANCE_INTERNAL_IP': '',
'CF_INSTANCE_IP': '',
'CF_INSTANCE_PORT': '',
'CF_INSTANCE_PORTS': [{}],
'CF_STACK': '',
'DATABASE_URL': '',
'HOME': '',
'INSTANCE_GUID': '',
'INSTANCE_INDEX': '',
'LANG': '',
'MEMORY_LIMIT': '',
'PATH': '',
'PORT': '',
'PWD': '',
'TMPDIR': '',
'USER': '',
'VCAP_APP_HOST': '',
'VCAP_APP_PORT': '',
'VCAP_APPLICATION': {},
'VCAP_SERVICES': {}}
if kwargs.get('testing'):
self.load_testing_data(**kwargs)
else:
self.set_cf_variables(**kwargs)
def load_testing_data(self, **kwargs):
pass
def set_cf_variables(self, **kwargs):
variables = kwargs.get('variables', None)
if isinstance(variables, str):
cf_variables = [variable.upper() for variable in variables.split(',')]
elif isinstance(variables, list):
cf_variables = [variable.upper() for variable in variables]
else:
cf_variables = self.__CF_VARIABLES__
for cf_variable in cf_variables:
# found in env
if cf_variable in environ:
setattr(self,str(cf_variable).lower(),environ[cf_variable])
# not in env, but a known cf var
elif cf_variable in self.__CF_VARIABLES__:
setattr(self, str(cf_variable).lower(), self.__CF_VARIABLES__[cf_variable])
# not in env and not defaulted
else:
setattr(self, str(cf_variable).lower(), '')
def get_cf_variables(self, **kwargs):
variables = {}
for variable in sorted(self.__CF_VARIABLES__):
variable = variable.lower()
if hasattr(self, variable):
variables[variable] = getattr(self,variable)
print(dumps(variables, indent=4))
        return variables
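# Hypothetical usage sketch (values and variable names are examples, not from
# the source): restrict the instance to two variables and dump them as JSON.
if __name__ == '__main__':
    cf = CloudFoundry(variables=['PORT', 'HOME'])
    cf.get_cf_variables()  # prints and returns {"home": "...", "port": "..."}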
| 0
| 0
| 0
| 2,394
| 0
| 0
| 0
| 2
| 68
|
f6bbb564e37b6b680c5c92655011416fe930bcbf
| 2,043
|
py
|
Python
|
vega/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
vega/__init__.py
|
zjzh/vega
|
aa6e7b8c69024262fc483ee06113b4d1bd5156d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vega's methods."""
__all__ = [
"set_backend",
"is_cpu_device", "is_gpu_device", "is_npu_device",
"is_ms_backend", "is_tf_backend", "is_torch_backend",
"get_devices",
"ClassFactory", "ClassType",
"FileOps",
"run",
"init_cluster_args",
"module_existed",
"TrialAgent",
"get_network",
"get_dataset",
"get_trainer",
"get_quota",
]
__version__ = "1.8.0"
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported.')
from .common.backend_register import set_backend, is_cpu_device, is_gpu_device, is_npu_device, is_ms_backend, is_tf_backend, is_torch_backend, get_devices
from .common.class_factory import ClassFactory, ClassType
from .common.file_ops import FileOps
from .core import run, init_cluster_args, module_existed
from .trainer.trial_agent import TrialAgent
def get_network(name, **kwargs):
"""Return network."""
return ClassFactory.get_cls(ClassType.NETWORK, name)(**kwargs)
def get_dataset(name, **kwargs):
"""Return dataset."""
return ClassFactory.get_cls(ClassType.DATASET, name)(**kwargs)
def get_trainer(name="Trainer", **kwargs):
"""Return trainer."""
return ClassFactory.get_cls(ClassType.TRAINER, name)(**kwargs)
def get_quota(**kwargs):
"""Return quota."""
return ClassFactory.get_cls(ClassType.QUOTA, "Quota")(**kwargs)
| 28.375
| 96
| 0.714146
|
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vega's methods."""
__all__ = [
"set_backend",
"is_cpu_device", "is_gpu_device", "is_npu_device",
"is_ms_backend", "is_tf_backend", "is_torch_backend",
"get_devices",
"ClassFactory", "ClassType",
"FileOps",
"run",
"init_cluster_args",
"module_existed",
"TrialAgent",
"get_network",
"get_dataset",
"get_trainer",
"get_quota",
]
__version__ = "1.8.0"
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python < 3.6 is not supported.')
from .common.backend_register import set_backend, is_cpu_device, is_gpu_device, is_npu_device, \
is_ms_backend, is_tf_backend, is_torch_backend, get_devices
from .common.class_factory import ClassFactory, ClassType
from .common.file_ops import FileOps
from .core import run, init_cluster_args, module_existed
from .trainer.trial_agent import TrialAgent
from . import quota
def get_network(name, **kwargs):
"""Return network."""
return ClassFactory.get_cls(ClassType.NETWORK, name)(**kwargs)
def get_dataset(name, **kwargs):
"""Return dataset."""
return ClassFactory.get_cls(ClassType.DATASET, name)(**kwargs)
def get_trainer(name="Trainer", **kwargs):
"""Return trainer."""
return ClassFactory.get_cls(ClassType.TRAINER, name)(**kwargs)
def get_quota(**kwargs):
"""Return quota."""
return ClassFactory.get_cls(ClassType.QUOTA, "Quota")(**kwargs)
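# Illustrative only (the registry names below are assumptions, not from the
# source): each helper resolves a class registered with ClassFactory and
# instantiates it with the given kwargs, e.g.
#   net = get_network("ResNet", depth=20)            # hypothetical network name
#   dataset = get_dataset("Cifar10", batch_size=64)  # hypothetical dataset name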
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 22
|
a3fd214bd3ac94e9556d5163c8b69ad52bfd9956
| 13,033
|
py
|
Python
|
colour/models/tests/test_cie_lab.py
|
MaxSchambach/colour
|
3f3685d616fda4be58cec20bc1e16194805d7e2d
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cie_lab.py
|
MaxSchambach/colour
|
3f3685d616fda4be58cec20bc1e16194805d7e2d
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/tests/test_cie_lab.py
|
MaxSchambach/colour
|
3f3685d616fda4be58cec20bc1e16194805d7e2d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.cie_lab` module.
"""
from __future__ import division, unicode_literals
import unittest
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_Lab', 'TestLab_to_XYZ', 'TestLab_to_LCHab', 'TestLCHab_to_Lab'
]
if __name__ == '__main__':
unittest.main()
| 34.028721
| 79
| 0.58559
|
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.cie_lab` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from colour.models import XYZ_to_Lab, Lab_to_XYZ, Lab_to_LCHab, LCHab_to_Lab
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_Lab', 'TestLab_to_XYZ', 'TestLab_to_LCHab', 'TestLCHab_to_Lab'
]
class TestXYZ_to_Lab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.XYZ_to_Lab` definition unit tests
methods.
"""
def test_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.14222010, 0.23042768, 0.10495772])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(np.array([0.07818780, 0.06157201, 0.28099326])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.44757, 0.40745])),
np.array([41.52787529, 38.48089305, -5.73295122]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850])),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_Lab(
np.array([0.20654008, 0.12197225, 0.05136952]),
np.array([0.34570, 0.35850, 1.00000])),
np.array([41.52787529, 51.19354174, 19.91843098]),
decimal=7)
def test_n_dimensional_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition n-dimensional
support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
XYZ = np.tile(XYZ, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
XYZ = np.reshape(XYZ, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ, illuminant), Lab, decimal=7)
def test_domain_range_scale_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition
domain and range scale support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
illuminant = np.array([0.31270, 0.32900])
Lab = XYZ_to_Lab(XYZ, illuminant)
d_r = (('reference', 1, 1), (1, 1, 0.01), (100, 100, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Lab(XYZ * factor_a, illuminant),
Lab * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.XYZ_to_Lab` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
illuminant = np.array(case[0:2])
XYZ_to_Lab(XYZ, illuminant)
class TestLab_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.Lab_to_XYZ` definition unit tests
methods.
"""
def test_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([0.14222010, 0.23042768, 0.10495772]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 38.48089305, -5.73295122]),
np.array([0.44757, 0.40745])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_XYZ(
np.array([41.52787529, 51.19354174, 19.91843098]),
np.array([0.34570, 0.35850, 1.00000])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
def test_n_dimensional_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition n-dimensional
support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
Lab = np.tile(Lab, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
illuminant = np.tile(illuminant, (6, 1))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
illuminant = np.reshape(illuminant, (2, 3, 2))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab, illuminant), XYZ, decimal=7)
def test_domain_range_scale_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition
domain and range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
illuminant = np.array([0.31270, 0.32900])
XYZ = Lab_to_XYZ(Lab, illuminant)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_XYZ(Lab * factor_a, illuminant),
XYZ * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_Lab_to_XYZ(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_XYZ` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
illuminant = np.array(case[0:2])
Lab_to_XYZ(Lab, illuminant)
class TestLab_to_LCHab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.Lab_to_LCHab` definition unit tests
methods.
"""
def test_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition.
"""
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([41.52787529, 52.63858304, 26.92317922])),
np.array([41.52787529, 59.12425901, 27.08848784]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([55.11636304, -41.08791787, 30.91825778])),
np.array([55.11636304, 51.42135412, 143.03889556]),
decimal=7)
np.testing.assert_almost_equal(
Lab_to_LCHab(np.array([29.80565520, 20.01830466, -48.34913874])),
np.array([29.80565520, 52.32945383, 292.49133666]),
decimal=7)
def test_n_dimensional_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition
n-dimensional arrays support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
Lab = np.tile(Lab, (6, 1))
LCHab = np.tile(LCHab, (6, 1))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
Lab = np.reshape(Lab, (2, 3, 3))
LCHab = np.reshape(LCHab, (2, 3, 3))
np.testing.assert_almost_equal(Lab_to_LCHab(Lab), LCHab, decimal=7)
def test_domain_range_scale_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition domain and
range scale support.
"""
Lab = np.array([41.52787529, 52.63858304, 26.92317922])
LCHab = Lab_to_LCHab(Lab)
d_r = (('reference', 1, 1), (1, 0.01, np.array([0.01, 0.01, 1 / 360])),
(100, 1, np.array([1, 1, 1 / 3.6])))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
Lab_to_LCHab(Lab * factor_a), LCHab * factor_b, decimal=7)
@ignore_numpy_errors
def test_nan_Lab_to_LCHab(self):
"""
Tests :func:`colour.models.cie_lab.Lab_to_LCHab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
Lab = np.array(case)
Lab_to_LCHab(Lab)
class TestLCHab_to_Lab(unittest.TestCase):
"""
Defines :func:`colour.models.cie_lab.LCHab_to_Lab` definition unit tests
methods.
"""
def test_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition.
"""
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([41.52787529, 59.12425901, 27.08848784])),
np.array([41.52787529, 52.63858304, 26.92317922]),
decimal=7)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([55.11636304, 51.42135412, 143.03889556])),
np.array([55.11636304, -41.08791787, 30.91825778]),
decimal=7)
np.testing.assert_almost_equal(
LCHab_to_Lab(np.array([29.80565520, 52.32945383, 292.49133666])),
np.array([29.80565520, 20.01830466, -48.34913874]),
decimal=7)
def test_n_dimensional_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition
n-dimensional arrays support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
LCHab = np.tile(LCHab, (6, 1))
Lab = np.tile(Lab, (6, 1))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
LCHab = np.reshape(LCHab, (2, 3, 3))
Lab = np.reshape(Lab, (2, 3, 3))
np.testing.assert_almost_equal(LCHab_to_Lab(LCHab), Lab, decimal=7)
def test_domain_range_scale_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition domain and
range scale support.
"""
LCHab = np.array([41.52787529, 59.12425901, 27.08848784])
Lab = LCHab_to_Lab(LCHab)
d_r = (('reference', 1, 1), (1, np.array([0.01, 0.01, 1 / 360]), 0.01),
(100, np.array([1, 1, 1 / 3.6]), 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
LCHab_to_Lab(LCHab * factor_a), Lab * factor_b, decimal=7)
@ignore_numpy_errors
def test_nan_LCHab_to_Lab(self):
"""
Tests :func:`colour.models.cie_lab.LCHab_to_Lab` definition nan
support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
LCHab = np.array(case)
LCHab_to_Lab(LCHab)
if __name__ == '__main__':
unittest.main()
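# Running this module standalone follows the usual unittest entry point above,
# e.g. (an assumption about the workflow, not documented in this file):
#   python -m unittest colour.models.tests.test_cie_lab -v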
| 0
| 1,482
| 0
| 10,662
| 0
| 0
| 0
| 112
| 182
|
84c76c41a480dae4b33646b4f0e6c9ccbdced4c9
| 17,510
|
py
|
Python
|
letype_extractor.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
letype_extractor.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
letype_extractor.py
|
olzama/neural-supertagging
|
340a9b3eaf6427e5ec475cd03bc6f4b3d4891ba4
|
[
"MIT"
] | null | null | null |
import sys, pathlib
import pickle
from datetime import datetime
CONTEXT_WINDOW = 2
DEV = ['ws212', 'ecpa']
TEST = ['cb', 'ecpr', 'jhk', 'jhu', 'tgk', 'tgu', 'psk', 'psu', #'rondane',
'vm32', 'ws213', 'ws214', 'petet', 'wsj23']
IGNORE = ['ntucle', 'omw', 'wlb03', 'wnb03']
NONTRAIN = DEV + TEST + IGNORE
if __name__ == "__main__":
args = sys.argv[1:]
dt_str = '-'.join(str(datetime.now()).split()).replace(':','.')
run_id = sys.argv[3] + dt_str
    # guard against a missing argv[4] and leave autoreg defined either way
    autoreg = len(sys.argv) > 4 and sys.argv[4] == 'autoreg'
out_dir = './output/' + run_id
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=False)
le = LexTypeExtractor()
le.parse_lexicons(args[0])
le.stats['total lextypes'] = len(le.lextypes)
if autoreg:
le.process_testsuites_autoreg(args[1],le.lextypes,out_dir)
else:
le.process_testsuites_nonautoreg(args[1],le.lextypes,out_dir)
with open(out_dir + '/lextypes','wb') as f:
lextypes = set([str(v) for v in list(le.lextypes.values())])
pickle.dump(lextypes,f)
| 46.693333
| 116
| 0.525871
|
from delphin import tdl, itsdb
from delphin.tokens import YYTokenLattice
import glob, sys, pathlib
import json, pickle
import numpy as np
from collections import OrderedDict
import pos_map
from datetime import datetime
CONTEXT_WINDOW = 2
DEV = ['ws212', 'ecpa']
TEST = ['cb', 'ecpr', 'jhk', 'jhu', 'tgk', 'tgu', 'psk', 'psu', #'rondane',
'vm32', 'ws213', 'ws214', 'petet', 'wsj23']
IGNORE = ['ntucle', 'omw', 'wlb03', 'wnb03']
NONTRAIN = DEV + TEST + IGNORE
class LexTypeExtractor:
def __init__(self):
self.stats = {'corpora': [], 'failed corpora': [], 'tokens': {}, 'total lextypes': 0}
def parse_lexicons(self,lexicons):
lextypes = {} # mapping of lexical entry IDs to types
for lexicon in glob.iglob(lexicons+'**'):
for event, obj, lineno in tdl.iterparse(lexicon):
if event == 'TypeDefinition':
lextypes[obj.identifier] = obj.supertypes[0] # assume exactly 1
self.lextypes = lextypes
def read_testsuites(self,path):
max_sen_length = 0
corpus_size = 0
data = {'train':{'by corpus':[], 'by length': {}},
'test':{'by corpus':[], 'by length': {}},
'dev':{'by corpus':[], 'by length': {}}}
print('Reading test suite files into pydelphin objects...')
n = 0
for idx in ['train','dev','test']:
t = 0
for i, tsuite in enumerate(sorted(glob.iglob(path + idx + '/**'))):
n += 1
ts = itsdb.TestSuite(tsuite)
if idx == 'train':
message = "A nontrain dataset {} is being added as training data!".format(ts.path.stem)
assert ts.path.stem not in NONTRAIN, message
data[idx]['by corpus'].append({'name':ts.path.stem})
items = list(ts.processed_items())
data[idx]['by corpus'][i]['sentences'] = {}
data[idx]['by corpus'][i]['tokens-tags'] = []
corpus_size += len(items)
for response in items:
if len(response['results']) > 0:
deriv = response.result(0).derivation()
terminals = deriv.terminals()
t += len(terminals)
p_input = response['p-input']
p_tokens = response['p-tokens']
terminals_tok_tags = self.map_lattice_to_input(p_input, p_tokens, deriv)
if len(terminals) not in data[idx]['by corpus'][i]['sentences']:
data[idx]['by corpus'][i]['sentences'][len(terminals)] = []
data[idx]['by corpus'][i]['sentences'][len(terminals)].append(terminals_tok_tags)
data[idx]['by corpus'][i]['tokens-tags'].append(terminals_tok_tags)
if len(terminals) > max_sen_length:
max_sen_length = len(terminals)
print('All raw {} tokens: {}'.format(idx,t))
t1 = 0
t2 = 0
if idx == 'train':
all_sentences = {}
for ts in data[idx]['by corpus']:
t1 += self.org_sen_by_length(all_sentences, ts)
for l in all_sentences:
for s in all_sentences[l]:
t2 += len(s)
data[idx]['by length'] = OrderedDict(sorted(all_sentences.items()))
else:
for ts in data[idx]['by corpus']:
all_sentences = {}
t1 += self.org_sen_by_length(all_sentences, ts)
data[idx]['by length'][ts['name']] = OrderedDict(sorted(all_sentences.items()))
for ts in data[idx]['by length']:
for l in data[idx]['by length'][ts]:
for s in data[idx]['by length'][ts][l]:
t2 += len(s)
print('Added {} {} tokens to the by-corpus table'.format(t1,idx))
print('Added {} {} tokens to the by-length table'.format(t2,idx))
return max_sen_length, corpus_size, n+1, data
def org_sen_by_length(self, all_sentences, ts):
n = 0
for l in ts['sentences']:
for s in ts['sentences'][l]:
n += len(s)
if l not in all_sentences:
all_sentences[l] = []
all_sentences[l] += ts['sentences'][l]
return n
def process_testsuites_autoreg(self,testsuites,lextypes, out_dir):
max_sen_length, corpus_size, num_ts, data = self.read_testsuites(testsuites)
tables_by_len = {'train':{},'dev':{},'test':{}}
for k in ['train','dev','test']:
pathlib.Path(out_dir + '/labeled-data/' + k).mkdir(parents=True, exist_ok=False)
all_tokens = 0
test = k in ['dev','test']
if test:
for corpus in data[k]['by length']:
all_tokens += self.process_table(data, k, lextypes, tables_by_len, test, corpus)
else:
all_tokens += self.process_table(data, k, lextypes, tables_by_len, test)
print('Total PROCESSED {} tokens: {}'.format(k, all_tokens))
def process_testsuites_nonautoreg(self,testsuites,lextypes, out_dir):
pos_mapper = pos_map.Pos_mapper('./pos-map.txt')
max_sen_length, corpus_size, num_ts, data = self.read_testsuites(testsuites)
for k in ['train','dev','test']:
is_devtest_data = k in ['dev','test']
pathlib.Path(out_dir + '/labeled-data/' + k).mkdir(parents=True, exist_ok=False)
if is_devtest_data:
for corpus in data[k]['by corpus']:
                    x, y = self.process_corpus(lextypes, corpus, pos_mapper)
                    data_table = {'ft': x, 'lt': y}  # initialize per corpus; was referenced before assignment in this branch
with open(out_dir + '/labeled-data/' + k + '/' + corpus['name'], 'wb') as f:
pickle.dump(data_table, f)
else:
data_table = {'ft':[],'lt':[]}
for corpus in data[k]['by corpus']:
x, y = self.process_corpus(lextypes,corpus,pos_mapper)
data_table['ft'] += x
data_table['lt'] += y
with open(out_dir + '/labeled-data/train/train' , 'wb') as f:
pickle.dump(data_table, f)
def process_corpus(self, lextypes, corpus,pos_mapper):
data = []
y = []
for sen in corpus['tokens-tags']:
tokens, labels, pos_tags, autoregress_labels = \
self.get_tokens_labels(sen, CONTEXT_WINDOW, lextypes, pos_mapper, False)
for k, t in enumerate(tokens):
if k < CONTEXT_WINDOW or k >= len(tokens) - CONTEXT_WINDOW:
continue
y.append(labels[k])
data.append(self.get_context(t, tokens, pos_tags, k, CONTEXT_WINDOW))
return data, y
def process_table(self, data, k, lextypes, tables_by_len, test, corpus=None):
n = 0
table = data[k]['by length'] if not test else data[k]['by length'][corpus]
for sen_len in table:
tables_by_len[k][sen_len] = {}
autoregress_table = np.array([[{}] * len(table[sen_len])
for i in range(sen_len)])
labels_table = np.array([[{}] * len(table[sen_len]) for i in range(sen_len)])
# print("Processing sentences of length {}".format(sen_len))
n += self.process_length(lextypes, table[sen_len],
autoregress_table, labels_table, test=test)
tables_by_len[k][sen_len]['ft'] = autoregress_table
tables_by_len[k][sen_len]['lt'] = labels_table
if test:
with open(out_dir + '/labeled-data/' + k + '/' + corpus, 'wb') as f:
pickle.dump(tables_by_len[k], f)
else:
with open(out_dir + '/labeled-data/train/train' , 'wb') as f:
pickle.dump(tables_by_len[k], f)
return n
'''
Assume a numpy table coming in. Get e.g. tokens 2 through 5 in sentences 4 and 5,
for the test suite #20 in the data.
'''
def get_table_portion(self, ts_info, table, ts_num, token_range, sentence_range):
ts_column = ts_info[ts_num]['column']
tokens = sum(ts_info[ts_num]['sentences'][sentence_range[0]:sentence_range[1]])
return table[token_range[0]:token_range[1],ts_column:ts_column+tokens]
def process_testsuite(self, lextypes, logf, tsuite, autoregress_table, labels_table, start):
print("Processing " + tsuite['name'])
logf.write("Processing " + tsuite['name'] + '\n')
pairs = []
contexts = []
y = []
ys = []
pos_mapper = pos_map.Pos_mapper('./pos-map.txt') # do this for every test suite to count unknowns in each
for sentence_len in tsuite['sentences']:
items = tsuite['sentences'][sentence_len]
for j, lst_of_terminals in enumerate(items):
contexts.append([])
#if j % 100 == 0:
# print("Processing item {} out of {}...".format(j, len(items)))
tokens,labels,pos_tags,autoregress_labels = \
self.get_tokens_labels(tsuite['tokens-tags'][j],CONTEXT_WINDOW, lextypes,pos_mapper,test=False)
ys.append(labels[CONTEXT_WINDOW:CONTEXT_WINDOW*-1])
for k, t in enumerate(tokens):
if k < CONTEXT_WINDOW or k >= len(tokens) - CONTEXT_WINDOW:
continue
pairs.append((t, labels[k]))
y.append(labels[k])
contexts[j].append(self.get_context(t, tokens, pos_tags, k, CONTEXT_WINDOW))
autoregress_table[k-CONTEXT_WINDOW][start+j] = \
self.get_autoregress_context(tokens,pos_tags,autoregress_labels, k,CONTEXT_WINDOW)
labels_table[k-CONTEXT_WINDOW][start+j] = labels[k]
pairs.append(('--EOS--','--EOS--')) # sentence separator
y.append('\n') # sentence separator
self.write_output(contexts, pairs, tsuite['name'])
return ys
def process_length(self, lextypes, items, autoregress_table, labels_table,test):
y = []
ys = []
all_tokens = 0
pos_mapper = pos_map.Pos_mapper('./pos-map.txt') # do this for every test suite to count unknowns in each
for j, lst_of_terminals in enumerate(items):
#if j % 100 == 0:
# print("Processing item {} out of {}...".format(j, len(items)))
tokens,labels,pos_tags,autoregress_labels = \
self.get_tokens_labels(lst_of_terminals,CONTEXT_WINDOW, lextypes,pos_mapper,test)
ys.append(labels[CONTEXT_WINDOW:CONTEXT_WINDOW*-1])
for k, t in enumerate(tokens):
if k < CONTEXT_WINDOW or k >= len(tokens) - CONTEXT_WINDOW:
continue
y.append(labels[k])
autoregress_table[k-CONTEXT_WINDOW][j] = \
self.get_autoregress_context(tokens,pos_tags,autoregress_labels, k,CONTEXT_WINDOW)
labels_table[k-CONTEXT_WINDOW][j] = labels[k]
all_tokens += 1
y.append('\n') # sentence separator
return all_tokens
def map_lattice_to_input(self, p_input, p_tokens, deriv):
yy_lattice = YYTokenLattice.from_string(p_tokens)
yy_input = YYTokenLattice.from_string(p_input)
terminals_toks_postags = []
for t in deriv.terminals():
toks_pos_tags = []
for ttok in t.tokens:
span = None
pos_probs = {}
for lat_tok in yy_lattice.tokens:
if lat_tok.id == ttok.id:
span = lat_tok.lnk.data
break
for i,in_tok in enumerate(yy_input.tokens):
if in_tok.lnk.data[0] == span[0]:
for pos, p in in_tok.pos:
if pos not in pos_probs:
pos_probs[pos] = []
pos_probs[pos].append(float(p))
if in_tok.lnk.data[1] != span[1]:
cur_tok = in_tok
while cur_tok.lnk.data[1] != span[1]:
next_tok = yy_input.tokens[i+1]
i += 1
for pos, p in next_tok.pos:
if pos not in pos_probs:
pos_probs[pos] = []
pos_probs[pos].append(float(p))
cur_tok = next_tok
else:
break
toks_pos_tags.append((ttok, pos_probs))
terminals_toks_postags.append((t,toks_pos_tags))
return terminals_toks_postags
def write_output(self, contexts, pairs, ts_name):
for d in ['train/','test/','dev/', 'ignore/']:
for pd in ['simple/','by-corpus/contexts/','by-corpus/true_labels/']:
pathlib.Path('./output/' + pd + d).mkdir(parents=True, exist_ok=True)
true_labels = []
suf = 'train/'
if ts_name in IGNORE:
suf = 'ignore/'
if ts_name in TEST:
suf = 'test/'
elif ts_name in DEV:
suf = 'dev/'
with open('./output/simple/' + suf + ts_name, 'w') as f:
for form, letype in pairs:
if not letype=='--EOS--':
true_labels.append(str(letype))
str_pair = f'{form}\t{letype}'
f.write(str_pair + '\n')
else:
f.write('\n') # sentence separator
true_labels.append('\n') # sentence separator
with open('./output/by-corpus/true_labels/' + suf + ts_name, 'w') as f:
for tl in true_labels:
f.write(tl)
if tl != '\n':
f.write('\n')
with open('./output/by-corpus/contexts/' + suf + ts_name, 'w') as f:
f.write(json.dumps(contexts))
def get_context(self, t, tokens, pos_tags, i, window):
context = {'w': t, 'pos': pos_tags[i]}
for j in range(1,window+1):
prev_tok = tokens[i-j]
prev_pos = pos_tags[i-j]
next_tok = tokens[i+j]
next_pos = pos_tags[i+j]
context['w-' + str(j)] = prev_tok
context['w+' + str(j)] = next_tok
context['pos-' + str(j)] = prev_pos
context['pos+' + str(j)] = next_pos
return context
def get_autoregress_context(self,tokens,pos_tags,predicted_labels, k,window):
context = {'w':tokens[k],'pos':pos_tags[k]}
for i in range(1,window+1):
context['w-' + str(i)] = tokens[k-i]
context['w+' + str(i)] = tokens[k+i]
context['pos-' + str(i)] = pos_tags[k-i]
context['pos+' + str(i)] = pos_tags[k+i]
context['tag-' + str(i)] = predicted_labels[k-i] # Will be None or FAKE in test mode
return context
def get_tokens_labels(self, terms_and_tokens_tags, context_window, lextypes,pos_mapper, test):
tokens = []
labels = []
pos_tags = []
previous_tags = []
for i,(terminal, toks_tags) in enumerate(terms_and_tokens_tags):
letype = str(lextypes.get(terminal.parent.entity, "<UNK>"))
tokens.append(terminal.form)
labels.append(letype)
pos_tags.append(self.get_pos_tag(toks_tags, pos_mapper))
if test:
previous_tags.append(None)
else:
previous_tags.append(letype)
for i in range(1,1+context_window):
tokens.insert(0, 'FAKE-' + str(i))
labels.insert(0, 'FAKE-' + str(i))
pos_tags.insert(0,'FAKE-' + str(i))
previous_tags.insert(0, 'FAKE-' + str(i))
tokens.append('FAKE+' + str(i))
labels.append('FAKE+' + str(i))
pos_tags.append('FAKE+' + str(i))
return tokens, labels, pos_tags, previous_tags
def get_pos_tag(self,tokens_tags, pos_mapper):
tag = ''
for tt in tokens_tags:
pos_probs = tt[1]
for pos in pos_probs:
tag = tag + '+' + pos
tag = tag.strip('+')
if '+' in tag:
tag = pos_mapper.map_tag(tag)
return tag
if __name__ == "__main__":
args = sys.argv[1:]
dt_str = '-'.join(str(datetime.now()).split()).replace(':','.')
run_id = sys.argv[3] + dt_str
    # guard against a missing argv[4] and leave autoreg defined either way
    autoreg = len(sys.argv) > 4 and sys.argv[4] == 'autoreg'
out_dir = './output/' + run_id
pathlib.Path(out_dir).mkdir(parents=True, exist_ok=False)
le = LexTypeExtractor()
le.parse_lexicons(args[0])
le.stats['total lextypes'] = len(le.lextypes)
if autoreg:
le.process_testsuites_autoreg(args[1],le.lextypes,out_dir)
else:
le.process_testsuites_nonautoreg(args[1],le.lextypes,out_dir)
with open(out_dir + '/lextypes','wb') as f:
lextypes = set([str(v) for v in list(le.lextypes.values())])
pickle.dump(lextypes,f)
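# Invocation sketch inferred from the argument handling above (paths are
# placeholders, not from the source):
#   python letype_extractor.py <lexicon-dir> <testsuite-root> <run-name> autoreg
# args[0] is globbed for TDL lexicons, args[1] must contain train/dev/test
# subdirectories of [incr tsdb()] test suites, and the optional 4th argument
# selects the autoregressive feature tables. Note that process_table relies on
# the module-level out_dir defined in this __main__ block.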
| 0
| 0
| 0
| 16,271
| 0
| 0
| 0
| 45
| 133
|
e415bab977b01817df0d4c4b2e45aacf11aa8fbf
| 2,294
|
py
|
Python
|
homeassistant/components/environment_canada/camera.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/environment_canada/camera.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/environment_canada/camera.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Support for the Environment Canada radar imagery."""
from __future__ import annotations
import voluptuous as vol
SERVICE_SET_RADAR_TYPE = "set_radar_type"
SET_RADAR_TYPE_SCHEMA = {
vol.Required("radar_type"): vol.In(["Auto", "Rain", "Snow"]),
}
| 33.246377
| 79
| 0.722319
|
"""Support for the Environment Canada radar imagery."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.camera import Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import (
AddEntitiesCallback,
async_get_current_platform,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTR_OBSERVATION_TIME, DOMAIN
SERVICE_SET_RADAR_TYPE = "set_radar_type"
SET_RADAR_TYPE_SCHEMA = {
vol.Required("radar_type"): vol.In(["Auto", "Rain", "Snow"]),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add a weather entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]["radar_coordinator"]
async_add_entities([ECCamera(coordinator)])
platform = async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_RADAR_TYPE,
SET_RADAR_TYPE_SCHEMA,
"async_set_radar_type",
)
class ECCamera(CoordinatorEntity, Camera):
"""Implementation of an Environment Canada radar camera."""
def __init__(self, coordinator):
"""Initialize the camera."""
super().__init__(coordinator)
Camera.__init__(self)
self.radar_object = coordinator.ec_data
self._attr_name = f"{coordinator.config_entry.title} Radar"
self._attr_unique_id = f"{coordinator.config_entry.unique_id}-radar"
self._attr_attribution = self.radar_object.metadata["attribution"]
self._attr_entity_registry_enabled_default = False
self.content_type = "image/gif"
def camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return bytes of camera image."""
self._attr_extra_state_attributes = {
ATTR_OBSERVATION_TIME: self.radar_object.timestamp,
}
return self.radar_object.image
async def async_set_radar_type(self, radar_type: str):
"""Set the type of radar to retrieve."""
self.radar_object.precip_type = radar_type.lower()
await self.radar_object.update()
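# A minimal sketch (an addition, not from the source) of how the voluptuous
# schema above validates service payloads; anything outside the vol.In list
# raises vol.Invalid:
#   vol.Schema(SET_RADAR_TYPE_SCHEMA)({"radar_type": "Rain"})   # passes
#   vol.Schema(SET_RADAR_TYPE_SCHEMA)({"radar_type": "Hail"})   # raises vol.Invalid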
| 0
| 0
| 671
| 939
| 0
| 0
| 0
| 248
| 180
|
dfd8028393ae8ae7d4bfcfe8f9c74276b8f956f7
| 308
|
py
|
Python
|
v1/chapter6/4-readingCsvDict.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
v1/chapter6/4-readingCsvDict.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
v1/chapter6/4-readingCsvDict.py
|
QTYResources/python-scraping
|
d7afe25a012fb5d079ee42372c7fce94b9494b9f
|
[
"MIT"
] | null | null | null |
from urllib.request import urlopen
from io import StringIO
import csv
data = urlopen("http://pythonscraping.com/files/MontyPythonAlbums.csv").read().decode('ascii', 'ignore')
dataFile = StringIO(data)
dictReader = csv.DictReader(dataFile)
print(dictReader.fieldnames)
for row in dictReader:
print(row)
| 25.666667
| 104
| 0.775974
|
from urllib.request import urlopen
from io import StringIO
import csv
data = urlopen("http://pythonscraping.com/files/MontyPythonAlbums.csv").read().decode('ascii', 'ignore')
dataFile = StringIO(data)
dictReader = csv.DictReader(dataFile)
print(dictReader.fieldnames)
for row in dictReader:
print(row)
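# Complementary sketch (not in the original snippet): csv.DictWriter is the
# symmetric API for writing dict rows back out.
out = StringIO()
writer = csv.DictWriter(out, fieldnames=['Name', 'Year'])
writer.writeheader()
writer.writerow({'Name': 'Monty Python Live at Drury Lane', 'Year': '1974'})
print(out.getvalue())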
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
02ae809d3645a6053bab6f39633b7e2d90bf2e2e
| 741
|
py
|
Python
|
hash-array-string/1.8-zero-matrix/main.py
|
digoreis/code-interview
|
e2250c39b0fc9b6a8f0bc151b4f796d17cdce3e3
|
[
"MIT"
] | null | null | null |
hash-array-string/1.8-zero-matrix/main.py
|
digoreis/code-interview
|
e2250c39b0fc9b6a8f0bc151b4f796d17cdce3e3
|
[
"MIT"
] | null | null | null |
hash-array-string/1.8-zero-matrix/main.py
|
digoreis/code-interview
|
e2250c39b0fc9b6a8f0bc151b4f796d17cdce3e3
|
[
"MIT"
] | null | null | null |
# Write an algorithm such that if an element in an MxN matrix is 0, its entire row and column are set to 0.
matrix = [[1,1,1,1],[1,1,1,1],[1,1,0,1],[1,1,1,1],[1,1,1,0]]
matrixZero(matrix)
print('\n'.join(['\t'.join([str(cell) for cell in row]) for row in matrix]))
| 26.464286
| 105
| 0.562753
|
# Write an algorithm such that if an element in an MxN matrix is 0, its entire row and column are set to 0.
def rowZero(matrix, row):
for i in range(len(matrix[row])):
matrix[row][i] = 0
def columnZero(matrix, column):
for i in range(len(matrix)):
matrix[i][column] = 0
def matrixZero(matrix):
points = []
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j] == 0:
points.append((i,j))
for p in points:
rowZero(matrix, p[0])
columnZero(matrix, p[1])
matrix = [[1,1,1,1],[1,1,1,1],[1,1,0,1],[1,1,1,1],[1,1,1,0]]
matrixZero(matrix)
print('\n'.join(['\t'.join([str(cell) for cell in row]) for row in matrix]))
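# Space note (an addition, not part of the original): the version above stores
# every zero coordinate, O(MN) extra space in the worst case. Using the first
# row and column as marker storage brings that down to O(1):
def matrix_zero_o1(m):
    rows, cols = len(m), len(m[0])
    first_row_zero = any(m[0][j] == 0 for j in range(cols))
    first_col_zero = any(m[i][0] == 0 for i in range(rows))
    for i in range(1, rows):
        for j in range(1, cols):
            if m[i][j] == 0:
                m[i][0] = 0  # record the zero in the marker column
                m[0][j] = 0  # record the zero in the marker row
    for i in range(1, rows):
        for j in range(1, cols):
            if m[i][0] == 0 or m[0][j] == 0:
                m[i][j] = 0
    if first_row_zero:
        for j in range(cols):
            m[0][j] = 0
    if first_col_zero:
        for i in range(rows):
            m[i][0] = 0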
| 0
| 0
| 0
| 0
| 0
| 393
| 0
| 0
| 68
|
d805c677ed9537d580479c240741257bc4c84e5c
| 4,725
|
py
|
Python
|
src/Class/shadowedrice.py
|
Jonathan-Browning/Shadowed-Rician-Fading-Python
|
c1faa061c4d2a253bd1fe7098edc0e21740cb3ea
|
[
"MIT"
] | 2
|
2021-02-23T15:49:47.000Z
|
2021-04-24T01:32:42.000Z
|
src/Class/shadowedrice.py
|
Jonathan-Browning/Shadowed-Rician-Fading-Python
|
c1faa061c4d2a253bd1fe7098edc0e21740cb3ea
|
[
"MIT"
] | null | null | null |
src/Class/shadowedrice.py
|
Jonathan-Browning/Shadowed-Rician-Fading-Python
|
c1faa061c4d2a253bd1fe7098edc0e21740cb3ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 18:55:46 2020
@author: Jonathan Browning
"""
| 36.346154
| 151
| 0.572275
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 18:55:46 2020
@author: Jonathan Browning
"""
import numpy as np
from scipy.stats import gaussian_kde as kdf
from scipy import special as sp
class ShadowedRice:
numSamples = 2*(10**6) # the number of samples used in the simulation
r = np.linspace(0, 6, 6000) # theoretical envelope PDF x axes
theta = np.linspace(-np.pi, np.pi, 6000) # theoretical phase PDF x axes
def __init__(self, K, m, r_hat, phi):
# user input checks and assigns value
self.K = self.input_Check(K, "K", 0.001, 50)
self.m = self.input_Check(m, "m", 0.001, 50)
        self.r_hat = self.input_Check(r_hat, r"\hat{r}", 0.5, 2.5)
        self.phi = self.input_Check(phi, r"\phi", -np.pi, np.pi)
        # simulating the fading and estimating their densities
self.multipathFading = self.complex_Multipath_Fading()
self.xdataEnv, self.ydataEnv = self.envelope_Density()
self.xdataPh, self.ydataPh = self.phase_Density()
# theoretical PDFs calculated
self.envelopeProbability = self.envelope_PDF()
self.phaseProbability = self.phase_PDF()
def input_Check(self, data, inputName, lower, upper):
# input_Check checks the user inputs
# has a value been entered
if data == "":
raise ValueError(" ".join((inputName, "must have a numeric value")))
        # in case of a non-numeric input
        try:
            data = float(data)
        except (TypeError, ValueError):
            raise ValueError(" ".join((inputName, "must have a numeric value")))
# data must be within the range
if data < lower or data > upper:
raise ValueError(" ".join((inputName, f"must be in the range [{lower:.2f}, {upper:.2f}]")))
return data
def calculate_Means(self):
# calculate_means calculates the means of the complex Gaussians representing the
# in-phase and quadrature components
p = np.sqrt(self.K / (1+self.K)) * self.r_hat * np.cos(self.phi)
q = np.sqrt(self.K / (1+self.K)) * self.r_hat * np.sin(self.phi)
return p, q
def scattered_Component(self):
# scattered_Component calculates the power of the scattered signal component
sigma = self.r_hat / np.sqrt( 2 * (1+self.K) )
return sigma
def generate_Gaussians(self, mean, sigma):
# generate_Gaussians generates the Gaussian random variables
gaussians = np.random.default_rng().normal(mean, sigma, self.numSamples)
return gaussians
def complex_Multipath_Fading(self):
# complex_Multipath_Fading generates the complex fading random variables
p, q = self.calculate_Means()
sigma = self.scattered_Component()
xi = np.sqrt(np.random.gamma(self.m, 1/self.m, self.numSamples))
multipathFading = self.generate_Gaussians(xi*p, sigma) + (1j*self.generate_Gaussians(xi*q, sigma))
return multipathFading
def envelope_PDF(self):
# envelope_PDF calculates the theoretical envelope PDF
PDF = 2 * (1+self.K) * self.r *(self.m**(self.m)) / (self.r_hat**(2)*(self.m+self.K)**(self.m)) \
* np.exp(- ((1+self.K) * self.r**(2)) / self.r_hat**(2)) \
* sp.hyp1f1(self.m, 1, self.r**(2)*self.K*(self.K+1)/(self.r_hat**(2)*(self.K+self.m)))
return PDF
def phase_PDF(self):
# phase_PDF calculates the theoretical phase PDF
PDF = (self.m**self.m * np.sqrt(self.K)/(2 * np.sqrt(np.pi) * (self.K + self.m)**(self.m +1/2))) \
* ( np.sqrt((self.K +self.m)/(np.pi*self.K)) * sp.hyp2f1(self.m, 1, 1/2, (self.K*(np.cos(self.theta - self.phi))**(2))/(self.K +self.m)) \
+ ((sp.gamma(self.m+1/2) / sp.gamma(self.m))*np.cos(self.theta-self.phi) \
* (1- (self.K*(np.cos(self.theta - self.phi))**(2)) / (self.K +self.m))**(-self.m-1/2)))
return PDF
def envelope_Density(self):
# envelope_Density finds the envelope PDF of the simulated random variables
R = np.sqrt((np.real(self.multipathFading))**2 + (np.imag(self.multipathFading))**2)
kde = kdf(R)
x = np.linspace(R.min(), R.max(), 100)
p = kde(x)
return x, p
def phase_Density(self):
# phase_Density finds the phase PDF of the simulated random variables
R = np.angle(self.multipathFading)
kde = kdf(R)
x = np.linspace(R.min(), R.max(), 100)
p = kde(x)
return x, p
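# Hypothetical usage (parameter values are examples within the documented input
# ranges, not from the source):
if __name__ == '__main__':
    model = ShadowedRice(K=2, m=1, r_hat=1.0, phi=0.0)
    # compare the simulated envelope density against the theoretical PDF
    print(model.xdataEnv[:3], model.ydataEnv[:3])
    print(model.envelopeProbability[:3])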
| 0
| 0
| 0
| 4,503
| 0
| 0
| 0
| 29
| 90
|
c00b03a6c58efa0a53f7586ea8d163bb92f588f1
| 1,063
|
py
|
Python
|
src/ggrc_workflows/migrations/versions/20170925135632_3ebe14ae9547_set_empty_next_cycle_start_date.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/migrations/versions/20170925135632_3ebe14ae9547_set_empty_next_cycle_start_date.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | 10
|
2018-07-06T00:04:23.000Z
|
2021-02-26T21:13:20.000Z
|
src/ggrc_workflows/migrations/versions/20170925135632_3ebe14ae9547_set_empty_next_cycle_start_date.py
|
HLD/ggrc-core
|
9bdc0fc6ca9e252f4919db682d80e360d5581eb4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2017-11-11T22:16:56.000Z
|
2017-11-11T22:16:56.000Z
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Set empty next_cycle_start_date
Create Date: 2017-09-25 13:56:32.087965
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ebe14ae9547'
down_revision = '4991c5731711'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE workflows, ( "
"SELECT w.id "
"FROM workflows AS w "
"LEFT JOIN task_groups AS tg ON tg.workflow_id = w.id "
"LEFT JOIN task_group_tasks AS t ON t.task_group_id = tg.id "
"WHERE t.id IS NULL AND w.next_cycle_start_date IS NOT NULL "
") AS t "
"SET workflows.next_cycle_start_date = NULL "
"WHERE workflows.id = t.id;")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
| 29.527778
| 79
| 0.664158
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Set empty next_cycle_start_date
Create Date: 2017-09-25 13:56:32.087965
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ebe14ae9547'
down_revision = '4991c5731711'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE workflows, ( "
"SELECT w.id "
"FROM workflows AS w "
"LEFT JOIN task_groups AS tg ON tg.workflow_id = w.id "
"LEFT JOIN task_group_tasks AS t ON t.task_group_id = tg.id "
"WHERE t.id IS NULL AND w.next_cycle_start_date IS NOT NULL "
") AS t "
"SET workflows.next_cycle_start_date = NULL "
"WHERE workflows.id = t.id;")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
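# Operational note (standard Alembic usage, an assumption rather than anything
# documented in this file):
#   alembic upgrade 3ebe14ae9547     # apply this revision
#   alembic downgrade 4991c5731711   # revert to the previous revision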
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a9dd6bbf97a596cde44dca1d194056908053fcb0
| 29,435
|
py
|
Python
|
test/test_madx.py
|
odidev/cpymad
|
7b58d013a669d0973c233743e05fa205257233dd
|
[
"ECL-2.0",
"Apache-2.0"
] | 22
|
2015-05-27T13:45:55.000Z
|
2022-03-03T15:43:47.000Z
|
test/test_madx.py
|
odidev/cpymad
|
7b58d013a669d0973c233743e05fa205257233dd
|
[
"ECL-2.0",
"Apache-2.0"
] | 102
|
2015-01-23T18:21:29.000Z
|
2022-02-28T17:07:26.000Z
|
test/test_madx.py
|
odidev/cpymad
|
7b58d013a669d0973c233743e05fa205257233dd
|
[
"ECL-2.0",
"Apache-2.0"
] | 18
|
2015-01-24T12:43:57.000Z
|
2021-11-23T08:29:57.000Z
|
"""
Tests for the :class:`cpymad.madx.Madx` API.
"""
import os
from pytest import raises
from cpymad.madx import Madx
SEQU = """
! constants
QP_K1 = 2;
! elements
qp: quadrupole, k1:=QP_K1, l=1;
sb: sbend, l=2, angle=3.14/4;
dr: drift, l=1;
! sequences
s1: sequence, l=8, refer=center;
dr, at=0.5; ! dr[1] ~ betx_full1[1]
qp, at=1.5;
dr, at=2.5; ! dr[2] ~ betx_full1[3] ~ betx_range[0]
qp, at=3.5; ! ~ betx_full1[4] ~ betx_range[1]
dr, at=4.5;
sb, at=6.0; ! ~ betx_range[3]
dr, at=7.5;
endsequence;
s2: sequence, l=3, refer=entry;
qp1: qp, at=0, k1=3;
qp2: qp, at=1, l=2;
endsequence;
"""
def normalize(path):
"""Normalize path name to eliminate different spellings of the same path.
This is needed for path comparisons in tests, especially on windows where
    paths are case insensitive and allow a multitude of spellings."""
return os.path.normcase(os.path.normpath(path))
def test_version(mad):
"""Check that the Madx.version attribute can be used as expected."""
version = mad.version
# check format:
major, minor, micro = map(int, version.release.split('.'))
# We need at least MAD-X 5.05.00:
assert (major, minor, micro) >= (5, 5, 0)
# check format:
year, month, day = map(int, version.date.split('.'))
assert (year, month, day) >= (2019, 5, 10)
assert 1 <= month <= 12
assert 1 <= day <= 31
assert str(version).startswith(
'MAD-X {}'.format(version.release))
# TODO: We need to fix this on windows, but for now, I just need it to
# pass so that the CI builds the release...
def test_command_log():
"""Check that the command log contains all input commands."""
# create a new Madx instance that uses the history feature:
history_filename = '_test_madx.madx.tmp'
try:
# feed some input lines and compare with history file:
lines = """
l = 5;
f = 200;
fodo: sequence, refer=entry, l=100;
QF: quadrupole, l=5, at= 0, k1= 1/(f*l);
QD: quadrupole, l=5, at=50, k1=-1/(f*l);
endsequence;
beam, particle=proton, energy=2;
use, sequence=fodo;
""".splitlines()
lines = [line.strip() for line in lines if line.strip()]
with Madx(command_log=history_filename) as mad:
for line in lines:
mad.input(line)
with open(history_filename) as history_file:
history = history_file.read()
assert history.strip() == '\n'.join(lines).strip()
finally:
# remove history file
os.remove(history_filename)
def test_append_semicolon():
"""Check that semicolon is automatically appended to input() text."""
# Regression test for #73
log = []
with Madx(command_log=log.append) as mad:
mad.input('a = 0')
mad.input('b = 1')
assert log == ['a = 0;', 'b = 1;']
assert mad.globals.a == 0
assert mad.globals.b == 1
# def test_sequence_get_expanded_elements():
def test_crash(mad):
"""Check that a RuntimeError is raised in case MAD-X crashes."""
assert bool(mad)
    # At the moment MAD-X crashes on this input, because the L (length)
    # parameter is missing:
raises(RuntimeError, mad.input, 'XXX: sequence;')
assert not bool(mad)
| 29.114738
| 77
| 0.592526
|
"""
Tests for the :class:`cpymad.madx.Madx` API.
"""
import os
import sys
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from pytest import approx, fixture, mark, raises
import cpymad
from cpymad.madx import Madx, Sequence, metadata
@fixture
def mad():
with Madx(prompt='X:> ') as mad:
yield mad
@fixture
def lib(mad):
return mad._libmadx
SEQU = """
! constants
QP_K1 = 2;
! elements
qp: quadrupole, k1:=QP_K1, l=1;
sb: sbend, l=2, angle=3.14/4;
dr: drift, l=1;
! sequences
s1: sequence, l=8, refer=center;
dr, at=0.5; ! dr[1] ~ betx_full1[1]
qp, at=1.5;
dr, at=2.5; ! dr[2] ~ betx_full1[3] ~ betx_range[0]
qp, at=3.5; ! ~ betx_full1[4] ~ betx_range[1]
dr, at=4.5;
sb, at=6.0; ! ~ betx_range[3]
dr, at=7.5;
endsequence;
s2: sequence, l=3, refer=entry;
qp1: qp, at=0, k1=3;
qp2: qp, at=1, l=2;
endsequence;
"""
def normalize(path):
"""Normalize path name to eliminate different spellings of the same path.
This is needed for path comparisons in tests, especially on windows where
    paths are case insensitive and allow a multitude of spellings."""
return os.path.normcase(os.path.normpath(path))
def test_copyright():
notice = cpymad.get_copyright_notice()
assert isinstance(notice, type(u""))
def test_version(mad):
"""Check that the Madx.version attribute can be used as expected."""
version = mad.version
# check format:
major, minor, micro = map(int, version.release.split('.'))
# We need at least MAD-X 5.05.00:
assert (major, minor, micro) >= (5, 5, 0)
# check format:
year, month, day = map(int, version.date.split('.'))
assert (year, month, day) >= (2019, 5, 10)
assert 1 <= month <= 12
assert 1 <= day <= 31
assert str(version).startswith(
'MAD-X {}'.format(version.release))
def test_metadata(mad):
version = mad.version
assert metadata.__version__ == version.release
assert isinstance(metadata.get_copyright_notice(), type(u""))
def test_independent_instances():
# Check independence by defining a variable differently in each
# instance:
with Madx(prompt='X1:> ') as mad1, Madx(prompt='X2:> ') as mad2:
mad1.input('ANSWER=42;')
mad2.input('ANSWER=43;')
assert mad1.eval('ANSWER') == 42
assert mad2.eval('ANSWER') == 43
# TODO: We need to fix this on windows, but for now, I just need it to
# pass so that the CI builds the release...
@mark.xfail(
sys.platform != 'linux',
reason='Output is sometimes garbled on MacOS and windows.',
)
def test_streamreader():
output = []
with Madx(stdout=output.append) as m:
assert len(output) == 1
assert b'+++++++++++++++++++++++++++++++++' in output[0]
assert b'+ Support: [email protected],' in output[0]
assert b'+ Release date: ' in output[0]
assert b'+ Execution date: ' in output[0]
# assert b'+ Support: [email protected], ', output[1]
m.input('foo = 3;')
assert len(output) == 1
m.input('foo = 3;')
assert len(output) == 2
assert output[1] == b'++++++ info: foo redefined\n'
assert len(output) == 3
assert b'+ MAD-X finished normally ' in output[2]
def test_quit(mad):
mad.quit()
assert mad._process.returncode is not None
assert not bool(mad)
with raises(RuntimeError):
mad.input(';')
@mark.xfail(
sys.platform != 'linux',
reason='Output is sometimes garbled on MacOS and windows.',
)
def test_context_manager():
output = []
with Madx(stdout=output.append) as m:
m.input('foo = 3;')
assert m.globals.foo == 3
assert b'+ MAD-X finished normally ' in output[-1]
assert not bool(m)
with raises(RuntimeError):
m.input(';')
def test_command_log():
"""Check that the command log contains all input commands."""
# create a new Madx instance that uses the history feature:
history_filename = '_test_madx.madx.tmp'
try:
# feed some input lines and compare with history file:
lines = """
l = 5;
f = 200;
fodo: sequence, refer=entry, l=100;
QF: quadrupole, l=5, at= 0, k1= 1/(f*l);
QD: quadrupole, l=5, at=50, k1=-1/(f*l);
endsequence;
beam, particle=proton, energy=2;
use, sequence=fodo;
""".splitlines()
lines = [line.strip() for line in lines if line.strip()]
with Madx(command_log=history_filename) as mad:
for line in lines:
mad.input(line)
with open(history_filename) as history_file:
history = history_file.read()
assert history.strip() == '\n'.join(lines).strip()
finally:
# remove history file
os.remove(history_filename)
def test_append_semicolon():
"""Check that semicolon is automatically appended to input() text."""
# Regression test for #73
log = []
with Madx(command_log=log.append) as mad:
mad.input('a = 0')
mad.input('b = 1')
assert log == ['a = 0;', 'b = 1;']
assert mad.globals.a == 0
assert mad.globals.b == 1
def test_call_and_chdir(mad):
folder = os.path.abspath(os.path.dirname(__file__))
parent = os.path.dirname(folder)
getcwd = mad._libmadx.getcwd
g = mad.globals
mad.chdir(folder)
assert normalize(getcwd()) == normalize(folder)
mad.call('answer_42.madx')
assert g.answer == 42
with mad.chdir('..'):
assert normalize(getcwd()) == normalize(parent)
mad.call('test/answer_43.madx')
assert g.answer == 43
mad.call('test/answer_call42.madx', True)
assert g.answer == 42
assert normalize(getcwd()) == normalize(folder)
mad.call('answer_43.madx')
assert g.answer == 43
mad.chdir('..')
assert normalize(getcwd()) == normalize(parent)
def _check_twiss(mad, seq_name):
beam = 'ex=1, ey=2, particle=electron, sequence={0};'.format(seq_name)
mad.command.beam(beam)
mad.use(seq_name)
initial = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5)
twiss = mad.twiss(sequence=seq_name, **initial)
# Check initial values:
assert twiss['alfx'][0] == approx(initial['alfx'])
assert twiss['alfy'][0] == approx(initial['alfy'])
assert twiss['betx'][0] == approx(initial['betx'])
assert twiss['bety'][0] == approx(initial['bety'])
assert twiss.summary['ex'] == approx(1)
assert twiss.summary['ey'] == approx(2)
# Check that keys are all lowercase:
for k in twiss:
assert k == k.lower()
for k in twiss.summary:
assert k == k.lower()
def test_error(mad):
mad.input("""
seq: sequence, l=1;
endsequence;
beam;
use, sequence=seq;
""")
    # Errors in MAD-X must not crash the process; input() returns False instead:
assert not mad.input('twiss;')
assert mad.input('twiss, betx=1, bety=1;')
def test_twiss_1(mad):
mad.input(SEQU)
_check_twiss(mad, 's1') # s1 can be computed at start
_check_twiss(mad, 's1') # s1 can be computed multiple times
_check_twiss(mad, 's2') # s2 can be computed after s1
def test_twiss_2(mad):
mad.input(SEQU)
_check_twiss(mad, 's2') # s2 can be computed at start
_check_twiss(mad, 's1') # s1 can be computed after s2
def test_twiss_with_range(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
params = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5,
sequence='s1')
# Compute TWISS on full sequence, then on a sub-range, then again on
# the full sequence. This checks that none of the range selections
# have side-effects on each other:
betx_full1 = mad.twiss(**params)['betx']
betx_range = mad.twiss(range=('dr[2]', 'sb'), **params)['betx']
betx_full2 = mad.twiss(**params)['betx']
# Check that the results have the expected lengths:
assert len(betx_full1) == 9
assert len(betx_range) == 4
assert len(betx_full2) == 9
    # Check numeric results. Since the first 3 elements of the range and
    # the full sequence are identical, equal results are expected there,
    # and non-equal results afterwards.
assert betx_range[0] == approx(betx_full1[1]) # dr:2, dr:1
assert betx_range[1] == approx(betx_full1[2]) # qp:2, qp:1
assert betx_range[2] == approx(betx_full1[3]) # dr:3, dr:2
assert betx_range[3] != approx(betx_full1[4]) # sb, qp:2
def test_range_row_api(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
params = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5,
sequence='s1')
tab = mad.twiss(range=('dr[2]', 'sb'), **params)
assert tab.range == ('dr[2]', 'sb')
assert 'betx' in tab
def test_survey(mad):
mad.input(SEQU)
mad.beam()
mad.use('s1')
tab = mad.survey()
assert tab._name == 'survey'
assert 'x' in tab
assert 'y' in tab
assert 'z' in tab
assert 'theta' in tab
assert 'phi' in tab
assert 'psi' in tab
assert tab.x[-1] < -1
assert tab.y == approx(0)
assert tab.z[-1] > 7
def test_match(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s2;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s2')
params = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5,
sequence='s2')
mad.match(constraints=[dict(range='s1$end', betx=2.0)],
weight={'betx': 2},
vary=['qp2->k1'],
**params)
twiss = mad.twiss(**params)
val = twiss.betx[-1]
assert val == approx(2.0, rel=1e-2)
def test_verbose(mad):
mad.verbose(False)
assert mad.options.echo is False
assert mad.options.info is False
mad.verbose(True)
assert mad.options.echo is True
assert mad.options.info is True
def test_active_sequence(mad):
mad.input(SEQU)
mad.command.beam('ex=1, ey=2, particle=electron, sequence=s1;')
mad.use('s1')
assert mad.sequence() == 's1'
mad.beam()
mad.use('s2')
assert mad.sequence().name == 's2'
def test_get_sequence(mad):
mad.input(SEQU)
with raises(KeyError):
mad.sequence['sN']
s1 = mad.sequence['s1']
assert s1.name == 's1'
seqs = mad.sequence
assert set(seqs) == {'s1', 's2'}
def test_eval(mad):
mad.input(SEQU)
assert mad.eval(True) is True
assert mad.eval(13) == 13
assert mad.eval(1.3) == 1.3
assert mad.eval([2, True, 'QP_K1']) == [2, True, 2.0]
assert mad.eval("1/QP_K1") == approx(0.5)
def test_eval_functions(mad):
assert mad.eval("sin(1.0)") == approx(np.sin(1.0))
assert mad.eval("cos(1.0)") == approx(np.cos(1.0))
mad.input("""
mqf.k1 = 0.3037241107;
mqd.k1 = -0.3037241107;
fodo: sequence, l=10, refer=entry;
mqf: quadrupole, at=0, l=1, k1:=mqf.k1;
dff: drift, at=1, l=4;
mqd: quadrupole, at=5, l=1, k1:=mqd.k1;
dfd: drift, at=6, l=4;
endsequence;
beam;
use, sequence=fodo;
twiss, sequence=fodo, x=0.1;
""")
elems = mad.sequence.fodo.expanded_elements
twiss = mad.table.twiss
mad.input("mqf_x = table(twiss, mqf, x);")
assert mad.eval("table(twiss, mqf, x)") \
== twiss.row(elems.index('mqf')).x \
== mad.globals.mqf_x
def test_globals(mad):
g = mad.globals
# Membership:
assert 'FOO' not in g
# Setting values:
g['FOO'] = 2
assert 'FOO' in g
assert g['FOO'] == 2
assert mad.eval('FOO') == 2
# Re-setting values:
g['FOO'] = 3
assert mad.eval('FOO') == 3
# Setting expressions:
g['BAR'] = '3*foo'
assert mad.eval('BAR') == 9
g['FOO'] = 4
assert mad.eval('BAR') == 12
assert g.defs.bar == "3*foo"
assert g.cmdpar.bar.definition == "3*foo"
# attribute access:
g.bar = 42
assert g.defs.bar == 42
assert g.cmdpar.bar.definition == 42
assert g.BAR == 42
# repr
assert "'bar': 42.0" in str(g)
with raises(NotImplementedError):
del g['bar']
with raises(NotImplementedError):
del g.bar
assert g.bar == 42 # still there
assert 'bar' in list(g)
assert 'foo' in list(g)
# assert list(g) == list(g.defs)
# assert list(g) == list(g.cmdpar)
assert len(g) == len(list(g))
assert len(g.defs) == len(list(g.defs))
assert len(g.cmdpar) == len(list(g.cmdpar))
def test_elements(mad):
mad.input(SEQU)
assert 'sb' in mad.elements
assert 'sb' in list(mad.elements)
assert 'foobar' not in mad.elements
assert mad.elements['sb']['angle'] == approx(3.14/4)
idx = mad.elements.index('qp1')
elem = mad.elements[idx]
assert elem['k1'] == 3
def test_sequence_map(mad):
mad.input(SEQU)
seq = mad.sequence
assert len(seq) == 2
assert set(seq) == {'s1', 's2'}
assert 's1' in seq
assert 's3' not in seq
assert hasattr(seq, 's1')
assert not hasattr(seq, 's3')
assert seq.s1.name == 's1'
assert seq.s2.name == 's2'
with raises(AttributeError):
seq.s3
def test_table_map(mad):
mad.input(SEQU)
mad.beam()
mad.use('s2')
mad.survey(sequence='s2')
tab = mad.table
assert 'survey' in list(tab)
assert 'survey' in tab
assert 'foobar' not in tab
assert len(tab) == len(list(tab))
with raises(AttributeError):
tab.foobar
def test_sequence(mad):
mad.input(SEQU)
s1 = mad.sequence.s1
assert str(s1) == '<Sequence: s1>'
assert s1 == mad.sequence.s1
assert s1 == 's1'
assert s1 != mad.sequence.s2
assert s1 != 's2'
with raises(RuntimeError):
s1.beam
with raises(RuntimeError):
s1.twiss_table
with raises(RuntimeError):
s1.twiss_table_name
assert not s1.has_beam
assert not s1.is_expanded
s1.expand()
assert s1.has_beam
assert s1.is_expanded
s1.expand() # idempotent
assert s1.has_beam
assert s1.is_expanded
initial = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5)
mad.twiss(sequence='s1', sectormap=True,
table='my_twiss', **initial)
# Now works:
assert s1.beam.particle == 'positron'
assert s1.twiss_table_name == 'my_twiss'
assert s1.twiss_table.betx[0] == 2.5
assert s1.element_names() == [
's1$start',
'dr', 'qp', 'dr[2]', 'qp[2]', 'dr[3]', 'sb', 'dr[4]',
's1$end',
]
assert s1.expanded_element_names() == s1.element_names()
assert len(s1.element_names()) == len(s1.element_positions())
assert s1.element_positions() == [
0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 7.0, 8.0]
assert s1.expanded_element_positions() == [
0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 7.0, 8.0]
assert s1.elements[0].name == 's1$start'
assert s1.elements[-1].name == 's1$end'
assert s1.elements[-1].index == len(s1.elements)-1
assert s1.elements[3].index == 3
assert s1.elements.index('#s') == 0
assert s1.elements.index('#e') == len(s1.elements)-1
assert s1.elements.index('sb') == 6
assert s1.length == 8.0
def test_sequence_get_elements_s1(mad):
mad.input(SEQU)
s1 = mad.sequence.s1.elements
qp1 = s1['qp[1]']
qp2 = s1['qp[2]']
sb1 = s1['sb[1]']
assert s1.index('qp') < s1.index('qp[2]')
assert s1.index('qp[2]') < s1.index('sb')
assert qp1['at'] == approx(1.5)
assert qp2['at'] == approx(3.5)
assert sb1['at'] == approx(6)
assert qp1.position == approx(1)
assert qp2.position == approx(3)
assert sb1.position == approx(5)
assert qp1['l'] == approx(1)
assert qp2['l'] == approx(1)
assert sb1['l'] == approx(2)
assert float(qp1['k1']) == approx(2)
assert float(qp2['k1']) == approx(2)
assert float(sb1['angle']) == approx(3.14/4)
assert qp1.cmdpar.k1.expr.lower() == "qp_k1"
def test_sequence_get_elements_s2(mad):
mad.input(SEQU)
s2 = mad.sequence.s2.elements
qp1 = s2['qp1[1]']
qp2 = s2['qp2[1]']
assert s2.index('qp1') < s2.index('qp2')
assert qp1['at'] == approx(0)
assert qp2['at'] == approx(1)
assert qp1['l'] == approx(1)
assert qp2['l'] == approx(2)
assert float(qp1['k1']) == approx(3)
assert float(qp2['k1']) == approx(2)
# def test_sequence_get_expanded_elements():
def test_crash(mad):
"""Check that a RuntimeError is raised in case MAD-X crashes."""
assert bool(mad)
    # At the moment, MAD-X crashes on this input because the L (length)
    # parameter is missing:
raises(RuntimeError, mad.input, 'XXX: sequence;')
assert not bool(mad)
def test_sequence_elements(mad):
mad.input(SEQU)
elements = mad.sequence['s1'].elements
iqp2 = elements.index('qp[2]')
qp1 = elements['qp[1]']
qp2 = elements[iqp2]
assert qp1['at'] == approx(1.5)
assert qp2['at'] == approx(3.5)
assert qp1.position == approx(1)
assert qp2.position == approx(3)
assert iqp2 == elements.at(3.1)
def test_sequence_expanded_elements(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
elements = mad.sequence['s1'].expanded_elements
iqp2 = elements.index('qp[2]')
qp1 = elements['qp[1]']
qp2 = elements[iqp2]
assert qp1['at'] == approx(1.5)
assert qp2['at'] == approx(3.5)
assert qp1.position == approx(1)
assert qp2.position == approx(3)
assert iqp2 == elements.at(3.1)
def test_element_inform(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
elem = mad.sequence.s1.expanded_elements['qp']
assert {
name for name in elem
if elem.cmdpar[name].inform
} == {'k1', 'l', 'at'}
def test_table(mad):
beam = 'ex=1, ey=2, particle=electron, sequence=s1;'
mad.input(SEQU)
mad.command.beam(beam)
mad.use('s1')
initial = dict(alfx=0.5, alfy=1.5,
betx=2.5, bety=3.5)
twiss = mad.twiss(sequence='s1', sectormap=True, **initial)
sector = mad.table.sectortable
assert str(twiss).startswith("<Table 'twiss': ")
assert str(sector).startswith("<Table 'sectortable': ")
assert 'betx' in twiss
assert 't111' in sector
assert 't111' not in twiss
assert 'betx' not in sector
assert len(twiss) == len(list(twiss))
assert set(twiss) == set(twiss[0])
assert twiss.s[5] == twiss[5].s
assert twiss.s[-1] == twiss[-1].s
copy = twiss.copy()
assert copy['betx'] == approx(twiss.betx)
assert set(copy) == set(twiss)
copy = twiss.copy(['betx'])
assert set(copy) == {'betx'}
ALL = slice(None)
assert sector.tmat(0).shape == (6, 6, 6)
assert_allclose(sector.tmat(ALL)[0, 0, 0, :], sector.t111)
assert_allclose(sector.tmat(ALL)[1, 5, 3, :], sector.t264)
assert_allclose(sector.tmat(ALL)[3, 0, 3, :], sector.t414)
assert_allclose(sector.tmat(ALL)[4, 4, 4, :], sector.t555)
assert_allclose(sector.rmat(ALL)[0, 0, :], sector.r11)
assert_allclose(sector.rmat(ALL)[1, 5, :], sector.r26)
assert_allclose(sector.rmat(ALL)[3, 0, :], sector.r41)
assert_allclose(sector.rmat(ALL)[4, 4, :], sector.r55)
assert_allclose(sector.kvec(ALL)[0, :], sector.k1)
assert_allclose(sector.kvec(ALL)[1, :], sector.k2)
assert_allclose(sector.kvec(ALL)[3, :], sector.k4)
assert_allclose(sector.kvec(ALL)[4, :], sector.k5)
r = mad.sectortable()[:, :6, :6]
k = mad.sectortable()[:, 6, :6]
t = mad.sectortable2()
num_elems = len(mad.sequence.s1.elements)
assert t.shape == (num_elems, 6, 6, 6)
assert r.shape == (num_elems, 6, 6)
assert k.shape == (num_elems, 6)
assert_allclose(t[:, 0, 0, 0], sector.t111)
assert_allclose(t[:, 1, 5, 3], sector.t264)
assert_allclose(t[:, 3, 0, 3], sector.t414)
assert_allclose(t[:, 4, 4, 4], sector.t555)
assert_allclose(r[:, 0, 0], sector.r11)
assert_allclose(r[:, 1, 5], sector.r26)
assert_allclose(r[:, 3, 0], sector.r41)
assert_allclose(r[:, 4, 4], sector.r55)
assert_allclose(k[:, 0], sector.k1)
assert_allclose(k[:, 1], sector.k2)
assert_allclose(k[:, 3], sector.k4)
assert_allclose(k[:, 4], sector.k5)
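    # Editor's hedged note: the layout assumed above is that
    # mad.sectortable() returns an (n_elems, 7, 6) array whose first six
    # rows per element form the 6x6 R-matrix and whose seventh row is the
    # kick vector K, while mad.sectortable2() returns the full
    # (n_elems, 6, 6, 6) T-tensor; columns map as r<i><j> -> r[:, i-1, j-1],
    # k<i> -> k[:, i-1] and t<i><j><k> -> t[:, i-1, j-1, k-1].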
def test_selected_columns(mad, lib):
mad.input(SEQU)
mad.command.beam()
mad.use('s1')
mad.select(flag='twiss', column=['s', 'x', 'y'])
table = mad.twiss(sequence='s1', betx=1, bety=1)
assert set(table) > {'s', 'x', 'y', 'betx', 'bety'}
assert set(table.copy()) > {'s', 'x', 'y', 'betx', 'bety'}
assert table.selected_columns() == ['s', 'x', 'y']
assert table.selection().col_names() == ['s', 'x', 'y']
assert table.selection().copy().keys() == {'s', 'x', 'y'}
mad.select(flag='twiss', clear=True)
mad.select(flag='twiss', column=['betx', 'bety'])
lib.apply_table_selections('twiss')
table = mad.table.twiss
assert set(table) > {'s', 'x', 'y', 'betx', 'bety'}
assert set(table.copy()) > {'s', 'x', 'y', 'betx', 'bety'}
assert table.selected_columns() == ['betx', 'bety']
assert table.selection().col_names() == ['betx', 'bety']
assert table.selection().copy().keys() == {'betx', 'bety'}
def test_table_selected_rows(mad, lib):
mad.input(SEQU)
mad.command.beam()
mad.use('s1')
def check_selection(table, name):
assert_equal(
table.column(name, rows='selected'),
table[name][table.selected_rows()])
assert_equal(
table.column(name, rows='selected'),
table.selection()[name])
mad.select(flag='twiss', class_='quadrupole')
table = mad.twiss(sequence='s1', betx=1, bety=1)
assert table.selected_rows() == [2, 4]
check_selection(table, 'alfx')
check_selection(table, 'alfy')
check_selection(table, 'betx')
check_selection(table, 'bety')
mad.select(flag='twiss', clear=True)
mad.select(flag='twiss', class_='drift')
lib.apply_table_selections('twiss')
table = mad.table.twiss
assert table.selected_rows() == [1, 3, 5, 7]
check_selection(table, 'alfx')
check_selection(table, 'alfy')
check_selection(table, 'betx')
check_selection(table, 'bety')
def test_table_selected_rows_mask(mad, lib):
mad.input(SEQU)
mad.command.beam()
mad.use('s1')
mad.select(flag='twiss', class_='quadrupole')
table = mad.twiss(sequence='s1', betx=1, bety=1)
mask = lib.get_table_selected_rows_mask('twiss')
assert mask.shape == (len(mad.sequence.s1.expanded_elements), )
assert_equal(mask.nonzero(), (table.selected_rows(), ))
def test_attr(mad):
assert hasattr(mad, 'constraint')
assert hasattr(mad, 'constraint_')
assert hasattr(mad, 'global_')
assert not hasattr(mad, 'foobar')
assert not hasattr(mad, '_constraint')
def test_expr(mad):
g = mad.globals
vars = mad.expr_vars
g.foo = 1
g.bar = 2
assert set(vars('foo')) == {'foo'}
assert set(vars('(foo) * sin(2*pi*bar)')) == {'foo', 'bar'}
def test_command(mad):
mad.input(SEQU)
twiss = mad.command.twiss
sbend = mad.elements.sb
clone = sbend.clone('foobar', angle="pi/5", l=1)
assert 'betx=0' in str(twiss)
assert 'angle=' in str(sbend)
assert 'tilt' in sbend
assert sbend.tilt == 0
assert len(sbend) == len(list(sbend))
assert 'tilt' in list(sbend)
assert clone.name == 'foobar'
assert clone.base_type.name == 'sbend'
assert clone.parent.name == 'sb'
assert clone.defs.angle == 'pi / 5'
assert clone.angle == approx(0.6283185307179586)
assert len(clone) == len(sbend)
assert 'angle=0.628' in str(clone)
assert 'tilt' not in str(clone)
clone.angle = 0.125
clone = mad.elements.foobar # need to update cache
assert clone.angle == 0.125
assert len(twiss) == len(list(twiss))
assert 'betx' in list(twiss)
assert clone.angle != approx(clone.parent.angle)
del clone.angle
clone = mad.elements.foobar # need to update cache
assert clone.angle == clone.parent.angle
with raises(AttributeError):
clone.missing_attribute
with raises(NotImplementedError):
del twiss['betx']
with raises(NotImplementedError):
del clone.base_type.angle
def test_array_attribute(mad):
mad.globals.nine = 9
clone = mad.elements.multipole.clone('foo', knl=[0, 'nine/3', 4])
knl = clone.knl
assert knl[0] == 0
assert knl[1] == 3
assert knl[2] == 4
assert len(knl) == 3
assert list(knl) == [0.0, 3.0, 4.0]
assert str(knl) == '[0.0, 3.0, 4.0]'
knl[1] = '3*nine'
assert mad.elements.foo.defs.knl[1] == '3 * nine'
assert mad.elements.foo.knl[1] == 27
def test_array_attribute_comparison(mad):
mad.globals.nine = 9
foo = mad.elements.multipole.clone('foo', knl=[0, 5, 10])
bar_eq = mad.elements.multipole.clone('bar_eq', knl=[0, 5, 10])
bar_gt = mad.elements.multipole.clone('bar_gt', knl=[0, 6, 10])
bar_lt = mad.elements.multipole.clone('bar_lt', knl=[0, 5, 'nine'])
knl = foo.knl
knl_eq = bar_eq.knl
knl_gt = bar_gt.knl
knl_lt = bar_lt.knl
assert knl == knl_eq
assert not (knl == knl_gt)
assert not (knl == knl_lt)
assert not (knl < knl_eq)
assert knl < knl_gt
assert not (knl < knl_lt)
assert knl <= knl_eq
assert knl <= knl_gt
assert not (knl <= knl_lt)
assert not (knl > knl_eq)
assert not (knl > knl_gt)
assert knl > knl_lt
assert knl >= knl_eq
assert not (knl >= knl_gt)
assert knl >= knl_lt
def test_command_map(mad):
command = mad.command
assert 'match' in command
assert 'sbend' in command
assert 'foooo' not in command
assert 'match' in list(command)
assert len(command) == len(list(command))
assert 'match' in str(command)
assert 'sbend' in str(command)
assert 'sbend' in mad.base_types
assert 'match' not in mad.base_types
def test_comments(mad):
var = mad.globals
mad('x = 1; ! x = 2;')
assert var.x == 1
mad('x = 2; // x = 3;')
assert var.x == 2
mad('x = 3; /* x = 4; */')
assert var.x == 3
mad('/* x = 3; */ x = 4;')
assert var.x == 4
mad('x = 5; ! /* */ x = 6;')
assert var.x == 5
mad('x = 5; /* ! */ x = 6;')
assert var.x == 6
def test_multiline_input(mad):
var = mad.globals
mad('''
x = 1;
y = 2;
''')
    assert var.x == 1
    assert var.y == 2
mad('''
x = /* 3;
y =*/ 4;
''')
assert var.x == 4
assert var.y == 2
mad('''
x = 1; /* */ x = 2;
*/ if (x == 1) {
x = 3;
}
''')
assert var.x == 2
mad('''
x = 1; /* x = 2;
*/ if (x == 1) {
x = 3;
}
''')
assert var.x == 3
def test_errors(mad):
mad.input(SEQU)
mad.beam()
mad.use(sequence='s1')
mad.select(flag='error', range='qp')
dkn = [1e-6, 2e-6, 3e-6]
dks = [4e-6, 5e-6, 6e-6]
mad.efcomp(dkn=dkn, dks=dks)
mad.ealign(dx=1e-3, dy=-4e-3)
fd = mad.sequence['s1'].expanded_elements['qp'].field_errors
al = mad.sequence['s1'].expanded_elements['qp'].align_errors
expected_dkn = np.hstack((dkn, np.zeros(len(fd.dkn) - len(dkn))))
expected_dks = np.hstack((dks, np.zeros(len(fd.dks) - len(dks))))
assert_allclose(fd.dkn, expected_dkn)
assert_allclose(fd.dks, expected_dks)
assert_allclose(al.dx, 1e-3)
assert_allclose(al.dy, -4e-3)
def test_subsequence(mad):
mad.input("""
d1: RBEND, l=0.1, angle=0.1;
seq1: sequence, l=0.1;
d1.1: d1, at=0.05;
endsequence;
seq2: sequence, l=0.2;
seq1, at=0.05;
seq1, at=0.15;
endsequence;
""")
seq2 = mad.sequence.seq2
assert isinstance(seq2.elements['seq1'], Sequence)
assert seq2.elements['seq1'].name == 'seq1'
assert seq2.elements['seq1'].element_names() == \
mad.sequence.seq1.element_names()
def test_dframe_after_use(mad):
mad.input("""
mqf.k1 = 0.3037241107;
mqd.k1 = -0.3037241107;
fodo: sequence, l=10, refer=entry;
mqf: quadrupole, at=0, l=1, k1:=mqf.k1;
dff: drift, at=1, l=4;
mqd: quadrupole, at=5, l=1, k1:=mqd.k1;
dfd: drift, at=6, l=4;
endsequence;
beam;
use, sequence=fodo;
twiss, sequence=fodo, x=0.1;
""")
index = ['#s', 'mqf', 'dff', 'mqd', 'dfd', '#e']
names = ['fodo$start', 'mqf', 'dff', 'mqd', 'dfd', 'fodo$end']
twiss = mad.table.twiss
assert index == twiss.row_names()
assert index == twiss.dframe().index.tolist()
assert names == twiss.dframe(index='name').index.tolist()
mad.use(sequence='fodo')
twiss = mad.table.twiss
# Should still work:
assert names == twiss.dframe(index='name').index.tolist()
    # The following assert documents the current behaviour and is meant
    # to detect if the MAD-X implementation changes. The behaviour may
    # crash or change in the future; in that case, please remove this
    # line. It does not represent desired behaviour!
assert mad.table.twiss.row_names() == \
['#s', '#e', 'dfd', 'mqd', 'dff', 'mqf']
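# --- Editor's hedged sketch (not part of the original test suite) ---
# A minimal cpymad session using the same patterns exercised above;
# assumes MAD-X is available through cpymad:
#
#     from cpymad.madx import Madx
#     with Madx() as mad:
#         mad.input('x = 2;')
#         assert mad.globals.x == 2
#         assert mad.eval('x + 1') == 3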
| 0
| 1,239
| 0
| 0
| 0
| 23,589
| 0
| 55
| 1,170
|
beca016de282d8ad828e46810f9fa27aac015a7f
| 4,852
|
py
|
Python
|
geneticpython/engines/single_objective/single_objective_engine.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
geneticpython/engines/single_objective/single_objective_engine.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
geneticpython/engines/single_objective/single_objective_engine.py
|
ngocjr7/geneticpython
|
4b4157523ce13b3da56cef61282cb0a984cd317b
|
[
"MIT"
] | null | null | null |
"""
File: single_objective_engine.py
Author: ngocjr7
Email: [email protected]
Github: https://github.com/ngocjr7
Description:
"""
from __future__ import absolute_import
| 38.816
| 83
| 0.557708
|
"""
File: single_objective_engine.py
Author: ngocjr7
Email: [email protected]
Github: https://github.com/ngocjr7
Description:
"""
from __future__ import absolute_import
from typing import List, Union, Callable
from functools import wraps
from collections import OrderedDict
from ..geneticengine import GeneticEngine
from ...core.population import Population
from ...core.operators import Selection, Crossover, Mutation, Replacement
from ...core.individual import Individual
from ...callbacks import Callback, CallbackList
from ...callbacks import History
import math
class SingleObjectiveEngine(GeneticEngine):
def __init__(self, population: Population,
objective: Callable[[Individual], Union[float, int]] = None,
selection: Selection = None,
selection_size: int = None,
crossover: Crossover = None,
mutation: Mutation = None,
replacement: Replacement = None,
callbacks: List[Callback] = None,
generations: int = 100,
random_state: int = None):
callback_list = CallbackList(
callbacks, add_history=True, add_progbar=True)
super(SingleObjectiveEngine, self).__init__(population=population,
objective=objective,
selection=selection,
selection_size=selection_size,
crossover=crossover,
mutation=mutation,
replacement=replacement,
callbacks=callback_list,
generations=generations,
random_state=random_state)
def get_best_indv(self) -> Individual:
best_indv = min(self.population.individuals,
key=lambda indv: indv._objective)
return best_indv.clone()
def _update_metrics(self):
self.metrics = self.metrics or OrderedDict()
self.metrics['best_objective'] = self.get_best_indv().objective
def _update_logs(self, logs):
logs = logs or {}
logs.update(self.metrics or OrderedDict())
return logs
def compute_objectives(self, population: List[Individual]) -> List[Individual]:
ret = list()
# compute objectives
for indv in population:
if self.objective is None:
raise ValueError(f"Engine has no registered objective functions")
indv._coefficient = self.coefficient
indv._objective = self.objective(indv)
ret.append(indv)
return ret
def minimize_objective(self, fn):
"""
        Register an objective function to be minimized.
"""
@wraps(fn)
def _fn_minimization_with_objective_check(indv):
'''
            A wrapper around the objective function that validates its return value.
'''
# Check indv type.
if not isinstance(indv, Individual):
raise TypeError(
'indv\'s class must be subclass of IndividualBase')
# Check objective.
objective = float(fn(indv))
is_invalid = not isinstance(
objective, (float, int)) or (math.isnan(objective))
if is_invalid:
msg = 'objective value(value: {}, type: {}) is invalid'
msg = msg.format(objective, type(objective))
raise ValueError(msg)
return objective
self.objective = _fn_minimization_with_objective_check
self.coefficient = 1
def maximize_objective(self, fn):
"""
        Register an objective function to be maximized.
"""
@wraps(fn)
def _fn_maximization_with_objective_check(indv):
'''
            A wrapper around the objective function that validates its return value.
'''
# Check indv type.
if not isinstance(indv, Individual):
raise TypeError(
'indv\'s class must be subclass of IndividualBase')
# Check objective.
objective = float(fn(indv))
is_invalid = not isinstance(
objective, (float, int)) or (math.isnan(objective))
if is_invalid:
msg = 'objective value(value: {}, type: {}) is invalid'
msg = msg.format(objective, type(objective))
raise ValueError(msg)
return -objective
self.objective = _fn_maximization_with_objective_check
self.coefficient = -1
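# --- Editor's hedged sketch (not part of the original module) ---
# Minimal usage, assuming a concrete Individual subclass and a populated
# Population named `pop` exist elsewhere; `run()` is assumed to be
# provided by the GeneticEngine base class:
#
#     engine = SingleObjectiveEngine(pop, generations=50)
#
#     def fitness(indv):
#         return sum(indv.chromosome)  # hypothetical genome attribute
#
#     engine.minimize_objective(fitness)
#     engine.run()
#     best = engine.get_best_indv()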
| 0
| 1,507
| 0
| 2,750
| 0
| 0
| 0
| 178
| 245
|
6c1a2218b3975b6c65e1c36ce24d867d86a06bee
| 517
|
py
|
Python
|
timing.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
timing.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
timing.py
|
darkless456/Python
|
1ba37d028e4a818ccfffc18682c1bac15554e3ac
|
[
"MIT"
] | null | null | null |
# timing.py
import datetime
today = datetime.date.today()
yesterday = today - datetime.timedelta(days = 1)
tomorrow = today + datetime.timedelta(days = 1)
print(yesterday, today, tomorrow)
# -------------------------
'''
last_friday = datetime.date.today()
oneday = datetime.timedelta(days = 1)
while last_friday.weekday() != calendar.FRIDAY:
    last_friday -= oneday
print(last_friday.strftime('%A, %d-%b-%Y'))
'''
t = datetime.datetime(2012,9,3,21,30)
k = datetime.date.today()
print(t, '\n', k)
| 19.884615
| 48
| 0.659574
|
# timing.py
import datetime, calendar
today = datetime.date.today()
yesterday = today - datetime.timedelta(days = 1)
tomorrow = today + datetime.timedelta(days = 1)
print(yesterday, today, tomorrow)
# -------------------------
'''
last_friday = datetime.date.today()
oneday = datetime.timedelta(days = 1)
while last_friday.weekday() != calendar.FRIDAY:
    last_friday -= oneday
print(last_friday.strftime('%A, %d-%b-%Y'))
'''
t = datetime.datetime(2012,9,3,21,30)
k = datetime.date.today()
print(t, '\n', k)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0
|
1676722d3f346db563fa9c4d25ad5528e4cd54fa
| 25,385
|
py
|
Python
|
manticore/core/smtlib/visitors.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
manticore/core/smtlib/visitors.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
manticore/core/smtlib/visitors.py
|
Srinivas11789/manticore
|
af3c6aada811833864efaccef7477f14e9b5e0dd
|
[
"Apache-2.0"
] | null | null | null |
from manticore.utils.helpers import CacheDict
import logging
logger = logging.getLogger(__name__)
constant_folder_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
arithmetic_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
| 34.869505
| 167
| 0.593146
|
from manticore.utils.helpers import CacheDict
from .expression import *
from functools import lru_cache
import logging
import operator
logger = logging.getLogger(__name__)
class Visitor(object):
''' Class/Type Visitor
Inherit your class visitor from this one and get called on a different
visiting function for each type of expression. It will call the first
implemented method for the __mro__ class order.
For example for a BitVecAdd it will try
visit_BitVecAdd() if not defined then it will try with
visit_BitVecOperation() if not defined then it will try with
visit_BitVec() if not defined then it will try with
visit_Operation() if not defined then it will try with
visit_Expression()
Other class named visitors are:
visit_Constant()
visit_Variable()
visit_Operation()
visit_BitVec()
visit_Bool()
visit_Array()
'''
def __init__(self, cache=None, **kwargs):
super().__init__()
self._stack = []
self._cache = {} if cache is None else cache
def push(self, value):
assert value is not None
self._stack.append(value)
def pop(self):
if len(self._stack) == 0:
return None
result = self._stack.pop()
return result
@property
def result(self):
assert len(self._stack) == 1
return self._stack[-1]
def _method(self, expression, *args):
#Special case. Need to get the unsleeved version of the array
if isinstance(expression, ArrayProxy):
expression = expression.array
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = 'visit_%s' % sort
if hasattr(self, methodname):
value = getattr(self, methodname)(expression, *args)
if value is not None:
assert isinstance(value, Expression)
return value
return self._rebuild(expression, args)
def visit(self, node, use_fixed_point=False):
'''
The entry point of the visitor.
        The exploration algorithm is a DFS post-order traversal.
        The implementation uses two stacks instead of recursion.
        The final result is stored in self.result.
:param node: Node to explore
:type node: Expression
:param use_fixed_point: if True, it runs _methods until a fixed point is found
:type use_fixed_point: Bool
'''
cache = self._cache
visited = set()
stack = []
stack.append(node)
while stack:
node = stack.pop()
if node in cache:
self.push(cache[node])
elif isinstance(node, Operation):
if node in visited:
operands = [self.pop() for _ in range(len(node.operands))]
value = self._method(node, *operands)
visited.remove(node)
self.push(value)
cache[node] = value
else:
visited.add(node)
stack.append(node)
stack.extend(node.operands)
else:
self.push(self._method(node))
if use_fixed_point:
old_value = None
new_value = self.pop()
while old_value is not new_value:
self.visit(new_value)
old_value = new_value
new_value = self.pop()
self.push(new_value)
@staticmethod
def _rebuild(expression, operands):
if isinstance(expression, Constant):
return expression
if isinstance(expression, Operation):
if any(x is not y for x, y in zip(expression.operands, operands)):
import copy
aux = copy.copy(expression)
aux._operands = operands
return aux
return expression
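# --- Editor's hedged sketch (not part of the original module) ---
# A minimal Visitor subclass relying on the visit_<ClassName> dispatch
# described in the class docstring above:
#
#     class ConstantCounter(Visitor):
#         def __init__(self, **kw):
#             super().__init__(**kw)
#             self.count = 0
#         def visit_Constant(self, expression):
#             self.count += 1
#             return expression
#
#     counter = ConstantCounter()
#     counter.visit(expr)  # `expr` is any Expression, illustrative
#     print(counter.count)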
class Translator(Visitor):
''' Simple visitor to translate an expression into something else
'''
def _method(self, expression, *args):
#Special case. Need to get the unsleeved version of the array
if isinstance(expression, ArrayProxy):
expression = expression.array
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = 'visit_{:s}'.format(sort)
if hasattr(self, methodname):
value = getattr(self, methodname)(expression, *args)
if value is not None:
return value
raise Exception("No translation for this {}".format(expression))
class GetDeclarations(Visitor):
''' Simple visitor to collect all variables in an expression or set of
expressions
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.variables = set()
def visit_Variable(self, expression):
self.variables.add(expression)
@property
def result(self):
return self.variables
class GetDepth(Translator):
''' Simple visitor to collect all variables in an expression or set of
expressions
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def visit_Expression(self, expression):
return 1
def visit_Operation(self, expression, *operands):
return 1 + max(operands)
def get_depth(exp):
visitor = GetDepth()
visitor.visit(exp)
return visitor.result
class PrettyPrinter(Visitor):
def __init__(self, depth=None, **kwargs):
super().__init__(**kwargs)
self.output = ''
self.indent = 0
self.depth = depth
def _print(self, s, e=None):
self.output += ' ' * self.indent + str(s) # + '(%016x)'%hash(e)
self.output += '\n'
def visit(self, expression):
'''
Overload Visitor.visit because:
- We need a pre-order traversal
        - We use recursion as it makes it easier to keep track of the indentation
'''
self._method(expression)
def _method(self, expression, *args):
'''
        Overload Visitor._method because we want to stop iterating over the
        visit_ functions as soon as a valid visit_ function is found.
'''
assert expression.__class__.__mro__[-1] is object
for cls in expression.__class__.__mro__:
sort = cls.__name__
methodname = 'visit_%s' % sort
method = getattr(self, methodname, None)
if method is not None:
method(expression, *args)
return
return
def visit_Operation(self, expression, *operands):
self._print(expression.__class__.__name__, expression)
self.indent += 2
if self.depth is None or self.indent < self.depth * 2:
for o in expression.operands:
self.visit(o)
else:
self._print('...')
self.indent -= 2
return ''
def visit_BitVecExtract(self, expression):
self._print(expression.__class__.__name__ + '{%d:%d}' % (expression.begining, expression.end), expression)
self.indent += 2
if self.depth is None or self.indent < self.depth * 2:
for o in expression.operands:
self.visit(o)
else:
self._print('...')
self.indent -= 2
return ''
def visit_Constant(self, expression):
self._print(expression.value)
return ''
def visit_Variable(self, expression):
self._print(expression.name)
return ''
@property
def result(self):
return self.output
def pretty_print(expression, **kwargs):
if not isinstance(expression, Expression):
return str(expression)
pp = PrettyPrinter(**kwargs)
pp.visit(expression)
return pp.result
class ConstantFolderSimplifier(Visitor):
def __init__(self, **kw):
super().__init__(**kw)
operations = {BitVecAdd: operator.__add__,
BitVecSub: operator.__sub__,
BitVecMul: operator.__mul__,
BitVecDiv: operator.__truediv__,
BitVecShiftLeft: operator.__lshift__,
BitVecShiftRight: operator.__rshift__,
BitVecAnd: operator.__and__,
BitVecOr: operator.__or__,
BitVecXor: operator.__xor__,
BitVecNot: operator.__not__,
BitVecNeg: operator.__invert__,
LessThan: operator.__lt__,
LessOrEqual: operator.__le__,
Equal: operator.__eq__,
GreaterThan: operator.__gt__,
GreaterOrEqual: operator.__ge__,
BoolAnd: operator.__and__,
BoolOr: operator.__or__,
BoolNot: operator.__not__}
def visit_BitVecConcat(self, expression, *operands):
if all(isinstance(o, Constant) for o in operands):
result = 0
for o in operands:
result <<= o.size
result |= o.value
return BitVecConstant(expression.size, result, taint=expression.taint)
def visit_BitVecZeroExtend(self, expression, *operands):
if all(isinstance(o, Constant) for o in operands):
return BitVecConstant(expression.size, operands[0].value, taint=expression.taint)
def visit_BitVecSignExtend(self, expression, *operands):
if expression.extend == 0:
return operands[0]
def visit_BitVecExtract(self, expression, *operands):
if all(isinstance(o, Constant) for o in expression.operands):
value = expression.operands[0].value
begining = expression.begining
end = expression.end
value = value >> begining
mask = 2**(end - begining + 1) - 1
value = value & mask
return BitVecConstant(expression.size, value, taint=expression.taint)
def visit_BoolAnd(self, expression, a, b):
if isinstance(a, Constant) and a.value == True:
return b
if isinstance(b, Constant) and b.value == True:
return a
def visit_BoolOr(self, expression, a, b):
if isinstance(a, Constant) and a.value == False:
return b
if isinstance(b, Constant) and b.value == False:
return a
def visit_Operation(self, expression, *operands):
''' constant folding, if all operands of an expression are a Constant do the math '''
operation = self.operations.get(type(expression), None)
if operation is not None and \
all(isinstance(o, Constant) for o in operands):
value = operation(*(x.value for x in operands))
if isinstance(expression, BitVec):
return BitVecConstant(expression.size, value, taint=expression.taint)
else:
                assert isinstance(expression, Bool)
return BoolConstant(value, taint=expression.taint)
else:
if any(operands[i] is not expression.operands[i] for i in range(len(operands))):
expression = self._rebuild(expression, operands)
return expression
constant_folder_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128)
def constant_folder(expression):
global constant_folder_simplifier_cache
simp = ConstantFolderSimplifier(cache=constant_folder_simplifier_cache)
simp.visit(expression, use_fixed_point=True)
return simp.result
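# --- Editor's hedged sketch (not part of the original module) ---
# Folding an all-constant subtree; assumes BitVecConstant implements the
# arithmetic operators defined in .expression:
#
#     a = BitVecConstant(32, 2)
#     b = BitVecConstant(32, 3)
#     folded = constant_folder(a + b)
#     # folded is a BitVecConstant with value 5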
class ArithmeticSimplifier(Visitor):
def __init__(self, parent=None, **kw):
super().__init__(**kw)
@staticmethod
def _same_constant(a, b):
return isinstance(a, Constant) and\
isinstance(b, Constant) and\
a.value == b.value or a is b
@staticmethod
def _changed(expression, operands):
if isinstance(expression, Constant) and len(operands) > 0:
return True
arity = len(operands)
return any(operands[i] is not expression.operands[i] for i in range(arity))
def visit_Operation(self, expression, *operands):
''' constant folding, if all operands of an expression are a Constant do the math '''
if all(isinstance(o, Constant) for o in operands):
expression = constant_folder(expression)
if self._changed(expression, operands):
expression = self._rebuild(expression, operands)
return expression
def visit_BitVecZeroExtend(self, expression, *operands):
if self._changed(expression, operands):
return BitVecZeroExtend(expression.size, *operands, taint=expression.taint)
else:
return expression
def visit_BitVecITE(self, expression, *operands):
if isinstance(expression.operands[0], Constant):
if expression.operands[0].value:
result = expression.operands[1]
else:
result = expression.operands[2]
import copy
result = copy.copy(result)
result._taint |= expression.operands[0].taint
return result
if self._changed(expression, operands):
return BitVecITE(expression.size, *operands, taint=expression.taint)
def visit_BitVecExtract(self, expression, *operands):
''' extract(sizeof(a), 0)(a) ==> a
extract(16, 0)( concat(a,b,c,d) ) => concat(c, d)
extract(m,M)(and/or/xor a b ) => and/or/xor((extract(m,M) a) (extract(m,M) a)
'''
op = expression.operands[0]
begining = expression.begining
end = expression.end
# extract(sizeof(a), 0)(a) ==> a
if begining == 0 and end + 1 == op.size:
return op
elif isinstance(op, BitVecConcat):
new_operands = []
bitcount = 0
for item in reversed(op.operands):
if begining >= item.size:
begining -= item.size
else:
if bitcount < expression.size:
new_operands.append(item)
bitcount += item.size
if begining != expression.begining:
return BitVecExtract(BitVecConcat(sum([x.size for x in new_operands]), *reversed(new_operands)),
begining, expression.size, taint=expression.taint)
if isinstance(op, (BitVecAnd, BitVecOr, BitVecXor)):
bitoperand_a, bitoperand_b = op.operands
return op.__class__(BitVecExtract(bitoperand_a, begining, expression.size), BitVecExtract(bitoperand_b, begining, expression.size), taint=expression.taint)
def visit_BitVecAdd(self, expression, *operands):
''' a + 0 ==> a
0 + a ==> a
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
if isinstance(left, BitVecConstant):
if left.value == 0:
return right
def visit_BitVecSub(self, expression, *operands):
''' a - 0 ==> 0
(a + b) - b ==> a
(b + a) - b ==> a
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(left, BitVecAdd):
if self._same_constant(left.operands[0], right):
return left.operands[1]
elif self._same_constant(left.operands[1], right):
return left.operands[0]
def visit_BitVecOr(self, expression, *operands):
''' a | 0 => a
0 | a => a
0xffffffff & a => 0xffffffff
a & 0xffffffff => 0xffffffff
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
elif right.value == left.mask:
return right
elif isinstance(left, BitVecOr):
left_left = left.operands[0]
left_right = left.operands[1]
if isinstance(right, Constant):
return BitVecOr(left_left, (left_right | right), taint=expression.taint)
elif isinstance(left, BitVecConstant):
return BitVecOr(right, left, taint=expression.taint)
def visit_BitVecAnd(self, expression, *operands):
''' ct & x => x & ct move constants to the right
a & 0 => 0 remove zero
a & 0xffffffff => a remove full mask
(b & ct2) & ct => b & (ct&ct2) associative property
(a & (b | c) => a&b | a&c distribute over |
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return right
elif right.value == right.mask:
return left
elif isinstance(left, BitVecAnd):
left_left = left.operands[0]
left_right = left.operands[1]
if isinstance(right, Constant):
return BitVecAnd(left_left, left_right & right, taint=expression.taint)
elif isinstance(left, BitVecOr):
left_left = left.operands[0]
left_right = left.operands[1]
return BitVecOr(right & left_left, right & left_right, taint=expression.taint)
elif isinstance(left, BitVecConstant):
return BitVecAnd(right, left, taint=expression.taint)
def visit_BitVecShiftLeft(self, expression, *operands):
''' a << 0 => a remove zero
a << ct => 0 if ct > sizeof(a) remove big constant shift
'''
left = expression.operands[0]
right = expression.operands[1]
if isinstance(right, BitVecConstant):
if right.value == 0:
return left
            elif right.value >= right.size:
                # a shift by >= the bit width yields zero (see docstring)
                return BitVecConstant(expression.size, 0, taint=expression.taint)
def visit_ArraySelect(self, expression, *operands):
''' ArraySelect (ArrayStore((ArrayStore(x0,v0) ...),xn, vn), x0)
-> v0
'''
arr, index = operands
if isinstance(arr, ArrayVariable):
return
if isinstance(index, BitVecConstant):
ival = index.value
# props are slow and using them in tight loops should be avoided, esp when they offer no additional validation
# arr._operands[1] = arr.index, arr._operands[0] = arr.array
while isinstance(arr, ArrayStore) and isinstance(arr._operands[1], BitVecConstant) and arr._operands[1]._value != ival:
arr = arr._operands[0] # arr.array
if isinstance(index, BitVecConstant) and isinstance(arr, ArrayStore) and isinstance(arr.index, BitVecConstant) and arr.index.value == index.value:
return arr.value
else:
if arr is not expression.array:
return arr.select(index)
def visit_Expression(self, expression, *operands):
assert len(operands) == 0
assert not isinstance(expression, Operation)
return expression
arithmetic_simplifier_cache = CacheDict(max_size=150000, flush_perc=25)
@lru_cache(maxsize=128)
def arithmetic_simplify(expression):
global arithmetic_simplifier_cache
simp = ArithmeticSimplifier(cache=arithmetic_simplifier_cache)
simp.visit(expression, use_fixed_point=True)
return simp.result
def to_constant(expression):
value = arithmetic_simplify(expression)
if isinstance(value, Constant):
return value.value
elif isinstance(value, Array):
if value.index_max:
ba = bytearray()
for i in range(value.index_max):
value_i = simplify(value[i])
if not isinstance(value_i, Constant):
break
ba.append(value_i.value)
else:
return ba
return value
@lru_cache(maxsize=128)
def simplify(expression):
expression = constant_folder(expression)
expression = arithmetic_simplify(expression)
return expression
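# --- Editor's hedged sketch (not part of the original module) ---
# simplify() chains constant folding with the arithmetic rewrites above,
# e.g. an addition of zero is rewritten away:
#
#     x = BitVecVariable(32, 'x')  # constructor signature assumed
#     y = simplify(x + BitVecConstant(32, 0))
#     # y is the original variable x; the `+ 0` has been removed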
class TranslatorSmtlib(Translator):
''' Simple visitor to translate an expression to its smtlib representation
'''
unique = 0
def __init__(self, use_bindings=False, *args, **kw):
assert 'bindings' not in kw
super().__init__(*args, **kw)
self.use_bindings = use_bindings
self._bindings_cache = {}
self._bindings = []
def _add_binding(self, expression, smtlib):
if not self.use_bindings or len(smtlib) <= 10:
return smtlib
if smtlib in self._bindings_cache:
return self._bindings_cache[smtlib]
TranslatorSmtlib.unique += 1
name = 'a_%d' % TranslatorSmtlib.unique
self._bindings.append((name, expression, smtlib))
self._bindings_cache[expression] = name
return name
@property
def bindings(self):
return self._bindings
translation_table = {
BoolNot: 'not',
BoolEq: '=',
BoolAnd: 'and',
BoolOr: 'or',
BoolXor: 'xor',
BoolITE: 'ite',
BitVecAdd: 'bvadd',
BitVecSub: 'bvsub',
BitVecMul: 'bvmul',
BitVecDiv: 'bvsdiv',
BitVecUnsignedDiv: 'bvudiv',
BitVecMod: 'bvsmod',
BitVecRem: 'bvsrem',
BitVecUnsignedRem: 'bvurem',
BitVecShiftLeft: 'bvshl',
BitVecShiftRight: 'bvlshr',
BitVecArithmeticShiftLeft: 'bvashl',
BitVecArithmeticShiftRight: 'bvashr',
BitVecAnd: 'bvand',
BitVecOr: 'bvor',
BitVecXor: 'bvxor',
BitVecNot: 'bvnot',
BitVecNeg: 'bvneg',
LessThan: 'bvslt',
LessOrEqual: 'bvsle',
Equal: '=',
GreaterThan: 'bvsgt',
GreaterOrEqual: 'bvsge',
UnsignedLessThan: 'bvult',
UnsignedLessOrEqual: 'bvule',
UnsignedGreaterThan: 'bvugt',
UnsignedGreaterOrEqual: 'bvuge',
BitVecSignExtend: '(_ sign_extend %d)',
BitVecZeroExtend: '(_ zero_extend %d)',
BitVecExtract: '(_ extract %d %d)',
BitVecConcat: 'concat',
BitVecITE: 'ite',
ArrayStore: 'store',
ArraySelect: 'select',
}
def visit_BitVecConstant(self, expression):
assert isinstance(expression, BitVecConstant)
if expression.size == 1:
return '#' + bin(expression.value & expression.mask)[1:]
else:
return '#x%0*x' % (int(expression.size / 4), expression.value & expression.mask)
def visit_BoolConstant(self, expression):
return expression.value and 'true' or 'false'
def visit_Variable(self, expression):
return expression.name
def visit_ArraySelect(self, expression, *operands):
array_smt, index_smt = operands
if isinstance(expression.array, ArrayStore):
array_smt = self._add_binding(expression.array, array_smt)
return '(select %s %s)' % (array_smt, index_smt)
def visit_Operation(self, expression, *operands):
operation = self.translation_table[type(expression)]
if isinstance(expression, (BitVecSignExtend, BitVecZeroExtend)):
operation = operation % expression.extend
elif isinstance(expression, BitVecExtract):
operation = operation % (expression.end, expression.begining)
operands = [self._add_binding(*x) for x in zip(expression.operands, operands)]
return '(%s %s)' % (operation, ' '.join(operands))
@property
def results(self):
raise Exception("NOOO")
@property
def result(self):
output = super().result
if self.use_bindings:
for name, expr, smtlib in reversed(self._bindings):
output = '( let ((%s %s)) %s )' % (name, smtlib, output)
return output
def translate_to_smtlib(expression, **kwargs):
translator = TranslatorSmtlib(**kwargs)
translator.visit(expression)
return translator.result
class Replace(Visitor):
''' Simple visitor to replaces expressions '''
def __init__(self, bindings=None, **kwargs):
super().__init__(**kwargs)
if bindings is None:
raise ValueError("bindings needed in replace")
self._replace_bindings = bindings
def visit_Variable(self, expression):
if expression in self._replace_bindings:
return self._replace_bindings[expression]
return expression
def replace(expression, bindings):
if not bindings:
return expression
visitor = Replace(bindings)
visitor.visit(expression, use_fixed_point=True)
result_expression = visitor.result
#for var in get_variables(result_expression):
# assert var not in bindings
return result_expression
def get_variables(expression):
visitor = GetDeclarations()
visitor.visit(expression)
return visitor.result
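# --- Editor's hedged sketch (not part of the original module) ---
# Typical round trip through the helpers above:
#
#     expr = BitVecVariable(32, 'x') + BitVecConstant(32, 1)  # signatures assumed
#     print(translate_to_smtlib(expr))  # e.g. '(bvadd x #x00000001)'
#     print(get_variables(expr))        # the set containing variable x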
| 0
| 1,842
| 0
| 21,533
| 0
| 1,253
| 0
| 8
| 480
|
34a65f614b2aed9614eeb0a853f10c891d51443b
| 280
|
py
|
Python
|
account/urls.py
|
Wizock/CRUD-master
|
07fbf3c64610a8725724fc934e66c6be35690cc9
|
[
"CC0-1.0"
] | 1
|
2022-03-13T09:50:04.000Z
|
2022-03-13T09:50:04.000Z
|
account/urls.py
|
Wizock/TodoButBetter
|
07fbf3c64610a8725724fc934e66c6be35690cc9
|
[
"CC0-1.0"
] | null | null | null |
account/urls.py
|
Wizock/TodoButBetter
|
07fbf3c64610a8725724fc934e66c6be35690cc9
|
[
"CC0-1.0"
] | null | null | null |
from django.urls import path
urlpatterns = [
path(r'user/<str:usr>/', accountView),
path('register_/', register_),
]
| 23.333333
| 61
| 0.739286
|
from django import urls
from django.conf.urls import include, url
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.urls import path
from .views import *
urlpatterns = [
path(r'user/<str:usr>/', accountView),
path('register_/', register_),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 88
|
eb106290ccfcc64601c7996ff5449b815f8ad55c
| 1,020
|
py
|
Python
|
pyaibot.py
|
linsicheng20060818/PythonExercises
|
dff362b066de54186d8e2a71f0fb6b8fcb1c8f2a
|
[
"MIT"
] | 2
|
2019-01-05T13:34:08.000Z
|
2019-01-06T05:33:17.000Z
|
pyaibot.py
|
linsicheng20060818/PythonExercises
|
dff362b066de54186d8e2a71f0fb6b8fcb1c8f2a
|
[
"MIT"
] | null | null | null |
pyaibot.py
|
linsicheng20060818/PythonExercises
|
dff362b066de54186d8e2a71f0fb6b8fcb1c8f2a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""try:
import jieba
except:
print("please install jieba first.")
input("press any key to continue")
quit()"""
| 25.5
| 73
| 0.435294
|
# -*- coding: utf-8 -*-
"""try:
import jieba
except:
print("please install jieba first.")
input("press any key to continue")
quit()"""
def chchat(a):
import jieba
v=False
#if a=="quit" or a=="exit" or a=="退出" or a=="再见":
# import os
# exit()#Error
    list1 = jieba.lcut(a)  # jieba word segmentation
#print(list1)#Debug
i=0
b=""
if list1[i]=="你好":
return(a)
else:
for i in range(len(list1)):
if list1[i]=="你":
list1[i]="我"
elif list1[i]=="我":
list1[i]="你"
elif list1[i]=="几":
import random
v=True
        '''for r in range(len(ni)):  # replace '你' with '我'
list1[r]="我"
        for i in range(len(wo)):  # replace '我' with '你'
list1[i]="你"'''
for i in range(len(list1)):
b=b+list1[i]
if v==True:
return(random.randint(-10,2000))
else:
return((b.replace("吗","").replace("?","!")).replace("?","!"))
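# --- Editor's hedged sketch (not part of the original file) ---
# Example calls, assuming jieba is installed:
#
#     print(chchat("你好"))          # echoes the greeting back
#     print(chchat("你吃饭了吗?"))   # roughly "我吃饭了!" after the pronoun swap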
| 78
| 0
| 0
| 0
| 0
| 822
| 0
| 0
| 22
|
040bfae6c7070cefcd380adace083b08384a141a
| 391
|
py
|
Python
|
map_annotate_app/admin.py
|
tushar-agarwal/WikiNearby
|
0cc10bdeb1cb0728a6405808cc25f2d9e65dcb95
|
[
"MIT"
] | 2
|
2018-03-20T21:30:35.000Z
|
2019-03-19T04:58:42.000Z
|
map_annotate_app/admin.py
|
tushar-agarwal/map_annotate
|
0cc10bdeb1cb0728a6405808cc25f2d9e65dcb95
|
[
"MIT"
] | 2
|
2016-08-21T13:21:51.000Z
|
2016-09-07T10:01:24.000Z
|
map_annotate_app/admin.py
|
tushar-agarwal/WikiNearby
|
0cc10bdeb1cb0728a6405808cc25f2d9e65dcb95
|
[
"MIT"
] | 2
|
2016-10-06T13:47:24.000Z
|
2017-02-13T23:10:12.000Z
|
"""
This is the C{admin.py} file for C{map_annotate_app}.
For more details, see the documentation for C{map_annotate_app}.
"""
from django.contrib import admin
from .models import Crime
from .models import CrimeType
from .models import Location
from .models import Sansad
admin.site.register(Location)
admin.site.register(Crime)
admin.site.register(CrimeType)
admin.site.register(Sansad)
| 23
| 64
| 0.792839
|
"""
This is the C{admin.py} file for C{map_annotate_app}.
For more details, see the documentation for C{map_annotate_app}.
"""
from django.contrib import admin
from .models import Crime
from .models import CrimeType
from .models import Location
from .models import Sansad
admin.site.register(Location)
admin.site.register(Crime)
admin.site.register(CrimeType)
admin.site.register(Sansad)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
81256977bc9b0ff7623132bef93f3ba3fe7872ae
| 4,444
|
py
|
Python
|
parsers.py
|
ekiwi/parsers
|
1837fe8c76b813da7befeee99668ab59b51aaefa
|
[
"BSD-2-Clause"
] | null | null | null |
parsers.py
|
ekiwi/parsers
|
1837fe8c76b813da7befeee99668ab59b51aaefa
|
[
"BSD-2-Clause"
] | null | null | null |
parsers.py
|
ekiwi/parsers
|
1837fe8c76b813da7befeee99668ab59b51aaefa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, University of California, Berkeley
# author: Kevin Laeufer <[email protected]>
if __name__ == "__main__":
g = Grammar()
S, B, D, E, F = non_term = g.non_terminal('S', 'B', 'D', 'E', 'F')
u, v, w, x, y, z = term = g.terminal('u', 'v', 'w', 'x', 'y', 'z')
g.r(S, [u, B, D, z])
g.r(B, [B, v])
g.r(B, [w])
g.r(D, [E, F])
g.r(E, [y])
g.r(E, [])
g.r(F, [x])
g.r(F, [])
for nt in non_term:
print(f"FIRST({nt}): {g.first(nt)}")
print()
for nt in non_term:
print(f"FOLLOW({nt}): {g.follow(nt)}")
print()
print(g.ll_one(check_conflicts=False))
| 22.789744
| 81
| 0.633213
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, University of California, Berkeley
# author: Kevin Laeufer <[email protected]>
from collections import defaultdict
class Symbol:
def __init__(self, name, bold=False):
self.name = name
self._bold = bold
def __str__(self):
if self._bold:
return f"\033[1m{self.name}\033[0m"
else:
return f"{self.name}"
def __repr__(self):
return self.name
class NonTerminal(Symbol):
def __init__(self, name):
super().__init__(name, bold=True)
class Terminal(Symbol):
def __init__(self, name):
super().__init__(name)
class Epsilon(Terminal):
def __init__(self):
super().__init__("ε")
class Rule:
def __init__(self, lhs, rhs):
assert isinstance(lhs, NonTerminal)
assert isinstance(rhs, list)
assert all(isinstance(sym, Symbol) for sym in rhs)
self.lhs = lhs
self.rhs = rhs
def __getitem__(self, item):
if item not in {0, 1}:
raise IndexError(item)
if item == 0:
return self.lhs
else:
return self.rhs
def __str__(self):
return f"{self.lhs} -> {''.join(str(sym) for sym in self.rhs)}"
def __repr__(self):
return str(self)
class Grammar:
def __init__(self):
self._syms = [Epsilon(), Terminal('$')]
self._rules = []
self._root = None
def _make_syms(self, Type, names):
syms = [Type(name) for name in names]
self._syms += syms
return syms
def non_terminal(self, *names):
return self._make_syms(NonTerminal, names)
def terminal(self, *names):
return self._make_syms(Terminal, names)
def epsilon(self):
return self._syms[0]
def eof(self):
return self._syms[1]
def r(self, lhs, rhs):
        if not rhs:
            rhs = [self.epsilon()]
if self._root is None:
self._root = lhs # by convention
self._rules.append(Rule(lhs, rhs))
def first(self, sym):
assert isinstance(sym, Symbol), f"{sym} : {type(sym)}"
#print(f"FIRST({sym})")
if isinstance(sym, Terminal):
return {sym}
_first = set()
for lhs, rhs in self._rules:
if lhs != sym:
continue
annullable = True
for s in rhs:
if s == sym:
annullable = False
break
s_first = self.first(s)
s_annullable = self.epsilon() in s_first
_first = _first | (s_first - {self.epsilon()})
if not s_annullable:
annullable = False
break
if annullable:
_first |= {self.epsilon()}
return _first
def follow(self, non_term):
assert isinstance(non_term, NonTerminal)
_follow = set()
for lhs, rhs in self._rules:
if non_term not in rhs:
continue
for ii, sym in enumerate(rhs):
if sym != non_term:
continue
# scan following symbols
followed_by_annulable = True
for ff in rhs[ii+1:]:
_first = self.first(ff)
_follow |= (_first - {self.epsilon()})
if self.epsilon() not in _first:
followed_by_annulable = False
break
if followed_by_annulable:
_follow |= self.follow(lhs)
if non_term == self._root:
_follow |= {self.eof()}
return _follow
def ll_one(self, check_conflicts=False):
non_terms = [s for s in self._syms if isinstance(s, NonTerminal)]
table = defaultdict(dict)
for nt in non_terms:
terms = self.first(nt)
if self.epsilon() in terms:
terms = (terms - {self.epsilon()}) | self.follow(nt)
# pick rule:
for tt in terms:
applicable_rules = []
for rule in self._rules:
if rule.lhs != nt:
continue
# scan rhs
annullable = True
for sym in rule.rhs:
s_first = self.first(sym)
if tt in s_first:
applicable_rules.append(rule)
break
                        if self.epsilon() not in s_first:
annullable = False
break
if annullable and tt in self.follow(nt):
applicable_rules.append(rule)
if check_conflicts:
if len(applicable_rules) > 1:
raise RuntimeError(f"Found multiple applicable rules for ({nt}, {tt}):\n" +
'\n'.join(str(r) for r in applicable_rules))
table[nt][tt] = applicable_rules
return dict(table)
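# Editor's hedged note: the table returned above maps each NonTerminal to
# a dict from lookahead Terminal to the list of applicable rules, so an
# LL(1) parser would consult table[S][u] to pick the production for S
# when the next token is u (names as in the __main__ demo below).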
if __name__ == "__main__":
g = Grammar()
S, B, D, E, F = non_term = g.non_terminal('S', 'B', 'D', 'E', 'F')
u, v, w, x, y, z = term = g.terminal('u', 'v', 'w', 'x', 'y', 'z')
g.r(S, [u, B, D, z])
g.r(B, [B, v])
g.r(B, [w])
g.r(D, [E, F])
g.r(E, [y])
g.r(E, [])
g.r(F, [x])
g.r(F, [])
for nt in non_term:
print(f"FIRST({nt}): {g.first(nt)}")
print()
for nt in non_term:
print(f"FOLLOW({nt}): {g.follow(nt)}")
print()
print(g.ll_one(check_conflicts=False))
| 2
| 0
| 0
| 3,631
| 0
| 0
| 0
| 14
| 161
|
e6d68e135afb09552ac2f3d818b48fe79807d853
| 1,387
|
py
|
Python
|
Server.py
|
louis103/Python-Chat-Application
|
5212360194236daf5888d296fd71ed92303d7f94
|
[
"MIT"
] | 1
|
2021-11-22T20:04:16.000Z
|
2021-11-22T20:04:16.000Z
|
Server.py
|
louis103/Python-Chat-Application
|
5212360194236daf5888d296fd71ed92303d7f94
|
[
"MIT"
] | null | null | null |
Server.py
|
louis103/Python-Chat-Application
|
5212360194236daf5888d296fd71ed92303d7f94
|
[
"MIT"
] | null | null | null |
import socket
HOST = "127.0.0.1"
PORT = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
clients = []
nicknames = []
# broadcast func
# handle func
# receive func
print("******Server is running******")
receive()
| 23.508475
| 72
| 0.581831
|
import socket, threading
HOST = "127.0.0.1"
PORT = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
server.listen()
clients = []
nicknames = []
# broadcast func
def broadcast(message):
for client in clients:
client.send(message)
# handle func
def handle(client):
while True:
try:
message = client.recv(2048)
print(f"{nicknames[clients.index(client)]} says {message}")
broadcast(message)
except:
index = clients.index(client)
clients.remove(client)
nickname = nicknames[index]
nicknames.remove(nickname)
break
# receive func
def receive():
while True:
client,address = server.accept()
print(f"Connected with {str(address)}!")
client.send("NICKNAME".encode("utf-8"))
nickname = client.recv(2048).decode("utf-8")
nicknames.append(nickname)
clients.append(client)
print(f"Nickname of new client is {nickname}")
broadcast(f"{nickname} joined the chat!\n".encode("utf-8"))
client.send("You Have Connected to the server".encode("utf-8"))
thread = threading.Thread(target=handle,args=(client,))
thread.start()
print("******Server is running******")
receive()
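# A hedged sketch of a matching client, run as a separate script: it answers
# the NICKNAME handshake above, prints broadcasts on a background thread,
# and sends stdin lines to the server. Host, port and the 2048-byte reads
# mirror the server; the nickname literal is an assumption.
import socket, threading
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("127.0.0.1", 9999))
def listen():
    while True:
        message = client.recv(2048).decode("utf-8")
        if message == "NICKNAME":
            client.send("demo-user".encode("utf-8"))
        else:
            print(message)
threading.Thread(target=listen, daemon=True).start()
while True:
    client.send(input().encode("utf-8"))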
| 0
| 0
| 0
| 0
| 0
| 999
| 0
| 11
| 69
|
d2d464639fd7c2110b4c254cb34f59661eddfc5e
| 18,069
|
py
|
Python
|
pypsi/core.py
|
Rudedog9d/pypsi
|
38dda442b21b8deb569d61076ab0a19c0e78edc8
|
[
"0BSD"
] | null | null | null |
pypsi/core.py
|
Rudedog9d/pypsi
|
38dda442b21b8deb569d61076ab0a19c0e78edc8
|
[
"0BSD"
] | null | null | null |
pypsi/core.py
|
Rudedog9d/pypsi
|
38dda442b21b8deb569d61076ab0a19c0e78edc8
|
[
"0BSD"
] | null | null | null |
#
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
'''
Base classes for developing pluggable commands and plugins.
'''
import sys
from pypsi.ansi import AnsiCode
from pypsi.format import get_lines, wrap_line
def pypsi_print(*args, sep=' ', end='\n', file=None, flush=True, width=None,
wrap=True, wrap_prefix=None, replace_errors=True):
'''
Wraps the functionality of the Python builtin `print` function. The
:meth:`pypsi.shell.Shell.bootstrap` overrides the Python :meth:`print`
function with :meth:`pypsi_print`.
:param str sep: string to print between arguments
:param str end: string to print at the end of the output
:param file file: output stream, if this is :const:`None`, the default is
:data:`sys.stdout`
:param bool flush: whether to flush the output stream
:param int width: override the stream's width
:param bool wrap: whether to word wrap the output
:param str wrap_prefix: prefix string to print prior to every new line that
is wrapped
:param bool replace_errors: replace invalid character points with the '?'
character
'''
file = file or sys.stdout
last = len(args) - 1
def write_safe(data):
'''
Write the input str to the file and, if an encoding error occurs and
replace_errors is ``True``, remove invalid code points and print again.
'''
try:
file.write(data)
except UnicodeEncodeError:
if replace_errors:
enc = getattr(file, 'encoding', sys.getdefaultencoding())
file.write(data.encode(enc, errors='replace').decode(enc))
else:
raise
if wrap and hasattr(file, 'width') and file.width:
width = width or file.width
parts = []
for arg in args:
if isinstance(arg, str):
parts.append(arg)
elif arg is None:
parts.append('')
elif isinstance(arg, AnsiCode):
if file.isatty():
parts.append(str(arg))
elif arg.s is not None:
parts.append(str(arg.s))
else:
parts.append(str(arg))
txt = sep.join(parts)
for (line, endl) in get_lines(txt):
if line:
first = True
wrapno = 0
for wrapped in wrap_line(line, width, wrap_prefix=wrap_prefix):
if not wrapped:
continue
wrapno += 1
if not first:
file.write('\n')
else:
first = False
write_safe(wrapped)
if not line or endl:
file.write('\n')
else:
last = len(args) - 1
for (i, arg) in enumerate(args):
write_safe(str(arg))
if sep and i != last:
write_safe(sep)
if end:
write_safe(end)
if flush:
file.flush()
| 38.363057
| 96
| 0.625989
|
#
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
'''
Base classes for developing pluggable commands and plugins.
'''
import argparse
import sys
from pypsi.ansi import AnsiCodes, AnsiCode
from pypsi.format import get_lines, wrap_line
class Plugin(object):
'''
A plugin is an object that is able to modify a
:py:class:`pypsi.shell.Shell` object's behavior. Whereas a command can be
execute from user input, the `Plugin` class does not contain a `run()`
function.
'''
def __init__(self, preprocess=None, postprocess=None):
'''
Constructor can take two parameters: `preprocess` and `postprocess`
These values determine where the plugin resides inside of the
preprocess and postprocess list. This list, inside of
:class:`pypsi.shell.Shell`, is iterated sequentially, from most
priority to least. So, the highest priority value is 0, which means it
will be the first plugin to run, and the lowest value is 100, which
means it will be the last plugin to run. If either value is `None`, the
plugin is not added to the processing list. For example, if this plugin
only provides a preprocessing functionality, then postprocess should be
set to :const:`None`.
:param int preprocess: the preprocess priority
:param int postprocess: the postprocess priority
'''
self.preprocess = preprocess
self.postprocess = postprocess
def setup(self, shell): # pylint: disable=unused-argument
'''
Called after the plugin has been registered to the active shell.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on failure
'''
return 0
def on_input(self, shell, line): # pylint: disable=unused-argument
'''
Called after input from the user has been received. The return value is
the preprocessed line. This means that modifying the line argument will
not propagate back. If this function does no preprocessing, return line
unmodified.
:param pypsi.shell.Shell shell: the active shell
:param str line: the current input statement string
:returns str: the preprocessed line
'''
return line
def on_tokenize(self, shell, tokens, origin): # pylint: disable=unused-argument
'''
Called after an input string has been tokenized. If this function
performs no preprocessing, return the tokens unmodified.
:param pypsi.shell.Shell shell: the active shell
:param list tokens: the list of :class:`pypsi.cmdline.Token` objects
:param str origin: the origin of the input, can be either 'input' if
received from a call to `input()` or 'prompt' if the input is the
prompt to display to the user
:returns list: the list of preprocessed :class:`pypsi.cmdline.Token`
objects
'''
return tokens
def on_input_canceled(self, shell): # pylint: disable=unused-argument
'''
        Called when the user cancels entering a statement via SIGINT
(Ctrl+C).
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
def on_statement_finished(self, shell, rc): # pylint: disable=unused-argument
'''
Called when a statement has been completely executed.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
class Command(object):
'''
A pluggable command that users can execute. All commands need to derive
from this class. When a command is executed by a user, the command's
:meth:`run` method will be called. The return value of the :meth:`run`
method is used when processing forthcoming commands in the active
statement. The return value must be an :class:`int` and follows the Unix
standard: 0 on success, less than 0 on error, and greater than 0 given
invalid input or incorrect usage.
Each command has a topic associated with it. This topic can be referenced
by commands such as :class:`pypsi.commands.help.HelpCommand` to categorize
commands in help messages.
A command can be used as a fallback handler by implementing the
:meth:`fallback` method. This is similar to the :meth:`run` method, except
    that it accepts one more argument: the command name to execute that wasn't
found by the shell. The return value of :meth:`fallback` holds the same
purpose as the return value of :meth:`run`.
By the time :meth:`run` is called, the system streams have been updated to
point to the current file streams issued in the statement. For example, if
the statement redirects standard out (:attr:`sys.stdout`) to a file, the
destination file is automatically opened and :attr:`sys.stdout` is
redirected to the opened file stream. Once the command has complete
execution, the redirected stream is automatically closed and
:attr:`sys.stdout` is set to its original stream.
'''
def __init__(self, name, usage=None, brief=None,
topic=None, pipe='str'):
'''
:param str name: the name of the command which the user will reference
in the shell
:param str usage: the usage message to be displayed to the user
:param str brief: a brief description of the command
:param str topic: the topic that this command belongs to
:param str pipe: the type of data that will be read from and written to
any pipes
'''
self.name = name
self.usage = usage or ''
self.brief = brief or ''
self.topic = topic or ''
self.pipe = pipe or 'str'
def complete(self, shell, args, prefix): # pylint: disable=unused-argument
'''
Called when the user attempts a tab-completion action for this command.
:param pypsi.shell.Shell shell: the active shell
:param list args: the list of arguments, the last one containing the
cursor position
:param str prefix: the prefix that all items returned must start with
:returns list: the list of strings that could complete the current
action
'''
return []
def usage_error(self, shell, *args):
'''
Display an error message that indicates incorrect usage of this
command. After the error is displayed, the usage is printed.
:param pypsi.shell.Shell shell: the active shell
:param args: list of strings that are the error message
'''
self.error(shell, *args)
print(AnsiCodes.yellow, self.usage, AnsiCodes.reset, sep='')
def error(self, shell, *args): # pylint: disable=unused-argument
'''
Display an error message to the user.
:param pypsi.shell.Shell shell: the active shell
:param args: the error message to display
'''
msg = "{}: {}".format(self.name, ''.join([str(a) for a in args]))
print(AnsiCodes.red, msg, AnsiCodes.reset, file=sys.stderr, sep='')
def run(self, shell, args):
'''
Execute the command. All commands need to implement this method.
:param pypsi.shell.Shell shell: the active shell
:param list args: list of string arguments
:returns int: 0 on success, less than 0 on error, and greater than 0 on
invalid usage
'''
raise NotImplementedError()
def setup(self, shell): # pylint: disable=unused-argument
'''
Called when the plugin has been registered to the active shell.
:param pypsi.shell.Shell shell: the active shell
:returns int: 0 on success, -1 on error
'''
return 0
def fallback(self, shell, name, args): # pylint: disable=unused-argument
'''
Called when this command was set as the fallback command. The only
difference between this and :meth:`run` is that this method accepts the
command name that was entered by the user.
:param pypsi.shell.Shell shell: the active shell
:param str name: the name of the command to run
:param list args: arguments
        :returns: the command's return code, or :const:`None` if the
            fallback did not handle the command (the default)
'''
return None
class CommandShortCircuit(Exception):
'''
Exception raised when the user enter invalid arguments or requests usage
information via the -h and --help flags.
'''
def __init__(self, code):
'''
:param int code: the code the command should return
'''
super().__init__(code)
self.code = code
class PypsiArgParser(argparse.ArgumentParser):
'''
Customized :class:`argparse.ArgumentParser` for use in pypsi. This class
slightly modifies the base ArgumentParser so that the following occurs:
- The whole program does not exit on printing the help message or bad
arguments
- Any error messages are intercepted and printed on the active shell's
error stream
- Adds the option to provide callbacks for tab-completing
options and parameters
'''
def __init__(self, *args, **kwargs):
#: Store callback functions for positional parameters
self._pos_completers = []
#: Store callback functions for optional arguments with values
self._op_completers = {}
#: If a positional argument can be specified more than once,
        # store its callback here and return it multiple times
self._repeating_cb = None
super().__init__(*args, **kwargs)
def exit(self, status=0, message=None):
if message:
print(AnsiCodes.red, message, AnsiCodes.reset, file=sys.stderr,
sep='')
raise CommandShortCircuit(status)
def print_usage(self, file=None):
f = file or sys.stderr
print(AnsiCodes.yellow, self.format_usage(), AnsiCodes.reset, sep='',
file=f)
def print_help(self, file=None):
f = file or sys.stderr
print(AnsiCodes.yellow, self.format_help(), AnsiCodes.reset, sep='',
file=f)
def get_options(self):
'''
:return: All optional arguments (ex, '-v'/'--verbose')
'''
return list(self._op_completers.keys())
def get_option_completer(self, option):
'''
Returns the callback for the specified optional argument,
        or None if one was not specified.
:param str option: The Option
:return function: The callback function or None
'''
return self._op_completers.get(option, None)
def has_value(self, arg):
'''
Check if the optional argument has a value associated with it.
:param str arg: Optional argument to check
        :return: True if arg has a value, False otherwise
'''
# pylint: disable=protected-access
# _option_string_actions is a dictionary containing all of the optional
# arguments and the argparse action they should perform. Currently, the
# only two actions that store a value are _AppendAction/_StoreAction.
# These represent the value passed to 'action' in add_argument:
# parser.add_argument('-l', '--long', action='store')
action = self._option_string_actions.get(arg, None)
return isinstance(action,
(argparse._AppendAction, argparse._StoreAction))
def get_positional_completer(self, pos):
'''
Get the callback for a positional parameter
:param pos: index of the parameter - first param's index = 0
:return: The callback if it exists, else None
'''
try:
return self._pos_completers[pos]
except IndexError:
if self._repeating_cb:
# A positional parameter is set to repeat
return self._repeating_cb
return None
def get_positional_arg_index(self, args):
'''
Get the positional index of a cursor, based on
optional arguments and positional arguments
:param list args: List of str arguments from the Command Line
        :return: the zero-based positional index of the cursor
'''
index = 0
for token in args:
if token in self._option_string_actions:
# Token is an optional argument ( ex, '-v' / '--verbose' )
if self.has_value(token):
# Optional Argument has a value associated with it, so
                    # reduce index to not count its value as a pos param
index -= 1
else:
# Is a positional param or value for an optional argument
index += 1
# return zero-based index
return index - 1
def add_argument(self, *args, completer=None, **kwargs): # pylint: disable=arguments-differ
'''
Override add_argument function of argparse.ArgumentParser to
handle callback functions.
:param args: Positional arguments to pass up to argparse
:param function completer: Optional callback function for argument
        :param kwargs: Keyword arguments to pass up to argparse
        :return: the argparse ``Action`` created by ``ArgumentParser.add_argument``
'''
cb = completer
nargs = kwargs.get('nargs', None)
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
# If no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument ( from argparse )
if nargs and nargs in ('+', '*', argparse.REMAINDER):
# Positional param can repeat
# Currently only stores the last repeating completer specified
self._repeating_cb = cb
self._pos_completers.append(cb)
else:
# Add an optional argument
for arg in args:
self._op_completers[arg] = cb
# Call argparse.add_argument()
return super().add_argument(*args, **kwargs)
def error(self, message):
print(AnsiCodes.red, self.prog, ": error: ", message, AnsiCodes.reset,
sep='', file=sys.stderr)
self.print_usage()
self.exit(1)
def pypsi_print(*args, sep=' ', end='\n', file=None, flush=True, width=None,
wrap=True, wrap_prefix=None, replace_errors=True):
'''
Wraps the functionality of the Python builtin `print` function. The
:meth:`pypsi.shell.Shell.bootstrap` overrides the Python :meth:`print`
function with :meth:`pypsi_print`.
:param str sep: string to print between arguments
:param str end: string to print at the end of the output
:param file file: output stream, if this is :const:`None`, the default is
:data:`sys.stdout`
:param bool flush: whether to flush the output stream
:param int width: override the stream's width
:param bool wrap: whether to word wrap the output
:param str wrap_prefix: prefix string to print prior to every new line that
is wrapped
:param bool replace_errors: replace invalid character points with the '?'
character
'''
file = file or sys.stdout
last = len(args) - 1
def write_safe(data):
'''
Write the input str to the file and, if an encoding error occurs and
replace_errors is ``True``, remove invalid code points and print again.
'''
try:
file.write(data)
except UnicodeEncodeError:
if replace_errors:
enc = getattr(file, 'encoding', sys.getdefaultencoding())
file.write(data.encode(enc, errors='replace').decode(enc))
else:
raise
if wrap and hasattr(file, 'width') and file.width:
width = width or file.width
parts = []
for arg in args:
if isinstance(arg, str):
parts.append(arg)
elif arg is None:
parts.append('')
elif isinstance(arg, AnsiCode):
if file.isatty():
parts.append(str(arg))
elif arg.s is not None:
parts.append(str(arg.s))
else:
parts.append(str(arg))
txt = sep.join(parts)
for (line, endl) in get_lines(txt):
if line:
first = True
wrapno = 0
for wrapped in wrap_line(line, width, wrap_prefix=wrap_prefix):
if not wrapped:
continue
wrapno += 1
if not first:
file.write('\n')
else:
first = False
write_safe(wrapped)
if not line or endl:
file.write('\n')
else:
last = len(args) - 1
for (i, arg) in enumerate(args):
write_safe(str(arg))
if sep and i != last:
write_safe(sep)
if end:
write_safe(end)
if flush:
file.flush()
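# A minimal sketch of a command built on the classes above; EchoCommand is
# an illustrative name, and only Command, PypsiArgParser and
# CommandShortCircuit come from this module. parse_args() raises
# CommandShortCircuit on -h/--help or bad input because of the overridden
# exit()/error() methods, so run() just returns the short-circuit code.
class EchoCommand(Command):
    def __init__(self):
        super().__init__(name='echo', brief='print the given words',
                         topic='demo')
        self.parser = PypsiArgParser(prog='echo', description=self.brief)
        self.parser.add_argument('words', nargs='*', help='words to print')
    def run(self, shell, args):
        try:
            ns = self.parser.parse_args(args)
        except CommandShortCircuit as err:
            return err.code
        print(' '.join(ns.words))
        return 0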
| 0
| 0
| 0
| 14,110
| 0
| 0
| 0
| 5
| 115
|
5050f96a5b09f087a43bfbf366927f7c8ded0262
| 687
|
py
|
Python
|
2020/6/main.py
|
klrkdekira/adventofcode
|
8384d919093712c95b707b8e3f293dbfba22be74
|
[
"BSD-2-Clause"
] | 1
|
2020-12-01T08:41:55.000Z
|
2020-12-01T08:41:55.000Z
|
2020/6/main.py
|
klrkdekira/adventofcode
|
8384d919093712c95b707b8e3f293dbfba22be74
|
[
"BSD-2-Clause"
] | null | null | null |
2020/6/main.py
|
klrkdekira/adventofcode
|
8384d919093712c95b707b8e3f293dbfba22be74
|
[
"BSD-2-Clause"
] | null | null | null |
QUESTIONS = ['a', 'b', 'c', 'x', 'y', 'z']
if __name__ == '__main__':
with open('input') as file:
groups = []
group = []
for row in file:
row = row.strip()
if not row:
groups.append(group)
group = []
continue
group.append(row)
groups.append(group)
print(sum(map(anyone, groups)))
print(sum(map(everyone, groups)))
| 24.535714
| 66
| 0.519651
|
QUESTIONS = ['a', 'b', 'c', 'x', 'y', 'z']
def anyone(group):
answers = []
for person in group:
answers.extend(person)
return len(set(answers))
def everyone(group):
answers = set.intersection(*(set(person) for person in group))
return len(answers)
if __name__ == '__main__':
with open('input') as file:
groups = []
group = []
for row in file:
row = row.strip()
if not row:
groups.append(group)
group = []
continue
group.append(row)
groups.append(group)
print(sum(map(anyone, groups)))
print(sum(map(everyone, groups)))
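        # Quick illustrative sanity checks of the two rules (not puzzle
        # data): anyone() counts questions answered by at least one person,
        # everyone() counts questions answered by every person in a group.
        assert anyone(['ab', 'ac']) == 3   # union -> {a, b, c}
        assert everyone(['ab', 'ac']) == 1  # intersection -> {a}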
| 0
| 0
| 0
| 0
| 0
| 189
| 0
| 0
| 46
|
62d60dcf7dc46a76d9c2e17fa4e8e062fa646f12
| 9,440
|
py
|
Python
|
frille-lang/lib/python3.6/site-packages/pathy/gcs.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | null | null | null |
frille-lang/lib/python3.6/site-packages/pathy/gcs.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | null | null | null |
frille-lang/lib/python3.6/site-packages/pathy/gcs.py
|
frillecode/CDS-spring-2021-language
|
a0b2116044cd20d4a34b98f23bd2663256c90c5d
|
[
"MIT"
] | null | null | null |
from typing import Any
try:
from google.api_core import exceptions as gcs_errors # type:ignore
from google.auth.exceptions import DefaultCredentialsError # type:ignore
from google.cloud.storage import Blob as GCSNativeBlob # type:ignore
from google.cloud.storage import Bucket as GCSNativeBucket # type:ignore
from google.cloud.storage import Client as GCSNativeClient # type:ignore
has_gcs = True
except ImportError:
GCSNativeBlob = Any
DefaultCredentialsError = BaseException
gcs_errors = Any
GCSNativeBucket = Any
GCSNativeClient = Any
has_gcs = False
_MISSING_DEPS = """You are using the GCS functionality of Pathy without
having the required dependencies installed.
Please try installing them:
pip install pathy[gcs]
"""
| 34.327273
| 86
| 0.584322
|
from dataclasses import dataclass
from typing import Any, Dict, Generator, List, Optional
from .base import (
Blob,
Bucket,
BucketClient,
BucketEntry,
ClientError,
PathyScanDir,
PurePathy,
)
try:
from google.api_core import exceptions as gcs_errors # type:ignore
from google.auth.exceptions import DefaultCredentialsError # type:ignore
from google.cloud.storage import Blob as GCSNativeBlob # type:ignore
from google.cloud.storage import Bucket as GCSNativeBucket # type:ignore
from google.cloud.storage import Client as GCSNativeClient # type:ignore
has_gcs = True
except ImportError:
GCSNativeBlob = Any
DefaultCredentialsError = BaseException
gcs_errors = Any
GCSNativeBucket = Any
GCSNativeClient = Any
has_gcs = False
_MISSING_DEPS = """You are using the GCS functionality of Pathy without
having the required dependencies installed.
Please try installing them:
pip install pathy[gcs]
"""
class BucketEntryGCS(BucketEntry["BucketGCS", GCSNativeBlob]):
...
@dataclass
class BlobGCS(Blob[GCSNativeBucket, GCSNativeBlob]):
def delete(self) -> None:
self.raw.delete()
def exists(self) -> bool:
return self.raw.exists()
@dataclass
class BucketGCS(Bucket):
name: str
bucket: GCSNativeBucket
def get_blob(self, blob_name: str) -> Optional[BlobGCS]:
assert isinstance(
blob_name, str
), f"expected str blob name, but found: {type(blob_name)}"
native_blob = None
try:
native_blob = self.bucket.get_blob(blob_name)
except gcs_errors.ClientError:
pass
if native_blob is None:
return None
return BlobGCS(
bucket=self.bucket,
owner=native_blob.owner,
name=native_blob.name,
raw=native_blob,
size=native_blob.size,
updated=int(native_blob.updated.timestamp()),
)
def copy_blob( # type:ignore[override]
self, blob: BlobGCS, target: "BucketGCS", name: str
) -> Optional[BlobGCS]:
assert blob.raw is not None, "raw storage.Blob instance required"
native_blob = self.bucket.copy_blob(blob.raw, target.bucket, name)
if native_blob is None:
return None
return BlobGCS(
bucket=self.bucket,
owner=native_blob.owner,
name=native_blob.name,
raw=native_blob,
size=native_blob.size,
updated=int(native_blob.updated.timestamp()),
)
def delete_blob(self, blob: BlobGCS) -> None: # type:ignore[override]
return self.bucket.delete_blob(blob.name)
def delete_blobs(self, blobs: List[BlobGCS]) -> None: # type:ignore[override]
return self.bucket.delete_blobs(blobs)
def exists(self) -> bool:
try:
return self.bucket.exists()
except gcs_errors.ClientError:
return False
class BucketClientGCS(BucketClient):
client: Optional[GCSNativeClient]
@property
def client_params(self) -> Any:
return dict(client=self.client)
def __init__(self, **kwargs: Any) -> None:
self.recreate(**kwargs)
def recreate(self, **kwargs: Any) -> None:
creds = kwargs["credentials"] if "credentials" in kwargs else None
if creds is not None:
kwargs["project"] = creds.project_id
try:
self.client = GCSNativeClient(**kwargs)
except TypeError:
# TypeError is raised if the imports for GCSNativeClient fail and are
# assigned to Any, which is not callable.
self.client = None
def make_uri(self, path: PurePathy) -> str:
return str(path)
def create_bucket(self, path: PurePathy) -> Bucket:
assert self.client is not None, _MISSING_DEPS
return self.client.create_bucket(path.root)
def delete_bucket(self, path: PurePathy) -> None:
assert self.client is not None, _MISSING_DEPS
bucket = self.client.get_bucket(path.root)
bucket.delete()
def exists(self, path: PurePathy) -> bool:
# Because we want all the parents of a valid blob (e.g. "directory" in
# "directory/foo.file") to return True, we enumerate the blobs with a prefix
# and compare the object names to see if they match a substring of the path
key_name = str(path.key)
try:
for obj in self.list_blobs(path):
if obj.name == key_name:
return True
if obj.name.startswith(key_name + path._flavour.sep):
return True
except gcs_errors.ClientError:
return False
return False
def lookup_bucket(self, path: PurePathy) -> Optional[BucketGCS]:
assert self.client is not None, _MISSING_DEPS
try:
native_bucket = self.client.bucket(path.root)
if native_bucket is not None:
return BucketGCS(str(path.root), bucket=native_bucket)
except gcs_errors.ClientError as err:
print(err)
return None
def get_bucket(self, path: PurePathy) -> BucketGCS:
assert self.client is not None, _MISSING_DEPS
try:
native_bucket = self.client.bucket(path.root)
if native_bucket is not None:
return BucketGCS(str(path.root), bucket=native_bucket)
raise FileNotFoundError(f"Bucket {path.root} does not exist!")
except gcs_errors.ClientError as e:
raise ClientError(message=e.message, code=e.code)
def list_buckets(
self, **kwargs: Dict[str, Any]
) -> Generator[GCSNativeBucket, None, None]:
assert self.client is not None, _MISSING_DEPS
return self.client.list_buckets(**kwargs) # type:ignore
def scandir( # type:ignore[override]
self,
path: Optional[PurePathy] = None,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
) -> PathyScanDir:
return _GCSScanDir(client=self, path=path, prefix=prefix, delimiter=delimiter)
def list_blobs(
self,
path: PurePathy,
prefix: Optional[str] = None,
delimiter: Optional[str] = None,
include_dirs: bool = False,
) -> Generator[BlobGCS, None, None]:
assert self.client is not None, _MISSING_DEPS
continuation_token = None
bucket = self.lookup_bucket(path)
if bucket is None:
return
while True:
if continuation_token:
response = self.client.list_blobs(
path.root,
prefix=prefix,
delimiter=delimiter,
page_token=continuation_token,
)
else:
response = self.client.list_blobs(
path.root, prefix=prefix, delimiter=delimiter
)
for page in response.pages:
for item in page:
yield BlobGCS(
bucket=bucket,
owner=item.owner,
name=item.name,
raw=item,
size=item.size,
updated=item.updated.timestamp(),
)
if response.next_page_token is None:
break
continuation_token = response.next_page_token
class _GCSScanDir(PathyScanDir):
_client: BucketClientGCS
def scandir(self) -> Generator[BucketEntryGCS, None, None]:
assert self._client.client is not None, _MISSING_DEPS
continuation_token = None
if self._path is None or not self._path.root:
gcs_bucket: GCSNativeBucket
for gcs_bucket in self._client.client.list_buckets():
yield BucketEntryGCS(gcs_bucket.name, is_dir=True, raw=None)
return
sep = self._path._flavour.sep
bucket = self._client.lookup_bucket(self._path)
if bucket is None:
return
while True:
if continuation_token:
response = self._client.client.list_blobs(
bucket.name,
prefix=self._prefix,
delimiter=sep,
page_token=continuation_token,
)
else:
response = self._client.client.list_blobs(
bucket.name, prefix=self._prefix, delimiter=sep
)
for page in response.pages:
for folder in list(page.prefixes):
full_name = folder[:-1] if folder.endswith(sep) else folder
name = full_name.split(sep)[-1]
if name:
yield BucketEntryGCS(name, is_dir=True, raw=None)
for item in page:
name = item.name.split(sep)[-1]
if name:
yield BucketEntryGCS(
name=name,
is_dir=False,
size=item.size,
last_modified=item.updated.timestamp(),
raw=item,
)
if response.next_page_token is None:
break
continuation_token = response.next_page_token
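# A minimal usage sketch for the client above, assuming GCS credentials are
# available in the environment and that a bucket named "my-bucket" exists
# (both are assumptions, not guaranteed by this module):
if __name__ == "__main__":
    client = BucketClientGCS()
    path = PurePathy("gs://my-bucket/data")
    for blob in client.list_blobs(path, prefix="data/"):
        print(blob.name, blob.size)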
| 0
| 1,938
| 0
| 6,396
| 0
| 0
| 0
| 152
| 160
|
3deebfeffce2abe1ba44b1052c91bfb62a647fb4
| 1,750
|
py
|
Python
|
project/both.py
|
mahmoudabuelnaga/baby-names-scraping
|
44ded037a4c24306123c4da749e32575eee4afc6
|
[
"MIT"
] | null | null | null |
project/both.py
|
mahmoudabuelnaga/baby-names-scraping
|
44ded037a4c24306123c4da749e32575eee4afc6
|
[
"MIT"
] | null | null | null |
project/both.py
|
mahmoudabuelnaga/baby-names-scraping
|
44ded037a4c24306123c4da749e32575eee4afc6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import csv
import requests
links = []
items = []
for i in range(1,38):
endpoint = f"https://baby.webteb.com/baby-names/%D8%A7%D8%B3%D9%85%D8%A7%D8%A1-%D8%A7%D9%88%D9%84%D8%A7%D8%AF-%D9%88%D8%A8%D9%86%D8%A7%D8%AA?pageindex={i}"
get_response = requests.get(endpoint)
# print(get_response.content)
soup = BeautifulSoup(get_response.content, 'lxml')
# print(soup.prettify())
section = soup.find('div', {'class':'page-section'})
for li in section.find_all('li'):
links.append(li.a['href'])
print(f'{i}', li.a['href'])
for i, link in zip(range(1,len(links)+1), links):
url = f"https://baby.webteb.com{link}"
get_response = requests.get(url)
soup = BeautifulSoup(get_response.content, 'lxml')
content = soup.find('div', {'class':'section name'})
section1 = content.find('div', {'class':'section'})
name_detail = content.find('div', {'class':'name-details'})
section2 = name_detail.find('div', {'class':'section'})
span = section2.find('span', {'class':'latin'})
item = {}
if content.h1.text:
item['arabic_name'] = content.h1.text
if section1.p.text:
item['meaning'] = section1.p.text
if span.text:
item['english_name'] = span.text
print(i, content.h1.text, section1.p.text, span.text)
items.append(item)
filename = '/home/naga/dev/babyNamesScraping/project/both.csv'
with open(filename, 'w', newline='') as f:
    w = csv.DictWriter(f, fieldnames=['arabic_name', 'meaning', 'english_name'], extrasaction='ignore', delimiter=';')
w.writeheader()
print(items)
for item in items:
w.writerow(item)
print(item)
| 29.166667
| 159
| 0.630857
|
# -*- coding: utf-8 -*-
from time import sleep
from bs4 import BeautifulSoup
import csv
import requests
links = []
items = []
for i in range(1,38):
endpoint = f"https://baby.webteb.com/baby-names/%D8%A7%D8%B3%D9%85%D8%A7%D8%A1-%D8%A7%D9%88%D9%84%D8%A7%D8%AF-%D9%88%D8%A8%D9%86%D8%A7%D8%AA?pageindex={i}"
get_response = requests.get(endpoint)
# print(get_response.content)
soup = BeautifulSoup(get_response.content, 'lxml')
# print(soup.prettify())
section = soup.find('div', {'class':'page-section'})
for li in section.find_all('li'):
links.append(li.a['href'])
print(f'{i}', li.a['href'])
for i, link in zip(range(1,len(links)+1), links):
url = f"https://baby.webteb.com{link}"
get_response = requests.get(url)
soup = BeautifulSoup(get_response.content, 'lxml')
content = soup.find('div', {'class':'section name'})
section1 = content.find('div', {'class':'section'})
name_detail = content.find('div', {'class':'name-details'})
section2 = name_detail.find('div', {'class':'section'})
span = section2.find('span', {'class':'latin'})
item = {}
if content.h1.text:
item['arabic_name'] = content.h1.text
if section1.p.text:
item['meaning'] = section1.p.text
if span.text:
item['english_name'] = span.text
print(i, content.h1.text, section1.p.text, span.text)
items.append(item)
filename = '/home/naga/dev/babyNamesScraping/project/both.csv'
with open(filename, 'w', newline='') as f:
    w = csv.DictWriter(f, fieldnames=['arabic_name', 'meaning', 'english_name'], extrasaction='ignore', delimiter=';')
w.writeheader()
print(items)
for item in items:
w.writerow(item)
print(item)
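# The script imports sleep but never calls it; a hedged sketch of how a
# polite per-request delay could be wired in (the one-second default is an
# assumption). Calls to requests.get in the two loops above could be
# replaced with polite_get to rate-limit the scrape.
def polite_get(url, delay=1.0):
    response = requests.get(url)
    sleep(delay)  # pause between requests to avoid hammering the site
    return response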
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 22
|
bb2ad701ba189d46d6b8954c67ceae977de8da75
| 4,318
|
py
|
Python
|
sde/solvers.py
|
d-l-fernandes/ito_general
|
8a9889fa13e5893e923c8d32fd1c94d22aec84d9
|
[
"MIT"
] | null | null | null |
sde/solvers.py
|
d-l-fernandes/ito_general
|
8a9889fa13e5893e923c8d32fd1c94d22aec84d9
|
[
"MIT"
] | null | null | null |
sde/solvers.py
|
d-l-fernandes/ito_general
|
8a9889fa13e5893e923c8d32fd1c94d22aec84d9
|
[
"MIT"
] | null | null | null |
import jax.numpy as jnp
from absl import flags
Array = jnp.ndarray
flags.DEFINE_enum("solver", "strong_3_halfs", ["euler_maruyama", "strong_3_halfs"], "Solver to use.")
FLAGS = flags.FLAGS
solvers_dict = {
"euler_maruyama": EulerMaruyamaSolver,
"strong_3_halfs": Strong3HalfsSolver
}
| 40.35514
| 117
| 0.619268
|
from typing import Tuple
import haiku as hk
import jax.numpy as jnp
import numpyro
from absl import flags
from numpyro.distributions.continuous import MultivariateNormal
from sde import drifts, diffusions
Array = jnp.ndarray
flags.DEFINE_enum("solver", "strong_3_halfs", ["euler_maruyama", "strong_3_halfs"], "Solver to use.")
FLAGS = flags.FLAGS
class BaseSolver:
def __init__(self, delta_t: float, beta_dims: int, drift: drifts.BaseDrift, diffusion: diffusions.BaseDiffusion):
self.delta_t = delta_t
self.beta_dims = beta_dims
self.drift = drift
self.diffusion = diffusion
def __call__(self, x_0: Array, time: float) -> Array:
raise NotImplementedError
class EulerMaruyamaSolver(BaseSolver):
def __init__(self, delta_t: float, beta_dims: int, drift: drifts.BaseDrift, diffusion: diffusions.BaseDiffusion):
super().__init__(delta_t, beta_dims, drift, diffusion)
def __call__(self, x_0: Array, time: float) -> Array:
rng_beta = hk.next_rng_key()
delta_beta = numpyro.sample("delta_beta",
MultivariateNormal(
loc=jnp.zeros(self.beta_dims),
scale_tril=jnp.sqrt(self.delta_t) * jnp.eye(self.beta_dims)),
rng_key=rng_beta)
drift = self.drift(x_0, time)
diff = self.diffusion(x_0, time)
x_1 = x_0 + drift * self.delta_t + jnp.matmul(diff, delta_beta)
return x_1
class Strong3HalfsSolver(BaseSolver):
def __init__(self, delta_t: float, beta_dims: int, drift: drifts.BaseDrift, diffusion: diffusions.BaseDiffusion):
super().__init__(delta_t, beta_dims, drift, diffusion)
def __call__(self, x_0: Array, time: float) -> Array:
rng_beta = hk.next_rng_key()
# Vector of zeros
beta_mean_vector = jnp.zeros((self.beta_dims*2, ))
# Covariance matrix for the betas and gammas
beta_covariance_top_left = self.delta_t ** 3 / 3 * jnp.eye(self.beta_dims)
beta_covariance_top_right = self.delta_t ** 2 / 2 * jnp.eye(self.beta_dims)
beta_covariance_bottom_right = self.delta_t * jnp.eye(self.beta_dims)
beta_covariance_top = jnp.concatenate((beta_covariance_top_left, beta_covariance_top_right), axis=1)
beta_covariance_bottom = jnp.concatenate((beta_covariance_top_right, beta_covariance_bottom_right),
axis=1)
beta_covariance = jnp.concatenate((beta_covariance_top, beta_covariance_bottom), axis=0)
delta_gamma_beta = numpyro.sample("delta_gamma_beta",
MultivariateNormal(loc=beta_mean_vector,
covariance_matrix=beta_covariance),
rng_key=rng_beta)
delta_gamma = delta_gamma_beta[:self.beta_dims]
delta_beta = delta_gamma_beta[self.beta_dims:]
drift_0 = self.drift(x_0, time)
diff = self.diffusion(x_0, time)
diff_plus = self.diffusion(x_0, time + self.delta_t)
init_x_1 = x_0 + drift_0 * self.delta_t + jnp.matmul(diff, delta_beta)
init_x_1 += 1. / self.delta_t * jnp.matmul(diff_plus - diff, delta_beta * self.delta_t - delta_gamma)
def scan_fn(carry, s):
x_1 = carry
x_0_plus = \
x_0 + drift_0 * self.delta_t / self.beta_dims + \
diff[:, s] * jnp.sqrt(self.delta_t)
x_0_minus = \
x_0 + drift_0 * self.delta_t / self.beta_dims - \
diff[:, s] * jnp.sqrt(self.delta_t)
drift_0_plus = self.drift(x_0_plus, time + self.delta_t)
drift_0_minus = self.drift(x_0_minus, time + self.delta_t)
x_1 += 0.25 * self.delta_t * (drift_0_plus + drift_0_minus)
x_1 -= 0.5 * drift_0 * self.delta_t
x_1 += \
1. / (2 * jnp.sqrt(self.delta_t)) * (drift_0_plus - drift_0_minus) * delta_gamma[s]
return x_1, None
final_x_1, _ = hk.scan(scan_fn, init_x_1, jnp.arange(self.beta_dims))
return final_x_1
solvers_dict = {
"euler_maruyama": EulerMaruyamaSolver,
"strong_3_halfs": Strong3HalfsSolver
}
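# For reference, EulerMaruyamaSolver above discretises
#     dX_t = a(X_t, t) dt + b(X_t, t) dbeta_t
# as
#     X_{t+dt} = X_t + a(X_t, t) dt + b(X_t, t) dbeta,  dbeta ~ N(0, dt * I),
# which is exactly the drift * delta_t + matmul(diff, delta_beta) step,
# while Strong3HalfsSolver additionally draws, per Brownian dimension,
# (dgamma, dbeta) jointly Gaussian with covariance
#     [[dt**3 / 3, dt**2 / 2],
#      [dt**2 / 2, dt       ]],
# matching the beta_covariance block matrix assembled in its __call__.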
| 0
| 0
| 0
| 3,790
| 0
| 0
| 0
| 48
| 181
|
79cfe256477332ba59823cac9001633a38f29bc4
| 5,767
|
py
|
Python
|
pysnmp/JUNIPER-SONET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/JUNIPER-SONET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/JUNIPER-SONET-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module JUNIPER-SONET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-SONET-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:50:16 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ifIndex, ifDescr = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifDescr")
jnxMibs, jnxSonetNotifications = mibBuilder.importSymbols("JUNIPER-SMI", "jnxMibs", "jnxSonetNotifications")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Gauge32, iso, ObjectIdentity, TimeTicks, ModuleIdentity, Integer32, Counter32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter64, IpAddress, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Gauge32", "iso", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "Integer32", "Counter32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter64", "IpAddress", "Bits")
DisplayString, DateAndTime, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "DateAndTime", "TextualConvention")
jnxSonet = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 20))
jnxSonet.setRevisions(('2002-12-12 00:00', '2002-08-08 00:00',))
if mibBuilder.loadTexts: jnxSonet.setLastUpdated('200307182154Z')
if mibBuilder.loadTexts: jnxSonet.setOrganization('Juniper Networks, Inc.')
jnxSonetAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1))
jnxSonetAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1), )
if mibBuilder.loadTexts: jnxSonetAlarmTable.setStatus('current')
jnxSonetAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: jnxSonetAlarmEntry.setStatus('current')
jnxSonetCurrentAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 1), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetCurrentAlarms.setStatus('current')
jnxSonetLastAlarmId = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 2), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmId.setStatus('current')
jnxSonetLastAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmTime.setStatus('current')
jnxSonetLastAlarmDate = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmDate.setStatus('current')
jnxSonetLastAlarmEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("set", 2), ("cleared", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmEvent.setStatus('current')
jnxSonetNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0))
jnxSonetAlarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 1)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmSet.setStatus('current')
jnxSonetAlarmCleared = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 2)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmCleared.setStatus('current')
mibBuilder.exportSymbols("JUNIPER-SONET-MIB", jnxSonetAlarms=jnxSonetAlarms, jnxSonetCurrentAlarms=jnxSonetCurrentAlarms, jnxSonetLastAlarmTime=jnxSonetLastAlarmTime, jnxSonetAlarmTable=jnxSonetAlarmTable, JnxSonetAlarmId=JnxSonetAlarmId, jnxSonetLastAlarmEvent=jnxSonetLastAlarmEvent, jnxSonetAlarmSet=jnxSonetAlarmSet, PYSNMP_MODULE_ID=jnxSonet, jnxSonetNotificationPrefix=jnxSonetNotificationPrefix, jnxSonetAlarmCleared=jnxSonetAlarmCleared, jnxSonetAlarmEntry=jnxSonetAlarmEntry, jnxSonet=jnxSonet, jnxSonetLastAlarmId=jnxSonetLastAlarmId, jnxSonetLastAlarmDate=jnxSonetLastAlarmDate)
| 128.155556
| 904
| 0.752211
|
#
# PySNMP MIB module JUNIPER-SONET-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-SONET-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:50:16 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ifIndex, ifDescr = mibBuilder.importSymbols("IF-MIB", "ifIndex", "ifDescr")
jnxMibs, jnxSonetNotifications = mibBuilder.importSymbols("JUNIPER-SMI", "jnxMibs", "jnxSonetNotifications")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Gauge32, iso, ObjectIdentity, TimeTicks, ModuleIdentity, Integer32, Counter32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, Counter64, IpAddress, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Gauge32", "iso", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "Integer32", "Counter32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "Counter64", "IpAddress", "Bits")
DisplayString, DateAndTime, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "DateAndTime", "TextualConvention")
jnxSonet = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 20))
jnxSonet.setRevisions(('2002-12-12 00:00', '2002-08-08 00:00',))
if mibBuilder.loadTexts: jnxSonet.setLastUpdated('200307182154Z')
if mibBuilder.loadTexts: jnxSonet.setOrganization('Juniper Networks, Inc.')
class JnxSonetAlarmId(TextualConvention, Bits):
status = 'current'
namedValues = NamedValues(("sonetLolAlarm", 0), ("sonetPllAlarm", 1), ("sonetLofAlarm", 2), ("sonetLosAlarm", 3), ("sonetSefAlarm", 4), ("sonetLaisAlarm", 5), ("sonetPaisAlarm", 6), ("sonetLopAlarm", 7), ("sonetBerrSdAlarm", 8), ("sonetBerrSfAlarm", 9), ("sonetLrdiAlarm", 10), ("sonetPrdiAlarm", 11), ("sonetReiAlarm", 12), ("sonetUneqAlarm", 13), ("sonetPmisAlarm", 14), ("sonetLocAlarm", 15), ("sonetVaisAlarm", 16), ("sonetVlopAlarm", 17), ("sonetVrdiAlarm", 18), ("sonetVuneqAlarm", 19), ("sonetVmisAlarm", 20), ("sonetVlocAlarm", 21), ("sdhLolAlarm", 22), ("sdhPllAlarm", 23), ("sdhLofAlarm", 24), ("sdhLosAlarm", 25), ("sdhOofAlarm", 26), ("sdhMsAisAlarm", 27), ("sdhHpAisAlarm", 28), ("sdhLopAlarm", 29), ("sdhBerrSdAlarm", 30), ("sdhBerrSfAlarm", 31), ("sdhMsFerfAlarm", 32), ("sdhHpFerfAlarm", 33), ("sdhMsFebeAlarm", 34), ("sdhHpUneqAlarm", 35), ("sdhHpMisAlarm", 36), ("sdhLocAlarm", 37))
jnxSonetAlarms = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1))
jnxSonetAlarmTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1), )
if mibBuilder.loadTexts: jnxSonetAlarmTable.setStatus('current')
jnxSonetAlarmEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: jnxSonetAlarmEntry.setStatus('current')
jnxSonetCurrentAlarms = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 1), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetCurrentAlarms.setStatus('current')
jnxSonetLastAlarmId = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 2), JnxSonetAlarmId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmId.setStatus('current')
jnxSonetLastAlarmTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 3), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmTime.setStatus('current')
jnxSonetLastAlarmDate = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 4), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmDate.setStatus('current')
jnxSonetLastAlarmEvent = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 20, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("set", 2), ("cleared", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxSonetLastAlarmEvent.setStatus('current')
jnxSonetNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0))
jnxSonetAlarmSet = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 1)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmSet.setStatus('current')
jnxSonetAlarmCleared = NotificationType((1, 3, 6, 1, 4, 1, 2636, 4, 6, 0, 2)).setObjects(("IF-MIB", "ifDescr"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmId"), ("JUNIPER-SONET-MIB", "jnxSonetCurrentAlarms"), ("JUNIPER-SONET-MIB", "jnxSonetLastAlarmDate"))
if mibBuilder.loadTexts: jnxSonetAlarmCleared.setStatus('current')
mibBuilder.exportSymbols("JUNIPER-SONET-MIB", jnxSonetAlarms=jnxSonetAlarms, jnxSonetCurrentAlarms=jnxSonetCurrentAlarms, jnxSonetLastAlarmTime=jnxSonetLastAlarmTime, jnxSonetAlarmTable=jnxSonetAlarmTable, JnxSonetAlarmId=JnxSonetAlarmId, jnxSonetLastAlarmEvent=jnxSonetLastAlarmEvent, jnxSonetAlarmSet=jnxSonetAlarmSet, PYSNMP_MODULE_ID=jnxSonet, jnxSonetNotificationPrefix=jnxSonetNotificationPrefix, jnxSonetAlarmCleared=jnxSonetAlarmCleared, jnxSonetAlarmEntry=jnxSonetAlarmEntry, jnxSonet=jnxSonet, jnxSonetLastAlarmId=jnxSonetLastAlarmId, jnxSonetLastAlarmDate=jnxSonetLastAlarmDate)
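# A hedged note on loading: pysmi-generated modules like this one never
# import mibBuilder themselves; pysnmp injects it into the namespace when
# it executes the file, which is why the importSymbols calls above work.
# From an application, loading typically looks like this sketch (the MIB
# source path is an assumption):
#     from pysnmp.smi import builder
#     mibBuilder = builder.MibBuilder()
#     mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
#     mibBuilder.loadModules('JUNIPER-SONET-MIB')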
| 0
| 0
| 0
| 954
| 0
| 0
| 0
| 0
| 22
|
74a36f9de503409718965b9b6bc829fa35d95202
| 190
|
py
|
Python
|
dev.py
|
LCBRU/batch_demographics
|
e516e958091fd74dad00b1705431ac030e3c4503
|
[
"MIT"
] | null | null | null |
dev.py
|
LCBRU/batch_demographics
|
e516e958091fd74dad00b1705431ac030e3c4503
|
[
"MIT"
] | null | null | null |
dev.py
|
LCBRU/batch_demographics
|
e516e958091fd74dad00b1705431ac030e3c4503
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from batch_demographics import create_app
from config import DevConfig
app = create_app(DevConfig())
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
| 23.75
| 41
| 0.736842
|
#!/usr/bin/env python
from batch_demographics import create_app
from config import DevConfig
app = create_app(DevConfig())
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dc71d49cfcda3d4e87c6a2b9fa01e89c155ee69a
| 151
|
py
|
Python
|
widgetProject/widgetApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-rsalcido
|
4b19595867ee38396d0a80bfa0adcd0cb9811d23
|
[
"Apache-2.0"
] | null | null | null |
widgetProject/widgetApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-rsalcido
|
4b19595867ee38396d0a80bfa0adcd0cb9811d23
|
[
"Apache-2.0"
] | null | null | null |
widgetProject/widgetApp/admin.py
|
cs-fullstack-2019-spring/django-fields-widgets-cw-rsalcido
|
4b19595867ee38396d0a80bfa0adcd0cb9811d23
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import supeHero
# Register your models here.
admin.site.register(supeHero)
| 21.571429
| 32
| 0.801325
|
from django.contrib import admin
from .models import supeHero
# Register your models here.
admin.site.register(supeHero)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0430b1b3554d1367b14b734250b34ede8b260068
| 337
|
py
|
Python
|
main.py
|
pooyapooya/rizpardazande
|
818721a3daac1385daf71ac508ad00bf153cbf0b
|
[
"MIT"
] | null | null | null |
main.py
|
pooyapooya/rizpardazande
|
818721a3daac1385daf71ac508ad00bf153cbf0b
|
[
"MIT"
] | null | null | null |
main.py
|
pooyapooya/rizpardazande
|
818721a3daac1385daf71ac508ad00bf153cbf0b
|
[
"MIT"
] | null | null | null |
from easygui.boxes.choice_box import choicebox
from phase1 import phase1
from phase2 import phase2
__author__ = 'po0ya'
choices = [
'Phase1',
'Phase2'
]
choice = choicebox(msg='Please select project phase:', choices=choices)
if choice == choices[0]:
phase1()
else:
phase2()
| 17.736842
| 71
| 0.724036
|
from easygui.boxes.choice_box import choicebox
from easygui.boxes.text_box import textbox
from phase1 import phase1
from phase2 import phase2
__author__ = 'po0ya'
choices = [
'Phase1',
'Phase2'
]
choice = choicebox(msg='Please select project phase:', choices=choices)
if choice == choices[0]:
phase1()
else:
phase2()
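# Note: choicebox returns None when the dialog is cancelled, so the else
# branch above silently runs phase2 on cancel. A hedged variant of the
# dispatch that treats cancellation explicitly:
#     if choice == choices[0]:
#         phase1()
#     elif choice == choices[1]:
#         phase2()
#     # else: dialog cancelled, do nothing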
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 22
|
4471ff0d57c1cc3ec8a60aec1f93edea9763dd0c
| 2,984
|
py
|
Python
|
saws/data_util.py
|
Pangeam/saws
|
5aba511e72bf5feb35eb44be82fbdf805dfe3553
|
[
"Apache-2.0"
] | 5,358
|
2015-09-18T19:16:11.000Z
|
2022-03-31T20:40:51.000Z
|
saws/data_util.py
|
Pangeam/saws
|
5aba511e72bf5feb35eb44be82fbdf805dfe3553
|
[
"Apache-2.0"
] | 112
|
2015-09-10T10:53:57.000Z
|
2022-03-03T09:32:29.000Z
|
saws/data_util.py
|
Pangeam/saws
|
5aba511e72bf5feb35eb44be82fbdf805dfe3553
|
[
"Apache-2.0"
] | 333
|
2015-09-18T19:16:13.000Z
|
2022-03-06T17:27:54.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
try:
from collections import OrderedDict
except:
    pass  # fallback import was filtered out; keep the excerpt syntactically valid
| 35.52381
| 77
| 0.610925
|
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import re
try:
from collections import OrderedDict
except:
from ordereddict import OrderedDict
class DataUtil(object):
"""Utility class to read from the data folder.
Attributes:
* None.
"""
def create_header_to_type_map(self, headers, data_type):
"""Creates a dict mapping headers to ResourceTypes.
Headers are the resource headers as they appear in the RESOURCES.txt.
Headers are mapped to their corresponding ResourceType.
Args:
* headers: A string that represents the header.
* data_type: An Enum specifying the data type.
Returns:
An OrderedDict mapping headers to ResourceTypes.
"""
command_types = []
for item in data_type:
if item != data_type.NUM_TYPES:
command_types.append(item)
return OrderedDict(zip(headers, command_types))
def get_data(self, data_file_path, header_to_type_map, data_type):
"""Gets all data from the specified data file.
Args:
* data_file_path: A string representing the full file path of
the data file.
* header_to_type_map: A dictionary mapping the data header labels
to the data types.
* data_type: An Enum specifying the data type.
Returns:
A list, where each element is a list of completions for each
data_type
"""
data_lists = [[] for x in range(data_type.NUM_TYPES.value)]
with open(data_file_path) as f:
for line in f:
line = re.sub('\n', '', line)
parsing_header = False
# Check if we are reading in a data header to determine
# which set of data we are parsing
for key, value in header_to_type_map.items():
if key in line:
data_type = value
parsing_header = True
break
if not parsing_header:
# Store the data in its associated list
if line.strip() != '':
data_lists[data_type.value].append(line)
for data_list in data_lists:
data_list.sort()
return data_lists
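# A minimal driving sketch for DataUtil, using an illustrative Enum whose
# final member is NUM_TYPES as the methods above expect; the enum names,
# header labels and RESOURCES.txt path are assumptions, not from saws.
from enum import Enum
class ResourceType(Enum):
    INSTANCE_IDS = 0
    BUCKET_NAMES = 1
    NUM_TYPES = 2
if __name__ == '__main__':
    util = DataUtil()
    header_map = util.create_header_to_type_map(
        ['[instance ids]', '[bucket names]'], ResourceType)
    data_lists = util.get_data('RESOURCES.txt', header_map, ResourceType)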
| 0
| 0
| 0
| 2,200
| 0
| 0
| 0
| 2
| 71
|