Dataset schema (one row per column; ⌀ marks columns that contain nulls):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 6 – 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–247 |
| max_stars_repo_name | string | length 4–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count ⌀ | int64 | 1 – 368k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24–24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24–24 |
| max_issues_repo_path | string | length 4–247 |
| max_issues_repo_name | string | length 4–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count ⌀ | int64 | 1 – 116k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24–24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24–24 |
| max_forks_repo_path | string | length 4–247 |
| max_forks_repo_name | string | length 4–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24–24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24–24 |
| content | string | length 1 – 1.04M |
| avg_line_length | float64 | 1.53 – 618k |
| max_line_length | int64 | 1 – 1.02M |
| alphanum_fraction | float64 | 0 – 1 |
| original_content | string | length 6 – 1.04M |
| filtered:remove_non_ascii | int64 | 0 – 538k |
| filtered:remove_decorators | int64 | 0 – 917k |
| filtered:remove_async | int64 | 0 – 722k |
| filtered:remove_classes | int64 | -45 – 1M |
| filtered:remove_generators | int64 | 0 – 814k |
| filtered:remove_function_no_docstring | int64 | -102 – 850k |
| filtered:remove_class_no_docstring | int64 | -3 – 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 – 52.4k |
| filtered:remove_delete_markers | int64 | 0 – 59.6k |
1c7029d58f3d83ba1ff6470775aed8811fcd0cda
| 1,618
|
py
|
Python
|
examples/docs_snippets/docs_snippets/intro_tutorial/advanced/scheduling/scheduler.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-01-31T19:16:29.000Z
|
2021-01-31T19:16:29.000Z
|
examples/docs_snippets/docs_snippets/intro_tutorial/advanced/scheduling/scheduler.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets/docs_snippets/intro_tutorial/advanced/scheduling/scheduler.py
|
rpatil524/dagster
|
6f918d94cbd543ab752ab484a65e3a40fd441716
|
[
"Apache-2.0"
] | 1
|
2021-12-08T18:13:19.000Z
|
2021-12-08T18:13:19.000Z
|
# start_scheduler_marker_0
# end_scheduler_marker_0
# start_scheduler_marker_1
# end_scheduler_marker_1
# start_scheduler_marker_2
# end_scheduler_marker_2
# start_scheduler_marker_3
# end_scheduler_marker_3
# start_scheduler_marker_4
# end_scheduler_marker_4
| 23.114286
| 75
| 0.721261
|
# start_scheduler_marker_0
import csv
from datetime import datetime
import requests
from dagster import get_dagster_logger, job, op, repository, schedule
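# hello_cereal is a Dagster op: it downloads the cereal CSV, parses it with
# csv.DictReader, and logs how many cereals were found for the date supplied
# through op config.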
@op
def hello_cereal(context):
response = requests.get("https://docs.dagster.io/assets/cereal.csv")
lines = response.text.split("\n")
cereals = [row for row in csv.DictReader(lines)]
date = context.op_config["date"]
get_dagster_logger().info(
f"Today is {date}. Found {len(cereals)} cereals."
)
@job
def hello_cereal_job():
hello_cereal()
# end_scheduler_marker_0
# start_scheduler_marker_1
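# "45 6 * * *" is standard cron syntax: run every day at 6:45 AM (US/Central here).
# The dict a schedule function returns is the run config used for each scheduled run.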
@schedule(
cron_schedule="45 6 * * *",
job=hello_cereal_job,
execution_timezone="US/Central",
)
def good_morning_schedule(context):
date = context.scheduled_execution_time.strftime("%Y-%m-%d")
return {"ops": {"hello_cereal": {"config": {"date": date}}}}
# end_scheduler_marker_1
# start_scheduler_marker_2
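# A repository collects the job and its schedule so Dagster tooling can load them together.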
@repository
def hello_cereal_repository():
return [hello_cereal_job, good_morning_schedule]
# end_scheduler_marker_2
# start_scheduler_marker_3
def weekday_filter(_context):
weekno = datetime.today().weekday()
# Returns true if current day is a weekday
return weekno < 5
# end_scheduler_marker_3
# start_scheduler_marker_4
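# should_execute is evaluated at every scheduled tick; returning False skips that
# run, so this schedule only fires Monday through Friday.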
@schedule(
cron_schedule="45 6 * * *",
job=hello_cereal_job,
execution_timezone="US/Central",
should_execute=weekday_filter,
)
def good_weekday_morning_schedule(context):
date = context.scheduled_execution_time.strftime("%Y-%m-%d")
return {"ops": {"hello_cereal": {"inputs": {"date": {"value": date}}}}}
# end_scheduler_marker_4
| 0
| 963
| 0
| 0
| 0
| 117
| 0
| 39
| 223
|
17b1dff8fe64ef2da095afccaa9e3fdd5b49ac2c
| 5,483
|
py
|
Python
|
webmachine/auth/oauth_res.py
|
benoitc/dj-webmachine
|
77653d73de57388b712eaf50de8c32ec70c182fa
|
[
"MIT"
] | 6
|
2015-03-29T03:17:53.000Z
|
2020-01-21T11:09:26.000Z
|
webmachine/auth/oauth_res.py
|
benoitc/dj-webmachine
|
77653d73de57388b712eaf50de8c32ec70c182fa
|
[
"MIT"
] | 1
|
2015-05-28T11:32:44.000Z
|
2015-05-28T11:32:44.000Z
|
webmachine/auth/oauth_res.py
|
benoitc/dj-webmachine
|
77653d73de57388b712eaf50de8c32ec70c182fa
|
[
"MIT"
] | 4
|
2015-05-20T20:53:02.000Z
|
2019-11-12T19:46:07.000Z
|
# -*- coding: utf-8 -
#
# This file is part of dj-webmachine released under the MIT license.
# See the NOTICE for more information.
try:
    from restkit import oauth2
except ImportError:
    raise ImportError("restkit>=3.0.2 package is needed for auth.")
| 32.443787
| 82
| 0.590188
|
# -*- coding: utf-8 -
#
# This file is part of dj-webmachine released under the MIT license.
# See the NOTICE for more information.
from django.template import loader, RequestContext
from django.utils.encoding import iri_to_uri
try:
from restkit import oauth2
except ImportError:
raise ImportError("restkit>=3.0.2 package is needed for auth.")
from webmachine.auth.oauth import OAuthServer, load_oauth_datastore
from webmachine.forms import OAuthAuthenticationForm
from webmachine.resource import Resource
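# OauthResource wires the three legs of OAuth 1.0 (request_token, authorize,
# access_token) into a single webmachine Resource: malformed_request() parses the
# incoming OAuth parameters and is_authorized() dispatches on the matched action.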
class OauthResource(Resource):
def __init__(self, realm='OAuth',
auth_template='webmachine/authorize_token.html',
auth_form=OAuthAuthenticationForm):
self.auth_template = auth_template
self.auth_form = auth_form
self.realm = realm
oauth_datastore = load_oauth_datastore()
self.oauth_server = OAuthServer(oauth_datastore())
self.oauth_server.add_signature_method(oauth2.SignatureMethod_PLAINTEXT())
self.oauth_server.add_signature_method(oauth2.SignatureMethod_HMAC_SHA1())
def allowed_methods(self, req, resp):
return ["GET", "HEAD", "POST"]
def oauth_authorize(self, req, resp):
try:
token = self.oauth_server.fetch_request_token(req.oauth_request)
except oauth2.Error as err:
return self.oauth_error(req, resp, err)
try:
callback = self.oauth_server.get_callback(req.oauth_request)
except Exception:
callback = None
if req.method == "GET":
params = req.oauth_request.get_normalized_parameters()
form = self.auth_form(initial={
'oauth_token': token.key,
'oauth_callback': token.get_callback_url() or callback,
})
resp.content = loader.render_to_string(self.auth_template,
{'form': form}, RequestContext(req))
elif req.method == "POST":
try:
form = self.auth_form(req.POST)
if form.is_valid():
token = self.oauth_server.authorize_token(token, req.user)
args = '?'+token.to_string(only_key=True)
else:
args = '?error=%s' % 'Access not granted by user.'
if not callback:
resp.content = 'Access not granted by user.'
return True
resp.redirect_to = iri_to_uri("%s%s" % (callback, args))
except oauth2.Error as err:
return self.oauth_error(req, resp, err)
return True
def oauth_access_token(self, req, resp):
try:
token = self.oauth_server.fetch_access_token(req.oauth_request)
if not token:
return False
resp.content = token.to_string()
except oauth2.Error as err:
return self.oauth_error(req, resp, err)
return True
def oauth_request_token(self, req, resp):
try:
token = self.oauth_server.fetch_request_token(req.oauth_request)
if not token:
return False
resp.content = token.to_string()
except oauth2.Error as err:
return self.oauth_error(req, resp, err)
return True
def oauth_error(self, req, resp, err):
resp.content = str(err)
return 'OAuth realm="%s"' % self.realm
def oauth_resp(self, req, resp):
return resp.content
def content_types_provided(self, req, resp):
return [("", self.oauth_resp)]
def process_post(self, res, resp):
# we already processed POST
return True
def created_location(self, req, resp):
try:
return resp.redirect_to
except AttributeError:
return False
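# Dispatch: ping() stored the matched URL action, so "authorize" routes to
# oauth_authorize, "access_token" to oauth_access_token, and so on.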
def is_authorized(self, req, resp):
func = getattr(self, "oauth_%s" % req.oauth_action)
return func(req, resp)
def malformed_request(self, req, resp):
params = {}
headers = {}
if req.method == "POST":
params = dict(req.REQUEST.items())
if 'HTTP_AUTHORIZATION' in req.META:
headers['Authorization'] = req.META.get('HTTP_AUTHORIZATION')
oauth_request = oauth2.Request.from_request(req.method,
req.build_absolute_uri(), headers=headers,
parameters=params,
query_string=req.META.get('QUERY_STRING'))
if not oauth_request:
return True
req.oauth_request = oauth_request
return False
def ping(self, req, resp):
action = req.url_kwargs.get("action")
if not action or action not in ("authorize", "access_token",
"request_token"):
return False
req.oauth_action = action
return True
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^authorize$', self,
kwargs={"action": "authorize"},
name="oauth_authorize"),
url(r'^access_token$', self,
kwargs={"action": "access_token"},
name="oauth_access_token"),
url(r'^request_token$', self,
kwargs= {"action": "request_token"},
name="oauth_request_token"),
)
return urlpatterns
urls = property(get_urls)
| 0
| 0
| 0
| 4,942
| 0
| 0
| 0
| 153
| 161
|
b7fb6d55ddc310c89f7dd03fe9fc2ce8f6e8528e
| 1,623
|
py
|
Python
|
commands/dns.py
|
thexmarat/routeros-scanner
|
8587493c243572218b5a7778d8bcbc698464856b
|
[
"MIT"
] | null | null | null |
commands/dns.py
|
thexmarat/routeros-scanner
|
8587493c243572218b5a7778d8bcbc698464856b
|
[
"MIT"
] | null | null | null |
commands/dns.py
|
thexmarat/routeros-scanner
|
8587493c243572218b5a7778d8bcbc698464856b
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
| 27.508475
| 113
| 0.550216
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from commands.basecommand import BaseCommand
import re
class DNS(BaseCommand):
def __init__(self):
self.__name__ = 'DNS Cache'
def run_ssh(self, sshc):
data = self._ssh_data(sshc, '/ip dns print')
enabled = 'allow-remote-requests: yes' in data.lower()
res = self._ssh_data_with_header(sshc, '/ip dns cache print detail')
sus_dns, recommendation = self.check_results_ssh(res, enabled)
return {'raw_data': res,
'suspicious': sus_dns,
'recommendation': recommendation}
def check_results_ssh(self, res, enabled):
sus_dns = []
recommendation = []
for item in res:
try:
i = hms(item['ttl'])  # hms already returns an int; passing the full "...s" string lets its [:-1] slice drop only the empty trailing field (partitioning off the 's' first would silently discard the last number)
except IndexError:
continue
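# 200,000 seconds is roughly 2.3 days; cached entries with longer TTLs are flagged.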
if i > 200000:
sus_dns.append(f'Domain name: {item["name"]} with ip {item["address"]}: might be DNS poisoning- '
f'severity: high')
if enabled:
recommendation.append('In case DNS cache is not required on your router - disable it')
return sus_dns, recommendation
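# hms() converts a RouterOS duration string such as "1w2d3h4m5s" into seconds.
# re.split on the unit letters yields the numbers plus a trailing empty field,
# which the [:-1] slice drops; shorter strings simply omit the leading units.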
def hms(s):
l = list(map(int, re.split('[wdhms]', s)[:-1]))
if len(l) == 5:
return l[0]*604800 + l[1]*86400 + l[2]*3600 + l[3]*60 + l[4]
elif len(l) == 4:
return l[0]*86400 + l[1]*3600 + l[2]*60 + l[3]
elif len(l) == 3:
return l[0]*3600 + l[1]*60 + l[2]
elif len(l) == 2:
return l[0]*60 + l[1]
else:
return l[0]
| 0
| 0
| 0
| 1,087
| 0
| 354
| 0
| 11
| 91
|
1c2e6507af08539c77c856e95f6d4852bc06d2f2
| 9,590
|
py
|
Python
|
tests/test_integration.py
|
repo-helper/formate
|
45e4b4fe29af144db714ea90c92cf6e7035ae301
|
[
"MIT"
] | 1
|
2022-03-19T07:39:58.000Z
|
2022-03-19T07:39:58.000Z
|
tests/test_integration.py
|
repo-helper/formate
|
45e4b4fe29af144db714ea90c92cf6e7035ae301
|
[
"MIT"
] | 14
|
2021-01-25T23:10:04.000Z
|
2021-06-29T19:55:38.000Z
|
tests/test_integration.py
|
repo-helper/formate
|
45e4b4fe29af144db714ea90c92cf6e7035ae301
|
[
"MIT"
] | null | null | null |
# stdlib
import re
# 3rd party
# this package
path_sub = re.compile(rf" .*/pytest-of-.*/pytest-\d+")
| 25.710456
| 96
| 0.727216
|
# stdlib
import re
from typing import Union, no_type_check
# 3rd party
import click
import pytest
from _pytest.capture import CaptureResult
from coincidence.regressions import AdvancedDataRegressionFixture, AdvancedFileRegressionFixture
from coincidence.selectors import max_version, min_version, not_pypy, only_pypy
from consolekit.terminal_colours import strip_ansi
from consolekit.testing import CliRunner, Result
from domdf_python_tools.paths import PathPlus, in_directory
# this package
from formate import Reformatter, reformat_file
from formate.__main__ import main
from formate.config import load_toml
path_sub = re.compile(rf" .*/pytest-of-.*/pytest-\d+")
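# path_sub strips the machine-specific pytest tmp directory from captured output
# so the regression files compare equal across machines.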
@no_type_check
def check_out(
result: Union[Result, CaptureResult[str]],
advanced_data_regression: AdvancedDataRegressionFixture,
):
if hasattr(result, "stdout"):
stdout = result.stdout
else:
stdout = result.out
if hasattr(result, "stderr"):
stderr = result.stderr
else:
stderr = result.err
data_dict = {
"out": strip_ansi(path_sub.sub(" ...", stdout)).split('\n'),
"err": strip_ansi(path_sub.sub(" ...", stderr)).split('\n'),
}
advanced_data_regression.check(data_dict)
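# The demo_environment fixture below writes an example formate.toml plus a
# deliberately mis-formatted code.py into the tmp dir for each test.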
@pytest.fixture()
def demo_environment(tmp_pathplus):
example_formate_toml = PathPlus(__file__).parent / "example_formate.toml"
(tmp_pathplus / "formate.toml").write_text(example_formate_toml.read_text())
code = [
"class F:",
"\tfrom collections import (",
"Iterable,",
"\tCounter,",
"\t\t)",
'',
"\tdef foo(self):",
"\t\tpass",
'',
"print('hello world')",
r"assert t.uname == '\udce4\udcf6\udcfc'",
]
(tmp_pathplus / "code.py").write_lines(code, trailing_whitespace=True)
@pytest.fixture()
def demo_pyproject_environment(demo_environment, tmp_pathplus):
example_formate_toml = PathPlus(__file__).parent / "example_pyproject.toml"
(tmp_pathplus / "pyproject.toml").write_text(example_formate_toml.read_text())
def test_integration(
tmp_pathplus: PathPlus,
advanced_file_regression: AdvancedFileRegressionFixture,
capsys,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
config = load_toml(tmp_pathplus / "formate.toml")
st = (tmp_pathplus / "code.py").stat()
assert st == st
assert reformat_file(tmp_pathplus / "code.py", config) == 1
advanced_file_regression.check_file(tmp_pathplus / "code.py")
check_out(capsys.readouterr(), advanced_data_regression)
# mtime should have changed
new_st = (tmp_pathplus / "code.py").stat()
assert new_st.st_mtime != st.st_mtime
assert new_st != st
# Calling a second time shouldn't change anything
assert reformat_file(tmp_pathplus / "code.py", config) == 0
advanced_file_regression.check_file(tmp_pathplus / "code.py")
# mtime should be the same
assert (tmp_pathplus / "code.py").stat().st_mtime == new_st.st_mtime
def test_integration_pyproject(
tmp_pathplus: PathPlus,
advanced_file_regression: AdvancedFileRegressionFixture,
capsys,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_pyproject_environment,
):
config = load_toml(tmp_pathplus / "pyproject.toml")
assert reformat_file(tmp_pathplus / "code.py", config) == 1
advanced_file_regression.check_file(tmp_pathplus / "code.py")
check_out(capsys.readouterr(), advanced_data_regression)
# Calling a second time shouldn't change anything
assert reformat_file(tmp_pathplus / "code.py", config) == 0
advanced_file_regression.check_file(tmp_pathplus / "code.py")
def test_reformatter_class(
tmp_pathplus: PathPlus,
advanced_file_regression: AdvancedFileRegressionFixture,
capsys,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
config = load_toml(tmp_pathplus / "formate.toml")
r = Reformatter(tmp_pathplus / "code.py", config)
with pytest.raises(ValueError, match=r"'Reformatter.run\(\)' must be called first!"):
r.to_string()
with pytest.raises(ValueError, match=r"'Reformatter.run\(\)' must be called first!"):
r.to_file()
with pytest.raises(ValueError, match=r"'Reformatter.run\(\)' must be called first!"):
r.get_diff()
st = (tmp_pathplus / "code.py").stat()
assert st == st
assert r.run() == 1
r.to_file()
advanced_file_regression.check_file(tmp_pathplus / "code.py")
advanced_file_regression.check(r.to_string(), extension="._py_")
captured = capsys.readouterr()
assert not captured.out
assert not captured.err
# mtime should have changed
new_st = (tmp_pathplus / "code.py").stat()
assert new_st.st_mtime != st.st_mtime
assert new_st != st
# Calling a second time shouldn't change anything
r = Reformatter(tmp_pathplus / "code.py", config)
assert r.run() == 0
r.to_file()
advanced_file_regression.check_file(tmp_pathplus / "code.py")
def test_cli(
tmp_pathplus: PathPlus,
advanced_file_regression: AdvancedFileRegressionFixture,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
result: Result
st = (tmp_pathplus / "code.py").stat()
assert st == st
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
main,
args=["code.py", "--no-colour", "--diff", "--verbose"],
)
assert result.exit_code == 1
advanced_file_regression.check_file(tmp_pathplus / "code.py")
check_out(result, advanced_data_regression)
# mtime should have changed
new_st = (tmp_pathplus / "code.py").stat()
assert new_st.st_mtime != st.st_mtime
assert new_st != st
# Calling a second time shouldn't change anything
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, args=["code.py"])
assert result.exit_code == 0
# mtime should be the same
assert (tmp_pathplus / "code.py").stat().st_mtime == new_st.st_mtime
def test_cli_verbose_verbose(
tmp_pathplus: PathPlus,
advanced_file_regression: AdvancedFileRegressionFixture,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
result: Result
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
main,
args=["code.py", "--no-colour", "--diff", "--verbose", "-v"],
)
assert result.exit_code == 1
advanced_file_regression.check_file(tmp_pathplus / "code.py")
# Calling a second time shouldn't change anything
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(
main,
args=["code.py", "code.c", "--no-colour", "--diff", "--verbose", "-v"],
)
assert result.exit_code == 0
check_out(result, advanced_data_regression)
@max_version("3.9.9", reason="Output differs on Python 3.10+")
@not_pypy("Output differs on PyPy")
def test_cli_syntax_error(
tmp_pathplus: PathPlus,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
code = [
"class F:",
"\tfrom collections import (",
"Iterable,",
"\tCounter,",
"\t\t)",
'',
"\tdef foo(self):",
"\t\tpass",
'',
"print('hello world'",
]
(tmp_pathplus / "code.py").write_lines(code, trailing_whitespace=True)
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(main, args=["code.py", "--no-colour", "--verbose"])
assert result.exit_code == 126
check_out(result, advanced_data_regression)
@only_pypy("Output differs on PyPy")
def test_cli_syntax_error_pypy(
tmp_pathplus: PathPlus,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
code = [
"class F:",
"\tfrom collections import (",
"Iterable,",
"\tCounter,",
"\t\t)",
'',
"\tdef foo(self):",
"\t\tpass",
'',
"print('hello world'",
]
(tmp_pathplus / "code.py").write_lines(code, trailing_whitespace=True)
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(main, args=["code.py", "--no-colour", "--verbose"])
assert result.exit_code == 126
check_out(result, advanced_data_regression)
@min_version("3.10", reason="Output differs on Python 3.10+")
def test_cli_syntax_error_py310(
tmp_pathplus: PathPlus,
advanced_data_regression: AdvancedDataRegressionFixture,
demo_environment,
):
code = [
"class F:",
"\tfrom collections import (",
"Iterable,",
"\tCounter,",
"\t\t)",
'',
"\tdef foo(self):",
"\t\tpass",
'',
"print('hello world'",
]
(tmp_pathplus / "code.py").write_lines(code, trailing_whitespace=True)
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result: Result = runner.invoke(main, args=["code.py", "--no-colour", "--verbose"])
assert result.exit_code == 126
check_out(result, advanced_data_regression)
@pytest.mark.skipif(click.__version__.split('.')[0] != '7', reason="Output differs on Click 8")
def test_cli_no_config(
tmp_pathplus: PathPlus,
advanced_data_regression: AdvancedDataRegressionFixture,
):
result: Result
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, args=["--no-colour", "--verbose"])
assert result.exit_code == 2
check_out(result, advanced_data_regression)
@pytest.mark.skipif(click.__version__.split('.')[0] == '7', reason="Output differs on Click 8")
def test_cli_no_config_click8(
tmp_pathplus: PathPlus,
advanced_data_regression: AdvancedDataRegressionFixture,
):
result: Result
with in_directory(tmp_pathplus):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, args=["--no-colour", "--verbose"])
assert result.exit_code == 2
check_out(result, advanced_data_regression)
| 0
| 4,095
| 0
| 0
| 0
| 4,515
| 0
| 300
| 563
|
e784dd55930201f54c11184d662dda78259fd84c
| 7,049
|
py
|
Python
|
lib/model.py
|
behzadzarfsaz/DualMemoryLearning
|
924905ea14466ac60589e71ff5df6e33e98b6d92
|
[
"MIT"
] | null | null | null |
lib/model.py
|
behzadzarfsaz/DualMemoryLearning
|
924905ea14466ac60589e71ff5df6e33e98b6d92
|
[
"MIT"
] | null | null | null |
lib/model.py
|
behzadzarfsaz/DualMemoryLearning
|
924905ea14466ac60589e71ff5df6e33e98b6d92
|
[
"MIT"
] | null | null | null |
import logging
import shutil
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Model")
terminal_columns = shutil.get_terminal_size().columns // 2
| 42.981707
| 110
| 0.55483
|
import copy
import logging
import shutil
from math import exp
import numpy as np
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.utils import shuffle
from tqdm import trange
from lib.bqueue import Bqueue
from lib.dnn import Dnn
from lib.helper import Helper
from lib.som import SOM
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Model")
terminal_columns = shutil.get_terminal_size().columns // 2
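# Model couples a self-organizing map (SOM) used as a feature transform with a DNN
# classifier, plus a bounded short-term-memory queue (Bqueue) used for replay.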
class Model:
def __init__(
self, input_dim, batch_size, som_x, som_y,
label_class_num, xt, tt, limit, stm
):
self.input_dim = input_dim
self.som_x = som_x
self.som_y = som_y
self.class_num = label_class_num
self.batch_size = batch_size
self.som = SOM(self.som_x, self.som_y, self.input_dim)
self.dnn = Dnn(self.som_x * self.som_y, self.class_num)
self.x_test = xt
self.t_test = tt
self.stm = Bqueue(max_size=stm)
self.limit = limit
self.scaler = StandardScaler()
self.max = 1.0
def transfer(self, dist, test=False):
if self.max < np.max(dist) and not test:
self.max = np.max(dist)
dist /= self.max
return self.scaler.fit_transform(dist)
@staticmethod
def flatten(samples):
return np.reshape(samples, newshape=[-1, samples.shape[1] * samples.shape[2]])
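# reply() (presumably meant "replay") oversamples the per-class prototypes stored
# in STM up to self.limit samples per class, to be mixed with the next sub-task's data.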
def reply(self):
samples = None
labels = None
stm_samples = np.array([s[0] for s in self.stm.get_list()]).astype("float32")
stm_labels = np.array([s[1] for s in self.stm.get_list()]).astype("float32")
if stm_samples.shape[0] > 0:
for i in trange(self.class_num, desc="Replaying Data"):
class_stm_idx = np.argwhere(np.argmax(stm_labels, axis=1) == i).ravel()
if class_stm_idx.shape[0] == 0:
break
class_prototypes = stm_samples[class_stm_idx]
ll = stm_labels[class_stm_idx]
g_samples = np.repeat(
class_prototypes, self.limit // class_prototypes.shape[0], axis=0
)
g_labels = np.repeat(ll, self.limit // class_prototypes.shape[0], axis=0)
if i == 0:
samples = g_samples
labels = g_labels
else:
samples = np.concatenate((samples, g_samples))
labels = np.concatenate((labels, g_labels))
return samples, labels
def fill_stm(self, samples, z_som, labels):
logger.info("\rFilling STM")
_, acc = self.dnn.evaluate(z_som, labels, batch_size=1, verbose=0)
acc = np.array(acc).astype("float32")
stm_idx = np.argwhere(acc > 0.5).ravel()
for s in range(self.class_num):
class_idx = np.argwhere(np.argmax(labels[stm_idx], axis=1) == s).ravel()
np.random.shuffle(class_idx)
class_samples = samples[class_idx]
class_labels = labels[class_idx]
class_samples, class_labels = shuffle(class_samples, class_labels)
loop_iter = min(self.stm.max_size // self.class_num, class_idx.shape[0])
for i in range(loop_iter):
self.stm.push(
(class_samples[i], class_labels[i])
)
def train(
self, samples, labels, dnn_iter, som_lr, som_rad, ce, sub_task, epoch
):
samples, labels = shuffle(samples, labels)
logger.info("\r".center(terminal_columns, "="))
logger.info(f"\r Sub-Task D{sub_task}")
logger.info("\r".center(terminal_columns, "="))
confusion_matrices = []
sigma = []
r_samples = None
r_labels = None
if sub_task > 1 and self.stm.max_size > 0:
m_samples, m_labels = self.reply()
if m_samples is not None:
r_samples = np.concatenate((samples, m_samples))
r_labels = np.concatenate((labels, m_labels))
r_samples, r_labels = shuffle(r_samples, r_labels)
else:
r_samples = samples
r_labels = labels
for ep in range(epoch):
new_labels = np.unique(np.argmax(labels, axis=1))
x, t = Helper.generate_batches(r_samples, r_labels, self.batch_size)
sigma = []
d_acc = 0.0
cm_list = range(len(x))
pbar = trange(len(x))
d_counter = 0
for i in pbar:
z_som = self.transfer(self.som.get_distances(x[i]), test=True)
loss, acc = self.dnn.evaluate(z_som, t[i], verbose=0)
loss = np.array(loss)
wrong_idx = np.argwhere(np.greater(np.array(loss), ce)).ravel()
if wrong_idx.shape[0] > 0:
decay = exp(-1 * ((10 / sub_task) * d_counter / len(x)))
sigma.append(som_rad * decay)
d_counter += 1
mask = np.isin(np.argmax(t[i][wrong_idx], axis=1), new_labels)
new_wrong_samples = x[i][wrong_idx][mask]
self.som.train(
new_wrong_samples, learning_rate=som_lr * decay,
radius=som_rad * decay, global_order=self.batch_size
)
z_som = self.transfer(
self.som.get_distances(x[i], batch_size=self.batch_size)
)
z_som_test = self.transfer(
self.som.get_distances(self.x_test, batch_size=self.batch_size), test=True
)
cm = i in cm_list
d_loss, d_acc, confusion_matrix = self.dnn.train(
z_som, t[i], z_som_test, self.t_test,
cm=cm, epoch=dnn_iter, batch_size=self.batch_size
)
if len(confusion_matrix) > 0:
for m in confusion_matrix:
confusion_matrices.append(m)
d_acc = np.mean(np.array(d_acc).astype("float32"))
else:
confusion_matrices.append(copy.copy(confusion_matrices[-1]))
pbar.set_description(
f"Epoch{ep + 1}/{epoch}"
f"|Batch:{i + 1}/{len(x)}"
f"|CE:{wrong_idx.shape[0]}/{x[i].shape[0]}"
f"|Train Acc.:{d_acc:.4f}"
)
pbar.refresh()
logger.info("\rEvaluation...")
z_som_test = self.transfer(self.som.get_distances(self.x_test, batch_size=self.batch_size), test=True)
z_som_stm = self.transfer(self.som.get_distances(r_samples, batch_size=self.batch_size), test=True)
loss, accuracy = self.dnn.evaluate(z_som_test, self.t_test, verbose=1)
if self.stm.max_size > 0:
self.fill_stm(r_samples, z_som_stm, r_labels)
return accuracy, np.array(sigma), confusion_matrices
| 0
| 105
| 0
| 6,477
| 0
| 0
| 0
| 58
| 243
|
b85ee2815f52bf02efa5142a630bf19bd92d2932
| 179
|
py
|
Python
|
tests/project/app/urls.py
|
j4mie/django-kronos
|
71d90a67eb73e9c28666e77611466062ff3e3dda
|
[
"MIT"
] | 1
|
2015-11-05T11:45:52.000Z
|
2015-11-05T11:45:52.000Z
|
tests/project/app/urls.py
|
j4mie/django-kronos
|
71d90a67eb73e9c28666e77611466062ff3e3dda
|
[
"MIT"
] | null | null | null |
tests/project/app/urls.py
|
j4mie/django-kronos
|
71d90a67eb73e9c28666e77611466062ff3e3dda
|
[
"MIT"
] | null | null | null |
from views import home
urlpatterns = patterns('',
url(r'^$', home, name='home'),
url('fandjango/', include('fandjango.urls'))
)
| 17.9
| 48
| 0.659218
|
from django.conf.urls.defaults import *
from views import home
urlpatterns = patterns('',
url(r'^$', home, name='home'),
url('fandjango/', include('fandjango.urls'))
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 22
|
c39eec60b57532ade429a9e3594f24af68db932e
| 19,140
|
py
|
Python
|
pychess/Players/PyChessCECP.py
|
jacobchrismarsh/chess_senior_project
|
7797b1f96fda5d4d268224a21e54a744d17e7b81
|
[
"MIT"
] | null | null | null |
pychess/Players/PyChessCECP.py
|
jacobchrismarsh/chess_senior_project
|
7797b1f96fda5d4d268224a21e54a744d17e7b81
|
[
"MIT"
] | 40
|
2019-05-04T04:46:31.000Z
|
2022-02-26T10:37:51.000Z
|
pychess/Players/PyChessCECP.py
|
jacobchrismarsh/chess_senior_project
|
7797b1f96fda5d4d268224a21e54a744d17e7b81
|
[
"MIT"
] | null | null | null |
import sys
if sys.platform != "win32":
import readline
readline.clear_history()
ASCII = sys.platform == "win32"
| 38.05169
| 95
| 0.446708
|
import re
import signal
import sys
from threading import Thread
import pychess
from pychess.Players.PyChess import PyChess
from pychess.System import conf, fident
from pychess.Utils.book import getOpenings
from pychess.Utils.const import (
NORMALCHESS,
FEN_START,
BLACK,
FISCHERRANDOMCHESS,
CRAZYHOUSECHESS,
WILDCASTLESHUFFLECHESS,
LOSERSCHESS,
SUICIDECHESS,
ATOMICCHESS,
THREECHECKCHESS,
KINGOFTHEHILLCHESS,
ASEANCHESS,
MAKRUKCHESS,
CAMBODIANCHESS,
SITTUYINCHESS,
GIVEAWAYCHESS,
HORDECHESS,
RACINGKINGSCHESS,
PLACEMENTCHESS,
WHITE,
)
from pychess.Utils.lutils.Benchmark import benchmark
from pychess.Utils.lutils.perft import perft
from pychess.Utils.lutils.LBoard import LBoard
from pychess.Utils.lutils.ldata import MAXPLY
from pychess.Utils.lutils import lsearch, leval
from pychess.Utils.lutils.lmove import parseSAN, parseAny, toSAN, ParsingError
from pychess.Utils.lutils.lmovegen import genAllMoves, genCaptures, genCheckEvasions
from pychess.Utils.lutils.validator import validateMove
from pychess.System.Log import log
from pychess.Variants.horde import HORDESTART
from pychess.Variants.placement import PLACEMENTSTART
from pychess.Variants.asean import (
ASEANSTART,
MAKRUKSTART,
KAMBODIANSTART,
SITTUYINSTART,
)
if sys.platform != "win32":
import readline
readline.clear_history()
ASCII = sys.platform == "win32"
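# PyChessCECP implements the Chess Engine Communication Protocol (CECP, a.k.a. the
# xboard/WinBoard protocol): run() reads one command per line from stdin and the
# handlers below answer on stdout.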
def get_input():
return input()
class PyChessCECP(PyChess):
def __init__(self):
PyChess.__init__(self)
self.board = LBoard(NORMALCHESS)
self.board.applyFen(FEN_START)
self.forced = False
self.analyzing = False
self.thread = None
self.features = {
"ping": 1,
"setboard": 1,
"playother": 1,
"san": 1,
"usermove": 1,
"time": 1,
"draw": 1,
"sigint": 0,
"sigterm": 0,
"reuse": 1,
"analyze": 1,
"myname": "PyChess %s" % pychess.VERSION,
"variants": "normal,wildcastle,nocastle,fischerandom,crazyhouse,"
+ "losers,suicide,giveaway,horde,atomic,racingkings,"
+ "kingofthehill,3check,placement,asean,cambodian,makruk,sittuyin",
"colors": 0,
"ics": 0,
"name": 0,
"pause": 0, # Unimplemented
"nps": 0, # Unimplemented
"debug": 1,
"memory": 0, # Unimplemented
"smp": 0, # Unimplemented
"egt": "gaviota",
"option": "skipPruneChance -slider 0 0 100",
}
python = sys.executable.split("/")[-1]
python_version = "%s.%s.%s" % sys.version_info[0:3]
self.print("# %s [%s %s]" % (self.features["myname"], python, python_version))
def handle_sigterm(self, *args):
self.__stopSearching()
sys.exit(0)
def makeReady(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, self.handle_sigterm)
def run(self):
while True:
try:
line = get_input()
except EOFError:
line = "quit"
lines = line.split()
try:
if not lines:
continue
log.debug(line, extra={"task": "xboard"})
# CECP commands
# See http://home.hccnet.nl/h.g.muller/engine-intf.html
if lines[0] == "xboard":
pass
elif lines[0] == "protover":
stringPairs = [
"=".join([k, '"%s"' % v if isinstance(v, str) else str(v)])
for k, v in self.features.items()
]
self.print("feature %s" % " ".join(stringPairs))
self.print("feature done=1")
elif lines[0] in ("accepted", "rejected"):
# We only really care about one case:
if tuple(lines) == ("rejected", "debug"):
self.debug = False
elif lines[0] == "new":
self.__stopSearching()
self.board = LBoard(NORMALCHESS)
self.board.applyFen(FEN_START)
self.outOfBook = False
self.forced = False
self.playingAs = BLACK
self.clock[:] = self.basetime, self.basetime
self.searchtime = 0
self.sd = MAXPLY
if self.analyzing:
self.__analyze()
elif lines[0] == "variant":
if len(lines) > 1:
if lines[1] == "fischerandom":
self.board.variant = FISCHERRANDOMCHESS
elif lines[1] == "crazyhouse":
self.board.variant = CRAZYHOUSECHESS
self.board.iniHouse()
elif lines[1] == "wildcastle":
self.board.variant = WILDCASTLESHUFFLECHESS
elif lines[1] == "losers":
self.board.variant = LOSERSCHESS
elif lines[1] == "suicide":
self.board.variant = SUICIDECHESS
elif lines[1] == "giveaway":
self.board.variant = GIVEAWAYCHESS
elif lines[1] == "atomic":
self.board.variant = ATOMICCHESS
self.board.iniAtomic()
elif lines[1] == "3check":
self.board.variant = THREECHECKCHESS
elif lines[1] == "racingkings":
self.board.variant = RACINGKINGSCHESS
elif lines[1] == "kingofthehill":
self.board.variant = KINGOFTHEHILLCHESS
elif lines[1] == "horde":
self.board = LBoard(HORDECHESS)
self.board.applyFen(HORDESTART)
elif lines[1] == "placement":
self.board = LBoard(PLACEMENTCHESS)
self.board.applyFen(PLACEMENTSTART)
elif lines[1] == "asean":
self.board = LBoard(ASEANCHESS)
self.board.applyFen(ASEANSTART)
elif lines[1] == "makruk":
self.board = LBoard(MAKRUKCHESS)
self.board.applyFen(MAKRUKSTART)
elif lines[1] == "cambodian":
self.board = LBoard(CAMBODIANCHESS)
self.board.applyFen(KAMBODIANSTART)
elif lines[1] == "sittuyin":
self.board = LBoard(SITTUYINCHESS)
self.board.applyFen(SITTUYINSTART)
elif lines[0] == "quit":
self.forced = True
self.__stopSearching()
sys.exit(0)
elif lines[0] == "random":
leval.random = True
elif lines[0] == "force":
if not self.forced and not self.analyzing:
self.forced = True
self.__stopSearching()
elif lines[0] == "go":
self.playingAs = self.board.color
self.forced = False
self.__go()
elif lines[0] == "playother":
self.playingAs = 1 - self.board.color
self.forced = False
# TODO: start pondering, if possible
elif lines[0] in ("black", "white"):
newColor = lines[0] == "black" and BLACK or WHITE
self.__stopSearching()
self.playingAs = 1 - newColor
if self.board.color != newColor:
self.board.setColor(newColor)
self.board.setEnpassant(None)
if self.analyzing:
self.__analyze()
elif lines[0] == "level":
self.movestogo = int(lines[1])
inc = int(lines[3])
minutes = lines[2].split(":")
# Per protocol spec, strip off any non-numeric suffixes.
for i in range(len(minutes)):
minutes[i] = re.match(r"\d*", minutes[i]).group()
self.basetime = int(minutes[0]) * 60
if len(minutes) > 1 and minutes[1]:
self.basetime += int(minutes[1])
self.clock[:] = self.basetime, self.basetime
self.increment = inc
self.searchtime = 0
elif lines[0] == "st":
self.searchtime = float(lines[1])
elif lines[0] == "sd":
self.sd = int(lines[1])
# Unimplemented: nps
elif lines[0] == "time":
self.clock[self.playingAs] = float(lines[1]) / 100.0
elif lines[0] == "otim":
self.clock[1 - self.playingAs] = float(lines[1]) / 100.0
elif lines[0] == "usermove":
self.__stopSearching()
try:
move = parseAny(self.board, lines[1])
except ParsingError:
self.print("Error (unknown command): %s" % lines[1])
self.print(self.board.prepr(ascii=ASCII))
continue
if not validateMove(self.board, move):
self.print("Illegal move: %s" % lines[1])
self.print(self.board.prepr(ascii=ASCII))
continue
self.board.applyMove(move)
self.playingAs = self.board.color
if not self.forced and not self.analyzing:
self.__go()
if self.analyzing:
self.__analyze()
elif lines[0] == "?":
if not self.forced and not self.analyzing:
self.__stopSearching()
elif lines[0] == "ping":
self.print("pong %s" % lines[1])
elif lines[0] == "draw":
if self.__willingToDraw():
self.print("offer draw")
elif lines[0] == "result":
# We don't really care what the result is at the moment.
pass
elif lines[0] == "setboard":
self.__stopSearching()
try:
self.board = LBoard(self.board.variant)
fen = " ".join(lines[1:])
self.board.applyFen(fen.replace("[", "/").replace("]", ""))
except SyntaxError as err:
self.print("tellusererror Illegal position: %s" % str(err))
# "edit" is unimplemented. See docs. Exiting edit mode returns to analyze mode.
elif lines[0] == "hint":
pass # TODO: Respond "Hint: MOVE" if we have an expected reply
elif lines[0] == "bk":
entries = getOpenings(self.board)
if entries:
totalWeight = sum(entry[1] for entry in entries)
for entry in entries:
self.print(
"\t%s\t%02.2f%%"
% (
toSAN(self.board, entry[0]),
entry[1] * 100.0 / totalWeight,
)
)
elif lines[0] == "undo":
self.__stopSearching()
self.board.popMove()
if self.analyzing:
self.__analyze()
elif lines[0] == "remove":
self.__stopSearching()
self.board.popMove()
self.board.popMove()
if self.analyzing:
self.__analyze()
elif lines[0] in ("hard", "easy"):
self.ponder = lines[0] == "hard"
elif lines[0] in ("post", "nopost"):
self.post = lines[0] == "post"
elif lines[0] == "analyze":
self.analyzing = True
self.__analyze()
elif lines[0] in ("name", "rating", "ics", "computer"):
pass # We don't care.
# Unimplemented: pause, resume
elif lines[0] == "memory":
# FIXME: this is supposed to control the *total* memory use.
if lsearch.searching:
self.print("Error (already searching):", line)
else:
limit = int(lines[1])
if limit < 1:
self.print("Error (limit too low):", line)
else:
pass
# TODO implement
# lsearch.setHashSize(limit)
elif lines[0] == "cores":
pass # We aren't SMP-capable.
elif lines[0] == "egtpath":
if len(lines) >= 3 and lines[1] == "gaviota":
if lines[2]:
conf.set("egtb_path", lines[2])
else:
conf.set("egtb_path", conf.get("egtb_path"))
from pychess.Utils.lutils.lsearch import enableEGTB
enableEGTB()
elif lines[0] == "option" and len(lines) > 1:
name, eq, value = lines[1].partition("=")
if value:
value = int(
value
) # CECP spec says option values are *always* numeric
if name == "skipPruneChance":
if 0 <= value <= 100:
self.skipPruneChance = value / 100.0
else:
self.print(
"Error (argument must be an integer 0..100): %s" % line
)
# CECP analyze mode commands
# See http://www.gnu.org/software/xboard/engine-intf.html#11
elif lines[0] == "exit":
if self.analyzing:
self.__stopSearching()
self.analyzing = False
# Periodic updates (".") are not implemented.
# Custom commands
elif lines[0] == "moves":
self.print(self.board.prepr(ascii=ASCII))
self.print(
[toSAN(self.board, move) for move in genAllMoves(self.board)]
)
elif lines[0] == "captures":
self.print(self.board.prepr(ascii=ASCII))
self.print(
[toSAN(self.board, move) for move in genCaptures(self.board)]
)
elif lines[0] == "evasions":
self.print(self.board.prepr(ascii=ASCII))
self.print(
[
toSAN(self.board, move)
for move in genCheckEvasions(self.board)
]
)
elif lines[0] == "benchmark":
if len(lines) > 1:
benchmark(int(lines[1]))
else:
benchmark()
elif lines[0] == "profile":
if len(lines) > 1:
import cProfile
cProfile.runctx("benchmark()", locals(), globals(), lines[1])
else:
self.print("Usage: profile outputfilename")
elif lines[0] == "perft":
root = "0" if len(lines) < 3 else lines[2]
depth = "1" if len(lines) == 1 else lines[1]
if root.isdigit() and depth.isdigit():
perft(self.board, int(depth), int(root))
else:
self.print("Error (arguments must be integer")
elif lines[0] == "stop_unittest":
break
elif len(lines) == 1:
# A GUI without usermove support might try to send a move.
try:
move = parseAny(self.board, line)
except ParsingError:
self.print("Error (unknown command): %s" % line)
continue
if not validateMove(self.board, move):
self.print("Illegal move: %s" % lines[0])
self.print(self.board.prepr(ascii=ASCII))
continue
self.__stopSearching()
self.board.applyMove(move)
self.playingAs = self.board.color
if not self.forced and not self.analyzing:
self.__go()
if self.analyzing:
self.__analyze()
else:
self.print("Error (unknown command): %s" % line)
except IndexError:
self.print("Error (missing argument): %s" % line)
def __stopSearching(self):
lsearch.searching = False
if self.thread:
self.thread.join()
def __go(self):
def ondone(result):
if not self.forced:
self.board.applyMove(parseSAN(self.board, result))
self.print("move %s" % result)
# TODO: start pondering, if enabled
self.thread = Thread(
target=PyChess._PyChess__go,
name=fident(PyChess._PyChess__go),
args=(self, ondone),
)
self.thread.daemon = True
self.thread.start()
def __analyze(self):
self.thread = Thread(
target=PyChess._PyChess__analyze,
name=fident(PyChess._PyChess__analyze),
args=(self,),
)
self.thread.daemon = True
self.thread.start()
def __willingToDraw(self):
return self.scr <= 0 # FIXME: this misbehaves in all but the simplest use cases
| 0
| 0
| 0
| 17,645
| 0
| 14
| 0
| 869
| 487
|
9fad8fab23d2e89d70ef2d82789107db78ebaf08
| 483
|
py
|
Python
|
colossalai/nn/layer/parallel_sequence/_utils.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 1,630
|
2021-10-30T01:00:27.000Z
|
2022-03-31T23:02:41.000Z
|
colossalai/nn/layer/parallel_sequence/_utils.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 166
|
2021-10-30T01:03:01.000Z
|
2022-03-31T14:19:07.000Z
|
colossalai/nn/layer/parallel_sequence/_utils.py
|
RichardoLuo/ColossalAI
|
797a9dc5a9e801d7499b8667c3ef039a38aa15ba
|
[
"Apache-2.0"
] | 253
|
2021-10-30T06:10:29.000Z
|
2022-03-31T13:30:06.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
| 30.1875
| 69
| 0.726708
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
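# Ring-style sequence parallelism index math: each device owns the contiguous
# slice [rank * sub_seq_length, (rank + 1) * sub_seq_length); at ring step i,
# rank `rank` receives the sub-sequence owned by device (rank - i - 1) % world_size.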
def _calc_incoming_device_range(i, rank, world_size, sub_seq_length):
device_of_incoming_k = (rank - i - 1) % world_size
start_idx = sub_seq_length * device_of_incoming_k
end_idx = sub_seq_length * (device_of_incoming_k + 1)
return start_idx, end_idx
def _calc_current_device_range(rank, sub_seq_length):
start_idx = sub_seq_length * rank
end_idx = sub_seq_length * (rank + 1)
return start_idx, end_idx
| 0
| 0
| 0
| 0
| 0
| 387
| 0
| 0
| 46
|
a54633146fadd221f3a0c7ca6783f0b136db02a8
| 438
|
py
|
Python
|
staticClassMethod/classmethodCustom.py
|
liangjie18430/flask_test_myself
|
8923e058d834d6ab7326f869b945601c13674105
|
[
"BSD-3-Clause"
] | null | null | null |
staticClassMethod/classmethodCustom.py
|
liangjie18430/flask_test_myself
|
8923e058d834d6ab7326f869b945601c13674105
|
[
"BSD-3-Clause"
] | null | null | null |
staticClassMethod/classmethodCustom.py
|
liangjie18430/flask_test_myself
|
8923e058d834d6ab7326f869b945601c13674105
|
[
"BSD-3-Clause"
] | null | null | null |
print(Class2.get_user("test"))
| 27.375
| 53
| 0.623288
|
class MyClassMethod(object):
    # A minimal reimplementation of classmethod using the descriptor protocol.
    def __init__(self, function):
        self.function = function
    def __get__(self, instance, type=None):
        # Bind the owner class (the `type` argument), not the instance, as the
        # first argument -- exactly what the built-in classmethod does.
        def wrapper(*args, **kwargs):
            print("class method:", type)
            return self.function(type, *args, **kwargs)
        return wrapper
class Class2(object):
    @MyClassMethod
    def get_user(cls, x):
        print(cls)
        return x, "get_user"
print(Class2.get_user("test"))
| 0
| 65
| 0
| 271
| 0
| 0
| 0
| 0
| 70
|
c863133d1f0fec6fd6c1d9ca24de23ddc72f2fe4
| 543
|
py
|
Python
|
tests/parser/dictionary/encoder/other/test_ipi_base.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 37
|
2015-04-21T15:33:53.000Z
|
2022-02-07T00:02:29.000Z
|
tests/parser/dictionary/encoder/other/test_ipi_base.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 86
|
2015-02-01T22:26:02.000Z
|
2021-07-09T08:49:36.000Z
|
tests/parser/dictionary/encoder/other/test_ipi_base.py
|
orenyodfat/CWR-DataApi
|
f3b6ba8308c901b6ab87073c155c08e30692333c
|
[
"MIT"
] | 27
|
2015-01-26T16:01:09.000Z
|
2021-11-08T23:53:55.000Z
|
# -*- coding: utf-8 -*-
"""
Acknowledgement to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martnez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
| 20.884615
| 66
| 0.725599
|
# -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import IPIBaseDictionaryEncoder
"""
Acknowledgement to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestIPIBaseEncoding(unittest.TestCase):
def setUp(self):
self._encoder = IPIBaseDictionaryEncoder()
def test_encoded(self):
encoded = self._encoder.encode('T-123456789-1')
self.assertEqual('T-123456789-1', encoded)
| 2
| 0
| 0
| 233
| 0
| 0
| 0
| 39
| 69
|
ed3946252bbf181f1e56534e33e67fe22228f3cb
| 1,302
|
py
|
Python
|
make-examples.py
|
mattwigway/asu-matplotlib-styles
|
1168529d7476ab5519a9754e21243a704f980b8b
|
[
"CC0-1.0"
] | 1
|
2021-04-09T15:47:19.000Z
|
2021-04-09T15:47:19.000Z
|
make-examples.py
|
mattwigway/asu-matplotlib-styles
|
1168529d7476ab5519a9754e21243a704f980b8b
|
[
"CC0-1.0"
] | 4
|
2020-05-07T16:57:44.000Z
|
2020-05-07T19:12:57.000Z
|
make-examples.py
|
mattwigway/asu-matplotlib-styles
|
1168529d7476ab5519a9754e21243a704f980b8b
|
[
"CC0-1.0"
] | null | null | null |
# Create example plots for README
create_plot('asu-dark')
create_plot('asu-light')
| 29.590909
| 113
| 0.600614
|
# Create example plots for README
import numpy as np
import matplotlib.pyplot as plt
import os.path
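# create_plot renders the same three-panel demo (line, bar, scatter) under the
# given matplotlib style sheet and saves it as examples/<style>.png.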
def create_plot (style):
# make darn sure we're using the styles from the repo, and not the styles that may be installed on the system
plt.style.use(os.path.join(os.path.dirname(__file__), 'styles', f'{style}.mplstyle'))
plt.subplots(1, 3, figsize=(12, 4))
# line plot
plt.subplot(1, 3, 1)
xs = np.arange(4)
plt.plot(xs, [1, 2, 3.5, 6], label='Arizona State')
plt.plot(xs, [0.5, 0.3, 0.3, 0.2], label='Arizona')
plt.plot(xs, [2, 3, 2, 2], label='UCLA')
plt.plot(xs, [3, 1, 3, 1], label='MIT')
plt.plot(xs, [2, 2.5, 1, 3], label='Harvard')
plt.plot(xs, [1, 2.2, 2.4, 1.8], label='Berkeley')
plt.legend()
# bar plot
plt.subplot(1, 3, 2)
plt.bar(xs - 0.2, [2, 5, 3, 4], label='Sun Devils', width=0.4)
plt.bar(xs + 0.2, [1, 2, 1, 3], label='Wildcats', width=0.4)
plt.legend()
# scatter plot
plt.subplot(1, 3, 3)
xs = np.arange(15)
y1 = np.random.normal(size=15) + 5
y2 = np.random.normal(size=15) + 5
plt.scatter(xs, y1, label='Tempe')
plt.scatter(xs, y2, label='Polytechnic')
plt.legend()
plt.savefig(f'examples/{style}.png', dpi=300)
create_plot('asu-dark')
create_plot('asu-light')
| 0
| 0
| 0
| 0
| 0
| 1,127
| 0
| 0
| 90
|
10f7880be68b0fe1ae5479576360e9ca861c278e
| 16,979
|
py
|
Python
|
plugins/imagetools.py
|
FastmoreCrak/Fantasmas
|
1ce7a55b956ccf84660ceb91fdc39fedd0384c2a
|
[
"CC0-1.0"
] | 1
|
2021-10-04T08:02:29.000Z
|
2021-10-04T08:02:29.000Z
|
plugins/imagetools.py
|
FastmoreCrak/Fantasmas
|
1ce7a55b956ccf84660ceb91fdc39fedd0384c2a
|
[
"CC0-1.0"
] | null | null | null |
plugins/imagetools.py
|
FastmoreCrak/Fantasmas
|
1ce7a55b956ccf84660ceb91fdc39fedd0384c2a
|
[
"CC0-1.0"
] | null | null | null |
# Ultroid - UserBot
# Copyright (C) 2020 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# PLease read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
"""
Commands Available -
`{i}grey <reply to any media>`
To make it black nd white.
`{i}color <reply to any Black nd White media>`
To make it Colorfull.
`{i}toon <reply to any media>`
To make it toon.
`{i}danger <reply to any media>`
To make it look Danger.
`{i}negative <reply to any media>`
To make negative image.
`{i}blur <reply to any media>`
To make it blurry.
`{i}quad <reply to any media>`
create a Vortex.
`{i}mirror <reply to any media>`
To create mirror pic.
`{i}flip <reply to any media>`
To make it flip.
`{i}sketch <reply to any media>`
To draw its sketch.
`{i}blue <reply to any media>`
just cool.
`{i}csample <color name /color code>`
example : `{i}csample red`
`{i}csample #ffffff`
"""
HELP.update({f"{__name__.split('.')[1]}": f"{__doc__.format(i=HNDLR)}"})
| 29.375433
| 85
| 0.586077
|
# Ultroid - UserBot
# Copyright (C) 2020 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# PLease read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
"""
✘ Commands Available -
• `{i}grey <reply to any media>`
To make it black nd white.
• `{i}color <reply to any Black nd White media>`
To make it Colorfull.
• `{i}toon <reply to any media>`
To make it toon.
• `{i}danger <reply to any media>`
To make it look Danger.
• `{i}negative <reply to any media>`
To make negative image.
• `{i}blur <reply to any media>`
To make it blurry.
• `{i}quad <reply to any media>`
create a Vortex.
• `{i}mirror <reply to any media>`
To create mirror pic.
• `{i}flip <reply to any media>`
To make it flip.
• `{i}sketch <reply to any media>`
To draw its sketch.
• `{i}blue <reply to any media>`
just cool.
• `{i}csample <color name /color code>`
example : `{i}csample red`
`{i}csample #ffffff`
"""
import asyncio
import os
import cv2
import numpy as np
from PIL import Image
from telegraph import upload_file as upf
from telethon.errors.rpcerrorlist import (
ChatSendMediaForbiddenError,
MessageDeleteForbiddenError,
)
from validators.url import url
from . import *
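# Every handler below follows the same pattern: download the replied media,
# convert animated .tgs stickers with lottie_convert.py (or grab a frame with
# cv2.VideoCapture), apply an OpenCV transform, send the result, then clean up.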
@ultroid_cmd(
pattern="sketch$",
)
async def sketch(e):
ureply = await e.get_reply_message()
xx = await eor(e, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
img = cv2.imread(file)
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
inverted_gray_image = 255 - gray_image
blurred_img = cv2.GaussianBlur(inverted_gray_image, (21, 21), 0)
inverted_blurred_img = 255 - blurred_img
pencil_sketch_IMG = cv2.divide(gray_image, inverted_blurred_img, scale=256.0)
cv2.imwrite("ultroid.png", pencil_sketch_IMG)
await e.client.send_file(e.chat_id, file="ultroid.png")
await xx.delete()
os.remove(file)
os.remove("ultroid.png")
@ultroid_cmd(pattern="color$")
async def _(event):
reply = await event.get_reply_message()
if not reply.media:
return await eor(event, "`Reply To a Black nd White Image`")
xx = await eor(event, "`Coloring image 🎨🖌️...`")
image = await ultroid_bot.download_media(reply.media)
img = cv2.VideoCapture(image)
ret, frame = img.read()
cv2.imwrite("ult.jpg", frame)
if udB.get("DEEP_API"):
key = Redis("DEEP_API")
else:
key = "quickstart-QUdJIGlzIGNvbWluZy4uLi4K"
r = requests.post(
"https://api.deepai.org/api/colorizer",
files={"image": open("ult.jpg", "rb")},
headers={"api-key": key},
)
os.remove("ult.jpg")
os.remove(image)
if "status" in r.json():
return await event.edit(
r.json()["status"] + "\nGet api nd set `{i}setredis DEEP_API key`"
)
r_json = r.json()["output_url"]
await ultroid_bot.send_file(event.chat_id, r_json, reply_to=reply)
await xx.delete()
@ultroid_cmd(
pattern="grey$",
)
async def ultd(event):
ureply = await event.get_reply_message()
if not (ureply and (ureply.media)):
await eor(event, "`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
xx = await eor(event, "`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
xx = await eor(event, "`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
ultroid = cv2.cvtColor(ult, cv2.COLOR_BGR2GRAY)
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="blur$",
)
async def ultd(event):
ureply = await event.get_reply_message()
if not (ureply and (ureply.media)):
await eor(event, "`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
xx = await eor(event, "`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
xx = await eor(event, "`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
ultroid = cv2.GaussianBlur(ult, (35, 35), 0)
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="negative$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
ultroid = cv2.bitwise_not(ult)
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="mirror$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
ish = cv2.flip(ult, 1)
ultroid = cv2.hconcat([ult, ish])
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="flip$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
trn = cv2.flip(ult, 1)
ish = cv2.rotate(trn, cv2.ROTATE_180)
ultroid = cv2.vconcat([ult, ish])
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="quad$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
roid = cv2.flip(ult, 1)
mici = cv2.hconcat([ult, roid])
fr = cv2.flip(mici, 1)
trn = cv2.rotate(fr, cv2.ROTATE_180)
ultroid = cv2.vconcat([mici, trn])
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="toon$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
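    # Cartoon effect: flatten every pixel into `samples`, k-means cluster them
    # into 12 representative colours, then repaint each pixel with its cluster
    # centre to get flat "toon" shading.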
height, width, channels = ult.shape
samples = np.zeros([height * width, 3], dtype=np.float32)
count = 0
for x in range(height):
for y in range(width):
samples[count] = ult[x][y]
count += 1
compactness, labels, centers = cv2.kmeans(
samples,
12,
None,
(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10000, 0.0001),
5,
cv2.KMEANS_PP_CENTERS,
)
centers = np.uint8(centers)
ish = centers[labels.flatten()]
ultroid = ish.reshape(ult.shape)
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(
pattern="danger$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
ult = cv2.imread(file)
dan = cv2.cvtColor(ult, cv2.COLOR_BGR2RGB)
ultroid = cv2.cvtColor(dan, cv2.COLOR_HSV2BGR)
cv2.imwrite("ult.jpg", ultroid)
await event.client.send_file(
event.chat_id,
"ult.jpg",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.jpg")
os.remove(ultt)
@ultroid_cmd(pattern="csample (.*)")
async def sampl(ult):
color = ult.pattern_match.group(1)
if color:
img = Image.new("RGB", (200, 100), f"{color}")
img.save("csample.png")
try:
try:
await ult.delete()
await ultroid_bot.send_message(
ult.chat_id, f"Colour Sample for `{color}` !", file="csample.png"
)
except MessageDeleteForbiddenError:
await ult.reply(f"Colour Sample for `{color}` !", file="csample.png")
except ChatSendMediaForbiddenError:
await eor(ult, "Umm! Sending Media is disabled here!")
else:
        await eor(ult, "Wrong Color Name/Hex Code specified!")
@ultroid_cmd(
pattern="blue$",
)
async def ultd(event):
ureply = await event.get_reply_message()
xx = await eor(event, "`...`")
if not (ureply and (ureply.media)):
await xx.edit("`Reply to any media`")
return
ultt = await ureply.download_media()
if ultt.endswith(".tgs"):
await xx.edit("`Ooo Animated Sticker 👀...`")
cmd = ["lottie_convert.py", ultt, "ult.png"]
file = "ult.png"
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
stderr.decode().strip()
stdout.decode().strip()
else:
await xx.edit("`Processing...`")
img = cv2.VideoCapture(ultt)
heh, lol = img.read()
cv2.imwrite("ult.png", lol)
file = "ult.png"
got = upf(file)
lnk = f"https://telegra.ph{got[0]}"
r = requests.get(
f"https://nekobot.xyz/api/imagegen?type=blurpify&image={lnk}",
).json()
ms = r.get("message")
utd = url(ms)
if not utd:
return
with open("ult.png", "wb") as f:
f.write(requests.get(ms).content)
img = Image.open("ult.png").convert("RGB")
img.save("ult.webp", "webp")
await event.client.send_file(
event.chat_id,
"ult.webp",
force_document=False,
reply_to=event.reply_to_msg_id,
)
await xx.delete()
os.remove("ult.png")
os.remove("ult.webp")
os.remove(ultt)
HELP.update({f"{__name__.split('.')[1]}": f"{__doc__.format(i=HNDLR)}"})
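# A possible refactor (sketch only, not part of the original plugin): every
# handler above repeats the same "download the replied media and normalise it
# to ult.png" steps, which could be factored into one shared helper, e.g.:
#
# async def _media_to_png(xx, ultt):
#     if ultt.endswith(".tgs"):
#         await xx.edit("`Ooo Animated Sticker 👀...`")
#         process = await asyncio.create_subprocess_exec(
#             "lottie_convert.py", ultt, "ult.png",
#             stdout=asyncio.subprocess.PIPE,
#             stderr=asyncio.subprocess.PIPE,
#         )
#         await process.communicate()
#     else:
#         await xx.edit("`Processing...`")
#         grabbed, frame = cv2.VideoCapture(ultt).read()
#         cv2.imwrite("ult.png", frame)
#     return "ult.png"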
| 90
| 15,277
| 0
| 0
| 0
| 0
| 0
| 78
| 477
|
0cc45d58110cb6174543a0505e632ae185525ec0
| 1,990
|
py
|
Python
|
Basics_with_Pytorch/GradientDescent/hw1.py
|
SoyOscarRH/LearningNeuralNetworks
|
dd5be94b38b3a0efb3428f76d3416227a92c8265
|
[
"MIT"
] | 3
|
2020-01-20T19:56:35.000Z
|
2021-09-24T14:47:33.000Z
|
Basics_with_Pytorch/GradientDescent/hw1.py
|
SoyOscarRH/LearningNeuralNetworks
|
dd5be94b38b3a0efb3428f76d3416227a92c8265
|
[
"MIT"
] | null | null | null |
Basics_with_Pytorch/GradientDescent/hw1.py
|
SoyOscarRH/LearningNeuralNetworks
|
dd5be94b38b3a0efb3428f76d3416227a92c8265
|
[
"MIT"
] | null | null | null |
x1 = 2.0
x2 = 3.0
ReLu = lambda x: max(0.0, x)
ReLuDer = lambda x: 1 if x > 0 else 0
error_fn = lambda prediction, target: 0.5 * (target - prediction) ** 2
# input
a1 = x1
a2 = x2
w11 = 0.11
w12 = 0.21
w21 = 0.12
w22 = 0.08
w1o = 0.14
w2o = 0.15
y = 1
n = 0.5
# forward
# layer 1
zh1 = (w11 * a1) + (w12 * a2)
zh2 = (w21 * a1) + (w22 * a2)
#print(f"zh1 = {zh1}")
#print(f"zh2 = {zh2}")
h0 = 1
h1 = ReLu(zh1)
h2 = ReLu(zh2)
#print(f"h1 = {h1}")
#print(f"h2 = {h2}")
# layer 2
zo1 = (w1o * h1) + (w2o * h2)
o1 = ReLu(zo1)
error = error_fn(o1, y)
#print(f"zo1 = {zo1}")
print(f"o1 = {o1}")
print(f"error = {error}")
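# Sanity check of the numbers above: zh1 = 0.85, zh2 = 0.48, zo1 = 0.191,
# so o1 = 0.191 and error = 0.5 * (1 - 0.191)**2 ≈ 0.327.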
# Back
# Last layer
d_Etotal_d_out = (o1 - y)
#print(f"d_Etotal_d_out = {d_Etotal_d_out}")
d_out_d_zo1 = ReLuDer(zo1)  # derivative taken at the pre-activation zo1, not at o1
#print(f"d_out_d_zo1 = {d_out_d_zo1}")
d_zo1_d_w1o = h1
#print(f"d_zo1_d_w1o = {d_zo1_d_w1o}")
d_zo1_d_w2o = h2
#print(f"d_zo1_d_w2o = {d_zo1_d_w2o}")
d_Etotal_d_w1o = d_Etotal_d_out * d_out_d_zo1 * d_zo1_d_w1o
#print(f"d_Etotal_d_w1o = {d_Etotal_d_w1o}")
d_Etotal_d_w2o = d_Etotal_d_out * d_out_d_zo1 * d_zo1_d_w2o
#print(f"d_Etotal_d_w1o = {d_Etotal_d_w2o}")
# Previous layer
# Gradients flow through zo1, so use dE/dzo1 here (dE/dw1o would carry an
# extra h1 factor that does not belong in the hidden-layer gradients).
d_Etotal_d_zo1 = d_Etotal_d_out * d_out_d_zo1
d_zo1_d_h1 = w1o
d_h1_d_zh1 = ReLuDer(zh1)
d_zh1_d_w11 = a1
d_Etotal_d_w11 = d_Etotal_d_zo1 * d_zo1_d_h1 * d_h1_d_zh1 * d_zh1_d_w11
#print(f"d_Etotal_d_w11 = {d_Etotal_d_w11}")
d_zh1_d_w12 = a2
d_Etotal_d_w12 = d_Etotal_d_zo1 * d_zo1_d_h1 * d_h1_d_zh1 * d_zh1_d_w12
#print(f"d_Etotal_d_w12 = {d_Etotal_d_w12}")
d_zo1_d_h2 = w2o
d_h2_d_zh2 = ReLuDer(zh2)
d_zh2_d_w21 = a1
d_Etotal_d_w21 = d_Etotal_d_zo1 * d_zo1_d_h2 * d_h2_d_zh2 * d_zh2_d_w21
#print(f"d_Etotal_d_w21 = {d_Etotal_d_w21}")
d_zh2_d_w22 = a2
d_Etotal_d_w22 = d_Etotal_d_zo1 * d_zo1_d_h2 * d_h2_d_zh2 * d_zh2_d_w22
#print(f"d_Etotal_d_w22 = {d_Etotal_d_w22}")
w1o = w1o - n * d_Etotal_d_w1o
w2o = w2o - n * d_Etotal_d_w2o
w11 = w11 - n * d_Etotal_d_w11
w12 = w12 - n * d_Etotal_d_w12
w21 = w21 - n * d_Etotal_d_w21
w22 = w22 - n * d_Etotal_d_w22
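# Verification sketch (added; not in the original homework): rerun the forward
# pass with the updated weights to confirm one gradient step reduced the error.
zh1 = (w11 * a1) + (w12 * a2)
zh2 = (w21 * a1) + (w22 * a2)
h1 = ReLu(zh1)
h2 = ReLu(zh2)
o1 = ReLu((w1o * h1) + (w2o * h2))
print(f"o1 after update = {o1}")
print(f"error after update = {error_fn(o1, y)}")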
| 18.425926
| 71
| 0.682412
|
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5e6ad37894ff484c99a09273a78bac3f081c7374
| 24,618
|
py
|
Python
|
medcat/cdb.py
|
HDRUK/MedCAT
|
69c36d1da484ad32520a9b3333adf8f6ebfcbde7
|
[
"Apache-2.0"
] | null | null | null |
medcat/cdb.py
|
HDRUK/MedCAT
|
69c36d1da484ad32520a9b3333adf8f6ebfcbde7
|
[
"Apache-2.0"
] | null | null | null |
medcat/cdb.py
|
HDRUK/MedCAT
|
69c36d1da484ad32520a9b3333adf8f6ebfcbde7
|
[
"Apache-2.0"
] | null | null | null |
| 38.525822
| 140
| 0.575311
|
""" Representation class for CDB data
"""
import pickle
import numpy as np
from scipy.sparse import dok_matrix
#from gensim.matutils import unitvec
from medcat.utils.matutils import unitvec, sigmoid
from medcat.utils.attr_dict import AttrDict
from medcat.utils.loggers import basic_logger
import os
import pandas as pd
log = basic_logger("cdb")
class CDB(object):
""" Holds all the CDB data required for annotation
"""
MAX_COO_DICT_SIZE = int(os.getenv('MAX_COO_DICT_SIZE', 10000000))
MIN_COO_COUNT = int(os.getenv('MIN_COO_COUNT', 100))
def __init__(self):
self.index2cui = [] # A list containing all CUIs
self.cui2index = {} # Map from cui to index in the index2cui list
self.name2cui = {} # Converts a normalized concept name to a cui
self.name2cnt = {} # Converts a normalized concept name to a count
self.name_isunique = {} # Should this name be skipped
self.name2original_name = {} # Holds the two versions of a name
self.name2ntkns = {} # Number of tokens for this name
self.name_isupper = {} # Checks was this name all upper case in cdb
self.cui2desc = {} # Map between a CUI and its cdb description
        self.cui_count = {} # TRAINING - How many times this CUI appeared until now
self.cui_count_ext = {} # Always - counter for cuis that can be reset, destroyed..
self.cui2ontos = {} # Cui to ontology from where it comes
self.cui2names = {} # CUI to all the different names it can have
self.cui2original_names = {} # CUI to all the different original names it can have
self.original_name2cuis = {} # Original name to cuis it can be assigned to
self.cui2tui = {} # CUI to the semantic type ID
self.tui2cuis = {} # Semantic type id to a list of CUIs that have it
        self.tui2name = {} # Semantic type id to its name
        self.cui2pref_name = {} # Get the preferred name for a CUI - taken from CDB
        self.cui2pretty_name = {} # Get the pretty name for a CUI - taken from CDB
        self.sname2name = set() # Internal - subnames to names
self.cui2words = {} # CUI to all the words that can describe it
self.onto2cuis = {} # Ontology to all the CUIs contained in it
self.cui2context_vec = {} # CUI to context vector
self.cui2context_vec_short = {} # CUI to context vector - short
self.cui2context_vec_long = {} # CUI to context vector - long
self.cui2info = {} # Additional info for a concept
self.cui_disamb_always = {} # Should this CUI be always disambiguated
self.vocab = {} # Vocabulary of all words ever, hopefully
self._coo_matrix = None # cooccurrence matrix - scikit
self.coo_dict = {} # cooccurrence dictionary <(cui1, cui2)>:<count>
self.sim_vectors = None
def add_concept(self, cui, name, onto, tokens, snames, isupper=False,
is_pref_name=False, tui=None, pretty_name='',
desc=None, tokens_vocab=None, original_name=None,
is_unique=None, tui_name=None):
r'''
Add a concept to internal Concept Database (CDB). Depending on what you are providing
this will add a large number of properties for each concept.
Args:
            cui (str):
                Concept ID or unique identifier in this database, all concepts that have
                the same CUI will be merged internally.
            name (str):
                Name for this concept, or the value that, if found in free text, can be linked to this concept.
            onto (str):
                Ontology from which the concept is taken (e.g. SNOMEDCT)
            tokens (str, list of str):
                Tokenized version of the name. Usually done via spacy
            snames (str, list of str):
                Subnames of this name, have a look at medcat.prepare_cdb.PrepareCDB for details on how
                to provide `snames`. Example: if name is "heart attack" snames is ['heart', 'heart attack']
            isupper (boolean, optional):
                If name in the original ontology is upper_cased
            is_pref_name (boolean, optional):
                If this is the preferred name for this CUI
            tui (str, optional):
                Semantic type identifier (have a look at TUIs in UMLS or SNOMED-CT)
            pretty_name (str, optional):
                Pretty name for this concept, really just the pretty name for the concept if it exists.
            desc (str, optional):
                Description of this concept.
            tokens_vocab (list of str, optional):
                Tokens that should be added to the vocabulary, usually the not-normalized version of the tokens.
            original_name (str, optional):
                The original name from the source vocabulary, without any normalization.
            is_unique (boolean, optional):
                If set to False - you can require disambiguation for a name even if it is unique inside
                of the current CDB. If set to True - you are forcing medcat to make a decision without
                disambiguation even if it is required. Do not set this arg unless you are sure.
            tui_name (str, optional):
                The name for the TUI
'''
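        # Illustrative call (sketch with hypothetical values, for orientation only):
        # cdb.add_concept(cui='C0027051', name='heart attack', onto='SNOMEDCT',
        #                 tokens=['heart', 'attack'], snames=['heart', 'heart attack'],
        #                 tokens_vocab=['heart', 'attack'], original_name='Heart Attack')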
# Add the info property
if cui not in self.cui2info:
self.cui2info[cui] = {}
# Add is name upper
        if name in self.name_isupper:
            self.name_isupper[name] = self.name_isupper[name] or isupper
        else:
            self.name_isupper[name] = isupper
# Add original name
if original_name is not None:
self.name2original_name[name] = original_name
if original_name in self.original_name2cuis:
self.original_name2cuis[original_name].add(cui)
else:
self.original_name2cuis[original_name] = {cui}
if cui in self.cui2original_names:
self.cui2original_names[cui].add(original_name)
else:
self.cui2original_names[cui] = {original_name}
# Add prefered name
if is_pref_name:
self.cui2pref_name[cui] = name
        if pretty_name:
            self.cui2pretty_name[cui] = pretty_name
if tui is not None:
self.cui2tui[cui] = tui
if tui in self.tui2cuis:
self.tui2cuis[tui].add(cui)
else:
self.tui2cuis[tui] = set([cui])
if tui_name is not None:
self.tui2name[tui] = tui_name
if is_unique is not None:
self.name_isunique[name] = is_unique
# Add name to cnt
if name not in self.name2cnt:
self.name2cnt[name] = {}
if cui in self.name2cnt[name]:
self.name2cnt[name][cui] += 1
else:
self.name2cnt[name][cui] = 1
# Add description
if desc is not None:
if cui not in self.cui2desc:
self.cui2desc[cui] = str(desc)
elif str(desc) not in str(self.cui2desc[cui]):
self.cui2desc[cui] = str(self.cui2desc[cui]) + "\n\n" + str(desc)
# Add cui to a list of cuis
if cui not in self.index2cui:
self.index2cui.append(cui)
self.cui2index[cui] = len(self.index2cui) - 1
# Expand coo matrix if it is used
if self._coo_matrix is not None:
s = self._coo_matrix.shape[0] + 1
self._coo_matrix.resize((s, s))
        # Add words to vocab (tokens_vocab is optional; guard against None)
        for token in (tokens_vocab or []):
if token in self.vocab:
self.vocab[token] += 1
else:
self.vocab[token] = 1
# Add also the normalized tokens, why not
for token in tokens:
if token in self.vocab:
self.vocab[token] += 1
else:
self.vocab[token] = 1
# Add number of tokens for this name
if name in self.name2ntkns:
self.name2ntkns[name].add(len(tokens))
else:
self.name2ntkns[name] = {len(tokens)}
# Add mappings to onto2cuis
if onto not in self.onto2cuis:
self.onto2cuis[onto] = set([cui])
else:
self.onto2cuis[onto].add(cui)
if cui in self.cui2ontos:
self.cui2ontos[cui].add(onto)
else:
self.cui2ontos[cui] = {onto}
# Add mappings to name2cui
if name not in self.name2cui:
self.name2cui[name] = set([cui])
else:
self.name2cui[name].add(cui)
# Add snames to set
self.sname2name.update(snames)
# Add mappings to cui2names
if cui not in self.cui2names:
self.cui2names[cui] = {name}
else:
self.cui2names[cui].add(name)
# Add mappings to cui2words
if cui not in self.cui2words:
self.cui2words[cui] = {}
for token in tokens:
if not token.isdigit() and len(token) > 1:
if token in self.cui2words[cui]:
self.cui2words[cui][token] += 1
else:
self.cui2words[cui][token] = 1
    def add_tui_names(self, csv_path, sep="|"):
        """ Fills the tui2name dict
"""
df = pd.read_csv(csv_path, sep=sep)
for index, row in df.iterrows():
tui = row['tui']
name = row['name']
if tui not in self.tui2name:
self.tui2name[tui] = name
def add_context_vec(self, cui, context_vec, negative=False, cntx_type='LONG', inc_cui_count=True, anneal=True, lr=0.5):
""" Add the vector representation of a context for this CUI
cui: The concept in question
        context_vec: Vector representation of the context
        negative: Is this a negative or a positive context
cntx_type: Currently only two supported LONG and SHORT
pretty much just based on the window size
inc_cui_count: should this be counted
"""
if cui not in self.cui_count:
self.increase_cui_count(cui, True)
# Ignore very similar context
prob = 0.95
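        # Update rule used below, for reference: with b = (1 - max(0, sim)) * lr,
        # a positive context pulls the stored vector towards cv:
        #   vec <- vec * (1 - b) + cv * b
        # while a negative context (b = max(0, sim) * lr) pushes it away:
        #   vec <- vec * (1 - b) - cv * b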
# Set the right context
if cntx_type == 'MED':
cui2context_vec = self.cui2context_vec
elif cntx_type == 'SHORT':
cui2context_vec = self.cui2context_vec_short
elif cntx_type == 'LONG':
cui2context_vec = self.cui2context_vec_long
sim = 0
cv = context_vec
if cui in cui2context_vec:
sim = np.dot(unitvec(cv), unitvec(cui2context_vec[cui]))
if anneal:
lr = max(lr / self.cui_count[cui], 0.0005)
if negative:
b = max(0, sim) * lr
cui2context_vec[cui] = cui2context_vec[cui]*(1-b) - cv*b
#cui2context_vec[cui] = cui2context_vec[cui] - cv*b
else:
if sim < prob:
b = (1 - max(0, sim)) * lr
cui2context_vec[cui] = cui2context_vec[cui]*(1-b) + cv*b
#cui2context_vec[cui] = cui2context_vec[cui] + cv*b
# Increase cui count
self.increase_cui_count(cui, inc_cui_count)
else:
if negative:
cui2context_vec[cui] = -1 * cv
else:
cui2context_vec[cui] = cv
self.increase_cui_count(cui, inc_cui_count)
return sim
def increase_cui_count(self, cui, inc_cui_count):
if inc_cui_count:
if cui in self.cui_count:
self.cui_count[cui] += 1
else:
self.cui_count[cui] = 1
def add_coo(self, cui1, cui2):
""" Add one cooccurrence
cui1: Base CUI
        cui2: CUI that co-occurred with cui1
"""
key = (self.cui2index[cui1], self.cui2index[cui2])
if key in self.coo_dict:
self.coo_dict[key] += 1
else:
self.coo_dict[key] = 1
    def add_coos(self, cuis):
        """ Given a list of CUIs it will add them to the coo matrix,
        recording that each CUI co-occurred with every other one
cuis: List of CUIs
"""
# We use done to ignore multiple occ of same concept
d_cui1 = set()
pairs = set()
for i, cui1 in enumerate(cuis):
if cui1 not in d_cui1:
for cui2 in cuis[i+1:]:
t = cui1+cui2
if t not in pairs:
self.add_coo(cui1, cui2)
pairs.add(t)
t = cui2+cui1
if t not in pairs:
self.add_coo(cui2, cui1)
pairs.add(t)
d_cui1.add(cui1)
if len(self.coo_dict) > self.MAX_COO_DICT_SIZE:
log.info("Starting the clean of COO_DICT, parameters are\n \
MAX_COO_DICT_SIZE: {}\n \
MIN_COO_COUNT: {}".format(self.MAX_COO_DICT_SIZE, self.MIN_COO_COUNT))
# Remove entries from coo_dict if too many
old_size = len(self.coo_dict)
to_del = []
for key in self.coo_dict.keys():
if self.coo_dict[key] < self.MIN_COO_COUNT:
to_del.append(key)
for key in to_del:
del self.coo_dict[key]
new_size = len(self.coo_dict)
log.info("COO_DICT cleaned, size was: {} and now is {}. In total \
{} items were removed".format(old_size, new_size, old_size-new_size))
@property
def coo_matrix(self):
""" Get the COO Matrix as scikit dok_matrix
"""
if self._coo_matrix is None:
s = len(self.cui2index)
self._coo_matrix = dok_matrix((s, s), dtype=np.uint32)
self._coo_matrix._update(self.coo_dict)
return self._coo_matrix
@coo_matrix.setter
    def coo_matrix(self, val):
        """ Impossible to set, it is built internally
"""
raise AttributeError("Can not set attribute coo_matrix")
def reset_coo_matrix(self):
""" Remove the COO-Matrix
"""
self.cui_count_ext = {}
self.coo_dict = {}
self._coo_matrix = None
def save(self, path):
with open(path, 'wb') as f:
pickle.dump(self, f)
@classmethod
def load(cls, path):
with open(path, 'rb') as f:
return pickle.load(f)
def save_dict(self, path):
""" Saves variables of this object
"""
with open(path, 'wb') as f:
pickle.dump(self.__dict__, f)
def load_dict(self, path):
""" Loads variables of this object
"""
with open(path, 'rb') as f:
self.__dict__ = pickle.load(f)
def import_training(self, cdb, overwrite=True):
r'''
        This will import vector embeddings from another CDB. No new concepts will be added.
IMPORTANT it will not import name maps (cui2name or name2cui or ...).
Args:
cdb (medcat.cdb.CDB):
Concept database from which to import training vectors
overwrite (boolean):
If True all training data in the existing CDB will be overwritten, else
the average between the two training vectors will be taken.
Examples:
            >>> new_cdb.import_training(cdb=old_cdb, overwrite=True)
'''
# Import vectors and counts
for cui in self.cui2names:
if cui in cdb.cui_count:
if overwrite or cui not in self.cui_count:
self.cui_count[cui] = cdb.cui_count[cui]
else:
self.cui_count[cui] = (self.cui_count[cui] + cdb.cui_count[cui]) / 2
if cui in cdb.cui2context_vec:
if overwrite or cui not in self.cui2context_vec:
self.cui2context_vec[cui] = cdb.cui2context_vec[cui]
else:
self.cui2context_vec[cui] = (cdb.cui2context_vec[cui] + self.cui2context_vec[cui]) / 2
if cui in cdb.cui2context_vec_short:
if overwrite or cui not in self.cui2context_vec_short:
self.cui2context_vec_short[cui] = cdb.cui2context_vec_short[cui]
else:
self.cui2context_vec_short[cui] = (cdb.cui2context_vec_short[cui] + self.cui2context_vec_short[cui]) / 2
if cui in cdb.cui2context_vec_long:
if overwrite or cui not in self.cui2context_vec_long:
self.cui2context_vec_long[cui] = cdb.cui2context_vec_long[cui]
else:
self.cui2context_vec_long[cui] = (cdb.cui2context_vec_long[cui] + self.cui2context_vec_long[cui]) / 2
if cui in cdb.cui_disamb_always:
                self.cui_disamb_always[cui] = cdb.cui_disamb_always[cui]
def reset_cui_count(self, n=10):
r'''
        Reset the CUI count for all concepts that received training, used when starting new unsupervised training
        or for supervised training with annealing.
Args:
n (int, optional):
This will be set as the CUI count for all cuis in this CDB.
Examples:
>>> cdb.reset_cui_count()
'''
for cui in self.cui_count.keys():
self.cui_count[cui] = n
def reset_training(self):
r'''
Will remove all training efforts - in other words all embeddings that are learnt
for concepts in the current CDB. Please note that this does not remove synonyms (names) that were
potentially added during supervised/online learning.
'''
self.cui_count = {}
self.cui2context_vec = {}
self.cui2context_vec_short = {}
self.cui2context_vec_long = {}
self.coo_dict = {}
self.cui_disamb_always = {}
self.reset_coo_matrix()
self.reset_similarity_matrix()
def filter_by_tui(self, tuis_to_keep):
all_cuis = [c for c_list in [self.tui2cuis[tui] for tui in tuis_to_keep] for c in c_list]
self.filter_by_cui(all_cuis)
def filter_by_cui(self, cuis_to_keep=None):
assert cuis_to_keep, "Cannot remove all concepts, enter at least one CUI in a set."
print("FYI - with large CDBs this can take a long time.")
cuis_to_keep = set(cuis_to_keep)
cuis = []
print("Gathering CUIs ")
for cui in self.cui2names:
if cui not in cuis_to_keep:
cuis.append(cui)
print("Cleaning up CUI maps...")
for i, cui in enumerate(cuis):
if i % 10000 == 0:
print(f'removed 10k concepts, {len(cuis) - i} to go...')
if cui in self.cui2desc:
del self.cui2desc[cui]
if cui in self.cui_count:
del self.cui_count[cui]
if cui in self.cui_count_ext:
del self.cui_count_ext[cui]
if cui in self.cui2names:
del self.cui2names[cui]
if cui in self.cui2original_names:
del self.cui2original_names[cui]
if cui in self.cui2pref_name:
del self.cui2pref_name[cui]
if cui in self.cui2pretty_name:
del self.cui2pretty_name[cui]
if cui in self.cui2words:
del self.cui2words[cui]
if cui in self.cui2context_vec:
del self.cui2context_vec[cui]
if cui in self.cui2context_vec_short:
del self.cui2context_vec_short[cui]
if cui in self.cui2context_vec_long:
del self.cui2context_vec_long[cui]
if cui in self.cui2info:
del self.cui2info[cui]
if cui in self.cui_disamb_always:
del self.cui_disamb_always[cui]
print("Done CUI cleaning")
print("Cleaning names...")
for name in list(self.name2cui.keys()):
_cuis = list(self.name2cui[name])
for cui in _cuis:
if cui not in cuis_to_keep:
self.name2cui[name].remove(cui)
if len(self.name2cui[name]) == 0:
del self.name2cui[name]
print("Done all")
def print_stats(self):
""" Print basic statistics on the database
"""
print("Number of concepts: {:,}".format(len(self.cui2names)))
print("Number of names: {:,}".format(len(self.name2cui)))
print("Number of concepts that received training: {:,}".format(len(self.cui2context_vec)))
print("Number of seen training examples in total: {:,}".format(sum(self.cui_count.values())))
print("Average training examples per concept: {:.1f}".format(np.average(list(self.cui_count.values()))))
def reset_similarity_matrix(self):
self.sim_vectors = None
self.sim_vectors_counts = None
self.sim_vectors_tuis = None
self.sim_vectors_cuis = None
def most_similar(self, cui, tui_filter=[], min_cnt=0, topn=50):
r'''
        Given a concept it will calculate what other concepts in this CDB have the most similar
        embedding.
Args:
cui (str):
The concept ID for the base concept for which you want to get the most similar concepts.
tui_filter (list):
                A list of TUIs that will be used to filter out the returned results. Using this it is possible
to limit the similarity calculation to only disorders/symptoms/drugs/...
min_cnt (int):
Minimum training examples (unsupervised+supervised) that a concept must have to be considered
for the similarity calculation.
topn (int):
How many results to return
Return:
results (dict):
A dictionary with topn results like: {<cui>: {'name': <name>, 'sim': <similarity>, 'tui_name': <tui_name>,
'tui': <tui>, 'cnt': <number of training examples the concept has seen>}, ...}
'''
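        # Illustrative call (hypothetical CUI/TUI values):
        # top5 = cdb.most_similar('C0027051', tui_filter=['T047'], min_cnt=10, topn=5)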
# Create the matrix if necessary
if not hasattr(self, 'sim_vectors') or self.sim_vectors is None or len(self.sim_vectors) < len(self.cui2context_vec):
print("Building similarity matrix")
log.info("Building similarity matrix")
sim_vectors = []
sim_vectors_counts = []
sim_vectors_tuis = []
sim_vectors_cuis = []
for _cui in self.cui2context_vec:
sim_vectors.append(unitvec(self.cui2context_vec[_cui]))
sim_vectors_counts.append(self.cui_count[_cui])
sim_vectors_tuis.append(self.cui2tui.get(_cui, 'unk'))
sim_vectors_cuis.append(_cui)
self.sim_vectors = np.array(sim_vectors)
self.sim_vectors_counts = np.array(sim_vectors_counts)
self.sim_vectors_tuis = np.array(sim_vectors_tuis)
self.sim_vectors_cuis = np.array(sim_vectors_cuis)
        # Select appropriate concepts
tui_inds = np.arange(0, len(self.sim_vectors_tuis))
if len(tui_filter) > 0:
tui_inds = np.array([], dtype=np.int32)
for tui in tui_filter:
tui_inds = np.union1d(np.where(self.sim_vectors_tuis == tui)[0], tui_inds)
cnt_inds = np.arange(0, len(self.sim_vectors_counts))
if min_cnt > 0:
cnt_inds = np.where(self.sim_vectors_counts >= min_cnt)[0]
# Intersect cnt and tui
inds = np.intersect1d(tui_inds, cnt_inds)
mtrx = self.sim_vectors[inds]
cuis = self.sim_vectors_cuis[inds]
sims = np.dot(mtrx, unitvec(self.cui2context_vec[cui]))
sims_srt = np.argsort(-1*sims)
# Create the return dict
res = {}
for ind, _cui in enumerate(cuis[sims_srt[0:topn]]):
res[_cui] = {'name': self.cui2pretty_name[_cui], 'sim': sims[sims_srt][ind],
'tui_name': self.tui2name.get(self.cui2tui.get(_cui, 'unk'), 'unk'),
'tui': self.cui2tui.get(_cui, 'unk'),
'cnt': self.cui_count[_cui]}
return res
| 0
| 543
| 0
| 23,707
| 0
| 0
| 0
| 40
| 176
|
e843de334f3334ef6fcdc8a716988c1da1b98457
| 1,380
|
py
|
Python
|
tagopsdb/model/ns_vip_binds.py
|
ifwe/tagopsdb
|
5455810cb9ccdd0803975a2513741c43313b1b7d
|
[
"Apache-2.0"
] | null | null | null |
tagopsdb/model/ns_vip_binds.py
|
ifwe/tagopsdb
|
5455810cb9ccdd0803975a2513741c43313b1b7d
|
[
"Apache-2.0"
] | 1
|
2021-03-25T21:57:08.000Z
|
2021-03-25T21:57:08.000Z
|
tagopsdb/model/ns_vip_binds.py
|
ifwe/tagopsdb
|
5455810cb9ccdd0803975a2513741c43313b1b7d
|
[
"Apache-2.0"
] | 1
|
2016-08-02T06:05:58.000Z
|
2016-08-02T06:05:58.000Z
|
| 31.363636
| 76
| 0.707971
|
# Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy import ForeignKey
from sqlalchemy.dialects.mysql import INTEGER, SMALLINT
from sqlalchemy.orm import relationship
from .meta import Base, Column
class NsVipBinds(Base):
__tablename__ = 'ns_vip_binds'
net_default_ip_id = Column(
INTEGER(unsigned=True),
ForeignKey('net_default_ips.net_default_ip_id', ondelete='cascade'),
primary_key=True
)
vip_id = Column(
u'vipID',
INTEGER(unsigned=True),
ForeignKey('ns_vip.vipID', ondelete='cascade'),
primary_key=True
)
service_id = Column(
u'serviceID',
INTEGER(unsigned=True),
ForeignKey('ns_service.serviceID', ondelete='cascade'),
primary_key=True
)
ns_service = relationship('NsService')
ns_vip = relationship('NsVip')
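# A minimal usage sketch (assumes a configured SQLAlchemy Session named
# `session`; this is not part of the original module):
#
# for bind in session.query(NsVipBinds).join(NsVipBinds.ns_vip).all():
#     print(bind.net_default_ip_id, bind.vip_id, bind.service_id)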
| 0
| 0
| 0
| 620
| 0
| 0
| 0
| 73
| 113
|
96554955e1e867ce4e811b1753e447421a931915
| 29,675
|
py
|
Python
|
python/play.py
|
030helios/Kata2Connect5
|
e8ace620284b46f4a50fc0582924cbadf32653e7
|
[
"MIT"
] | null | null | null |
python/play.py
|
030helios/Kata2Connect5
|
e8ace620284b46f4a50fc0582924cbadf32653e7
|
[
"MIT"
] | 1
|
2021-06-03T14:30:04.000Z
|
2021-06-03T14:40:32.000Z
|
python/play.py
|
030helios/Kata2Surakarta
|
e8ace620284b46f4a50fc0582924cbadf32653e7
|
[
"MIT"
] | null | null | null |
| 34.505814
| 171
| 0.658635
|
#!/usr/bin/python3
import sys
import os
import argparse
import traceback
import random
import math
import time
import re
import logging
import colorsys
import json
import tensorflow as tf
import numpy as np
from board import Board
from model import Model
import common
description = """
Play go with a trained neural net!
Implements a basic GTP engine that uses the neural net directly to play moves.
"""
parser = argparse.ArgumentParser(description=description)
common.add_model_load_args(parser)
parser.add_argument('-name-scope', help='Name scope for model variables', required=False)
args = vars(parser.parse_args())
(model_variables_prefix, model_config_json) = common.load_model_paths(args)
name_scope = args["name_scope"]
#Hardcoded max board size
pos_len = 6
# Model ----------------------------------------------------------------
with open(model_config_json) as f:
model_config = json.load(f)
if name_scope is not None:
with tf.compat.v1.variable_scope(name_scope):
model = Model(model_config,pos_len,{})
else:
model = Model(model_config,pos_len,{})
policy0_output = tf.nn.softmax(model.policy_output[:,:,0])
policy1_output = tf.nn.softmax(model.policy_output[:,:,1])
value_output = tf.nn.softmax(model.value_output)
scoremean_output = 20.0 * model.miscvalues_output[:,0]
scorestdev_output = 20.0 * tf.math.softplus(model.miscvalues_output[:,1])
lead_output = 20.0 * model.miscvalues_output[:,2]
vtime_output = 40.0 * tf.math.softplus(model.miscvalues_output[:,3])
estv_output = tf.sqrt(0.25 * tf.math.softplus(model.moremiscvalues_output[:,0]))
ests_output = tf.sqrt(30.0 * tf.math.softplus(model.moremiscvalues_output[:,1]))
td_value_output = tf.nn.softmax(model.miscvalues_output[:,4:7])
td_value_output2 = tf.nn.softmax(model.miscvalues_output[:,7:10])
td_value_output3 = tf.nn.softmax(model.moremiscvalues_output[:,2:5])
td_score_output = model.moremiscvalues_output[:,5:8] * 20.0
ownership_output = tf.tanh(model.ownership_output)
scoring_output = model.scoring_output
futurepos_output = tf.tanh(model.futurepos_output)
seki_output = tf.nn.softmax(model.seki_output[:,:,:,0:3])
seki_output = seki_output[:,:,:,1] - seki_output[:,:,:,2]
seki_output2 = tf.sigmoid(model.seki_output[:,:,:,3])
scorebelief_output = tf.nn.softmax(model.scorebelief_output)
sbscale_output = model.sbscale3_layer
class GameState:
def __init__(self,board_size):
self.board_size = board_size
self.board = Board(size=board_size)
self.moves = []
self.boards = [self.board.copy()]
# Moves ----------------------------------------------------------------
def fetch_output(session, gs, rules, fetches):
bin_input_data = np.zeros(shape=[1]+model.bin_input_shape, dtype=np.float32)
global_input_data = np.zeros(shape=[1]+model.global_input_shape, dtype=np.float32)
pla = gs.board.pla
opp = Board.get_opp(pla)
move_idx = len(gs.moves)
model.fill_row_features(gs.board,pla,opp,gs.boards,gs.moves,move_idx,rules,bin_input_data,global_input_data,idx=0)
outputs = session.run(fetches, feed_dict={
model.bin_inputs: bin_input_data,
model.global_inputs: global_input_data,
model.symmetries: [False,False,False],
model.include_history: [[1.0,1.0,1.0,1.0,1.0]]
})
return [output[0] for output in outputs]
def get_outputs(session, gs, rules):
[policy0,
policy1,
value,
td_value,
td_value2,
td_value3,
scoremean,
td_score,
scorestdev,
lead,
vtime,
estv,
ests,
ownership,
scoring,
futurepos,
seki,
seki2,
scorebelief,
sbscale
] = fetch_output(session,gs,rules,[
policy0_output,
policy1_output,
value_output,
td_value_output,
td_value_output2,
td_value_output3,
scoremean_output,
td_score_output,
scorestdev_output,
lead_output,
vtime_output,
estv_output,
ests_output,
ownership_output,
scoring_output,
futurepos_output,
seki_output,
seki_output2,
scorebelief_output,
sbscale_output
])
board = gs.board
moves_and_probs0 = []
for i in range(len(policy0)):
move = model.tensor_pos_to_loc(i,board)
if i == len(policy0)-1:
moves_and_probs0.append((Board.PASS_LOC,policy0[i]))
elif board.would_be_legal(board.pla,move):
moves_and_probs0.append((move,policy0[i]))
moves_and_probs1 = []
for i in range(len(policy1)):
move = model.tensor_pos_to_loc(i,board)
if i == len(policy1)-1:
moves_and_probs1.append((Board.PASS_LOC,policy1[i]))
elif board.would_be_legal(board.pla,move):
moves_and_probs1.append((move,policy1[i]))
ownership_flat = ownership.reshape([model.pos_len * model.pos_len])
ownership_by_loc = []
board = gs.board
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
if board.pla == Board.WHITE:
ownership_by_loc.append((loc,ownership_flat[pos]))
else:
ownership_by_loc.append((loc,-ownership_flat[pos]))
scoring_flat = scoring.reshape([model.pos_len * model.pos_len])
scoring_by_loc = []
board = gs.board
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
if board.pla == Board.WHITE:
scoring_by_loc.append((loc,scoring_flat[pos]))
else:
scoring_by_loc.append((loc,-scoring_flat[pos]))
futurepos0_flat = futurepos[:,:,0].reshape([model.pos_len * model.pos_len])
futurepos0_by_loc = []
board = gs.board
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
if board.pla == Board.WHITE:
futurepos0_by_loc.append((loc,futurepos0_flat[pos]))
else:
futurepos0_by_loc.append((loc,-futurepos0_flat[pos]))
futurepos1_flat = futurepos[:,:,1].reshape([model.pos_len * model.pos_len])
futurepos1_by_loc = []
board = gs.board
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
if board.pla == Board.WHITE:
futurepos1_by_loc.append((loc,futurepos1_flat[pos]))
else:
futurepos1_by_loc.append((loc,-futurepos1_flat[pos]))
seki_flat = seki.reshape([model.pos_len * model.pos_len])
seki_by_loc = []
board = gs.board
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
if board.pla == Board.WHITE:
seki_by_loc.append((loc,seki_flat[pos]))
else:
seki_by_loc.append((loc,-seki_flat[pos]))
seki_flat2 = seki2.reshape([model.pos_len * model.pos_len])
seki_by_loc2 = []
board = gs.board
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
seki_by_loc2.append((loc,seki_flat2[pos]))
moves_and_probs = sorted(moves_and_probs0, key=lambda moveandprob: moveandprob[1], reverse=True)
#Generate a random number biased small and then find the appropriate move to make
#Interpolate from moving uniformly to choosing from the triangular distribution
alpha = 1
beta = 1 + math.sqrt(max(0,len(gs.moves)-20))
r = np.random.beta(alpha,beta)
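  # E.g. at move 0 this is Beta(1,1), i.e. uniform on [0,1]; by move 120,
  # beta = 1 + sqrt(100) = 11 and r is usually small, so the highest-probability
  # move is chosen almost every time.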
probsum = 0.0
i = 0
genmove_result = Board.PASS_LOC
while True:
(move,prob) = moves_and_probs[i]
probsum += prob
if i >= len(moves_and_probs)-1 or probsum > r:
genmove_result = move
break
i += 1
return {
"policy0": policy0,
"policy1": policy1,
"moves_and_probs0": moves_and_probs0,
"moves_and_probs1": moves_and_probs1,
"value": value,
"td_value": td_value,
"td_value2": td_value2,
"td_value3": td_value3,
"scoremean": scoremean,
"td_score": td_score,
"scorestdev": scorestdev,
"lead": lead,
"vtime": vtime,
"estv": estv,
"ests": ests,
"ownership": ownership,
"ownership_by_loc": ownership_by_loc,
"scoring": scoring,
"scoring_by_loc": scoring_by_loc,
"futurepos": futurepos,
"futurepos0_by_loc": futurepos0_by_loc,
"futurepos1_by_loc": futurepos1_by_loc,
"seki": seki,
"seki_by_loc": seki_by_loc,
"seki2": seki2,
"seki_by_loc2": seki_by_loc2,
"scorebelief": scorebelief,
"sbscale": sbscale,
"genmove_result": genmove_result
}
def get_layer_values(session, gs, rules, layer, channel):
board = gs.board
[layer] = fetch_output(session,gs,rules=rules,fetches=[layer])
layer = layer.reshape([model.pos_len * model.pos_len,-1])
locs_and_values = []
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
locs_and_values.append((loc,layer[pos,channel]))
return locs_and_values
def get_input_feature(gs, rules, feature_idx):
board = gs.board
bin_input_data = np.zeros(shape=[1]+model.bin_input_shape, dtype=np.float32)
global_input_data = np.zeros(shape=[1]+model.global_input_shape, dtype=np.float32)
pla = board.pla
opp = Board.get_opp(pla)
move_idx = len(gs.moves)
model.fill_row_features(board,pla,opp,gs.boards,gs.moves,move_idx,rules,bin_input_data,global_input_data,idx=0)
locs_and_values = []
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
pos = model.loc_to_tensor_pos(loc,board)
locs_and_values.append((loc,bin_input_data[0,pos,feature_idx]))
return locs_and_values
def get_pass_alive(board, rules):
pla = board.pla
opp = Board.get_opp(pla)
area = [-1 for i in range(board.arrsize)]
nonPassAliveStones = False
safeBigTerritories = True
unsafeBigTerritories = False
board.calculateArea(area,nonPassAliveStones,safeBigTerritories,unsafeBigTerritories,rules["multiStoneSuicideLegal"])
locs_and_values = []
for y in range(board.size):
for x in range(board.size):
loc = board.loc(x,y)
locs_and_values.append((loc,area[loc]))
return locs_and_values
def get_gfx_commands_for_heatmap(locs_and_values, board, normalization_div, is_percent, value_and_score_from=None, hotcold=False):
gfx_commands = []
divisor = 1.0
if normalization_div == "max":
max_abs_value = max(abs(value) for (loc,value) in locs_and_values)
divisor = max(0.0000000001,max_abs_value) #avoid divide by zero
elif normalization_div is not None:
divisor = normalization_div
#Caps value at 1.0, using an asymptotic curve
def loose_cap(x):
def transformed_softplus(x):
return -math.log(math.exp(-(x-1.0)*8.0)+1.0)/8.0+1.0
base = transformed_softplus(0.0)
return (transformed_softplus(x) - base) / (1.0 - base)
#Softly curves a value so that it ramps up faster than linear in that range
def soft_curve(x,x0,x1):
p = (x-x0)/(x1-x0)
def curve(p):
return math.sqrt(p+0.16)-0.4
p = curve(p) / curve(1.0)
return x0 + p * (x1-x0)
if hotcold:
for (loc,value) in locs_and_values:
if loc != Board.PASS_LOC:
value = value / divisor
if value < 0:
value = -loose_cap(-value)
else:
value = loose_cap(value)
interpoints = [
(-1.00,(0,0,0)),
(-0.85,(15,0,50)),
(-0.60,(60,0,160)),
(-0.35,(0,0,255)),
(-0.15,(0,100,255)),
( 0.00,(115,115,115)),
( 0.15,(250,45,40)),
( 0.25,(255,55,0)),
( 0.60,(255,255,20)),
( 0.85,(255,255,128)),
( 1.00,(255,255,255)),
]
def lerp(p,y0,y1):
return y0 + p*(y1-y0)
i = 0
while i < len(interpoints):
if value <= interpoints[i][0]:
break
i += 1
i -= 1
if i < 0:
(r,g,b) = interpoints[0][1]
if i >= len(interpoints)-1:
(r,g,b) = interpoints[len(interpoints)-1][1]
p = (value - interpoints[i][0]) / (interpoints[i+1][0] - interpoints[i][0])
(r0,g0,b0) = interpoints[i][1]
(r1,g1,b1) = interpoints[i+1][1]
r = lerp(p,r0,r1)
g = lerp(p,g0,g1)
b = lerp(p,b0,b1)
r = ("%02x" % int(r))
g = ("%02x" % int(g))
b = ("%02x" % int(b))
gfx_commands.append("COLOR #%s%s%s %s" % (r,g,b,str_coord(loc,board)))
else:
for (loc,value) in locs_and_values:
if loc != Board.PASS_LOC:
value = value / divisor
if value < 0:
value = -value
huestart = 0.50
huestop = 0.86
else:
huestart = -0.02
huestop = 0.38
value = loose_cap(value)
def lerp(p,x0,x1,y0,y1):
return y0 + (y1-y0) * (p-x0)/(x1-x0)
if value <= 0.03:
hue = huestart
lightness = 0.00 + 0.50 * (value / 0.03)
saturation = value / 0.03
(r,g,b) = colorsys.hls_to_rgb((hue+1)%1, lightness, saturation)
elif value <= 0.60:
hue = lerp(value,0.03,0.60,huestart,huestop)
val = 1.0
saturation = 1.0
(r,g,b) = colorsys.hsv_to_rgb((hue+1)%1, val, saturation)
else:
hue = huestop
lightness = lerp(value,0.60,1.00,0.5,0.95)
saturation = 1.0
(r,g,b) = colorsys.hls_to_rgb((hue+1)%1, lightness, saturation)
r = ("%02x" % int(r*255))
g = ("%02x" % int(g*255))
b = ("%02x" % int(b*255))
gfx_commands.append("COLOR #%s%s%s %s" % (r,g,b,str_coord(loc,board)))
locs_and_values = sorted(locs_and_values, key=lambda loc_and_value: loc_and_value[1])
locs_and_values_rev = sorted(locs_and_values, key=lambda loc_and_value: loc_and_value[1], reverse=True)
texts = []
texts_rev = []
texts_value = []
maxlen_per_side = 1000
if len(locs_and_values) > 0 and locs_and_values[0][1] < 0:
maxlen_per_side = 500
for i in range(min(len(locs_and_values),maxlen_per_side)):
(loc,value) = locs_and_values[i]
if is_percent:
texts.append("%s %4.1f%%" % (str_coord(loc,board),value*100))
else:
texts.append("%s %.3f" % (str_coord(loc,board),value))
texts.reverse()
for i in range(min(len(locs_and_values_rev),maxlen_per_side)):
(loc,value) = locs_and_values_rev[i]
if is_percent:
texts_rev.append("%s %4.1f%%" % (str_coord(loc,board),value*100))
else:
texts_rev.append("%s %.3f" % (str_coord(loc,board),value))
if value_and_score_from is not None:
value = value_and_score_from["value"]
score = value_and_score_from["scoremean"]
lead = value_and_score_from["lead"]
vtime = value_and_score_from["vtime"]
texts_value.append("wv %.2fc nr %.2f%% ws %.1f wl %.1f vt %.1f" % (
100*(value[0]-value[1] if board.pla == Board.WHITE else value[1] - value[0]),
100*value[2],
(score if board.pla == Board.WHITE else -score),
(lead if board.pla == Board.WHITE else -lead),
vtime
))
gfx_commands.append("TEXT " + ", ".join(texts_value + texts_rev + texts))
return gfx_commands
def print_scorebelief(gs,outputs):
board = gs.board
scorebelief = outputs["scorebelief"]
scoremean = outputs["scoremean"]
scorestdev = outputs["scorestdev"]
sbscale = outputs["sbscale"]
scorebelief = list(scorebelief)
if board.pla != Board.WHITE:
scorebelief.reverse()
scoremean = -scoremean
scoredistrmid = pos_len * pos_len + Model.EXTRA_SCORE_DISTR_RADIUS
ret = ""
ret += "TEXT "
ret += "SBScale: " + str(sbscale) + "\n"
ret += "ScoreBelief: \n"
for i in range(17,-1,-1):
ret += "TEXT "
ret += "%+6.1f" %(-(i*20+0.5))
for j in range(20):
idx = scoredistrmid-(i*20+j)-1
ret += " %4.0f" % (scorebelief[idx] * 10000)
ret += "\n"
for i in range(18):
ret += "TEXT "
ret += "%+6.1f" %((i*20+0.5))
for j in range(20):
idx = scoredistrmid+(i*20+j)
ret += " %4.0f" % (scorebelief[idx] * 10000)
ret += "\n"
beliefscore = 0
beliefscoresq = 0
beliefwin = 0
belieftotal = 0
for idx in range(scoredistrmid*2):
score = idx-scoredistrmid+0.5
if score > 0:
beliefwin += scorebelief[idx]
else:
beliefwin -= scorebelief[idx]
belieftotal += scorebelief[idx]
beliefscore += score*scorebelief[idx]
beliefscoresq += score*score*scorebelief[idx]
beliefscoremean = beliefscore/belieftotal
beliefscoremeansq = beliefscoresq/belieftotal
beliefscorevar = max(0,beliefscoremeansq-beliefscoremean*beliefscoremean)
beliefscorestdev = math.sqrt(beliefscorevar)
ret += "TEXT BeliefWin: %.2fc\n" % (100*beliefwin/belieftotal)
ret += "TEXT BeliefScoreMean: %.1f\n" % (beliefscoremean)
ret += "TEXT BeliefScoreStdev: %.1f\n" % (beliefscorestdev)
ret += "TEXT ScoreMean: %.1f\n" % (scoremean)
ret += "TEXT ScoreStdev: %.1f\n" % (scorestdev)
ret += "TEXT Value: %s\n" % (str(outputs["value"]))
ret += "TEXT TDValue: %s\n" % (str(outputs["td_value"]))
ret += "TEXT TDValue2: %s\n" % (str(outputs["td_value2"]))
  ret += "TEXT TDValue3: %s\n" % (str(outputs["td_value3"]))
ret += "TEXT TDScore: %s\n" % (str(outputs["td_score"]))
ret += "TEXT Estv: %s\n" % (str(outputs["estv"]))
ret += "TEXT Ests: %s\n" % (str(outputs["ests"]))
return ret
# Basic parsing --------------------------------------------------------
colstr = 'ABCDEFGHJKLMNOPQRST'
def parse_coord(s,board):
if s == 'pass':
return Board.PASS_LOC
return board.loc(colstr.index(s[0].upper()), board.size - int(s[1:]))
def str_coord(loc,board):
if loc == Board.PASS_LOC:
return 'pass'
x = board.loc_x(loc)
y = board.loc_y(loc)
return '%c%d' % (colstr[x], board.size - y)
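# Example round-trip (hypothetical input): str_coord(parse_coord('A1', board), board)
# gives back 'A1'; note colstr skips 'I', matching standard GTP board coordinates.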
# GTP Implementation -----------------------------------------------------
#Adapted from https://github.com/pasky/michi/blob/master/michi.py, which is distributed under MIT license
#https://opensource.org/licenses/MIT
def run_gtp(session):
known_commands = [
'boardsize',
'clear_board',
'showboard',
'komi',
'play',
'genmove',
'quit',
'name',
'version',
'known_command',
'list_commands',
'protocol_version',
'gogui-analyze_commands',
'setrule',
'policy',
'policy1',
'logpolicy',
'ownership',
'scoring',
'futurepos0',
'futurepos1',
'seki',
'seki2',
'scorebelief',
'passalive',
]
known_analyze_commands = [
'gfx/Policy/policy',
'gfx/Policy1/policy1',
'gfx/LogPolicy/logpolicy',
'gfx/Ownership/ownership',
'gfx/Scoring/scoring',
'gfx/FuturePos0/futurepos0',
'gfx/FuturePos1/futurepos1',
'gfx/Seki/seki',
'gfx/Seki2/seki2',
'gfx/ScoreBelief/scorebelief',
'gfx/PassAlive/passalive',
]
board_size = 6
gs = GameState(board_size)
rules = {
"koRule": "KO_POSITIONAL",
"scoringRule": "SCORING_AREA",
"taxRule": "TAX_NONE",
"multiStoneSuicideLegal": True,
"hasButton": False,
"encorePhase": 0,
"passWouldEndPhase": False,
"whiteKomi": 7.5
}
layerdict = dict(model.outputs_by_layer)
weightdict = dict()
for v in tf.compat.v1.trainable_variables():
weightdict[v.name] = v
layer_command_lookup = dict()
def add_extra_board_size_visualizations(layer_name, layer, normalization_div):
assert(layer.shape[1].value == board_size)
assert(layer.shape[2].value == board_size)
num_channels = layer.shape[3].value
for i in range(num_channels):
command_name = layer_name + "-" + str(i)
command_name = command_name.replace("/",":")
known_commands.append(command_name)
known_analyze_commands.append("gfx/" + command_name + "/" + command_name)
layer_command_lookup[command_name.lower()] = (layer,i,normalization_div)
def add_layer_visualizations(layer_name, normalization_div):
if layer_name in layerdict:
layer = layerdict[layer_name]
add_extra_board_size_visualizations(layer_name, layer, normalization_div)
add_layer_visualizations("conv1",normalization_div=6)
add_layer_visualizations("rconv1",normalization_div=14)
add_layer_visualizations("rconv2",normalization_div=20)
add_layer_visualizations("rconv3",normalization_div=26)
add_layer_visualizations("rconv4",normalization_div=36)
add_layer_visualizations("rconv5",normalization_div=40)
add_layer_visualizations("rconv6",normalization_div=40)
add_layer_visualizations("rconv7",normalization_div=44)
add_layer_visualizations("rconv7/conv1a",normalization_div=12)
add_layer_visualizations("rconv7/conv1b",normalization_div=12)
add_layer_visualizations("rconv8",normalization_div=48)
add_layer_visualizations("rconv9",normalization_div=52)
add_layer_visualizations("rconv10",normalization_div=55)
add_layer_visualizations("rconv11",normalization_div=58)
add_layer_visualizations("rconv11/conv1a",normalization_div=12)
add_layer_visualizations("rconv11/conv1b",normalization_div=12)
add_layer_visualizations("rconv12",normalization_div=58)
add_layer_visualizations("rconv13",normalization_div=64)
add_layer_visualizations("rconv14",normalization_div=66)
add_layer_visualizations("g1",normalization_div=6)
add_layer_visualizations("p1",normalization_div=2)
add_layer_visualizations("v1",normalization_div=4)
input_feature_command_lookup = dict()
def add_input_feature_visualizations(layer_name, feature_idx, normalization_div):
command_name = layer_name
command_name = command_name.replace("/",":")
known_commands.append(command_name)
known_analyze_commands.append("gfx/" + command_name + "/" + command_name)
input_feature_command_lookup[command_name] = (feature_idx,normalization_div)
for i in range(model.bin_input_shape[1]):
add_input_feature_visualizations("input-" + str(i),i, normalization_div=1)
linear = tf.cumsum(tf.ones([6],dtype=tf.float32),axis=0,exclusive=True) / 18.0
color_calibration = tf.stack(axis=0,values=[
linear,
linear*0.5,
linear*0.2,
linear*0.1,
linear*0.05,
linear*0.02,
linear*0.01,
-linear,
-linear*0.5,
-linear*0.2,
-linear*0.1,
-linear*0.05,
-linear*0.02,
-linear*0.01,
linear*2-1,
tf.zeros([6],dtype=tf.float32),
linear,
-linear,
tf.zeros([6],dtype=tf.float32)
])
add_extra_board_size_visualizations("colorcalibration", tf.reshape(color_calibration,[1,6,6,1]),normalization_div=None)
while True:
try:
line = input().strip()
except EOFError:
break
if line == '':
continue
command = [s.lower() for s in line.split()]
    if re.match(r'\d+', command[0]):
cmdid = command[0]
command = command[1:]
else:
cmdid = ''
ret = ''
if command[0] == "boardsize":
if int(command[1]) > model.pos_len:
        print("Warning: Trying to set incompatible boardsize %s (!= %d)" % (command[1], model.pos_len), file=sys.stderr)
ret = None
board_size = int(command[1])
gs = GameState(board_size)
elif command[0] == "clear_board":
gs = GameState(board_size)
elif command[0] == "showboard":
ret = "\n" + gs.board.to_string().strip()
elif command[0] == "komi":
rules["whiteKomi"] = float(command[1])
elif command[0] == "play":
pla = (Board.BLACK if command[1] == "B" or command[1] == "b" else Board.WHITE)
loc = parse_coord(command[2],gs.board)
gs.board.play(pla,loc)
gs.moves.append((pla,loc))
gs.boards.append(gs.board.copy())
elif command[0] == "genmove":
outputs = get_outputs(session, gs, rules)
loc = outputs["genmove_result"]
pla = gs.board.pla
if len(command) > 1:
pla = (Board.BLACK if command[1] == "B" or command[1] == "b" else Board.WHITE)
gs.board.play(pla,loc)
gs.moves.append((pla,loc))
gs.boards.append(gs.board.copy())
ret = str_coord(loc,gs.board)
elif command[0] == "name":
ret = 'KataGo Raw Neural Net Debug/Test Script'
elif command[0] == "version":
ret = '1.0'
elif command[0] == "list_commands":
ret = '\n'.join(known_commands)
elif command[0] == "known_command":
ret = 'true' if command[1] in known_commands else 'false'
elif command[0] == "gogui-analyze_commands":
ret = '\n'.join(known_analyze_commands)
elif command[0] == "setrule":
ret = ""
if command[1] == "korule":
rules["koRule"] = command[2].upper()
elif command[1] == "scoringrule":
rules["scoringRule"] = command[2].upper()
elif command[1] == "taxrule":
rules["taxRule"] = command[2].upper()
elif command[1] == "multistonesuicidelegal":
rules["multiStoneSuicideLegal"] = (command[2].lower() == "true")
elif command[1] == "hasbutton":
rules["hasButton"] = (command[2].lower() == "true")
elif command[1] == "encorephase":
rules["encorePhase"] = int(command[2])
elif command[1] == "passwouldendphase":
rules["passWouldEndPhase"] = (command[2].lower() == "true")
elif command[1] == "whitekomi" or command[1] == "komi":
rules["whiteKomi"] = float(command[2])
elif command[1] == "asym":
rules["asymPowersOfTwo"] = float(command[2])
else:
ret = "Unknown rules setting"
elif command[0] == "policy":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["moves_and_probs0"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=outputs)
ret = "\n".join(gfx_commands)
elif command[0] == "policy1":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["moves_and_probs1"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=outputs)
ret = "\n".join(gfx_commands)
elif command[0] == "logpolicy":
outputs = get_outputs(session, gs, rules)
moves_and_logprobs = [(move,max(0.0,4.9+math.log10(prob))) for (move,prob) in outputs["moves_and_probs0"]]
gfx_commands = get_gfx_commands_for_heatmap(moves_and_logprobs, gs.board, normalization_div=6, is_percent=False, value_and_score_from=outputs)
ret = "\n".join(gfx_commands)
elif command[0] == "ownership":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["ownership_by_loc"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=None, hotcold=True)
ret = "\n".join(gfx_commands)
elif command[0] == "scoring":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["scoring_by_loc"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=None, hotcold=True)
ret = "\n".join(gfx_commands)
elif command[0] == "futurepos0":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["futurepos0_by_loc"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=None, hotcold=True)
ret = "\n".join(gfx_commands)
elif command[0] == "futurepos1":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["futurepos1_by_loc"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=None, hotcold=True)
ret = "\n".join(gfx_commands)
elif command[0] == "seki":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["seki_by_loc"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=None)
ret = "\n".join(gfx_commands)
elif command[0] == "seki2":
outputs = get_outputs(session, gs, rules)
gfx_commands = get_gfx_commands_for_heatmap(outputs["seki_by_loc2"], gs.board, normalization_div=None, is_percent=True, value_and_score_from=None)
ret = "\n".join(gfx_commands)
elif command[0] in layer_command_lookup:
(layer,channel,normalization_div) = layer_command_lookup[command[0]]
locs_and_values = get_layer_values(session, gs, rules, layer, channel)
gfx_commands = get_gfx_commands_for_heatmap(locs_and_values, gs.board, normalization_div, is_percent=False)
ret = "\n".join(gfx_commands)
elif command[0] in input_feature_command_lookup:
(feature_idx,normalization_div) = input_feature_command_lookup[command[0]]
locs_and_values = get_input_feature(gs, rules, feature_idx)
gfx_commands = get_gfx_commands_for_heatmap(locs_and_values, gs.board, normalization_div, is_percent=False)
ret = "\n".join(gfx_commands)
elif command[0] == "passalive":
locs_and_values = get_pass_alive(gs.board, rules)
gfx_commands = get_gfx_commands_for_heatmap(locs_and_values, gs.board, normalization_div=None, is_percent=False)
ret = "\n".join(gfx_commands)
elif command[0] == "scorebelief":
outputs = get_outputs(session, gs, rules)
ret = print_scorebelief(gs,outputs)
elif command[0] == "protocol_version":
ret = '2'
elif command[0] == "quit":
print('=%s \n\n' % (cmdid,), end='')
break
else:
print('Warning: Ignoring unknown command - %s' % (line,), file=sys.stderr)
ret = None
if ret is not None:
print('=%s %s\n\n' % (cmdid, ret,), end='')
else:
print('?%s ???\n\n' % (cmdid,), end='')
sys.stdout.flush()
saver = tf.compat.v1.train.Saver(
max_to_keep = 10000,
save_relative_paths = True,
)
# session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
# session_config.gpu_options.per_process_gpu_memory_fraction = 0.3
with tf.compat.v1.Session() as session:
saver.restore(session, model_variables_prefix)
run_gtp(session)
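For context, here is a minimal client-side sketch of a GTP exchange against the loop above. It is illustrative only: the script path and model flag are placeholders, not taken from this record, and responses are assumed to follow the '=<id> <result>' convention printed by the loop.

import subprocess

# Hypothetical invocation; the real entry point and flags are not shown here.
proc = subprocess.Popen(
    ["python", "play_debug.py", "-model", "path/to/model"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True,
)

def gtp(cmd):
    """Send one GTP command and collect lines until the blank terminator."""
    proc.stdin.write(cmd + "\n")
    proc.stdin.flush()
    lines = []
    while True:
        line = proc.stdout.readline()
        if not line:              # EOF: process exited
            break
        if not line.strip() and lines:
            break
        lines.append(line.rstrip("\n"))
    return "\n".join(lines)

print(gtp("boardsize 19"))
print(gtp("play b d4"))
print(gtp("genmove w"))
gtp("quit")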
| 0
| 0
| 0
| 159
| 0
| 26,063
| 0
| -82
| 494
|
e1f2ff87b306b5118c6404d2e8d28e29a993265e
| 1,832
|
py
|
Python
|
stim_amplitude_scan.py
|
maxnolte/deciphering_variability
|
bea48cc3c04e63f3acdd1b86563eb792358c91a8
|
[
"MIT"
] | 2
|
2020-04-22T12:02:32.000Z
|
2021-06-21T17:35:15.000Z
|
stim_amplitude_scan.py
|
maxnolte/deciphering_variability
|
bea48cc3c04e63f3acdd1b86563eb792358c91a8
|
[
"MIT"
] | null | null | null |
stim_amplitude_scan.py
|
maxnolte/deciphering_variability
|
bea48cc3c04e63f3acdd1b86563eb792358c91a8
|
[
"MIT"
] | 3
|
2019-09-26T07:32:50.000Z
|
2021-06-21T17:35:29.000Z
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
import bluepy
variances = ['0p001', '0p01', '0p05', '0p1', '0p5', '1p0', '1p5', '2p0', '10p0']
bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/variability/spontaneous/base_seeds_abcd_stim/seed170/variance%s/BlueConfig' % s for s in variances[1:]]
bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/variability/spontaneous/base_seeds_abcd/seed170/BlueConfig'] + bcs
# bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/ei-balance/' \
# 'scan_layer5/Ca%s/BlueConfig' % s for s in cas]
sim = bluepy.Simulation(bcs[0])
gids = np.array(list(sim.get_circuit_target()))
gids_exc = np.random.permutation(np.intersect1d(np.array(list(sim.circuit.get_target('Excitatory'))), gids))
gids_inh = np.random.permutation(np.intersect1d(np.array(list(sim.circuit.get_target('Inhibitory'))), gids))
# bcs = bcs_0
names = ['MVR', 'det_syns']
fig, axs = plt.subplots(len(bcs), 2, figsize=(14, 14))
for i, bc in enumerate(bcs):
print(bc)
sim = bluepy.Simulation(bc)
ax = axs[i, 0]
spikes = bluepy.Simulation(bc).v2.reports['spikes']
df = spikes.data(t_start=1000.0)
gids_spiking = np.abs(np.array(df.axes[0]) - gids.max())
times = np.array(df)
ax.vlines(times, gids_spiking, gids_spiking + 200, rasterized=True, lw=0.3)
ax2 = ax.twinx()
ax2.hist(times, bins=np.linspace(1000, 2000, 101), histtype='step', weights=np.zeros(times.size) + (1000.0/10.0)/gids.size)
ax2.set_ylabel('FR (Hz)')
# ax2.set_ylim([0, 3])
# ax2.set_yticks([0, 1, 2, 3])
ax.set_xlabel('t (ms)')
ax.set_ylabel('Neurons')
ax.set_title('variance in percent: %s' % variances[i])
plt.tight_layout()
plt.savefig('figures/variance_raster.pdf', dpi=300)
| 36.64
| 161
| 0.686681
|
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
import bluepy
variances = ['0p001', '0p01', '0p05', '0p1', '0p5', '1p0', '1p5', '2p0', '10p0']
bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/variability/spontaneous/base_seeds_abcd_stim/seed170/variance%s/BlueConfig' % s for s in variances[1:]]
bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/variability/spontaneous/base_seeds_abcd/seed170/BlueConfig'] + bcs
# bcs = ['/gpfs/bbp.cscs.ch/project/proj9/simulations/nolte/ei-balance/' \
# 'scan_layer5/Ca%s/BlueConfig' % s for s in cas]
sim = bluepy.Simulation(bcs[0])
gids = np.array(list(sim.get_circuit_target()))
gids_exc = np.random.permutation(np.intersect1d(np.array(list(sim.circuit.get_target('Excitatory'))), gids))
gids_inh = np.random.permutation(np.intersect1d(np.array(list(sim.circuit.get_target('Inhibitory'))), gids))
# bcs = bcs_0
names = ['MVR', 'det_syns']
fig, axs = plt.subplots(len(bcs), 2, figsize=(14, 14))
for i, bc in enumerate(bcs):
print(bc)
sim = bluepy.Simulation(bc)
ax = axs[i, 0]
spikes = bluepy.Simulation(bc).v2.reports['spikes']
df = spikes.data(t_start=1000.0)
gids_spiking = np.abs(np.array(df.axes[0]) - gids.max())
times = np.array(df)
ax.vlines(times, gids_spiking, gids_spiking + 200, rasterized=True, lw=0.3)
ax2 = ax.twinx()
ax2.hist(times, bins=np.linspace(1000, 2000, 101), histtype='step', weights=np.zeros(times.size) + (1000.0/10.0)/gids.size)
ax2.set_ylabel('FR (Hz)')
# ax2.set_ylim([0, 3])
# ax2.set_yticks([0, 1, 2, 3])
ax.set_xlabel('t (ms)')
ax.set_ylabel('Neurons')
ax.set_title('variance in percent: %s' % variances[i])
plt.tight_layout()
plt.savefig('figures/variance_raster.pdf', dpi=300)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
761606c02fc534ca8cbdc3d2fa43d7330287a1ad
| 5,312
|
py
|
Python
|
opengenomebrowser_tools/init_database.py
|
opengenomebrowser/opengenomebrowser-tools
|
b6ef2340b2fd67a61373d1d8a0f3ef71cc892d1e
|
[
"MIT"
] | null | null | null |
opengenomebrowser_tools/init_database.py
|
opengenomebrowser/opengenomebrowser-tools
|
b6ef2340b2fd67a61373d1d8a0f3ef71cc892d1e
|
[
"MIT"
] | null | null | null |
opengenomebrowser_tools/init_database.py
|
opengenomebrowser/opengenomebrowser-tools
|
b6ef2340b2fd67a61373d1d8a0f3ef71cc892d1e
|
[
"MIT"
] | null | null | null |
import os
import json
import shutil
from .utils import PACKAGE_ROOT
from . import __folder_structure_version__
def init_database(database_dir: str = None) -> None:
"""
Creates a basic OpenGenomeBrowser folders structure.
Result:
database
organisms
annotations.json
annotation-descriptions
SL.tsv
KO.tsv
KR.tsv
EC.tsv
GO.tsv
orthologs
pathway-maps
type_dictionary.json
svg
:param database_dir: Path to the root of the OpenGenomeBrowser folder structure. (Will contain 'organisms' folder.)
"""
if database_dir is None:
assert 'GENOMIC_DATABASE' in os.environ, f'Cannot find the database. Please set --database_dir or environment variable GENOMIC_DATABASE'
database_dir = os.environ['GENOMIC_DATABASE']
assert os.path.isdir(os.path.dirname(database_dir)), f'Parent dir of {database_dir=} does not exist!'
assert not os.path.exists(database_dir), f'Error: {database_dir=} already exist!'
# make main dir
os.makedirs(database_dir)
# set version
with open(f'{database_dir}/version.json', 'w') as f:
json.dump({'folder_structure_version': __folder_structure_version__}, f, indent=4)
# make organisms dir (empty)
os.makedirs(f'{database_dir}/organisms')
# make orthologs dir (empty)
os.makedirs(f'{database_dir}/orthologs')
# make pathway maps dir and content
os.makedirs(f'{database_dir}/pathway-maps')
os.makedirs(f'{database_dir}/pathway-maps/svg')
with open(f'{database_dir}/pathway-maps/type_dictionary.json', 'w') as f:
f.write('{}')
# Create annotations.json
shutil.copy(src=f'{PACKAGE_ROOT}/data/annotations.json', dst=f'{database_dir}/annotations.json')
# download annotation descriptions
annotation_descriptions_dir = f'{database_dir}/annotation-descriptions'
os.makedirs(annotation_descriptions_dir)
download_sl_data(out=f'{annotation_descriptions_dir}/SL.tsv')
download_kegg_data(src='rn', out=f'{annotation_descriptions_dir}/KR.tsv', remove_prefix='rn:')
download_kegg_data(src='ko', out=f'{annotation_descriptions_dir}/KG.tsv', remove_prefix='ko:')
download_kegg_data(src='enzyme', out=f'{annotation_descriptions_dir}/EC.tsv', remove_prefix='ec:', add_prefix='EC:')
download_go_data(out=f'{annotation_descriptions_dir}/GO.tsv')
if __name__ == '__main__':
main()
| 35.413333
| 144
| 0.650414
|
import os
import json
import shutil
from urllib import request
from .utils import PACKAGE_ROOT
from . import __folder_structure_version__
def download_go_data(out: str) -> None:
source_url = 'http://purl.obolibrary.org/obo/go.obo'
print(f'Converting {source_url} -> {out}')
def go_generator(io) -> [str]:
go_entry = []
line = io.readline()
while line:
if line == b'[Term]\n':
yield go_entry
go_entry.clear()
go_entry.append(line.decode('utf-8'))
line = io.readline()
yield go_entry
def get_name(entry: list) -> str:
for line in entry:
if line.startswith('name: '):
return line.rstrip()[6:]
raise TypeError(F'The go.obo file seems to have a wrong format! broken entry: {entry}')
def get_go(entry: list) -> str:
entry = entry[1]
assert entry.startswith('id: GO:') and len(entry) == 15, f'Bad entry in go.obo: {entry}, len={len(entry)}'
assert entry[7:14].isnumeric()
return entry[4:14]
with request.urlopen(source_url) as source_handle, open(out, 'w') as target_handle:
gos = go_generator(io=source_handle)
# skip first entry
file_head = next(gos)
assert not file_head[0].startswith('[Term]'), F'The go.obo file seems to have a wrong format! file_head looks wrong: {file_head}'
# save regular entries to file
for entry in gos:
target_handle.write(F'{get_go(entry)}\t{get_name(entry)}\n')
def download_kegg_data(src: str, out: str, remove_prefix: str = '', add_prefix: str = '') -> None:
source_url = f'http://rest.kegg.jp/list/{src}'
print(f'Converting {source_url} -> {out}')
with request.urlopen(source_url) as source_handle, open(out, 'w') as target_handle:
for line in source_handle:
target_handle.write(f'{add_prefix}{line.decode("utf-8").removeprefix(remove_prefix)}')
def download_sl_data(out: str) -> None:
source_url = 'https://www.uniprot.org/locations/?query=*&format=tab&force=true&columns=id'
print(f'Converting {source_url} -> {out}')
error_msg = 'UniProt must have changed its format. Please contact the developer. error={error}'
with request.urlopen(source_url) as source_handle, open(out, 'w') as target_handle:
first_line = source_handle.readline().decode('utf-8')
assert first_line == 'Subcellular location ID\tDescription\tCategory\tAlias\n', error_msg.format(error=first_line)
for line in source_handle:
line = line.decode('utf-8').strip().split('\t')
assert len(line) == 4, error_msg.format(error=f'{len(line)=}; {line=}')
sl, description, type, location = line
target_handle.write(f'{sl}\t{location} ({description})\n')
def init_database(database_dir: str = None) -> None:
"""
Creates a basic OpenGenomeBrowser folders structure.
Result:
database
├── organisms
├── annotations.json
├── annotation-descriptions
│ ├── SL.tsv
│ ├── KO.tsv
│ ├── KR.tsv
│ ├── EC.tsv
│ └── GO.tsv
├── orthologs
└── pathway-maps
├── type_dictionary.json
└── svg
:param database_dir: Path to the root of the OpenGenomeBrowser folder structure. (Will contain 'organisms' folder.)
"""
if database_dir is None:
assert 'GENOMIC_DATABASE' in os.environ, f'Cannot find the database. Please set --database_dir or environment variable GENOMIC_DATABASE'
database_dir = os.environ['GENOMIC_DATABASE']
assert os.path.isdir(os.path.dirname(database_dir)), f'Parent dir of {database_dir=} does not exist!'
assert not os.path.exists(database_dir), f'Error: {database_dir=} already exist!'
# make main dir
os.makedirs(database_dir)
# set version
with open(f'{database_dir}/version.json', 'w') as f:
json.dump({'folder_structure_version': __folder_structure_version__}, f, indent=4)
# make organisms dir (empty)
os.makedirs(f'{database_dir}/organisms')
# make orthologs dir (empty)
os.makedirs(f'{database_dir}/orthologs')
# make pathway maps dir and content
os.makedirs(f'{database_dir}/pathway-maps')
os.makedirs(f'{database_dir}/pathway-maps/svg')
with open(f'{database_dir}/pathway-maps/type_dictionary.json', 'w') as f:
f.write('{}')
# Create annotations.json
shutil.copy(src=f'{PACKAGE_ROOT}/data/annotations.json', dst=f'{database_dir}/annotations.json')
# download annotation descriptions
annotation_descriptions_dir = f'{database_dir}/annotation-descriptions'
os.makedirs(annotation_descriptions_dir)
download_sl_data(out=f'{annotation_descriptions_dir}/SL.tsv')
download_kegg_data(src='rn', out=f'{annotation_descriptions_dir}/KR.tsv', remove_prefix='rn:')
download_kegg_data(src='ko', out=f'{annotation_descriptions_dir}/KG.tsv', remove_prefix='ko:')
download_kegg_data(src='enzyme', out=f'{annotation_descriptions_dir}/EC.tsv', remove_prefix='ec:', add_prefix='EC:')
download_go_data(out=f'{annotation_descriptions_dir}/GO.tsv')
def main():
import fire
fire.Fire(init_database)
if __name__ == '__main__':
main()
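A brief usage sketch for the initializer above; the target paths are placeholders and must not already exist (though their parent directory must).

import os
from opengenomebrowser_tools.init_database import init_database

# Point the initializer at a fresh directory explicitly...
init_database(database_dir='/tmp/ogb-database')  # placeholder path

# ...or rely on the environment-variable fallback used in the function body.
os.environ['GENOMIC_DATABASE'] = '/tmp/ogb-database-2'  # placeholder path
init_database()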
| 123
| 0
| 0
| 0
| 1,396
| 1,274
| 0
| 5
| 114
|
53ba41eb81896191d5d26dcb15844fd97e74a3e7
| 6,272
|
py
|
Python
|
frictionless/file.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | 1
|
2021-11-08T22:29:30.000Z
|
2021-11-08T22:29:30.000Z
|
frictionless/file.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | null | null | null |
frictionless/file.py
|
augusto-herrmann/frictionless-py
|
b4ff35f064141a2c04882edb592666ca6b066776
|
[
"MIT"
] | null | null | null |
# NOTE:
# For better detection we can add an argument allowing metadata reading
# Exact set of file types needs to be reviewed
| 30.595122
| 87
| 0.557398
|
import os
import glob
from pathlib import Path
from .helpers import cached_property
from . import helpers
from . import config
# NOTE:
# For better detection we can add an argument allowing metadata reading
# Exact set of file types needs to be reviewed
class File:
"""File representation"""
def __init__(self, source, *, basepath="", innerpath=None):
# Handle pathlib
if isinstance(source, Path):
source = str(source)
# Set attributes
self.__source = source
self.__basepath = basepath
self.__innerpath = innerpath
# Detect attributes
self.__detect()
@cached_property
def path(self):
return self.__path
@cached_property
def data(self):
return self.__data
@cached_property
def type(self):
return self.__type
@cached_property
def name(self):
return self.__name
@cached_property
def scheme(self):
return self.__scheme
@cached_property
def format(self):
return self.__format
@cached_property
def innerpath(self):
return self.__innerpath
@cached_property
def compression(self):
return self.__compression
@cached_property
def memory(self):
return self.__memory
@cached_property
def remote(self):
return self.__remote
@cached_property
def multipart(self):
return self.__multipart
@cached_property
def expandable(self):
return self.__expandable
@cached_property
def basepath(self):
return self.__basepath
@cached_property
def normpath(self):
return self.__normpath
@cached_property
def fullpath(self):
return self.__fullpath
# Detect
def __detect(self):
source = self.__source
# Detect path/data
path = None
data = source
if isinstance(source, str):
path = source
data = None
elif isinstance(source, list) and source and isinstance(source[0], str):
path = source
data = None
# Detect memory/remote/expandable/multipart
memory = path is None
remote = helpers.is_remote_path(self.__basepath or path)
expandable = not memory and helpers.is_expandable_path(path, self.__basepath)
multipart = not memory and (isinstance(path, list) or expandable)
# Detect fullpath
normpath = path
fullpath = path
if not memory:
if expandable:
normpath = []
fullpath = []
pattern = os.path.join(self.__basepath, path)
pattern = f"{pattern}/*" if os.path.isdir(pattern) else pattern
options = {"recursive": True} if "**" in pattern else {}
for part in sorted(glob.glob(pattern, **options)):
normpath.append(os.path.relpath(part, self.__basepath))
fullpath.append(os.path.relpath(part, ""))
if not fullpath:
expandable = False
multipart = False
fullpath = path
elif multipart:
fullpath = []
for part in path:
part = helpers.join_path(self.__basepath, part)
fullpath.append(part)
else: # string path
fullpath = helpers.join_path(self.__basepath, path)
# Detect name
name = "memory"
if not memory:
names = []
for part in fullpath if multipart else [fullpath]:
name = os.path.splitext(os.path.basename(part))[0]
names.append(name)
name = os.path.commonprefix(names)
name = helpers.slugify(name, regex_pattern=r"[^-a-z0-9._/]")
name = name or "name"
# Detect type
type = "table"
if not multipart:
if memory and isinstance(data, dict):
type = "resource"
if data.get("fields") is not None:
type = "schema"
elif data.get("resources") is not None:
type = "package"
elif data.get("tasks") is not None:
type = "inquiry"
elif data.get("steps") is not None:
type = "pipeline"
elif not memory and path.endswith((".json", ".yaml", ".yml")):
type = "resource"
if path.endswith(("schema.json", "schema.yaml", "schema.yml")):
type = "schema"
elif path.endswith(("package.json", "package.yaml", "package.yml")):
type = "package"
elif path.endswith(("inquiry.json", "inquiry.yaml", "inquiry.yml")):
type = "inquiry"
elif path.endswith(("pipeline.json", "pipeline.yaml", "pipeline.yml")):
type = "pipeline"
# Detect scheme/format/innerpath/compression
scheme = ""
format = ""
compression = ""
innerpath = ""
detection_path = fullpath[0] if multipart else fullpath
if not memory:
scheme, format = helpers.parse_scheme_and_format(detection_path)
if format in config.COMPRESSION_FORMATS:
if not multipart:
compression = format
detection_path = detection_path[: -len(format) - 1]
if self.__innerpath:
detection_path = os.path.join(detection_path, self.__innerpath)
scheme, format = helpers.parse_scheme_and_format(detection_path)
if format:
name = os.path.splitext(name)[0]
# Set attributes
self.__path = path
self.__data = data
self.__name = name
self.__type = type
self.__scheme = scheme
self.__format = format
self.__innerpath = innerpath
self.__compression = compression
self.__memory = memory
self.__remote = remote
self.__multipart = multipart
self.__expandable = expandable
self.__normpath = normpath
self.__fullpath = fullpath
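To make the detection logic above concrete, a short sketch follows; paths are placeholders, and the exact scheme string is an assumption about parse_scheme_and_format for local paths.

from frictionless.file import File

f = File('data/table.csv.gz', basepath='/project')  # placeholder path
print(f.name)         # 'table' (compression and format suffixes both stripped)
print(f.format)       # 'csv'   (detected behind the 'gz' compression)
print(f.compression)  # 'gz'
print(f.memory)       # False: a string path, not inline data

# Inline data is "memory" and typed by its keys, per __detect above.
pkg = File({'resources': []})
print(pkg.memory, pkg.type)  # True 'package'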
| 0
| 716
| 0
| 5,276
| 0
| 0
| 0
| -5
| 155
|
bc521fe8e156c2bea30c143fc4f2a1b5f920fe18
| 4,919
|
py
|
Python
|
data/parsers/spain.py
|
hdsheena/covid19_scenarios
|
ea67a75a99c20b0948ef6d377bc6cfbec6e670b5
|
[
"MIT"
] | 1,550
|
2020-03-10T13:18:53.000Z
|
2022-03-29T13:48:11.000Z
|
data/parsers/spain.py
|
hdsheena/covid19_scenarios
|
ea67a75a99c20b0948ef6d377bc6cfbec6e670b5
|
[
"MIT"
] | 835
|
2020-03-09T21:52:19.000Z
|
2022-02-02T08:06:21.000Z
|
data/parsers/spain.py
|
hdsheena/covid19_scenarios
|
ea67a75a99c20b0948ef6d377bc6cfbec6e670b5
|
[
"MIT"
] | 444
|
2020-03-13T03:24:13.000Z
|
2021-11-15T19:08:53.000Z
|
# ------------------------------------------------------------------------
# Globals
deaths_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_fallecidos.csv"
cases_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_casos.csv"
hospitalized_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_hospitalizados.csv"
icu_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_uci.csv"
recovered_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_altas.csv"
cols = ['time', 'cases', 'deaths', 'hospitalized', 'icu', 'recovered']
# ------------------------------------------------------------------------
# Main point of entry
| 46.847619
| 143
| 0.544623
|
import sys
import requests
import csv
import io
from datetime import datetime
from collections import defaultdict
from .utils import store_data, stoi
# ------------------------------------------------------------------------
# Globals
deaths_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_fallecidos.csv"
cases_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_casos.csv"
hospitalized_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_hospitalizados.csv"
icu_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_uci.csv"
recovered_URL = "https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019/ccaa_covid19_altas.csv"
cols = ['time', 'cases', 'deaths', 'hospitalized', 'icu', 'recovered']
# ------------------------------------------------------------------------
# Main point of entry
def parse():
# read individual files into dicts of dicts by region
deaths, cases, hospitalized, icu, recovered = defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(dict), defaultdict(dict)
for d, URL in [(deaths, deaths_URL), (cases, cases_URL), (hospitalized, hospitalized_URL), (icu, icu_URL), (recovered, recovered_URL)]:
r = requests.get(URL)
if not r.ok:
print(f"Failed to fetch {URL}", file=sys.stderr)
exit(1)
r.close()
fd = io.StringIO(r.text)
rdr = csv.reader(fd)
hdr = next(rdr)
dates = [x for x in hdr[2:]]
for row in rdr:
region = row[1]
for val, date in zip(row[2:], dates):
d[region][date] = stoi(val)
# combine different data into one dict per region and day
region_data = defaultdict(lambda: defaultdict(dict))
for field, data in ('deaths', deaths), ('cases', cases), ('hospitalized', hospitalized), ('icu', icu), ('recovered', recovered):
for region, d in data.items():
for date in d:
region_data[region][date][field] = d[date]
# convert dict of dicts into dict of lists
regions = {}
for region, d in region_data.items():
dps = sorted(d.items())
regions['-'.join(['ESP',region])] = [[x[0], x[1].get("cases", None),
x[1].get("deaths",None),
x[1].get("hospitalized",None),
x[1].get("icu", None),
x[1].get("recovered", None)] for x in dps]
# Delete incorrect data, see https://github.com/neherlab/covid19_scenarios/issues/595
for r in regions:
if r == 'ESP-Madrid':
for d in regions[r]:
stop = datetime.strptime('2020-04-26', '%Y-%m-%d')
if datetime.strptime(d[cols.index('time')], '%Y-%m-%d') >= stop:
d[cols.index('hospitalized')] = None
d[cols.index('icu')] = None
elif r == 'ESP-Galicia':
for d in regions[r]:
d[cols.index('hospitalized')] = None
elif r == 'ESP-Castilla-La Mancha':
for d in regions[r]:
stop = datetime.strptime('2020-04-12', '%Y-%m-%d')
if datetime.strptime(d[cols.index('time')], '%Y-%m-%d') >= stop:
d[cols.index('hospitalized')] = None
d[cols.index('icu')] = None
elif r == 'ESP-Castilla y León':
for d in regions[r]:
stopHosp = datetime.strptime('2020-04-07', '%Y-%m-%d')
stopICU = datetime.strptime('2020-04-17', '%Y-%m-%d')
if datetime.strptime(d[cols.index('time')], '%Y-%m-%d') >= stopHosp:
d[cols.index('hospitalized')] = None
if datetime.strptime(d[cols.index('time')], '%Y-%m-%d') >= stopICU:
d[cols.index('icu')] = None
elif r == 'ESP-C. Valenciana':
for d in regions[r]:
stop = datetime.strptime('2020-04-09', '%Y-%m-%d')
if datetime.strptime(d[cols.index('time')], '%Y-%m-%d') >= stop:
d[cols.index('hospitalized')] = None
d[cols.index('icu')] = None
else:
# none of the data is current, it is cumulative. We delete it for now
for d in regions[r]:
d[cols.index('hospitalized')] = None
d[cols.index('icu')] = None
# For totals, we actually only use the recovered data in the end, as hosp+icu are None, and cases and deaths are taken from ecdc data
try:
regions['Spain'] = regions['ESP-Total']
del regions['ESP-Total']
except KeyError:
print(" /!\\ Warning: totals don't exist for Spain")
store_data(regions, 'spain', cols)
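The reshaping inside parse() is easiest to see on toy data; the standalone sketch below (with made-up numbers) mirrors its dict-of-dicts to per-date-row conversion.

from collections import defaultdict

# Per-metric dicts keyed by region, then date (as produced from each CSV).
cases = {'Madrid': {'2020-03-01': 10, '2020-03-02': 25}}
deaths = {'Madrid': {'2020-03-02': 1}}

region_data = defaultdict(lambda: defaultdict(dict))
for field, data in ('cases', cases), ('deaths', deaths):
    for region, d in data.items():
        for date, val in d.items():
            region_data[region][date][field] = val

# One sorted row per date, with None for missing fields.
rows = [[date, vals.get('cases'), vals.get('deaths')]
        for date, vals in sorted(region_data['Madrid'].items())]
print(rows)  # [['2020-03-01', 10, None], ['2020-03-02', 25, 1]]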
| 2
| 0
| 0
| 0
| 0
| 3,901
| 0
| -4
| 178
|
a104266d8b0c9acf1fa4b9cf1a58128f80fb8476
| 6,749
|
py
|
Python
|
src/pymor/discretizers/advection.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/discretizers/advection.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
src/pymor/discretizers/advection.py
|
JuliaBru/pymor
|
46343b527267213f4279ea36f208b542ab291c4e
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.algorithms.timestepping import ExplicitEulerTimeStepper
from pymor.analyticalproblems.advection import InstationaryAdvectionProblem
from pymor.discretizations.basic import InstationaryDiscretization
from pymor.domaindiscretizers.default import discretize_domain_default
from pymor.gui.qt import PatchVisualizer, Matplotlib1DVisualizer
from pymor.operators.numpy import NumpyGenericOperator
from pymor.operators.fv import (nonlinear_advection_lax_friedrichs_operator, nonlinear_advection_engquist_osher_operator, nonlinear_advection_simplified_engquist_osher_operator, L2Product, L2ProductFunctional)
from pymor.vectorarrays.numpy import NumpyVectorArray
def discretize_nonlinear_instationary_advection_fv(analytical_problem, diameter=None, nt=100, num_flux='lax_friedrichs',
lxf_lambda=1., eo_gausspoints=5, eo_intervals=1, num_values=None,
domain_discretizer=None, grid=None, boundary_info=None):
"""Discretizes an |InstationaryAdvectionProblem| using the finite volume method.
Explicit Euler time-stepping is used for time discretization.
Parameters
----------
analytical_problem
The |InstationaryAdvectionProblem| to discretize.
diameter
If not `None`, `diameter` is passed as an argument to the
`domain_discretizer`.
nt
The number of time steps.
num_flux
The numerical flux to use in the finite volume formulation. Allowed
values are `'lax_friedrichs'`, `'engquist_osher'`, `'simplified_engquist_osher'`
(see :mod:`pymor.operators.fv`).
lxf_lambda
The stabilization parameter for the Lax-Friedrichs numerical flux
(ignored, if different flux is chosen).
eo_gausspoints
Number of Gauss points for the Engquist-Osher numerical flux
(ignored, if different flux is chosen).
eo_intervals
Number of sub-intervals to use for integration when using Engquist-Osher
numerical flux (ignored, if different flux is chosen).
num_values
The number of returned vectors of the solution trajectory. If `None`, each
intermediate vector that is calculated is returned.
domain_discretizer
Discretizer to be used for discretizing the analytical domain. This has
to be a function `domain_discretizer(domain_description, diameter)`.
If `None`, |discretize_domain_default| is used.
grid
Instead of using a domain discretizer, the |Grid| can also be passed directly
using this parameter.
boundary_info
A |BoundaryInfo| specifying the boundary types of the grid boundary entities.
Must be provided if `grid` is specified.
Returns
-------
discretization
The |Discretization| that has been generated.
data
Dictionary with the following entries:
:grid: The generated |Grid|.
:boundary_info: The generated |BoundaryInfo|.
"""
assert isinstance(analytical_problem, InstationaryAdvectionProblem)
assert grid is None or boundary_info is not None
assert boundary_info is None or grid is not None
assert grid is None or domain_discretizer is None
assert num_flux in ('lax_friedrichs', 'engquist_osher', 'simplified_engquist_osher')
if grid is None:
domain_discretizer = domain_discretizer or discretize_domain_default
if diameter is None:
grid, boundary_info = domain_discretizer(analytical_problem.domain)
else:
grid, boundary_info = domain_discretizer(analytical_problem.domain, diameter=diameter)
p = analytical_problem
if num_flux == 'lax_friedrichs':
L = nonlinear_advection_lax_friedrichs_operator(grid, boundary_info, p.flux_function,
dirichlet_data=p.dirichlet_data, lxf_lambda=lxf_lambda)
elif num_flux == 'engquist_osher':
L = nonlinear_advection_engquist_osher_operator(grid, boundary_info, p.flux_function,
p.flux_function_derivative,
gausspoints=eo_gausspoints, intervals=eo_intervals,
dirichlet_data=p.dirichlet_data)
else:
L = nonlinear_advection_simplified_engquist_osher_operator(grid, boundary_info, p.flux_function,
p.flux_function_derivative,
dirichlet_data=p.dirichlet_data)
F = None if p.rhs is None else L2ProductFunctional(grid, p.rhs)
if p.initial_data.parametric:
I = NumpyGenericOperator(initial_projection, dim_range=grid.size(0), linear=True,
parameter_type=p.initial_data.parameter_type)
else:
I = p.initial_data.evaluate(grid.quadrature_points(0, order=2)).squeeze()
I = np.sum(I * grid.reference_element.quadrature(order=2)[1], axis=1) * (1. / grid.reference_element.volume)
I = NumpyVectorArray(I, copy=False)
products = {'l2': L2Product(grid, boundary_info)}
if grid.dim == 2:
visualizer = PatchVisualizer(grid=grid, bounding_box=grid.bounding_box(), codim=0)
elif grid.dim == 1:
visualizer = Matplotlib1DVisualizer(grid, codim=0)
else:
visualizer = None
parameter_space = p.parameter_space if hasattr(p, 'parameter_space') else None
time_stepper = ExplicitEulerTimeStepper(nt=nt)
discretization = InstationaryDiscretization(operator=L, rhs=F, initial_data=I, T=p.T, products=products,
time_stepper=time_stepper,
parameter_space=parameter_space, visualizer=visualizer,
num_values=num_values, name='{}_FV'.format(p.name))
return discretization, {'grid': grid, 'boundary_info': boundary_info}
| 50.365672
| 120
| 0.659061
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2016 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.algorithms.timestepping import ExplicitEulerTimeStepper
from pymor.analyticalproblems.advection import InstationaryAdvectionProblem
from pymor.discretizations.basic import InstationaryDiscretization
from pymor.domaindiscretizers.default import discretize_domain_default
from pymor.gui.qt import PatchVisualizer, Matplotlib1DVisualizer
from pymor.operators.numpy import NumpyGenericOperator
from pymor.operators.fv import (nonlinear_advection_lax_friedrichs_operator,
nonlinear_advection_engquist_osher_operator,
nonlinear_advection_simplified_engquist_osher_operator,
L2Product, L2ProductFunctional)
from pymor.vectorarrays.numpy import NumpyVectorArray
def discretize_nonlinear_instationary_advection_fv(analytical_problem, diameter=None, nt=100, num_flux='lax_friedrichs',
lxf_lambda=1., eo_gausspoints=5, eo_intervals=1, num_values=None,
domain_discretizer=None, grid=None, boundary_info=None):
"""Discretizes an |InstationaryAdvectionProblem| using the finite volume method.
Explicit Euler time-stepping is used for time discretization.
Parameters
----------
analytical_problem
The |InstationaryAdvectionProblem| to discretize.
diameter
If not `None`, `diameter` is passed as an argument to the
`domain_discretizer`.
nt
The number of time steps.
num_flux
The numerical flux to use in the finite volume formulation. Allowed
values are `'lax_friedrichs'`, `'engquist_osher'`, `'simplified_engquist_osher'`
(see :mod:`pymor.operators.fv`).
lxf_lambda
The stabilization parameter for the Lax-Friedrichs numerical flux
(ignored, if different flux is chosen).
eo_gausspoints
Number of Gauss points for the Engquist-Osher numerical flux
(ignored, if different flux is chosen).
eo_intervals
Number of sub-intervals to use for integration when using Engquist-Osher
numerical flux (ignored, if different flux is chosen).
num_values
The number of returned vectors of the solution trajectory. If `None`, each
intermediate vector that is calculated is returned.
domain_discretizer
Discretizer to be used for discretizing the analytical domain. This has
to be a function `domain_discretizer(domain_description, diameter)`.
If `None`, |discretize_domain_default| is used.
grid
Instead of using a domain discretizer, the |Grid| can also be passed directly
using this parameter.
boundary_info
A |BoundaryInfo| specifying the boundary types of the grid boundary entities.
Must be provided if `grid` is specified.
Returns
-------
discretization
The |Discretization| that has been generated.
data
Dictionary with the following entries:
:grid: The generated |Grid|.
:boundary_info: The generated |BoundaryInfo|.
"""
assert isinstance(analytical_problem, InstationaryAdvectionProblem)
assert grid is None or boundary_info is not None
assert boundary_info is None or grid is not None
assert grid is None or domain_discretizer is None
assert num_flux in ('lax_friedrichs', 'engquist_osher', 'simplified_engquist_osher')
if grid is None:
domain_discretizer = domain_discretizer or discretize_domain_default
if diameter is None:
grid, boundary_info = domain_discretizer(analytical_problem.domain)
else:
grid, boundary_info = domain_discretizer(analytical_problem.domain, diameter=diameter)
p = analytical_problem
if num_flux == 'lax_friedrichs':
L = nonlinear_advection_lax_friedrichs_operator(grid, boundary_info, p.flux_function,
dirichlet_data=p.dirichlet_data, lxf_lambda=lxf_lambda)
elif num_flux == 'engquist_osher':
L = nonlinear_advection_engquist_osher_operator(grid, boundary_info, p.flux_function,
p.flux_function_derivative,
gausspoints=eo_gausspoints, intervals=eo_intervals,
dirichlet_data=p.dirichlet_data)
else:
L = nonlinear_advection_simplified_engquist_osher_operator(grid, boundary_info, p.flux_function,
p.flux_function_derivative,
dirichlet_data=p.dirichlet_data)
F = None if p.rhs is None else L2ProductFunctional(grid, p.rhs)
if p.initial_data.parametric:
def initial_projection(U, mu):
I = p.initial_data.evaluate(grid.quadrature_points(0, order=2), mu).squeeze()
I = np.sum(I * grid.reference_element.quadrature(order=2)[1], axis=1) * (1. / grid.reference_element.volume)
I = NumpyVectorArray(I, copy=False)
return I.lincomb(U).data
I = NumpyGenericOperator(initial_projection, dim_range=grid.size(0), linear=True,
parameter_type=p.initial_data.parameter_type)
else:
I = p.initial_data.evaluate(grid.quadrature_points(0, order=2)).squeeze()
I = np.sum(I * grid.reference_element.quadrature(order=2)[1], axis=1) * (1. / grid.reference_element.volume)
I = NumpyVectorArray(I, copy=False)
products = {'l2': L2Product(grid, boundary_info)}
if grid.dim == 2:
visualizer = PatchVisualizer(grid=grid, bounding_box=grid.bounding_box(), codim=0)
elif grid.dim == 1:
visualizer = Matplotlib1DVisualizer(grid, codim=0)
else:
visualizer = None
parameter_space = p.parameter_space if hasattr(p, 'parameter_space') else None
time_stepper = ExplicitEulerTimeStepper(nt=nt)
discretization = InstationaryDiscretization(operator=L, rhs=F, initial_data=I, T=p.T, products=products,
time_stepper=time_stepper,
parameter_space=parameter_space, visualizer=visualizer,
num_values=num_values, name='{}_FV'.format(p.name))
return discretization, {'grid': grid, 'boundary_info': boundary_info}
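A hedged usage sketch for the discretizer above. BurgersProblem is assumed to be an available InstationaryAdvectionProblem in this pyMOR version, and the parameter value passed to solve() is a placeholder.

from pymor.analyticalproblems.burgers import BurgersProblem
from pymor.discretizers.advection import discretize_nonlinear_instationary_advection_fv

problem = BurgersProblem()  # assumption: default-constructible in this version
d, data = discretize_nonlinear_instationary_advection_fv(
    problem, diameter=1. / 100., nt=100, num_flux='engquist_osher')
U = d.solve(0.5)   # 0.5: placeholder value for the problem's single parameter
d.visualize(U)     # uses the visualizer selected by the grid dimension above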
| 0
| 0
| 0
| 0
| 0
| 305
| 0
| 96
| 30
|
076f222cfcfc72e18413b42acd4e53e8930fdab1
| 2,403
|
py
|
Python
|
misc-code/adventure_items.py
|
cctechwiz-teaching/python-code-camp
|
1453bebe44d66f27558eb6204fbf4d5f08cc756e
|
[
"MIT"
] | 2
|
2019-06-22T17:13:16.000Z
|
2019-06-22T17:13:17.000Z
|
misc-code/adventure_items.py
|
cctechwiz-teaching/python-code-camp
|
1453bebe44d66f27558eb6204fbf4d5f08cc756e
|
[
"MIT"
] | null | null | null |
misc-code/adventure_items.py
|
cctechwiz-teaching/python-code-camp
|
1453bebe44d66f27558eb6204fbf4d5f08cc756e
|
[
"MIT"
] | null | null | null |
"""
object_adventure.py
A text adventure with objects you can pick up and put down.
"""
# data setup
rooms = {
'empty': {'name': 'an empty room',
'east': 'bedroom', 'north': 'temple',
'contents': [],
'text': 'The stone floors and walls are cold and damp.'},
'temple': {'name': 'a small temple',
'east': 'torture', 'south': 'empty',
'contents': ['bench', 'bench', 'bench', 'statue'],
'text': 'This seems to be a place of worship and deep contemplation.'},
'torture': {'name': 'a torture chamber',
'west': 'temple', 'south': 'bedroom',
'contents': ['chains', 'thumbscrews'],
'text': 'There is a rack and an iron maiden against the wall\nand some dark stains on the floor.'},
'bedroom': {'name': 'a bedroom',
'north': 'torture', 'west': 'empty',
'contents': ['sheets', 'bed'],
'text': 'This is clearly a bedroom, but no one has slept\nhere in a long time.'}
}
directions = ['north', 'south', 'east', 'west']
current_room = rooms['empty']
carrying = []
# game loop
while True:
# display current location
print()
print('You are in {}.'.format(current_room['name']))
print(current_room['text'])
# display movable objects
if current_room['contents']:
print('In the room are: {}'.format(', '.join(current_room['contents'])))
# get user input
command = input('\nWhat do you do? ').strip()
# movement
if command in directions:
if command in current_room:
current_room = rooms[current_room[command]]
else:
# bad movement
print("You can't go that way.")
# quit game
elif command.lower() in ('q', 'quit'):
break
# gather objects
elif command.lower().split()[0] == 'get':
item = command.lower().split()[1]
if item in current_room['contents']:
current_room['contents'].remove(item)
carrying.append(item)
else:
print("I don't see that here.")
# get rid of objects
elif command.lower().split()[0] == 'drop':
item = command.lower().split()[1]
if item in carrying:
current_room['contents'].append(item)
carrying.remove(item)
else:
print("You aren't carrying that.")
# bad command
else:
print("I don't understand that command.")
| 33.375
| 108
| 0.56804
|
"""
object_adventure.py
A text adventure with objects you can pick up and put down.
"""
# data setup
rooms = {
'empty': {'name': 'an empty room',
'east': 'bedroom', 'north': 'temple',
'contents': [],
'text': 'The stone floors and walls are cold and damp.'},
'temple': {'name': 'a small temple',
'east': 'torture', 'south': 'empty',
'contents': ['bench', 'bench', 'bench', 'statue'],
'text': 'This seems to be a place of worship and deep contemplation.'},
'torture': {'name': 'a torture chamber',
'west': 'temple', 'south': 'bedroom',
'contents': ['chains', 'thumbscrews'],
'text': 'There is a rack and an iron maiden against the wall\nand some dark stains on the floor.'},
'bedroom': {'name': 'a bedroom',
'north': 'torture', 'west': 'empty',
'contents': ['sheets', 'bed'],
'text': 'This is clearly a bedroom, but no one has slept\nhere in a long time.'}
}
directions = ['north', 'south', 'east', 'west']
current_room = rooms['empty']
carrying = []
# game loop
while True:
# display current location
print()
print('You are in {}.'.format(current_room['name']))
print(current_room['text'])
# display movable objects
if current_room['contents']:
print('In the room are: {}'.format(', '.join(current_room['contents'])))
# get user input
command = input('\nWhat do you do? ').strip()
# movement
if command in directions:
if command in current_room:
current_room = rooms[current_room[command]]
else:
# bad movement
print("You can't go that way.")
# quit game
elif command.lower() in ('q', 'quit'):
break
# gather objects
elif command.lower().split()[0] == 'get':
item = command.lower().split()[1]
if item in current_room['contents']:
current_room['contents'].remove(item)
carrying.append(item)
else:
print("I don't see that here.")
# get rid of objects
elif command.lower().split()[0] == 'drop':
item = command.lower().split()[1]
if item in carrying:
current_room['contents'].append(item)
carrying.remove(item)
else:
print("You aren't carrying that.")
# bad command
else:
print("I don't understand that command.")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4b9d26720cda64f817643aa05a92dd1452685e67
| 17,109
|
py
|
Python
|
python/graphscope/nx/classes/cache.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | 1
|
2021-12-30T02:55:16.000Z
|
2021-12-30T02:55:16.000Z
|
python/graphscope/nx/classes/cache.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | null | null | null |
python/graphscope/nx/classes/cache.py
|
lnfjpt/GraphScope
|
917146f86d8387302a2e1de6963115e7568bf3ee
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import msgpack
import simdjson
from graphscope.framework import dag_utils
from graphscope.proto import graph_def_pb2
from graphscope.proto import types_pb2
__all__ = ["Cache"]
def get_neighbors(graph, n, pred=False):
"""Get the neighbors of node in graph.
Parameters
----------
graph:
the graph to query.
n: node
the node to get neighbors.
report_type:
the report type of report graph operation,
types_pb2.SUCCS_BY_NODE: get the successors of node,
types_pb2.PREDS_BY_NODE: get the predecessors of node,
"""
if graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
n = graph._convert_to_label_id_tuple(n)
report_t = types_pb2.PREDS_BY_NODE if pred else types_pb2.SUCCS_BY_NODE
op = dag_utils.report_graph(graph, report_t, node=simdjson.dumps(n).encode("utf-8"))
archive = op.eval()
return msgpack.unpackb(archive.get_bytes(), use_list=False)
def get_neighbors_attr(graph, n, pred=False):
"""Get the neighbors attr of node in graph.
Parameters
----------
graph:
the graph to query.
n: node
the node to get neighbors.
report_type:
the report type of report graph operation,
types_pb2.SUCC_ATTR_BY_NODE: get the successors attr of node,
types_pb2.PRED_ATTR_BY_NODE: get the predecessors attr of node,
Returns
-------
attr: tuple
"""
if graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
n = graph._convert_to_label_id_tuple(n)
report_t = types_pb2.PRED_ATTR_BY_NODE if pred else types_pb2.SUCC_ATTR_BY_NODE
op = dag_utils.report_graph(graph, report_t, node=simdjson.dumps(n).encode("utf-8"))
archive = op.eval()
return simdjson.loads(archive.get_bytes())
def get_node_data(graph, n):
"""Returns the attribute dictionary of node n.
This is identical to `G[n]`.
Parameters
----------
n : nodes
Returns
-------
node_dict : dictionary
The node attribute dictionary.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph etc
>>> G[0]
{}
Warning: Assigning to `G[n]` is not permitted.
But it is safe to assign attributes `G[n]['foo']`
>>> G[0]['weight'] = 7
>>> G[0]['weight']
7
>>> G = nx.path_graph(4) # or DiGraph etc
>>> G.get_node_data(0, 1)
{}
"""
if graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
n = graph._convert_to_label_id_tuple(n)
op = dag_utils.report_graph(
graph, types_pb2.NODE_DATA, node=simdjson.dumps(n).encode("utf-8")
)
archive = op.eval()
return msgpack.loads(archive.get_bytes(), use_list=False)
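All three helpers above share one pattern: evaluate a report op, then msgpack-decode archive.get_bytes(). A standalone round-trip of just the decoding step (with a made-up payload) looks like this:

import io
import msgpack

payload = msgpack.packb((1, 2, 3))        # stand-in for archive.get_bytes()
fp = io.BytesIO(payload)
print(msgpack.load(fp, use_list=False))   # (1, 2, 3); use_list=False keeps tuples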
| 35.867925
| 88
| 0.614004
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import concurrent.futures
import io
from functools import lru_cache
import msgpack
import simdjson
from graphscope.framework import dag_utils
from graphscope.nx.utils.misc import clear_mutation_cache
from graphscope.proto import graph_def_pb2
from graphscope.proto import types_pb2
__all__ = ["Cache"]
class Cache:
"""A adhoc cache for graphscope.nx Graph.
The Cache is consists of two kind of cache: the iteration batch cache for
__iter__ and the LRU cache for cache miss.
"""
def __init__(self, graph):
self._graph = graph
# the iteration caches for graph data
self.node_id_cache = ()
self.node_attr_cache = ()
self.succ_cache = ()
self.succ_attr_cache = ()
self.pred_cache = ()
self.pred_attr_cache = ()
# status for iteration batch cache
self._len = 0
self.id2i = {}
self.enable_iter_cache = False
self.iter_gid = 0
self.iter_pre_gid = 0
self.node_attr_align = False
self.succ_align = False
self.succ_attr_align = False
self.pred_align = False
self.pred_attr_align = False
# thread pool and promises for iteration batch cache fetch
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
self.futures = {
"node_id": None,
"node_attr": None,
"succ": None,
"succ_attr": None,
"pred": None,
"pred_attr": None,
}
def warmup(self):
"""Warm up the iteration cache."""
self._len = self._graph.number_of_nodes()
if self._len > 1000:
# avoid much small graphs to compete thread resource
self.enable_iter_cache = True
self._async_fetch_node_id_cache(0)
self._async_fetch_succ_cache(0)
self._async_fetch_node_attr_cache(0)
self._async_fetch_succ_attr_cache(0)
# LRU Caches
@lru_cache(1000000)
def get_node_attr(self, n):
return get_node_data(self._graph, n)
@lru_cache(1000000)
def get_successors(self, n):
return get_neighbors(self._graph, n)
@lru_cache(1000000)
def get_succ_attr(self, n):
return get_neighbors_attr(self._graph, n)
@lru_cache(1000000)
def get_predecessors(self, n):
return get_neighbors(self._graph, n, pred=True)
@lru_cache(1000000)
def get_pred_attr(self, n):
return get_neighbors_attr(self._graph, n, pred=True)
def align_node_attr_cache(self):
"""Check and align the node attr cache with node id cache"""
if self.enable_iter_cache and self.node_attr_align is False:
f = self.futures["node_attr"]
if f is not None:
start_gid, self.node_attr_cache = f.result()
if start_gid == self.iter_pre_gid:
# align to current node_id_cache
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_node_attr_cache(self.iter_gid)
self.node_attr_align = True
else:
# not align to current node_id_cache, should fetch again
self._async_fetch_node_attr_cache(self.iter_pre_gid)
return self.node_attr_align
def align_succ_cache(self):
"""Check and align the succ neighbor cache with node id cache"""
if self.enable_iter_cache and self.succ_align is False:
f = self.futures["succ"]
start_gid, self.succ_cache = f.result()
if start_gid == self.iter_pre_gid:
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_succ_cache(self.iter_gid)
self.succ_align = True
else:
self._async_fetch_succ_cache(self.iter_pre_gid)
return self.succ_align
def align_succ_attr_cache(self):
"""Check and align the succ neighbor attr cache with node id cache"""
if self.enable_iter_cache and self.succ_attr_align is False:
f = self.futures["succ_attr"]
if f is not None:
start_gid, self.succ_attr_cache = f.result()
if start_gid == self.iter_pre_gid:
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_succ_attr_cache(self.iter_gid)
self.succ_attr_align = True
else:
self._async_fetch_succ_attr_cache(self.iter_pre_gid)
return self.succ_attr_align
def align_pred_cache(self):
"""Check and align the pred neighbor cache with node id cache"""
if self.enable_iter_cache and self.pred_align is False:
if self.futures["pred"] is None:
self._async_fetch_pred_cache(self.iter_pre_gid)
f = self.futures["pred"]
start_gid, self.pred_cache = f.result()
if start_gid == self.iter_pre_gid:
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_pred_cache(self.iter_gid)
self.pred_align = True
else:
print("pred not align", start_gid, self.iter_pre_gid)
self._async_fetch_pred_cache(self.iter_pre_gid)
return self.pred_align
def align_pred_attr_cache(self):
"""Check and align the pred neighbor attr cache with node id cache"""
if self.enable_iter_cache and self.pred_attr_align is False:
if self.futures["pred_attr"] is None:
self._async_fetch_pred_attr_cache(self.iter_pre_gid)
f = self.futures["pred_attr"]
start_gid, self.pred_attr_cache = f.result()
if start_gid == self.iter_pre_gid:
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_pred_attr_cache(self.iter_gid)
self.pred_attr_align = True
else:
self._async_fetch_pred_attr_cache(self.iter_pre_gid)
return self.pred_attr_align
def align_neighbor_cache(self, pred=False):
return self.align_pred_cache() if pred else self.align_succ_cache()
def align_neighbor_attr_cache(self, pred=True):
return self.align_pred_attr_cache() if pred else self.align_succ_attr_cache()
@clear_mutation_cache
def __contains__(self, key):
if self.enable_iter_cache:
if len(self.node_id_cache) == 0 and self.futures["node_id"] is not None:
self.iter_pre_gid = self.iter_gid
self.iter_gid, node_size, self.node_id_cache = self.futures[
"node_id"
].result()
self.futures["node_id"] = None
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_node_id_cache(self.iter_gid)
if not self.id2i and self.node_id_cache:
# initialize the id to index hash map
self.id2i = {k: v for v, k in enumerate(self.node_id_cache)}
return key in self.id2i
@clear_mutation_cache
def __len__(self):
return self._len
@clear_mutation_cache
def __iter__(self):
iter_n = 0
while True:
if iter_n >= self._len:
break
if iter_n == 0 and len(self.node_id_cache) > 0:
iter_n += len(self.node_id_cache)
else:
self.iter_pre_gid = self.iter_gid
if self.enable_iter_cache:
self.iter_gid, node_size, self.node_id_cache = self.futures[
"node_id"
].result()
if self.iter_gid != self.iter_pre_gid:
self._async_fetch_node_id_cache(self.iter_gid)
else:
(
self.iter_gid,
node_size,
self.node_id_cache,
) = self._get_node_id_cache(self.iter_gid)
iter_n += node_size
self.id2i.clear()
self.node_attr_align = False
self.succ_align = False
self.succ_attr_align = False
self.pred_align = False
self.pred_attr_align = False
yield from self.node_id_cache
def shutdown(self):
for _, future in self.futures.items():
if future is not None:
future.cancel()
for _, future in self.futures.items():
if future is not None:
try:
future.result()
except concurrent.futures.CancelledError:
pass
future = None
def clear(self):
"""Clear batch cache and lru cache, reset the status and warmup again"""
if self.enable_iter_cache:
self.shutdown()
self.enable_iter_cache = False
self.iter_gid = 0
self.iter_pre_gid = 0
self.id2i.clear()
self.node_id_cache = ()
self.node_attr_cache = ()
self.succ_cache = ()
self.succ_attr_cache = ()
self.pred_cache = ()
self.pred_attr_cache = ()
self.node_attr_align = (
self.succ_align
) = self.succ_attr_align = self.pred_align = self.pred_attr_align = False
self.get_node_attr.cache_clear()
self.get_successors.cache_clear()
self.get_succ_attr.cache_clear()
self.get_predecessors.cache_clear()
self.get_pred_attr.cache_clear()
self.warmup()
def clear_node_attr_cache(self):
"""Clear the node attr cache"""
if self.futures["node_attr"] is not None:
self.futures["node_attr"].cancel()
if self.futures["node_attr"] is not None:
try:
self.futures["node_attr"].result()
except concurrent.futures.CancelledError:
pass
self.futures["node_attr"] = None
self.node_attr_cache = ()
self.get_node_attr.cache_clear()
self.node_attr_align = False
def clear_neighbor_attr_cache(self):
"""Clear the neighbor attr cache"""
if self.futures["succ_attr"] is not None:
self.futures["succ_attr"].cancel()
if self.futures["pred_attr"] is not None:
self.futures["pred_attr"].cancel()
if self.futures["succ_attr"] is not None:
try:
self.futures["succ_attr"].result()
except concurrent.futures.CancelledError:
pass
if self.futures["pred_attr"] is not None:
try:
self.futures["pred_attr"].result()
except concurrent.futures.CancelledError:
pass
self.futures["succ_attr"] = None
self.futures["pred_attr"] = None
self.succ_attr_cache = ()
self.pred_attr_cache = ()
self.get_succ_attr.cache_clear()
self.get_pred_attr.cache_clear()
self.succ_attr_align = False
self.pred_attr_align = False
def _async_fetch_node_id_cache(self, gid):
self.futures["node_id"] = self.executor.submit(self._get_node_id_cache, gid)
def _async_fetch_node_attr_cache(self, gid):
self.futures["node_attr"] = self.executor.submit(self._get_node_attr_cache, gid)
def _async_fetch_succ_cache(self, gid):
self.futures["succ"] = self.executor.submit(self._get_succ_cache, gid)
def _async_fetch_pred_cache(self, gid):
self.futures["pred"] = self.executor.submit(self._get_pred_cache, gid)
def _async_fetch_succ_attr_cache(self, gid):
self.futures["succ_attr"] = self.executor.submit(self._get_succ_attr_cache, gid)
def _async_fetch_pred_attr_cache(self, gid):
self.futures["pred_attr"] = self.executor.submit(self._get_pred_attr_cache, gid)
def _get_node_id_cache(self, gid):
op = dag_utils.report_graph(
self._graph, types_pb2.NODE_ID_CACHE_BY_GID, gid=gid
)
archive = op.eval()
gid = archive.get_uint64()
node_size = archive.get_uint32()
fp = io.BytesIO(archive.get_bytes())
node_array = msgpack.load(fp, use_list=False)
return gid, node_size, node_array
def _get_node_attr_cache(self, gid):
op = dag_utils.report_graph(
self._graph, types_pb2.NODE_ATTR_CACHE_BY_GID, gid=gid
)
archive = op.eval()
gid = archive.get_uint64()
fp = io.BytesIO(archive.get_bytes())
node_attr_cache = msgpack.load(fp, use_list=False)
return gid, node_attr_cache
def _get_succ_cache(self, gid):
op = dag_utils.report_graph(self._graph, types_pb2.SUCC_BY_GID, gid=gid)
archive = op.eval()
gid = archive.get_uint64()
fp = io.BytesIO(archive.get_bytes())
succ_cache = msgpack.load(fp, use_list=False)
return gid, succ_cache
def _get_pred_cache(self, gid):
op = dag_utils.report_graph(self._graph, types_pb2.PRED_BY_GID, gid=gid)
archive = op.eval()
gid = archive.get_uint64()
fp = io.BytesIO(archive.get_bytes())
pred_cache = msgpack.load(fp, use_list=False)
return gid, pred_cache
def _get_succ_attr_cache(self, gid):
op = dag_utils.report_graph(self._graph, types_pb2.SUCC_ATTR_BY_GID, gid=gid)
archive = op.eval()
gid = archive.get_uint64()
fp = io.BytesIO(archive.get_bytes())
succ_attr_cache = msgpack.load(fp, use_list=False)
return gid, succ_attr_cache
def _get_pred_attr_cache(self, gid):
op = dag_utils.report_graph(self._graph, types_pb2.PRED_ATTR_BY_GID, gid=gid)
archive = op.eval()
gid = archive.get_uint64()
fp = io.BytesIO(archive.get_bytes())
pred_attr_cache = msgpack.load(fp, use_list=False)
return gid, pred_attr_cache
def get_neighbors(graph, n, pred=False):
"""Get the neighbors of node in graph.
Parameters
----------
graph:
the graph to query.
    n: node
        the node whose neighbors are returned.
    pred: bool
        if True, return the predecessors of the node
        (types_pb2.PREDS_BY_NODE); otherwise return the successors
        (types_pb2.SUCCS_BY_NODE).
    """
if graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
n = graph._convert_to_label_id_tuple(n)
report_t = types_pb2.PREDS_BY_NODE if pred else types_pb2.SUCCS_BY_NODE
op = dag_utils.report_graph(graph, report_t, node=simdjson.dumps(n).encode("utf-8"))
archive = op.eval()
return msgpack.unpackb(archive.get_bytes(), use_list=False)
def get_neighbors_attr(graph, n, pred=False):
"""Get the neighbors attr of node in graph.
Parameters
----------
graph:
the graph to query.
    n: node
        the node whose neighbor attributes are returned.
    pred: bool
        if True, return the attributes of the predecessors
        (types_pb2.PRED_ATTR_BY_NODE); otherwise return the attributes
        of the successors (types_pb2.SUCC_ATTR_BY_NODE).
Returns
-------
attr: tuple
"""
if graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
n = graph._convert_to_label_id_tuple(n)
report_t = types_pb2.PRED_ATTR_BY_NODE if pred else types_pb2.SUCC_ATTR_BY_NODE
op = dag_utils.report_graph(graph, report_t, node=simdjson.dumps(n).encode("utf-8"))
archive = op.eval()
return simdjson.loads(archive.get_bytes())
def get_node_data(graph, n):
"""Returns the attribute dictionary of node n.
This is identical to `G[n]`.
Parameters
----------
    n : node
Returns
-------
node_dict : dictionary
The node attribute dictionary.
Examples
--------
>>> G = nx.path_graph(4) # or DiGraph etc
>>> G[0]
{}
Warning: Assigning to `G[n]` is not permitted.
But it is safe to assign attributes `G[n]['foo']`
>>> G[0]['weight'] = 7
>>> G[0]['weight']
7
>>> G = nx.path_graph(4) # or DiGraph etc
    >>> G.get_node_data(0)
{}
"""
if graph.graph_type == graph_def_pb2.ARROW_PROPERTY:
n = graph._convert_to_label_id_tuple(n)
op = dag_utils.report_graph(
graph, types_pb2.NODE_DATA, node=simdjson.dumps(n).encode("utf-8")
)
archive = op.eval()
return msgpack.loads(archive.get_bytes(), use_list=False)
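# Hypothetical usage sketch (annotation, not part of the original module),
# assuming a loaded graphscope networkx-compatible graph `G` containing a
# node 0:
#
#   succs = get_neighbors(G, 0)             # tuple of successor node ids
#   preds = get_neighbors(G, 0, pred=True)  # tuple of predecessor node ids
#   attrs = get_node_data(G, 0)             # attribute dict of node 0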
| 0
| 2,352
| 0
| 11,255
| 0
| 0
| 0
| 38
| 112
|
e96b8708dc8be78814c697d042595105e2d873c2
| 80
|
py
|
Python
|
Getting_Started_With_Raspberry_Pi_Pico/variable/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
Getting_Started_With_Raspberry_Pi_Pico/variable/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
Getting_Started_With_Raspberry_Pi_Pico/variable/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
"""Example of assigning a variable."""
user_name = input("What is your name? ")
| 26.666667
| 40
| 0.6875
|
"""Example of assigning a variable."""
user_name = input("What is your name? ")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ba26ee36cc7ff86ae625d2c3ea20dd09a7c5df07
| 10,605
|
py
|
Python
|
generator/verify/verification.py
|
biarmic/OpenCache
|
bb9e110e434deb83900de328cc76b63901ba582f
|
[
"BSD-3-Clause"
] | 5
|
2021-09-15T18:29:49.000Z
|
2022-03-26T04:41:01.000Z
|
generator/verify/verification.py
|
VLSIDA/OpenCache
|
0e79bf353c68d57dcc49d78178b12fd0b468f19a
|
[
"BSD-3-Clause"
] | null | null | null |
generator/verify/verification.py
|
VLSIDA/OpenCache
|
0e79bf353c68d57dcc49d78178b12fd0b468f19a
|
[
"BSD-3-Clause"
] | null | null | null |
# See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
| 37.341549
| 107
| 0.587553
|
# See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import os
import datetime
from shutil import copyfile
from subprocess import call, DEVNULL, STDOUT
from re import findall
from .core import core
from .test_bench import test_bench
from .test_data import test_data
from .sim_cache import sim_cache
import debug
from globals import OPTS, print_time
class verification:
"""
Class to generate files for verification and verify the design by running
EDA tools.
"""
def __init__(self, cache_config, name):
cache_config.set_local_config(self)
self.name = name
self.core = core()
if OPTS.simulate:
self.tb = test_bench(cache_config, name)
self.sim_cache = sim_cache(cache_config)
self.data = test_data(self.sim_cache, cache_config)
# Print subprocess outputs on the terminal if verbose debug is enabled
self.stdout = None if OPTS.verbose_level >= 2 else DEVNULL
self.stderr = None if OPTS.verbose_level >= 2 else STDOUT
def verify(self):
""" Run the verifier. """
debug.print_raw("Initializing verification...")
self.prepare_files()
if OPTS.simulate:
self.simulate()
if OPTS.synthesize:
self.synthesize()
debug.print_raw("Verification completed.")
def simulate(self):
"""
Save required files and simulate the design by running an EDA tool's
simulator.
"""
debug.info(1, "Initializing simulation...")
debug.info(1, "Writing simulation files...")
start_time = datetime.datetime.now()
# Write the DRAM file
dram_path = OPTS.temp_path + "dram.v"
debug.info(1, "Verilog (DRAM): Writing to {}".format(dram_path))
self.sim_cache.dram.sim_dram_write(dram_path)
# Write the test bench file
tb_path = OPTS.temp_path + "test_bench.v"
debug.info(1, "Verilog (Test bench): Writing to {}".format(tb_path))
self.tb.test_bench_write(tb_path)
# Write the test data file
data_path = OPTS.temp_path + "test_data.v"
debug.info(1, "Verilog (Test data): Writing to {}".format(data_path))
self.data.generate_data(OPTS.sim_size)
self.data.test_data_write(data_path)
# Run FuseSoc for simulation
debug.info(1, "Running FuseSoC for simulation...")
self.run_fusesoc(self.name, self.core.core_name, OPTS.temp_path, True)
# Check the result of the simulation
self.check_sim_result(OPTS.temp_path, "icarus.log")
print_time("Simulation", datetime.datetime.now(), start_time)
def synthesize(self):
"""
Save required files and synthesize the design by running an EDA tool's
synthesizer.
"""
debug.info(1, "Initializing synthesis...")
start_time = datetime.datetime.now()
# Convert SRAM modules to blackbox
debug.info(1, "Converting OpenRAM modules to blackbox...")
        self.convert_to_blackbox(OPTS.temp_path + OPTS.tag_array_name + ".v")
        self.convert_to_blackbox(OPTS.temp_path + OPTS.data_array_name + ".v")
        if OPTS.replacement_policy.has_sram_array():
            self.convert_to_blackbox(OPTS.temp_path + OPTS.use_array_name + ".v")
# Run FuseSoc for synthesis
debug.info(1, "Running FuseSoC for synthesis...")
self.run_fusesoc(self.name, self.core.core_name, OPTS.temp_path, False)
# Check the result of the synthesis
self.check_synth_result(OPTS.temp_path, "yosys.log")
print_time("Synthesis", datetime.datetime.now(), start_time)
def prepare_files(self):
""" Prepare common files among simulation and synthesis. """
# Write the CORE file
core_path = OPTS.temp_path + "verify.core"
debug.info(1, "CORE: Writing to {}".format(core_path))
self.core.core_write(core_path)
# Copy the generated cache Verilog file
cache_path = OPTS.temp_path + self.name + ".v"
debug.info(1, "Copying the cache design file to the temp subfolder")
copyfile(OPTS.output_path + self.name + ".v", cache_path)
if OPTS.run_openram:
# Copy the configuration files
debug.info(1, "Copying the config files to the temp subfolder")
self.copy_config_file(OPTS.data_array_name + "_config.py", OPTS.temp_path)
self.copy_config_file(OPTS.tag_array_name + "_config.py", OPTS.temp_path)
# Random replacement policy doesn't need a separate SRAM array
if OPTS.replacement_policy.has_sram_array():
self.copy_config_file(OPTS.use_array_name + "_config.py", OPTS.temp_path)
# Run OpenRAM to generate Verilog files of SRAMs
debug.info(1, "Running OpenRAM for the data array...")
self.run_openram("{}_config.py".format(OPTS.temp_path + OPTS.data_array_name))
debug.info(1, "Running OpenRAM for the tag array...")
self.run_openram("{}_config.py".format(OPTS.temp_path + OPTS.tag_array_name))
# Random replacement policy doesn't need a separate SRAM array
if OPTS.replacement_policy.has_sram_array():
debug.info(1, "Running OpenRAM for the use array...")
self.run_openram("{}_config.py".format(OPTS.temp_path + OPTS.use_array_name))
else:
debug.info(1, "Skipping to run OpenRAM")
def run_openram(self, config_path):
""" Run OpenRAM to generate Verilog modules. """
openram_command = "python3 $OPENRAM_HOME/openram.py"
if call("{0} {1}".format(openram_command, config_path),
cwd=OPTS.temp_path,
shell=True,
stdout=self.stdout,
stderr=self.stderr) != 0:
debug.error("OpenRAM failed!", -1)
if not OPTS.keep_openram_files:
for file in os.listdir(OPTS.temp_path):
file_path = OPTS.temp_path + file
if not os.path.isdir(file_path) and all([x not in file for x in [".v", ".py", ".core"]]):
os.remove(file_path)
def run_fusesoc(self, library_name, core_name, path, is_sim):
""" Run FuseSoC for simulation or synthesis. """
fusesoc_library_command = "fusesoc library add {0} {1}".format(library_name,
path)
fusesoc_run_command = "fusesoc run --target={0} --no-export {1}".format("sim" if is_sim else "syn",
core_name)
debug.info(1, "Adding {} core as library...".format("simulation" if is_sim else "synthesis"))
debug.info(1, "Running the {}...".format("simulation" if is_sim else "synthesis"))
# Add the CORE file as a library
if call(fusesoc_library_command,
cwd=path,
shell=True,
stdout=self.stdout,
stderr=self.stderr) != 0:
debug.error("FuseSoC failed to add library!", -1)
# Run the library for simulation or synthesis
if call(fusesoc_run_command,
cwd=path,
shell=True,
stdout=self.stdout,
stderr=self.stderr) != 0:
debug.error("FuseSoC failed to run!", -1)
# Delete the temporary CONF file.
        # If this file is left behind, it can cause subsequent synthesis runs
        # to fail.
os.remove(path + "fusesoc.conf")
def copy_config_file(self, file_name, dest):
""" Copy and modify the config file for simulation and synthesis. """
new_file = open(dest + file_name, "w")
with open(OPTS.output_path + file_name) as f:
for line in f:
if line.startswith("output_path"):
new_file.write("output_path = \"{}\"\n".format(dest))
else:
new_file.write(line)
# Verification needs only the Verilog files.
# This option will decrease OpenRAM's runtime (hopefully).
new_file.write("netlist_only = True\n")
new_file.close()
    def convert_to_blackbox(self, file_path):
""" Convert the given Verilog module file to blackbox. """
keep = []
# Save blackbox file as "filename_bb.v"
bb_file_path = file_path[:-2] + "_bb.v"
with open(file_path, "r") as f:
delete = False
for line in f:
if line.lstrip().startswith("reg"):
delete = True
if not delete:
keep.append(line)
keep.append("endmodule\n")
f = open(bb_file_path, "w")
f.writelines(keep)
f.close()
def check_synth_result(self, path, file_name):
""" Read the log file of the simulation. """
error_prefix = "found and reported"
# Check the error count lines
with open("{0}build/{1}/syn-yosys/{2}".format(path,
self.core.core_name.replace(":", "_"),
file_name)) as f:
for line in f:
# TODO: How to check whether the synthesis was successful?
# Check if error count is nonzero
if line.find(error_prefix) != -1 and int(findall(r"\d+", line)[0]) != 0:
debug.error("Synthesis failed!", -1)
# Check if there is an "ERROR"
if line.find("ERROR") != -1:
debug.error("Synthesis failed!", -1)
debug.info(1, "Synthesis successful.")
def check_sim_result(self, path, file_name):
""" Read the log file of the simulation. """
# Result of the simulation is supposed to be at the end of the log file
with open("{0}build/{1}/sim-icarus/{2}".format(path,
self.core.core_name.replace(":", "_"),
file_name)) as f:
for line in f:
pass
if line.rstrip() == self.tb.success_message:
debug.info(1, "Simulation successful.")
else:
debug.error("Simulation failed!", -1)
| 0
| 0
| 0
| 10,019
| 0
| 0
| 0
| 54
| 265
|
93162a2be83d4a32945d947bbd5f1a2645032e31
| 9,075
|
py
|
Python
|
pyfr/readers/gmsh.py
|
synthetik-technologies/PyFR
|
9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda
|
[
"BSD-3-Clause"
] | 1
|
2020-06-23T16:37:06.000Z
|
2020-06-23T16:37:06.000Z
|
pyfr/readers/gmsh.py
|
synthetik-technologies/PyFR
|
9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda
|
[
"BSD-3-Clause"
] | null | null | null |
pyfr/readers/gmsh.py
|
synthetik-technologies/PyFR
|
9d4d5e96a8a9d5ca47970ec197b251ae8b0ecdda
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
| 33.241758
| 78
| 0.523747
|
# -*- coding: utf-8 -*-
from collections import defaultdict
import re
import numpy as np
from pyfr.readers import BaseReader, NodalMeshAssembler
from pyfr.readers.nodemaps import GmshNodeMaps
def msh_section(mshit, section):
endln = '$End{}\n'.format(section)
endix = int(next(mshit)) - 1
for i, l in enumerate(mshit):
if l == endln:
raise ValueError('Unexpected end of section $' + section)
yield l.strip()
if i == endix:
break
else:
raise ValueError('Unexpected EOF')
if next(mshit) != endln:
raise ValueError('Expected $End' + section)
class GmshReader(BaseReader):
# Supported file types and extensions
name = 'gmsh'
extn = ['.msh']
# Gmsh element types to PyFR type (petype) and node counts
_etype_map = {
1: ('line', 2), 8: ('line', 3), 26: ('line', 4), 27: ('line', 5),
2: ('tri', 3), 9: ('tri', 6), 21: ('tri', 10), 23: ('tri', 15),
3: ('quad', 4), 10: ('quad', 9), 36: ('quad', 16), 37: ('quad', 25),
4: ('tet', 4), 11: ('tet', 10), 29: ('tet', 20), 30: ('tet', 35),
5: ('hex', 8), 12: ('hex', 27), 92: ('hex', 64), 93: ('hex', 125),
6: ('pri', 6), 13: ('pri', 18), 90: ('pri', 40), 91: ('pri', 75),
7: ('pyr', 5), 14: ('pyr', 14), 118: ('pyr', 30), 119: ('pyr', 55)
}
# First-order node numbers associated with each element face
_petype_fnmap = {
'tri': {'line': [[0, 1], [1, 2], [2, 0]]},
'quad': {'line': [[0, 1], [1, 2], [2, 3], [3, 0]]},
'tet': {'tri': [[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]]},
'hex': {'quad': [[0, 1, 2, 3], [0, 1, 4, 5], [1, 2, 5, 6],
[2, 3, 6, 7], [0, 3, 4, 7], [4, 5, 6, 7]]},
'pri': {'quad': [[0, 1, 3, 4], [1, 2, 4, 5], [0, 2, 3, 5]],
'tri': [[0, 1, 2], [3, 4, 5]]},
'pyr': {'quad': [[0, 1, 2, 3]],
'tri': [[0, 1, 4], [1, 2, 4], [2, 3, 4], [0, 3, 4]]}
}
# Mappings between the node ordering of PyFR and that of Gmsh
_nodemaps = GmshNodeMaps
def __init__(self, msh):
if isinstance(msh, str):
msh = open(msh)
# Get an iterator over the lines of the mesh
mshit = iter(msh)
# Section readers
sect_map = {
'MeshFormat': self._read_mesh_format,
'PhysicalNames': self._read_phys_names,
'Entities': self._read_entities,
'Nodes': self._read_nodes,
'Elements': self._read_eles
}
for l in filter(lambda l: l != '\n', mshit):
# Ensure we have encountered a section
if not l.startswith('$'):
raise ValueError('Expected a mesh section')
# Strip the '$' and '\n' to get the section name
sect = l[1:-1]
# Try to read the section
try:
sect_map[sect](mshit)
# Else skip over it
except KeyError:
endsect = '$End{0}\n'.format(sect)
for el in mshit:
if el == endsect:
break
else:
raise ValueError('Expected $End' + sect)
def _read_mesh_format(self, mshit):
ver, ftype, dsize = next(mshit).split()
if ver == '2.2':
self._read_nodes_impl = self._read_nodes_impl_v2
self._read_eles_impl = self._read_eles_impl_v2
elif ver == '4':
self._read_nodes_impl = self._read_nodes_impl_v4
self._read_eles_impl = self._read_eles_impl_v4
else:
raise ValueError('Invalid mesh version')
if ftype != '0':
raise ValueError('Invalid file type')
if dsize != '8':
raise ValueError('Invalid data size')
if next(mshit) != '$EndMeshFormat\n':
raise ValueError('Expected $EndMeshFormat')
def _read_phys_names(self, mshit):
# Physical entities can be divided up into:
# - fluid elements ('the mesh')
# - boundary faces
# - periodic faces
self._felespent = None
self._bfacespents = {}
self._pfacespents = defaultdict(list)
# Seen physical names
seen = set()
# Extract the physical names
for l in msh_section(mshit, 'PhysicalNames'):
m = re.match(r'(\d+) (\d+) "((?:[^"\\]|\\.)*)"$', l)
if not m:
raise ValueError('Malformed physical entity')
pent, name = int(m.group(2)), m.group(3).lower()
# Ensure we have not seen this name before
if name in seen:
raise ValueError('Duplicate physical name: {}'.format(name))
# Fluid elements
if name == 'fluid':
self._felespent = pent
# Periodic boundary faces
elif name.startswith('periodic'):
p = re.match(r'periodic[ _-]([a-z0-9]+)[ _-](l|r)$', name)
if not p:
raise ValueError('Invalid periodic boundary condition')
self._pfacespents[p.group(1)].append(pent)
# Other boundary faces
else:
self._bfacespents[name] = pent
seen.add(name)
if self._felespent is None:
raise ValueError('No fluid elements in mesh')
if any(len(pf) != 2 for pf in self._pfacespents.values()):
raise ValueError('Unpaired periodic boundary in mesh')
def _read_entities(self, mshit):
self._tagpents = tagpents = {}
# Iterate over the entities
nent = sum(int(i) for i in next(mshit).split())
for i in range(nent):
ent = next(mshit).split()
etag, enphys = int(ent[0]), int(ent[7])
if enphys == 0:
continue
elif enphys == 1:
tagpents[etag] = int(ent[8])
else:
raise ValueError('Invalid physical tag count for entity')
if next(mshit) != '$EndEntities\n':
raise ValueError('Expected $EndEntities')
def _read_nodes(self, mshit):
self._read_nodes_impl(mshit)
def _read_nodes_impl_v2(self, mshit):
self._nodepts = nodepts = {}
for l in msh_section(mshit, 'Nodes'):
nv = l.split()
nodepts[int(nv[0])] = np.array([float(x) for x in nv[1:]])
def _read_nodes_impl_v4(self, mshit):
self._nodepts = nodepts = {}
# Entity and total node count
ne, nn = (int(i) for i in next(mshit).split())
for i in range(ne):
nen = int(next(mshit).split()[-1])
for j in range(nen):
nv = next(mshit).split()
nodepts[int(nv[0])] = np.array([float(x) for x in nv[1:]])
if nn != len(nodepts):
raise ValueError('Invalid node count')
if next(mshit) != '$EndNodes\n':
raise ValueError('Expected $EndNodes')
def _read_eles(self, mshit):
self._read_eles_impl(mshit)
def _read_eles_impl_v2(self, mshit):
elenodes = defaultdict(list)
for l in msh_section(mshit, 'Elements'):
# Extract the raw element data
elei = [int(i) for i in l.split()]
enum, etype, entags = elei[:3]
etags, enodes = elei[3:3 + entags], elei[3 + entags:]
if etype not in self._etype_map:
raise ValueError('Unsupported element type {0}'.format(etype))
# Physical entity type (used for BCs)
epent = etags[0]
elenodes[etype, epent].append(enodes)
self._elenodes = {k: np.array(v) for k, v in elenodes.items()}
def _read_eles_impl_v4(self, mshit):
elenodes = defaultdict(list)
# Block and total element count
nb, ne = (int(i) for i in next(mshit).split())
for i in range(nb):
etag, _, etype, ecount = (int(j) for j in next(mshit).split())
if etype not in self._etype_map:
raise ValueError('Unsupported element type {0}'.format(etype))
# Physical entity type (used for BCs)
epent = self._tagpents.get(etag, -1)
append = elenodes[etype, epent].append
for j in range(ecount):
append([int(k) for k in next(mshit).split()[1:]])
if ne != sum(len(v) for v in elenodes.values()):
raise ValueError('Invalid element count')
if next(mshit) != '$EndElements\n':
raise ValueError('Expected $EndElements')
self._elenodes = {k: np.array(v) for k, v in elenodes.items()}
def _to_raw_pyfrm(self):
# Assemble a nodal mesh
maps = self._etype_map, self._petype_fnmap, self._nodemaps
pents = self._felespent, self._bfacespents, self._pfacespents
mesh = NodalMeshAssembler(self._nodepts, self._elenodes, pents, maps)
rawm = {}
rawm.update(mesh.get_connectivity())
rawm.update(mesh.get_shape_points())
return rawm
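# Minimal self-check of msh_section on a synthetic $Nodes block (illustrative
# only, not part of the original reader): the first line of a section carries
# the entry count, and the section must be terminated by its $End marker.
if __name__ == '__main__':
    lines = iter(['2\n', '1 0.0 0.0 0.0\n', '2 1.0 0.0 0.0\n', '$EndNodes\n'])
    assert list(msh_section(lines, 'Nodes')) == ['1 0.0 0.0 0.0',
                                                 '2 1.0 0.0 0.0']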
| 0
| 0
| 0
| 8,419
| 413
| 0
| 0
| 58
| 159
|
b4da4a65d4e00689281ae22f04447e598748b518
| 246
|
py
|
Python
|
tests/conftest.py
|
stephen-bunn/tomlark
|
5554801b1bccac2f780770e60ebd8f15e996d89d
|
[
"0BSD"
] | null | null | null |
tests/conftest.py
|
stephen-bunn/tomlark
|
5554801b1bccac2f780770e60ebd8f15e996d89d
|
[
"0BSD"
] | null | null | null |
tests/conftest.py
|
stephen-bunn/tomlark
|
5554801b1bccac2f780770e60ebd8f15e996d89d
|
[
"0BSD"
] | null | null | null |
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 Stephen Bunn <[email protected]>
# ISC License <https://opensource.org/licenses/isc>
"""
"""
| 15.375
| 51
| 0.686992
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 Stephen Bunn <[email protected]>
# ISC License <https://opensource.org/licenses/isc>
"""
"""
import pytest
from tomlark.parser import Parser
@pytest.fixture
def toml_parser():
return Parser()
| 0
| 33
| 0
| 0
| 0
| 0
| 0
| 4
| 69
|
a442d4a0784271f2955bb9cc4bd3cd28feea0760
| 67
|
py
|
Python
|
glft2vmd/constants.py
|
Sage-of-Mirrors/gltf2vmd
|
76aa5ae25785f8de50351daa27a5b986daa781f0
|
[
"MIT"
] | null | null | null |
glft2vmd/constants.py
|
Sage-of-Mirrors/gltf2vmd
|
76aa5ae25785f8de50351daa27a5b986daa781f0
|
[
"MIT"
] | 6
|
2019-02-05T03:35:26.000Z
|
2019-02-07T05:44:15.000Z
|
glft2vmd/constants.py
|
Sage-of-Mirrors/gltf2vmd
|
76aa5ae25785f8de50351daa27a5b986daa781f0
|
[
"MIT"
] | null | null | null |
VERSION = 1 # The version number of the format
SECTION_COUNT = 14
| 22.333333
| 47
| 0.746269
|
VERSION = 1 # The version number of the format
SECTION_COUNT = 14
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5b9afc7b9248105d7d8416827b48e86831ababb9
| 548
|
py
|
Python
|
easy/problem118/solution.py
|
cutoutsy/leetcode
|
0734f1060a0340370b8234e8072d70c10d4306d9
|
[
"Apache-2.0"
] | 1
|
2018-02-25T03:45:04.000Z
|
2018-02-25T03:45:04.000Z
|
easy/problem118/solution.py
|
cutoutsy/leetcode
|
0734f1060a0340370b8234e8072d70c10d4306d9
|
[
"Apache-2.0"
] | null | null | null |
easy/problem118/solution.py
|
cutoutsy/leetcode
|
0734f1060a0340370b8234e8072d70c10d4306d9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
| 26.095238
| 59
| 0.410584
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class Solution(object):
def generate(self, numRows):
"""
:type numRows: int
:rtype: List[List[int]]
"""
ans = []
if numRows == 0: return ans
ans.append([1])
for i in range(1,numRows):
row = []
for j in range(i+1):
if j == 0 or j == i:
row.append(1)
else:
row.append(ans[i-1][j-1] + ans[i-1][j])
ans.append(row)
return ans
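# Quick check (not part of the original solution file):
if __name__ == '__main__':
    print(Solution().generate(5))
    # [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]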
| 0
| 0
| 0
| 484
| 0
| 0
| 0
| 0
| 22
|
e4f7f7cca2308469990c26afe281704cf43f3897
| 1,532
|
py
|
Python
|
src/astro/__init__.py
|
astro-projects/astro
|
7fa0404fc690569ff85e379ecca54778f09a9333
|
[
"Apache-2.0"
] | 71
|
2021-12-06T22:41:59.000Z
|
2022-03-31T21:47:16.000Z
|
src/astro/__init__.py
|
astro-projects/astro
|
7fa0404fc690569ff85e379ecca54778f09a9333
|
[
"Apache-2.0"
] | 171
|
2021-12-14T07:34:57.000Z
|
2022-03-31T21:04:15.000Z
|
src/astro/__init__.py
|
astro-projects/astro
|
7fa0404fc690569ff85e379ecca54778f09a9333
|
[
"Apache-2.0"
] | 11
|
2021-12-06T22:46:23.000Z
|
2022-03-31T18:09:46.000Z
|
"""A decorator that allows users to run SQL queries natively in Airflow."""
__version__ = "0.9.1"
# The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`
# Without this, if we run the following imports, in this specific order:
# from astro.databases import create_database
# from astro.sql.table import Metadata, Table, create_unique_table_name
# We face ImportError, as it happened in:
# https://github.com/astronomer/astro-sdk/pull/396/commits/fbe73bdbe46d65777258a5f79f461ef69f08a673
# https://github.com/astronomer/astro-sdk/actions/runs/2378526135
# Although astro.database does not depend on astro.sql, it depends on astro.sql.table - and, unless astro.sql was
# imported beforehand, it will also load astro.sql. In astro.sql we import lots of operators which depend on
# astro.database, and this is what leads to the circular dependency.
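import astro.sql  # noqa: F401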
# This is needed to allow Airflow to pick up specific metadata fields it needs
# for certain features. We recognize it's a bit unclean to define these in
# multiple places, but at this point it's the only workaround if you'd like
# your custom conn type to show up in the Airflow UI.
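def get_provider_info() -> dict:
    return {
        # Required.
        "package-name": "astro-sdk-python",
        "name": "Astro SQL Provider",
        "description": __doc__,
        "versions": [__version__],
        # Optional.
        "hook-class-names": [],
        "extra-links": [],
    }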
| 46.424242
| 119
| 0.727807
|
"""A decorator that allows users to run SQL queries natively in Airflow."""
__version__ = "0.9.1"
# The following line is an import work-around to avoid raising a circular dependency issue related to `create_database`
# Without this, if we run the following imports, in this specific order:
# from astro.databases import create_database
# from astro.sql.table import Metadata, Table, create_unique_table_name
# We face ImportError, as it happened in:
# https://github.com/astronomer/astro-sdk/pull/396/commits/fbe73bdbe46d65777258a5f79f461ef69f08a673
# https://github.com/astronomer/astro-sdk/actions/runs/2378526135
# Although astro.database does not depend on astro.sql, it depends on astro.sql.table - and, unless astro.sql was
# imported beforehand, it will also load astro.sql. In astro.sql we import lots of operators which depend on
# astro.database, and this is what leads to the circular dependency.
import astro.sql # noqa: F401
# This is needed to allow Airflow to pick up specific metadata fields it needs
# for certain features. We recognize it's a bit unclean to define these in
# multiple places, but at this point it's the only workaround if you'd like
# your custom conn type to show up in the Airflow UI.
def get_provider_info() -> dict:
return {
# Required.
"package-name": "astro-sdk-python",
"name": "Astro SQL Provider",
"description": __doc__,
"versions": [__version__],
# Optional.
"hook-class-names": [],
"extra-links": [],
}
| 0
| 0
| 0
| 0
| 0
| 278
| 0
| -5
| 58
|
1c84f381723e000bfb669c57e2bd3a49b340519c
| 736
|
py
|
Python
|
packages/pyright-internal/src/tests/samples/variadicTypeVar12.py
|
Jasha10/pyright
|
0ce0cfa10fe7faa41071a2cc417bb449cf8276fe
|
[
"MIT"
] | 3,934
|
2019-03-22T09:26:41.000Z
|
2019-05-06T21:03:08.000Z
|
packages/pyright-internal/src/tests/samples/variadicTypeVar12.py
|
Jasha10/pyright
|
0ce0cfa10fe7faa41071a2cc417bb449cf8276fe
|
[
"MIT"
] | 107
|
2019-03-24T04:09:37.000Z
|
2019-05-06T17:00:04.000Z
|
packages/pyright-internal/src/tests/samples/variadicTypeVar12.py
|
Jasha10/pyright
|
0ce0cfa10fe7faa41071a2cc417bb449cf8276fe
|
[
"MIT"
] | 119
|
2019-03-23T10:48:04.000Z
|
2019-05-06T08:57:56.000Z
|
# This sample tests the case where a variadic TypeVar is used in
# conjunction with a keyword-only parameter. It also tests protocol
# invariance validation when a TypeVarTuple is used in the protocol
# along with a non-variadic TypeVar.
# pyright: strict
from typing import Protocol, TypeVar
from typing_extensions import TypeVarTuple, Unpack
T = TypeVar("T")
Ts = TypeVarTuple("Ts")
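class CallbackA(Protocol[*Ts, T]):
    def __call__(self, *args: *Ts, keyed: T) -> tuple[Unpack[Ts], T]:
        ...
def example(a: int, b: str, *, keyed: bool) -> tuple[int, str, bool]:
    return (a, b, keyed)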
a: CallbackA[int, str, bool] = example
reveal_type(a, expected_text="(a: int, b: str, *, keyed: bool) -> tuple[int, str, bool]")
| 27.259259
| 89
| 0.691576
|
# This sample tests the case where a variadic TypeVar is used in
# conjunction with a keyword-only parameter. It also tests protocol
# invariance validation when a TypeVarTuple is used in the protocol
# along with a non-variadic TypeVar.
# pyright: strict
from typing import Protocol, TypeVar
from typing_extensions import TypeVarTuple, Unpack
T = TypeVar("T")
Ts = TypeVarTuple("Ts")
class CallbackA(Protocol[*Ts, T]):
def __call__(self, *args: *Ts, keyed: T) -> tuple[Unpack[Ts], T]:
...
def example(a: int, b: str, *, keyed: bool) -> tuple[int, str, bool]:
return (a, b, keyed)
a: CallbackA[int, str, bool] = example
reveal_type(a, expected_text="(a: int, b: str, *, keyed: bool) -> tuple[int, str, bool]")
| 0
| 0
| 0
| 95
| 0
| 73
| 0
| 18
| 46
|
d10a63cc5cb88f955269d4ce6980f67addd2f947
| 4,440
|
py
|
Python
|
tests/service/contacts_test.py
|
mherrmann/dnsimple-python
|
a89127f0bafb2a001c902206fba87cbc4f3bc2d1
|
[
"MIT"
] | 12
|
2020-06-18T17:16:03.000Z
|
2022-03-23T08:35:49.000Z
|
tests/service/contacts_test.py
|
mherrmann/dnsimple-python
|
a89127f0bafb2a001c902206fba87cbc4f3bc2d1
|
[
"MIT"
] | 129
|
2020-06-25T12:15:51.000Z
|
2022-03-23T09:42:16.000Z
|
tests/service/contacts_test.py
|
mherrmann/dnsimple-python
|
a89127f0bafb2a001c902206fba87cbc4f3bc2d1
|
[
"MIT"
] | 6
|
2020-07-03T09:34:01.000Z
|
2021-12-20T04:29:59.000Z
|
import unittest
if __name__ == '__main__':
unittest.main()
| 45.773196
| 115
| 0.621847
|
import unittest
import responses
from dnsimple import DNSimpleException
from dnsimple.response import Pagination
from dnsimple.struct import Contact
from tests.helpers import DNSimpleMockResponse, DNSimpleTest
class ContactsTest(DNSimpleTest):
@responses.activate
def test_list_contacts(self):
responses.add(DNSimpleMockResponse(method=responses.GET,
path='/1010/contacts',
fixture_name='listContacts/success'))
contacts = self.contacts.list_contacts(1010).data
self.assertEqual(2, len(contacts))
self.assertIsInstance(contacts[0], Contact)
@responses.activate
def test_list_contacts_supports_pagination(self):
responses.add(DNSimpleMockResponse(method=responses.GET,
path='/1010/contacts?page=1&per_page=2',
fixture_name='listContacts/success'))
response = self.contacts.list_contacts(1010, page=1, per_page=2)
self.assertIsInstance(response.pagination, Pagination)
@responses.activate
def test_create_contact(self):
responses.add(DNSimpleMockResponse(method=responses.POST,
path='/1010/contacts',
fixture_name='createContact/created'))
contact = Contact.new(label='Default', first_name='First', last_name='User', job_title='CEO',
organization_name='Awesome Company', email='[email protected]', phone='+18001234567',
fax='+18011234567', address1='Italian Street, 10', address2='', city='Roma',
state_province='RM', postal_code='00100', country='IT')
created = self.contacts.create_contact(1010, contact).data
self.assertEqual(contact.label, created.label)
self.assertEqual(contact.first_name, created.first_name)
self.assertEqual(contact.last_name, created.last_name)
self.assertEqual(contact.job_title, created.job_title)
self.assertEqual(contact.organization_name, created.organization_name)
self.assertEqual(contact.email, created.email)
self.assertEqual(contact.phone, created.phone)
self.assertEqual(contact.fax, created.fax)
self.assertEqual(contact.address1, created.address1)
self.assertEqual(contact.address2, created.address2)
self.assertEqual(contact.city, created.city)
self.assertEqual(contact.state_province, created.state_province)
self.assertEqual(contact.postal_code, created.postal_code)
self.assertEqual(contact.country, created.country)
@responses.activate
def test_get_contact(self):
responses.add(DNSimpleMockResponse(method=responses.GET,
path='/1010/contacts/1',
fixture_name='getContact/success'))
contact = self.contacts.get_contact(1010, 1).data
self.assertIsInstance(contact, Contact)
@responses.activate
def test_update_contact(self):
responses.add(DNSimpleMockResponse(method=responses.PATCH,
path='/1010/contacts/1',
fixture_name='updateContact/success'))
contact = Contact.new(label='Default')
updated = self.contacts.update_contact(1010, 1, contact).data
self.assertEqual(contact.label, updated.label)
@responses.activate
def test_delete_contact(self):
responses.add(DNSimpleMockResponse(method=responses.DELETE,
path='/1010/contacts/1',
fixture_name='deleteContact/success'))
self.contacts.delete_contact(1010, 1)
@responses.activate
def test_delete_contact_in_use(self):
responses.add(DNSimpleMockResponse(method=responses.DELETE,
path='/1010/contacts/1',
fixture_name='deleteContact/error-contact-in-use'))
try:
self.contacts.delete_contact(1010, 1)
except DNSimpleException as dnse:
self.assertEqual("The contact cannot be deleted because it's currently in use", dnse.message)
if __name__ == '__main__':
unittest.main()
| 0
| 3,955
| 0
| 12
| 0
| 0
| 0
| 84
| 323
|
f84ea56c56e51a875d50bc7307a31889f6562e9b
| 423
|
py
|
Python
|
replacedata/urls.py
|
judexzhu/dzhops
|
ffe089a734dd24d88bf433223ab8eb7e2eb099c5
|
[
"Apache-2.0"
] | 202
|
2015-05-18T08:48:52.000Z
|
2021-07-16T13:59:07.000Z
|
replacedata/urls.py
|
judexzhu/dzhops
|
ffe089a734dd24d88bf433223ab8eb7e2eb099c5
|
[
"Apache-2.0"
] | 19
|
2015-11-26T03:54:45.000Z
|
2019-03-02T13:58:24.000Z
|
replacedata/urls.py
|
Hasal/dzhops
|
fcd16adc61a941dccdaebee156b545784a5e96a8
|
[
"Apache-2.0"
] | 172
|
2015-08-07T15:52:17.000Z
|
2021-07-16T13:59:11.000Z
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'replacedata.views',
# url(r'^$', 'oms.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^repair/history/$', 'repairHistoryData', name='repair_data'),
url(r'^api/history/$', 'repairHistoryDataAPI', name='repair_data_api'),
)
| 32.538462
| 75
| 0.664303
|
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'replacedata.views',
# url(r'^$', 'oms.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^repair/history/$', 'repairHistoryData', name='repair_data'),
url(r'^api/history/$', 'repairHistoryDataAPI', name='repair_data_api'),
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0
|
980ab3bc4a57447b3222534b3167de92a4804cb1
| 1,641
|
py
|
Python
|
neural_sp/models/modules/zoneout.py
|
SunSki/neural_sp
|
4e4aca9b4cda1c7d95a1774d22f4d3298ad4ba4b
|
[
"Apache-2.0"
] | null | null | null |
neural_sp/models/modules/zoneout.py
|
SunSki/neural_sp
|
4e4aca9b4cda1c7d95a1774d22f4d3298ad4ba4b
|
[
"Apache-2.0"
] | null | null | null |
neural_sp/models/modules/zoneout.py
|
SunSki/neural_sp
|
4e4aca9b4cda1c7d95a1774d22f4d3298ad4ba4b
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Zoneout regularization."""
import torch.nn as nn
| 28.789474
| 71
| 0.606947
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Zoneout regularization."""
import torch.nn as nn
class ZoneoutCell(nn.Module):
def __init__(self, cell, zoneout_prob_h, zoneout_prob_c):
super().__init__()
self.cell = cell
self.hidden_size = cell.hidden_size
if not isinstance(cell, nn.RNNCellBase):
raise TypeError("The cell is not a LSTMCell or GRUCell!")
if isinstance(cell, nn.LSTMCell):
self.prob = (zoneout_prob_h, zoneout_prob_c)
else:
self.prob = zoneout_prob_h
def forward(self, inputs, state):
"""Forward pass.
Args:
inputs (FloatTensor): `[B, input_dim]'
state (tuple or FloatTensor):
Returns:
state (tuple or FloatTensor):
"""
return self.zoneout(state, self.cell(inputs, state), self.prob)
def zoneout(self, state, next_state, prob):
if isinstance(state, tuple):
return (self.zoneout(state[0], next_state[0], prob[0]),
self.zoneout(state[1], next_state[1], prob[1]))
mask = state.new(state.size()).bernoulli_(prob)
if self.training:
return mask * next_state + (1 - mask) * state
else:
return prob * next_state + (1 - prob) * state
def zoneout_wrapper(cell, zoneout_prob_h=0, zoneout_prob_c=0):
if zoneout_prob_h > 0 or zoneout_prob_c > 0:
return ZoneoutCell(cell, zoneout_prob_h, zoneout_prob_c)
else:
return cell
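# Hypothetical usage sketch (not part of the original module): wrap an LSTMCell
# with zoneout and run a single regularized recurrent step on dummy data.
if __name__ == '__main__':
    import torch
    cell = zoneout_wrapper(nn.LSTMCell(8, 16),
                           zoneout_prob_h=0.1, zoneout_prob_c=0.05)
    x = torch.randn(4, 8)
    state = (torch.zeros(4, 16), torch.zeros(4, 16))
    h, c = cell(x, state)
    print(h.shape, c.shape)  # torch.Size([4, 16]) torch.Size([4, 16])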
| 0
| 0
| 0
| 1,192
| 0
| 185
| 0
| 0
| 46
|
b3152b4456ead975eb6f74831b450dea9597705e
| 810
|
py
|
Python
|
number programs/sum of digits in anumber.py
|
ZephyrAveryl777/Python-Programs
|
26de85c31af28382d406d27d54186b966a7b1bfc
|
[
"MIT"
] | 6
|
2020-08-13T11:49:29.000Z
|
2021-03-07T05:46:17.000Z
|
number programs/sum of digits in anumber.py
|
ZephyrAveryl777/Python-Programs
|
26de85c31af28382d406d27d54186b966a7b1bfc
|
[
"MIT"
] | null | null | null |
number programs/sum of digits in anumber.py
|
ZephyrAveryl777/Python-Programs
|
26de85c31af28382d406d27d54186b966a7b1bfc
|
[
"MIT"
] | 1
|
2021-04-24T06:12:48.000Z
|
2021-04-24T06:12:48.000Z
|
##Problem Description
##The program takes in a number and finds the sum of its digits.
print("-------------------Method 1----------------------------------")
temp=n=int(input("Enter a number: "))
total = 0
while n>0 :
total = total+(n%10)
n=n//10
print("The total sum of digits in the number {0} is: {1} ".format(temp,total))
print("--------------------------------------------------------------")
print("-------------------Method 2----------------------------------")
l=[]
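def sum_digits(b):
    if(b==0):
        return l
    l.append(b%10)
    sum_digits(b//10)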
temp=n=int(input("Enter a number: "))
sum_digits(n)
print("The total sum of digits in the number {0} is: {1} ".format(temp,sum(l)))
print("--------------------------------------------------------------")
| 33.75
| 80
| 0.437037
|
##Problem Description
##The program takes in a number and finds the sum of its digits.
print("-------------------Method 1----------------------------------")
temp=n=int(input("Enter a number: "))
total = 0
while n>0 :
total = total+(n%10)
n=n//10
print("The total sum of digits in the number {0} is: {1} ".format(temp,total))
print("--------------------------------------------------------------")
print("-------------------Method 2----------------------------------")
l=[]
def sum_digits(b):
if(b==0):
return l
l.append(b%10)
sum_digits(b//10)
temp=n=int(input("Enter a number: "))
sum_digits(n)
print("The total sum of digits in the number {0} is: {1} ".format(temp,sum(l)))
print("--------------------------------------------------------------")
| 0
| 0
| 0
| 0
| 0
| 73
| 0
| 0
| 23
|
f2c241e08bc11d95b523ca06dbb1790a155bc856
| 1,095
|
py
|
Python
|
pymarlin/utils/writer/aml.py
|
nifarn/PyMarlin
|
ea1f5f927aa85112ecebc206d53b5c3ee65704fa
|
[
"MIT"
] | 20
|
2021-06-09T18:46:45.000Z
|
2022-02-09T01:08:13.000Z
|
pymarlin/utils/writer/aml.py
|
nifarn/PyMarlin
|
ea1f5f927aa85112ecebc206d53b5c3ee65704fa
|
[
"MIT"
] | 50
|
2021-06-09T17:50:35.000Z
|
2022-02-07T23:02:30.000Z
|
pymarlin/utils/writer/aml.py
|
nifarn/PyMarlin
|
ea1f5f927aa85112ecebc206d53b5c3ee65704fa
|
[
"MIT"
] | 5
|
2021-06-21T22:24:30.000Z
|
2021-12-21T17:08:21.000Z
|
"""
AML writer module.
"""
| 26.707317
| 67
| 0.536073
|
"""
AML writer module.
"""
from pymarlin.utils.logger.logging_utils import getlogger
from .base import Writer
class Aml(Writer):
"""
This class implements the Azure ML writer for stats.
"""
def __init__(self):
super().__init__(getlogger(__name__))
self.run = None
try:
from azureml.core.run import Run
self.run = Run.get_context()
self.logger.info(self.run.get_status())
except Exception: # pylint: disable=broad-except
self.run = None
self.logger.warning('AML writer failed to initialize.')
self.logger.info(f'run = {self.run}')
def log_scalar(self, k, v, step):
"""
Log metric to AML.
"""
kwargs = {
'global_step': step,
k: v
}
if self.run is not None:
self.run.log_row(k, **kwargs)
def log_multi(self, k, v, step):
"""
        Log a dict of metrics to AML, one scalar per key.
"""
for key, val in v.items():
key = k+'/'+key
self.log_scalar(key, val, step)
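# Hypothetical usage sketch (not part of the original module), assuming the
# code runs inside an Azure ML run context:
#
#   writer = Aml()
#   writer.log_scalar('loss', 0.42, step=100)
#   writer.log_multi('val', {'acc': 0.91, 'f1': 0.88}, step=100)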
| 0
| 0
| 0
| 962
| 0
| 0
| 0
| 39
| 67
|
3f0de62c4c8c48e6ae02cd05d3405f7ac8d21e23
| 377
|
py
|
Python
|
LectureNote/03.array_linkedlist/14.py
|
Raziel-JKM/Study_turtleCoding
|
d09e03605cdc8130db2a279ec8193b29f3bca7a6
|
[
"MIT"
] | null | null | null |
LectureNote/03.array_linkedlist/14.py
|
Raziel-JKM/Study_turtleCoding
|
d09e03605cdc8130db2a279ec8193b29f3bca7a6
|
[
"MIT"
] | null | null | null |
LectureNote/03.array_linkedlist/14.py
|
Raziel-JKM/Study_turtleCoding
|
d09e03605cdc8130db2a279ec8193b29f3bca7a6
|
[
"MIT"
] | 2
|
2021-12-13T08:02:31.000Z
|
2021-12-18T08:36:23.000Z
|
# Definition for singly-linked list.
| 25.133333
| 68
| 0.567639
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
if (not l1) or (l2 and l1.val > l2.val):
l1, l2 = l2, l1
if l1:
l1.next = self.mergeTwoLists(l1.next, l2)
return l1
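# Quick check (not part of the original lecture note): merge 1->3 with 2->4.
if __name__ == '__main__':
    a, b = ListNode(1), ListNode(3)
    a.next = b
    c, d = ListNode(2), ListNode(4)
    c.next = d
    node = Solution().mergeTwoLists(a, c)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # [1, 2, 3, 4]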
| 0
| 0
| 0
| 294
| 0
| 0
| 0
| 0
| 45
|
4f8e2aeb8fb094469d36a66d43fac5b6984cbe13
| 240
|
py
|
Python
|
OCT18A/BITOBYT.py
|
Chhekur/codechef-solutions
|
14ca902ea693139de13ffe5b9f602447bf34b79f
|
[
"MIT"
] | 1
|
2019-03-25T14:14:47.000Z
|
2019-03-25T14:14:47.000Z
|
OCT18A/BITOBYT.py
|
Chhekur/codechef-solutions
|
14ca902ea693139de13ffe5b9f602447bf34b79f
|
[
"MIT"
] | null | null | null |
OCT18A/BITOBYT.py
|
Chhekur/codechef-solutions
|
14ca902ea693139de13ffe5b9f602447bf34b79f
|
[
"MIT"
] | null | null | null |
for _ in range(int(input())):
n = int(input())
temp = (n - 1) // 26
temp2 = n % 26
ans = 2**temp
if n == 0: print(1,0,0)
elif temp2 > 0 and temp2 < 3: print(ans,0,0)
elif temp2 > 2 and temp2 < 11: print(0,ans,0)
else: print(0,0,ans)
| 26.666667
| 46
| 0.570833
|
for _ in range(int(input())):
n = int(input())
temp = (n - 1) // 26
temp2 = n % 26
ans = 2**temp
if n == 0: print(1,0,0)
elif temp2 > 0 and temp2 < 3: print(ans,0,0)
elif temp2 > 2 and temp2 < 11: print(0,ans,0)
else: print(0,0,ans)
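# Annotation (not part of the original submission), describing what the code
# above computes: the population doubles every 26 seconds
# (ans = 2**((n - 1) // 26)); within a cycle, seconds 1-2 are reported as
# bits, seconds 3-10 as nibbles, and the remaining seconds as bytes.
# n == 0 corresponds to the initial single bit.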
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b1a4913225fbfc946d7637c1b7948e693eb990e2
| 8,110
|
py
|
Python
|
LinearRegression.py
|
Prasanna-Brabourame/Machine-Learning
|
f27811d1d0b280ac025cfc7d5610c646b9f5de35
|
[
"MIT"
] | null | null | null |
LinearRegression.py
|
Prasanna-Brabourame/Machine-Learning
|
f27811d1d0b280ac025cfc7d5610c646b9f5de35
|
[
"MIT"
] | null | null | null |
LinearRegression.py
|
Prasanna-Brabourame/Machine-Learning
|
f27811d1d0b280ac025cfc7d5610c646b9f5de35
|
[
"MIT"
] | null | null | null |
# The problem to be solved:
# We have trucks located in different cities and each truck brings a profit or loss. We have the historical data and determined that the profit depends on the city's population. We want to find this relation.
import numpy as np
print('Welcome to Machine Learning with Python!')
print('Lesson 1: Linear regression')
print('\n'+40*'=')
# data contains the city population (in 10,000s) in the first column
# and the profit/loss (in 10,000$) in the second columns
# the data was rescaled to save on calculations and resources consumption
# Based on the first entry, a truck in a city of population of 61,101 brought a profit of $175,920
data =\
[
[6.1101,17.592],
[5.5277,9.1302],
[8.5186,13.662],
[7.0032,11.854],
[5.8598,6.8233],
[8.3829,11.886],
[7.4764,4.3483],
[8.5781,12],
[6.4862,6.5987],
[5.0546,3.8166],
[5.7107,3.2522],
[14.164,15.505],
[5.734,3.1551],
[8.4084,7.2258],
[5.6407,0.71618],
[5.3794,3.5129],
[6.3654,5.3048],
[5.1301,0.56077],
[6.4296,3.6518],
[7.0708,5.3893],
[6.1891,3.1386],
[20.27,21.767],
[5.4901,4.263],
[6.3261,5.1875],
[5.5649,3.0825],
[18.945,22.638],
[12.828,13.501],
[10.957,7.0467],
[13.176,14.692],
[22.203,24.147],
[5.2524,-1.22],
[6.5894,5.9966],
[9.2482,12.134],
[5.8918,1.8495],
[8.2111,6.5426],
[7.9334,4.5623],
[8.0959,4.1164],
[5.6063,3.3928],
[12.836,10.117],
[6.3534,5.4974],
[5.4069,0.55657],
[6.8825,3.9115],
[11.708,5.3854],
[5.7737,2.4406],
[7.8247,6.7318],
[7.0931,1.0463],
[5.0702,5.1337],
[5.8014,1.844],
[11.7,8.0043],
[5.5416,1.0179],
[7.5402,6.7504],
[5.3077,1.8396],
[7.4239,4.2885],
[7.6031,4.9981],
[6.3328,1.4233],
[6.3589,-1.4211],
[6.2742,2.4756],
[5.6397,4.6042],
[9.3102,3.9624],
[9.4536,5.4141],
[8.8254,5.1694],
[5.1793,-0.74279],
[21.279,17.929],
[14.908,12.054],
[18.959,17.054],
[7.2182,4.8852],
[8.2951,5.7442],
[10.236,7.7754],
[5.4994,1.0173],
[20.341,20.992],
[10.136,6.6799],
[7.3345,4.0259],
[6.0062,1.2784],
[7.2259,3.3411],
[5.0269,-2.6807],
[6.5479,0.29678],
[7.5386,3.8845],
[5.0365,5.7014],
[10.274,6.7526],
[5.1077,2.0576],
[5.7292,0.47953],
[5.1884,0.20421],
[6.3557,0.67861],
[9.7687,7.5435],
[6.5159,5.3436],
[8.5172,4.2415],
[9.1802,6.7981],
[6.002,0.92695],
[5.5204,0.152],
[5.0594,2.8214],
[5.7077,1.8451],
[7.6366,4.2959],
[5.8707,7.2029],
[5.3054,1.9869],
[8.2934,0.14454],
[13.394,9.0551],
[5.4369,0.61705]
]
# We want to make a model able to predict the profit/loss, based on a given population. In order to do some machine learning, the data has to be of a matrix type.
# X matrix will hold city population
X = np.matrix(data)[:,0]
# y matrix will hold the profit/loss information
y = np.matrix(data)[:,1]
'''
Basically, we are looking for a function f(x) returning the _output_ value y based on its _input_ x. We assume a linear y = ax + b dependence, but it might as well have been a polynomial or any other function. So, we are looking for a and b values that give us a function that reflects the profit based on the population. Like this:
predicted_profit = a * city_population + b
A quick look at the data shows that it is impossible to find a line which would cross all the datapoints. So, we want the best possible fit. How do we measure its quality? The best possible fit is the one that makes the smallest prediction error on the whole dataset. A single error is calculated as the square of the difference between the real and predicted values, so the total error is simply the sum of all the single ones.
We thus need a so-called cost function which would return the average error of a given f(x) when trying to explain the datapoints and make predictions. In order to make things quicker, we will look for a vector 'theta', containing the 'a' and 'b' (or more, for more complicated models - theta0, theta1, theta2,...) parameters.
'''
print('\nLooking for y=a*x+b function (a,b=theta)')
# function J calculates the cost under a given set of theta parameters
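def J(X, y, theta):
    theta = np.matrix(theta).T # we need a transposed matrix theta
    m = len(y) # m is the number of datapoints
    predictions = X * theta # stores the outputs predicted by f(x) with a given theta as parameter vector
    sqError = np.power((predictions-y),[2]) # a matrix of squared errors between predictions and real values
    return 1/(2*m) * sum(sqError) # the value of the cost function J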
# the transformation below adds a column of ones to the left of the X matrix, for calculation reasons
dataX = np.matrix(data)[:,0:1]
X = np.ones((len(dataX),2))
X[:,1:] = dataX
# let's check the cost if we would assume theta at two different values
print('\nChecking two example cases of theta:')
for t in [0,0], [-1,2]:
print('Assuming theta vector at {}, the cost would be {:.2f}'.format(t, J(X, y, t).item())) # 32.073, 54.242
'''
Now, how to find the optimal theta vector for our model to predict with the smallest possible error?
Assuming that J is a cost function, this is an optimization problem - we need to find the minimum of J.
We will use a technique called gradient descent - we will start from an initial theta (here chosen at random) and gradually move along the J curve, updating all thetas (simultaneously) by small fractions. If J increases, we are going the wrong way; if it decreases, we are on the right track.
'''
# gradient descent function will iteratively update theta by a small fraction alpha (also called the learning rate) for a number of iterations
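def gradient(X, y, alpha, theta, iters):
    J_history = np.zeros(iters) # will store historical values of J for each iteration
    m = len(y) # m is the number of datapoints
    theta = np.matrix(theta).T # theta has to be transposed again
    for i in range(iters):
        h0 = X * theta # zero hypothesis for each datapoint
        delta = (1 / m) * (X.T * h0 - X.T * y) # the gradient descent
        theta = theta - alpha * delta # update theta by learning rate times gradient
        J_history[i] = J(X, y, theta.T) # save the J of a particular iteration, it should drop in the next
    return J_history, theta # return the history of J plus the optimal theta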
print('\n'+40*'=')
# we have the function ready, let's do some machine learning!
theta = np.matrix([np.random.random(),np.random.random()]) # we initialize theta at random values
alpha = 0.01 # learning rate - if too low, the algorithm will not converge, if too high, it can "explode"
iters = 2000 # number of iterations - reduce if "Time limit exceeded"
print('\n== Model summary ==\nLearning rate: {}\nIterations: {}\nInitial theta: {}\nInitial J: {:.2f}\n'.format(alpha, iters, theta, J(X,y,theta).item()))
print('Training the model... ')
# this actually trains our model and finds the optimal theta value
J_history, theta_min = gradient(X, y, alpha, theta, iters)
print('Done.')
print('\nFinal theta: {}\nFinal J: {:.2f}'.format(theta_min.T, J(X,y,theta_min.T).item()))
'''
Now that we have the model trained, we can use it to predict the profit/loss
Usually, since we want to solve a real problem, we define our function to accept real numbers, not rescaled ones. However, we have to remember that the model itself is trained on rescaled data, so the function has to rescale its input accordingly.
'''
# This function will calculate the predicted profit
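def predict_profit(population):
    pop = population / 10000
    return [1, pop] * theta_min * 10000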
# Now, let's check for a random city
p = 50000 + 100000 * np.random.random()
print('\n'+40*'=')
print('\nBased on learned data, predicted profit for a city of population of {:,.0f} is ${:,.2f}.\n'.format(p, predict_profit(p).item()))
# For the business decision, it would also be good to know what is the minimal population of a city to start the profitable business (predicted value is at least positive)
p_min = -theta_min[0].item() / theta_min[1].item() * 10000
print('In order for the business to be profitable, it has to be started in a city with population greater than {:,.0f}.'.format(p_min))
print('\n'+40*'=')
print('\nNOTE: The code initializes the model with different theta each time, thus the model predicts different minimal viable population at each runtime.')
| 38.254717
| 441
| 0.69815
|
# The problem to be solved:
# We have trucks located in different cities and each truck brings a profit or loss. We have the historical data and determined that the profit depends on the city's population. We want to find this relation.
import numpy as np
print('Welcome to Machine Learning with Python!')
print('Lesson 1: Linear regression')
print('\n'+40*'=')
# data contains the city population (in 10,000s) in the first column
# and the profit/loss (in 10,000$) in the second columns
# the data was rescaled to save on calculations and resources consumption
# Based on the first entry, a truck in a city of population of 61,101 brought a profit of $175,920
data =\
[
[6.1101,17.592],
[5.5277,9.1302],
[8.5186,13.662],
[7.0032,11.854],
[5.8598,6.8233],
[8.3829,11.886],
[7.4764,4.3483],
[8.5781,12],
[6.4862,6.5987],
[5.0546,3.8166],
[5.7107,3.2522],
[14.164,15.505],
[5.734,3.1551],
[8.4084,7.2258],
[5.6407,0.71618],
[5.3794,3.5129],
[6.3654,5.3048],
[5.1301,0.56077],
[6.4296,3.6518],
[7.0708,5.3893],
[6.1891,3.1386],
[20.27,21.767],
[5.4901,4.263],
[6.3261,5.1875],
[5.5649,3.0825],
[18.945,22.638],
[12.828,13.501],
[10.957,7.0467],
[13.176,14.692],
[22.203,24.147],
[5.2524,-1.22],
[6.5894,5.9966],
[9.2482,12.134],
[5.8918,1.8495],
[8.2111,6.5426],
[7.9334,4.5623],
[8.0959,4.1164],
[5.6063,3.3928],
[12.836,10.117],
[6.3534,5.4974],
[5.4069,0.55657],
[6.8825,3.9115],
[11.708,5.3854],
[5.7737,2.4406],
[7.8247,6.7318],
[7.0931,1.0463],
[5.0702,5.1337],
[5.8014,1.844],
[11.7,8.0043],
[5.5416,1.0179],
[7.5402,6.7504],
[5.3077,1.8396],
[7.4239,4.2885],
[7.6031,4.9981],
[6.3328,1.4233],
[6.3589,-1.4211],
[6.2742,2.4756],
[5.6397,4.6042],
[9.3102,3.9624],
[9.4536,5.4141],
[8.8254,5.1694],
[5.1793,-0.74279],
[21.279,17.929],
[14.908,12.054],
[18.959,17.054],
[7.2182,4.8852],
[8.2951,5.7442],
[10.236,7.7754],
[5.4994,1.0173],
[20.341,20.992],
[10.136,6.6799],
[7.3345,4.0259],
[6.0062,1.2784],
[7.2259,3.3411],
[5.0269,-2.6807],
[6.5479,0.29678],
[7.5386,3.8845],
[5.0365,5.7014],
[10.274,6.7526],
[5.1077,2.0576],
[5.7292,0.47953],
[5.1884,0.20421],
[6.3557,0.67861],
[9.7687,7.5435],
[6.5159,5.3436],
[8.5172,4.2415],
[9.1802,6.7981],
[6.002,0.92695],
[5.5204,0.152],
[5.0594,2.8214],
[5.7077,1.8451],
[7.6366,4.2959],
[5.8707,7.2029],
[5.3054,1.9869],
[8.2934,0.14454],
[13.394,9.0551],
[5.4369,0.61705]
]
# We want to make a model able to predict the profit/loss, based on a given population. In order to do some machine learning, the data has to be of a matrix type.
# X matrix will hold city population
X = np.matrix(data)[:,0]
# y matrix will hold the profit/loss information
y = np.matrix(data)[:,1]
'''
Basically, we are looking for a function f(x) returning the _output_ value y based on its _input_ x. We assume a linear y = ax + b dependence, but it might as well have been a polynomial or any other function. So, we are looking for a and b values that give us a function that reflects the profit based on the population. Like this:
predicted_profit = a * city_population + b
A quick look at the data shows that it is impossible to find a line which would cross all the datapoints. So, we want the best possible fit. How do we measure its quality? The best possible fit is the one that makes the smallest prediction error on the whole dataset. A single error is calculated as the square of the difference between the real and predicted values, so the total error is simply the sum of all the single ones.
We thus need a so-called cost function which would return the average error of a given f(x) when trying to explain the datapoints and make predictions. In order to make things quicker, we will look for a vector 'theta', containing the 'a' and 'b' (or more, for more complicated models - theta0, theta1, theta2,...) parameters.
'''
print('\nLooking for y=a*x+b function (a,b=theta)')
# function J calculates the cost under a given set of theta parameters
def J(X, y, theta):
theta = np.matrix(theta).T # we need a transposed matrix theta
m = len(y) # m is the number of datapoints
predictions = X * theta # stores the outputs predicted by f(x) with a given theta as parameter vector
sqError = np.power((predictions-y),[2]) # a matrix of squared errors between predictions and real values
return 1/(2*m) * sum(sqError) # the value of the cost function J
# the transformation below adds a column of ones to the left of the X matrix, for calculation reasons
dataX = np.matrix(data)[:,0:1]
X = np.ones((len(dataX),2))
X[:,1:] = dataX
# let's check the cost if we would assume theta at two different values
print('\nChecking two example cases of theta:')
for t in [0,0], [-1,2]:
print('Assuming theta vector at {}, the cost would be {:.2f}'.format(t, J(X, y, t).item())) # 32.073, 54.242
'''
Now, how to find the optimal theta vector for our model to predict with the smallest possible error?
Assuming that J is a cost function, this is an optimization problem - we need to find the minimum of J.
We will use a technique called gradient descent - we will start from an initial theta (here chosen at random) and gradually move along the J curve, updating all thetas (simultaneously) by small fractions. If J increases, we are going the wrong way; if it decreases, we are on the right track.
'''
# gradient descent function will iteratively update theta by a small fraction alpha (also called the learning rate) for a number of iterations
def gradient(X, y, alpha, theta, iters):
J_history = np.zeros(iters) # will store historical values of J for each iteration
m = len(y) # m is the number of datapoints
theta = np.matrix(theta).T # theta has to be transposed again
for i in range(iters):
h0 = X * theta # zero hypothesis for each datapoint
delta = (1 / m) * (X.T * h0 - X.T * y) # the gradient descent
theta = theta - alpha * delta # update theta by learning rate times gradient
J_history[i] = J(X, y, theta.T) # save the J of a particular iteration, it should drop in the next
return J_history, theta # return the history of J plus the optimal theta
print('\n'+40*'=')
# we have the function ready, let's do some machine learning!
theta = np.matrix([np.random.random(),np.random.random()]) # we initialize theta at random values
alpha = 0.01 # learning rate - if too low, the algorithm will not converge, if too high, it can "explode"
iters = 2000 # number of iterations - reduce if "Time limit exceeded"
print('\n== Model summary ==\nLearning rate: {}\nIterations: {}\nInitial theta: {}\nInitial J: {:.2f}\n'.format(alpha, iters, theta, J(X,y,theta).item()))
print('Training the model... ')
# this actually trains our model and finds the optimal theta value
J_history, theta_min = gradient(X, y, alpha, theta, iters)
print('Done.')
print('\nFinal theta: {}\nFinal J: {:.2f}'.format(theta_min.T, J(X,y,theta_min.T).item()))
'''
Now that we have the model trained, we can use it to predict the profit/loss
Usually, since we want to solve a real problem, we define our function to accept real numbers, not rescaled ones. However, we have to remember that the model itself is trained on rescaled data, so the function has to rescale its input accordingly.
'''
# This function will calculate the predicted profit
def predict_profit(population):
pop = population / 10000
return [1, pop] * theta_min * 10000
# Now, let's check for a random city
p = 50000 + 100000 * np.random.random()
print('\n'+40*'=')
print('\nBased on learned data, predicted profit for a city of population of {:,.0f} is ${:,.2f}.\n'.format(p, predict_profit(p).item()))
# For the business decision, it would also be good to know what is the minimal population of a city to start the profitable business (predicted value is at least positive)
p_min = -theta_min[0].item() / theta_min[1].item() * 10000
print('In order for the business to be profitable, it has to be started in a city with population greater than {:,.0f}.'.format(p_min))
print('\n'+40*'=')
print('\nNOTE: The code initializes the model with different theta each time, thus the model predicts different minimal viable population at each runtime.')
| 0
| 0
| 0
| 0
| 0
| 1,120
| 0
| 0
| 66
|
451b547531e66a11eedfdad82d0ab5ec2c049406
| 989
|
py
|
Python
|
remindme/config.py
|
GochoMugo/remindme
|
6cf2f94ce07ead754f1ee5976a7e7d7cbfa1a2e4
|
[
"MIT"
] | 17
|
2015-05-02T22:58:07.000Z
|
2017-04-17T06:33:43.000Z
|
remindme/config.py
|
GochoMugo/remindme
|
6cf2f94ce07ead754f1ee5976a7e7d7cbfa1a2e4
|
[
"MIT"
] | 8
|
2015-02-14T16:22:27.000Z
|
2016-10-26T13:15:19.000Z
|
remindme/config.py
|
GochoMugo/remindme
|
6cf2f94ce07ead754f1ee5976a7e7d7cbfa1a2e4
|
[
"MIT"
] | 2
|
2016-02-26T10:47:56.000Z
|
2019-10-09T05:49:51.000Z
|
import os
import sys
import colorama
from . import metadata
# project metadata
METADATA = metadata
# paths
PATHS = {}
PATHS["home"] = os.path.expanduser("~")
PATHS["db_file"] = os.path.join(PATHS["home"], ".remindme.db")
PATHS["config_file"] = os.path.join(PATHS["home"], ".remindme")
# colors
colorama.init()
COLORS = {}
COLORS["default"] = colorama.Fore.WHITE
COLORS["error"] = colorama.Fore.RED
COLORS["info"] = colorama.Fore.MAGENTA
COLORS["reset"] = colorama.Style.RESET_ALL
COLORS["success"] = colorama.Fore.GREEN
# python version
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# cryptography settings
CRYPTO = {}
CRYPTO["kdf_iterations"] = 100000
CRYPTO["kdf_length"] = 32
# default user settings
USER_SETTINGS = {}
USER_SETTINGS["editor"] = None
USER_SETTINGS["disable_encryption"] = False
USER_SETTINGS["encrypt_by_default"] = True
USER_SETTINGS["retry_password_match"] = True
USER_SETTINGS["retry_decryption"] = False
USER_SETTINGS["end_line"] = ":end"
| 21.5
| 63
| 0.722952
|
import os
import sys
import colorama
from . import metadata
# project metadata
METADATA = metadata
# paths
PATHS = {}
PATHS["home"] = os.path.expanduser("~")
PATHS["db_file"] = os.path.join(PATHS["home"], ".remindme.db")
PATHS["config_file"] = os.path.join(PATHS["home"], ".remindme")
# colors
colorama.init()
COLORS = {}
COLORS["default"] = colorama.Fore.WHITE
COLORS["error"] = colorama.Fore.RED
COLORS["info"] = colorama.Fore.MAGENTA
COLORS["reset"] = colorama.Style.RESET_ALL
COLORS["success"] = colorama.Fore.GREEN
# python version
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# cryptography settings
CRYPTO = {}
CRYPTO["kdf_iterations"] = 100000
CRYPTO["kdf_length"] = 32
# default user settings
USER_SETTINGS = {}
USER_SETTINGS["editor"] = None
USER_SETTINGS["disable_encryption"] = False
USER_SETTINGS["encrypt_by_default"] = True
USER_SETTINGS["retry_password_match"] = True
USER_SETTINGS["retry_decryption"] = False
USER_SETTINGS["end_line"] = ":end"
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3c4fdc05b6325dc0d014850d64adf6128c1af6de
| 1,075
|
py
|
Python
|
telemetry/telemetry/internal/platform/platform_backend_unittest.py
|
Martijnve23/catapult
|
5c63b19d221af6a12889e8727acc85d93892cab7
|
[
"BSD-3-Clause"
] | 1,894
|
2015-04-17T18:29:53.000Z
|
2022-03-28T22:41:06.000Z
|
telemetry/telemetry/internal/platform/platform_backend_unittest.py
|
Martijnve23/catapult
|
5c63b19d221af6a12889e8727acc85d93892cab7
|
[
"BSD-3-Clause"
] | 4,640
|
2015-07-08T16:19:08.000Z
|
2019-12-02T15:01:27.000Z
|
telemetry/telemetry/internal/platform/platform_backend_unittest.py
|
Martijnve23/catapult
|
5c63b19d221af6a12889e8727acc85d93892cab7
|
[
"BSD-3-Clause"
] | 698
|
2015-06-02T19:18:35.000Z
|
2022-03-29T16:57:15.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
| 39.814815
| 74
| 0.728372
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import unittest
import mock
from telemetry.core import platform as platform_module
from telemetry.internal.platform import platform_backend
from telemetry.internal.browser import possible_browser
class PlatformBackendTest(unittest.TestCase):
def testGetTypExpectationsTags(self):
pbe = platform_backend.PlatformBackend()
pb = possible_browser.PossibleBrowser('reference_debug', 'win', False)
with mock.patch.object(
pbe.__class__, 'GetOSName', return_value='win'):
with mock.patch.object(
pbe.__class__, 'GetOSVersionName', return_value='win 10'):
with mock.patch.object(
pb.__class__, '_InitPlatformIfNeeded', return_value=None):
pb._platform = platform_module.Platform(pbe)
self.assertEqual(set(pb.GetTypExpectationsTags()),
{'win', 'win-10', 'reference-debug'})
| 0
| 0
| 0
| 652
| 0
| 0
| 0
| 86
| 134
|
704bb3776179c6385b0af47d73095b8ef624dce9
| 960
|
py
|
Python
|
files/update-lang-list.py
|
eumel8/translation_checksite
|
122acecf10e1de21320f8f0607d45ddada69d032
|
[
"Apache-2.0"
] | null | null | null |
files/update-lang-list.py
|
eumel8/translation_checksite
|
122acecf10e1de21320f8f0607d45ddada69d032
|
[
"Apache-2.0"
] | 4
|
2016-01-30T06:59:50.000Z
|
2021-12-02T17:55:54.000Z
|
files/update-lang-list.py
|
eumel8/translation_checksite
|
122acecf10e1de21320f8f0607d45ddada69d032
|
[
"Apache-2.0"
] | 3
|
2016-01-30T03:44:15.000Z
|
2016-02-05T10:50:43.000Z
|
#!/usr/bin/env python
import pprint
import os
from django.conf.locale import LANG_INFO
from django.utils import translation
HORIZON_DIR = '/opt/stack/horizon'
langs_horizon = os.listdir(os.path.join(HORIZON_DIR, 'horizon', 'locale'))
langs_dashboard = os.listdir(os.path.join(HORIZON_DIR, 'openstack_dashboard', 'locale'))
# Pick up languages with both horizon and openstack_dashboard translations
langs = set(langs_horizon) & set(langs_dashboard)
lang_list = [get_django_lang_name(l, langs) for l in sorted(langs)]
print 'LANGUAGES = ',
pprint.pprint(tuple(lang_list))
| 28.235294
| 88
| 0.708333
|
#!/usr/bin/env python
import pprint
import os
from django.conf.locale import LANG_INFO
from django.utils import translation
def get_django_lang_name(code, all_codes):
code = code.lower().replace('_', '-')
code_orig = code
lang_info = LANG_INFO.get(code)
if not lang_info:
code = code.split('-', 1)[0]
if code not in all_codes:
lang_info = LANG_INFO.get(code)
if lang_info:
return code, lang_info['name']
else:
return code_orig, code_orig
HORIZON_DIR = '/opt/stack/horizon'
langs_horizon = os.listdir(os.path.join(HORIZON_DIR, 'horizon', 'locale'))
langs_dashboard = os.listdir(os.path.join(HORIZON_DIR, 'openstack_dashboard', 'locale'))
# Pick up languages with both horizon and openstack_dashboard translations
langs = set(langs_horizon) & set(langs_dashboard)
lang_list = [get_django_lang_name(l, langs) for l in sorted(langs)]
print 'LANGUAGES = ',
pprint.pprint(tuple(lang_list))
| 0
| 0
| 0
| 0
| 0
| 360
| 0
| 0
| 23
|
fe6edc63bbf0559878618a4e33821990cd4a7535
| 235
|
py
|
Python
|
problems/Codeforces/Birthday.py
|
jspw/Basic_Python
|
aa159f576a471c6deebdf1e5f462dfc9ffb4930b
|
[
"Unlicense"
] | 6
|
2020-06-25T14:52:09.000Z
|
2021-08-05T20:54:15.000Z
|
problems/Codeforces/Birthday.py
|
jspw/Basic_Python
|
aa159f576a471c6deebdf1e5f462dfc9ffb4930b
|
[
"Unlicense"
] | null | null | null |
problems/Codeforces/Birthday.py
|
jspw/Basic_Python
|
aa159f576a471c6deebdf1e5f462dfc9ffb4930b
|
[
"Unlicense"
] | null | null | null |
_ = input()
m = map(int, input().split())
m = sorted(m)
#print(m)
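# Arrange the sorted values in a "mountain" shape: elements at even indices in
# ascending order, followed by elements at odd indices in descending order.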
l=[]
for i in range(len(m)):
if(i%2==0):
l.append(str(m[i]))
for i in range(len(m)-1,0,-1):
if(i%2!=0):
l.append(str(m[i]))
print(' '.join(l))
| 15.666667
| 30
| 0.489362
|
_ = input()
m = map(int, input().split())
m = sorted(m)
#print(m)
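# Arrange the sorted values in a "mountain" shape: elements at even indices in
# ascending order, followed by elements at odd indices in descending order.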
l=[]
for i in range(len(m)):
if(i%2==0):
l.append(str(m[i]))
for i in range(len(m)-1,0,-1):
if(i%2!=0):
l.append(str(m[i]))
print(' '.join(l))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fc6a9e223a774f2d6514568d4a145897adc465d7
| 1,144
|
py
|
Python
|
figures/ring.py
|
deepsphere/paper-deepsphere-rlgm2019
|
e3b56f48da5b18bcf9a78426b19f203c5c0dda54
|
[
"CC-BY-4.0"
] | 1
|
2020-11-05T13:45:40.000Z
|
2020-11-05T13:45:40.000Z
|
figures/ring.py
|
deepsphere/paper-iclr19-rlgm
|
e3b56f48da5b18bcf9a78426b19f203c5c0dda54
|
[
"CC-BY-4.0"
] | null | null | null |
figures/ring.py
|
deepsphere/paper-iclr19-rlgm
|
e3b56f48da5b18bcf9a78426b19f203c5c0dda54
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
import os
import numpy as np
import pygsp as gsp
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
# plt.rc('font', family='Latin Modern Roman')
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{lmodern}')
fig = plt.figure(figsize = (3, 3))
ax = fig.add_subplot(1, 1, 1)
G = gsp.graphs.ring.Ring(8)
G.plot(edges=True, ax=ax, title='', vertex_color='r', edge_color='b')
circle = plt.Circle((0, 0), radius=1, color='g', fill=False, linewidth=3)
ax.add_artist(circle)
angle = 45*1.5
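# angle of the dashed radius, in degrees (45 * 1.5 = 67.5); converted to
# radians below via angle/360 * 2*np.pi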
line_1 = plt.Line2D([1, 0], [0, 0], linewidth=2, linestyle="-", color="black")
line_2 = plt.Line2D([np.cos(angle/360*2*np.pi), 0], [np.sin(angle/360*2*np.pi), 0], linewidth=2, linestyle = "--", color="black")
ax.add_line(line_1)
ax.add_line(line_2)
angle_plot = Arc((0,0), 0.8, 0.8, 0, 0, angle, color='black', linewidth=2)
ax.add_patch(angle_plot)
ax.text(0.5*np.cos(angle/2/360*2*np.pi), 0.5*np.sin(angle/2/360*2*np.pi), r"$\theta$", fontsize=18)
ax.axis('off')
ax.axis('equal')
fig.tight_layout()
filename = os.path.splitext(os.path.basename(__file__))[0] + '.pdf'
fig.savefig(filename)
| 27.902439
| 129
| 0.68007
|
#!/usr/bin/env python3
import os
import numpy as np
import pygsp as gsp
import matplotlib.pyplot as plt
from matplotlib.patches import Arc
# plt.rc('font', family='Latin Modern Roman')
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage{lmodern}')
fig = plt.figure(figsize = (3, 3))
ax = fig.add_subplot(1, 1, 1)
G = gsp.graphs.ring.Ring(8)
G.plot(edges=True, ax=ax, title='', vertex_color='r', edge_color='b')
circle = plt.Circle((0, 0), radius=1, color='g', fill=False, linewidth=3)
ax.add_artist(circle)
angle = 45*1.5
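# angle of the dashed radius, in degrees (45 * 1.5 = 67.5); converted to
# radians below via angle/360 * 2*np.pi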
line_1 = plt.Line2D([1, 0], [0, 0], linewidth=2, linestyle="-", color="black")
line_2 = plt.Line2D([np.cos(angle/360*2*np.pi), 0], [np.sin(angle/360*2*np.pi), 0], linewidth=2, linestyle = "--", color="black")
ax.add_line(line_1)
ax.add_line(line_2)
angle_plot = Arc((0,0), 0.8, 0.8, 0, 0, angle, color='black', linewidth=2)
ax.add_patch(angle_plot)
ax.text(0.5*np.cos(angle/2/360*2*np.pi), 0.5*np.sin(angle/2/360*2*np.pi), r"$\theta$", fontsize=18)
ax.axis('off')
ax.axis('equal')
fig.tight_layout()
filename = os.path.splitext(os.path.basename(__file__))[0] + '.pdf'
fig.savefig(filename)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8fe47d6e92443b760621ca519956d6954987c080
| 6,343
|
py
|
Python
|
blend/test/TestConfiguration.py
|
azavea/blend
|
2cafbc326f2e6b3f1947581cca860ff544adada1
|
[
"MIT"
] | 1
|
2017-03-06T14:55:29.000Z
|
2017-03-06T14:55:29.000Z
|
blend/test/TestConfiguration.py
|
azavea/blend
|
2cafbc326f2e6b3f1947581cca860ff544adada1
|
[
"MIT"
] | null | null | null |
blend/test/TestConfiguration.py
|
azavea/blend
|
2cafbc326f2e6b3f1947581cca860ff544adada1
|
[
"MIT"
] | null | null | null |
# By Justin Walgran
# Copyright (c) 2012 Azavea, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
| 40.401274
| 116
| 0.700457
|
# By Justin Walgran
# Copyright (c) 2012 Azavea, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import unittest
from blend import Configuration
from blend import Analyzer
from blend.Resource import Resource
from blend.SizeAnalyzer import SizeAnalyzer
from blend import Minifier
from blend.YUICompressorMinifier import YUICompressorMinifier
import os
import shutil
import tempfile
from helpers import clean_output, create_file_with_content
class TestConfiguration(unittest.TestCase):
def setUp(self):
self.test_env_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_env_dir)
clean_output()
def test_can_add_analyzer_for_filetype(self):
conf = Configuration()
analyzer = Analyzer()
conf.add_analyzer_for_file_type(analyzer, 'javascript')
resource = Resource('file.js')
analyzers = conf.get_analyzers_for_resource(resource)
self.assertListEqual([analyzer], analyzers)
def test_returns_non_when_asking_for_analyzers_for_an_unknown_file_type(self):
conf = Configuration()
analyzer = Analyzer()
conf.add_analyzer_for_file_type(analyzer, 'javascript')
resource = Resource('file.foo')
analyzers = conf.get_analyzers_for_resource(resource)
self.assertIsNone(analyzers)
def test_add_analyzer_checks_classes(self):
conf = Configuration()
self.assertRaises(Exception, conf.add_analyzer_for_file_type, 'string instead of an analyzer', 'javascript')
# should not throw
conf.add_analyzer_for_file_type(Analyzer(), 'javascript')
# should not throw
conf.add_analyzer_for_file_type(SizeAnalyzer(), 'javascript')
def test_throws_when_passed_an_invalid_config_file_path(self):
self.assertRaises(Exception, Configuration, '/some/non/existent/path')
def test_can_load_analyzers_from_config_file(self):
config_file_path = os.path.join(self.test_env_dir, 'blend.config')
create_file_with_content(config_file_path,
"""{
"analyzers": {
"javascript": [
{
"name": "blend.SizeAnalyzer",
"skip_list": [
"bin"
]
}
]
}
}""")
conf = Configuration(config_file_path)
resource = Resource('file.js')
actual_analyzers = conf.get_analyzers_for_resource(resource)
self.assertIsNotNone(actual_analyzers)
self.assertEqual(1, len(actual_analyzers))
self.assertIsInstance(actual_analyzers[0], SizeAnalyzer)
self.assertIsNotNone(conf.analyzer_skip_lists)
def test_can_load_minfiers_from_config_file(self):
config_file_path = os.path.join(self.test_env_dir, 'blend.config')
create_file_with_content(config_file_path,
"""{
"minifiers": {
"javascript": {
"name": "blend.YUICompressorMinifier"
}
}
}""")
conf = Configuration(config_file_path)
resource = Resource('file.js')
actual_minifier = conf.get_minifier_for_file_type(resource.file_type)
self.assertIsNotNone(actual_minifier)
self.assertIsInstance(actual_minifier, YUICompressorMinifier)
def test_can_add_minifier_for_filetype(self):
conf = Configuration()
minifier = Minifier()
conf.set_minifier_for_file_type(minifier, 'javascript')
actual_minifier = conf.get_minifier_for_file_type('javascript')
self.assertEqual(minifier, actual_minifier)
def test_add_minifier_checks_classes(self):
conf = Configuration()
self.assertRaises(Exception, conf.set_minifier_for_file_type, 'string instead of an minifier', 'javascript')
# should not throw
conf.set_minifier_for_file_type(Minifier(), 'javascript')
# should not throw
conf.set_minifier_for_file_type(YUICompressorMinifier(), 'javascript')
def test_returns_none_when_asking_for_minifier_for_an_unknown_file_type(self):
conf = Configuration()
minifier = Minifier()
conf.set_minifier_for_file_type(minifier, 'javascript')
analyzers = conf.get_minifier_for_file_type('some-other-type')
self.assertIsNone(analyzers)
def test_get_analyzers_for_resource_with_skip_list(self):
lib_resource = Resource(os.path.join(os.getcwd(), 'lib', 'jquery.js'))
deep_lib_resource = Resource(os.path.join(os.getcwd(), 'deeply', 'nested', 'lib', 'backbone.js'))
src_resource = Resource(os.path.join(os.getcwd(), 'src', 'file.js'))
conf = Configuration()
analyzer = Analyzer()
conf.add_analyzer_for_file_type(analyzer, 'javascript', [
os.path.join('lib', '*'),
os.path.join('*', 'lib', '*')
])
self.assertIsNone(conf.get_analyzers_for_resource(lib_resource))
self.assertIsNone(conf.get_analyzers_for_resource(deep_lib_resource))
self.assertEqual([analyzer], conf.get_analyzers_for_resource(src_resource))
def test_add_analyzer_for_file_type_raises_when_skip_list_is_a_string(self):
conf = Configuration()
self.assertRaises(Exception, conf.add_analyzer_for_file_type, Analyzer(), 'javascript', 'something invalid')
| 0
| 0
| 0
| 4,855
| 0
| 0
| 0
| 101
| 269
|
7d4df8ca4fd9f6faefafdc8a8cbba5f7922eda77
| 1,876
|
py
|
Python
|
07/7.1.py
|
abe-101/ThinkPython-2
|
bcebb1e9b3cc63c403f59c3cc0f33017bb017363
|
[
"MIT"
] | 1
|
2021-12-16T16:46:47.000Z
|
2021-12-16T16:46:47.000Z
|
07/7.1.py
|
abe-101/ThinkPython-2
|
bcebb1e9b3cc63c403f59c3cc0f33017bb017363
|
[
"MIT"
] | null | null | null |
07/7.1.py
|
abe-101/ThinkPython-2
|
bcebb1e9b3cc63c403f59c3cc0f33017bb017363
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import math
def mysqrt(a):
"""Compute the square root of a using Newton's method: start with an
approximate answer and iteratively improving it
"""
    estimate = a / 2 + 1  # Arbitrary initial estimate of the square root of a
epsilon = 0.0000001
while True:
approx = (estimate + a / estimate)/2
if abs(approx-estimate) < epsilon:
return approx
estimate = approx
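# A quick sanity check (illustrative, not part of the original exercise):
# mysqrt(2) returns ~1.414213562..., matching math.sqrt(2) to well within
# the 1e-07 stopping tolerance used above.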
def test_square_root(a):
"""Print a table that, for all the numbers in the range a,
    compares the square roots calculated with
    Newton's method with those calculated with the built-in function math.sqrt()
    and displays the absolute error between the two.
"""
n = float(1)
    print('n', ' '*10, 'mysqrt(n)', ' '*10, 'math.sqrt(n)', ' '*10, 'diff')
print('-', ' '*10, '---------', ' '*10, '------------', ' '*10, '----')
for i in range(a):
my_square = mysqrt(n)
math_square = math.sqrt(n)
abs_error = abs(math_square - my_square)
x = str(n)
if (len(x) >= 4):
val = x + (' '*(9-(len(x)-3)))
else:
val = x + ' '*9
perfect_square = math_square*math_square == n
my_square = format(my_square, '.12f')
math_square = format(math_square, '.12f')
abs_error = format(abs_error, '.12g')
if (perfect_square):
my_square = my_square[:3]
math_square = math_square[:3]
space1 = ' '*16
space2 = ' '*19
else:
space1 = ' '*5
space2 = ' '*8
print(val, my_square, space1, math_square, space2, abs_error)
n += 1
def ask_user():
"""Prompt the user to enter how many numbers to be calculated"""
a = int(input('Enter how many numbers you want to calculate: '))
test_square_root(a)
ask_user()
| 31.266667
| 80
| 0.549041
|
#!/usr/bin/python3
import math
def mysqrt(a):
"""Compute the square root of a using Newton's method: start with an
approximate answer and iteratively improving it
"""
    estimate = a / 2 + 1  # Arbitrary initial estimate of the square root of a
epsilon = 0.0000001
while True:
approx = (estimate + a / estimate)/2
if abs(approx-estimate) < epsilon:
return approx
estimate = approx
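# A quick sanity check (illustrative, not part of the original exercise):
# mysqrt(2) returns ~1.414213562..., matching math.sqrt(2) to well within
# the 1e-07 stopping tolerance used above.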
def test_square_root(a):
"""Print a table that, for all the numbers in the range a,
    compares the square roots calculated with
    Newton's method with those calculated with the built-in function math.sqrt()
    and displays the absolute error between the two.
"""
n = float(1)
    print('n', ' '*10, 'mysqrt(n)', ' '*10, 'math.sqrt(n)', ' '*10, 'diff')
print('-', ' '*10, '---------', ' '*10, '------------', ' '*10, '----')
for i in range(a):
my_square = mysqrt(n)
math_square = math.sqrt(n)
abs_error = abs(math_square - my_square)
x = str(n)
if (len(x) >= 4):
val = x + (' '*(9-(len(x)-3)))
else:
val = x + ' '*9
perfect_square = math_square*math_square == n
my_square = format(my_square, '.12f')
math_square = format(math_square, '.12f')
abs_error = format(abs_error, '.12g')
if (perfect_square):
my_square = my_square[:3]
math_square = math_square[:3]
space1 = ' '*16
space2 = ' '*19
else:
space1 = ' '*5
space2 = ' '*8
print(val, my_square, space1, math_square, space2, abs_error)
n += 1
def ask_user():
"""Prompt the user to enter how many numbers to be calculated"""
a = int(input('Enter how many numbers you want to calculate: '))
test_square_root(a)
ask_user()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
df668d722b67fd2443fb0e29147acb271a0d6a49
| 63,507
|
py
|
Python
|
tests/simulations/system/test_system_unitary.py
|
john-grando/pyExpandObjects
|
c08b1d1bc45684bc71c0f49b4d2f22c707cd4aa4
|
[
"BSD-3-Clause"
] | null | null | null |
tests/simulations/system/test_system_unitary.py
|
john-grando/pyExpandObjects
|
c08b1d1bc45684bc71c0f49b4d2f22c707cd4aa4
|
[
"BSD-3-Clause"
] | 1
|
2021-02-03T01:56:56.000Z
|
2021-02-03T01:56:56.000Z
|
tests/simulations/system/test_system_unitary.py
|
john-grando/pyExpandObjects
|
c08b1d1bc45684bc71c0f49b4d2f22c707cd4aa4
|
[
"BSD-3-Clause"
] | 1
|
2022-01-11T18:31:05.000Z
|
2022-01-11T18:31:05.000Z
|
from pathlib import Path
test_dir = Path(__file__).parent.parent.parent
hot_water_objects = {
"HVACTemplate:Plant:Boiler": {
"Main Boiler": {
"boiler_type": "HotWaterBoiler",
"capacity": "Autosize",
"efficiency": 0.8,
"fuel_type": "NaturalGas",
"priority": "1"
}
},
"HVACTemplate:Plant:HotWaterLoop": {
"Hot Water Loop": {
"hot_water_design_setpoint": 82,
"hot_water_plant_operation_scheme_type": "Default",
"hot_water_pump_configuration": "ConstantFlow",
"hot_water_pump_rated_head": 179352,
"hot_water_reset_outdoor_dry_bulb_high": 10,
"hot_water_reset_outdoor_dry_bulb_low": -6.7,
"hot_water_setpoint_at_outdoor_dry_bulb_high": 65.6,
"hot_water_setpoint_at_outdoor_dry_bulb_low": 82.2,
"hot_water_setpoint_reset_type": "OutdoorAirTemperatureReset",
"pump_control_type": "Intermittent"
}
}
}
schedule_objects = {
"Schedule:Compact": {
"Always0.8": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 0.8
}
],
"schedule_type_limits_name": "Any Number"
},
"Always6.8": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 6.8
}
],
"schedule_type_limits_name": "Any Number"
},
"Always12.5": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 12.5
}
],
"schedule_type_limits_name": "Any Number"
},
"Always15.5": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 15.5
}
],
"schedule_type_limits_name": "Any Number"
},
"Always62": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 62.0
}
],
"schedule_type_limits_name": "Any Number"
},
"Always29": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 29.0
}
],
"schedule_type_limits_name": "Any Number"
}
}
}
| 56.652096
| 125
| 0.678366
|
from pathlib import Path
from tests.simulations import BaseSimulationTest
from src.epjson_handler import EPJSON
test_dir = Path(__file__).parent.parent.parent
hot_water_objects = {
"HVACTemplate:Plant:Boiler": {
"Main Boiler": {
"boiler_type": "HotWaterBoiler",
"capacity": "Autosize",
"efficiency": 0.8,
"fuel_type": "NaturalGas",
"priority": "1"
}
},
"HVACTemplate:Plant:HotWaterLoop": {
"Hot Water Loop": {
"hot_water_design_setpoint": 82,
"hot_water_plant_operation_scheme_type": "Default",
"hot_water_pump_configuration": "ConstantFlow",
"hot_water_pump_rated_head": 179352,
"hot_water_reset_outdoor_dry_bulb_high": 10,
"hot_water_reset_outdoor_dry_bulb_low": -6.7,
"hot_water_setpoint_at_outdoor_dry_bulb_high": 65.6,
"hot_water_setpoint_at_outdoor_dry_bulb_low": 82.2,
"hot_water_setpoint_reset_type": "OutdoorAirTemperatureReset",
"pump_control_type": "Intermittent"
}
}
}
schedule_objects = {
"Schedule:Compact": {
"Always0.8": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 0.8
}
],
"schedule_type_limits_name": "Any Number"
},
"Always6.8": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 6.8
}
],
"schedule_type_limits_name": "Any Number"
},
"Always12.5": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 12.5
}
],
"schedule_type_limits_name": "Any Number"
},
"Always15.5": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 15.5
}
],
"schedule_type_limits_name": "Any Number"
},
"Always62": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 62.0
}
],
"schedule_type_limits_name": "Any Number"
},
"Always29": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 29.0
}
],
"schedule_type_limits_name": "Any Number"
}
}
}
class TestSimulationsSystemUnitary(BaseSimulationTest):
def setUp(self):
self.ej = EPJSON()
base_idf_file_path = test_dir.joinpath('..', 'simulation', 'ExampleFiles', 'HVACTemplate-5ZoneFurnaceDX.idf')
base_copy_file_path = self._copy_to_test_directory(base_idf_file_path)
# read in base file, then edit inputs for alternate tests
self.base_epjson = self.get_epjson_object_from_idf_file(base_copy_file_path)
self.base_epjson.pop('Output:Variable')
return
    def tearDown(self):
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:minimum_inputs")
def test_minimum_inputs(self):
self.base_epjson['HVACTemplate:Zone:Unitary']['HVACTemplate:Zone:Unitary 1'][
'zone_cooling_design_supply_air_temperature_input_method'] = 'SupplyAirTemperature'
self.base_epjson['HVACTemplate:Zone:Unitary']['HVACTemplate:Zone:Unitary 2'][
'zone_cooling_design_supply_air_temperature_input_method'] = 'SupplyAirTemperature'
self.base_epjson['HVACTemplate:Zone:Unitary']['HVACTemplate:Zone:Unitary 3'][
'zone_cooling_design_supply_air_temperature_input_method'] = 'SupplyAirTemperature'
self.base_epjson['HVACTemplate:Zone:Unitary']['HVACTemplate:Zone:Unitary 4'][
'zone_cooling_design_supply_air_temperature_input_method'] = 'SupplyAirTemperature'
self.base_epjson['HVACTemplate:System:Unitary'].pop('Furnace DX 1-1')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
'HVACTemplate:System:Unitary': {
'Furnace DX 1-1': {
}
}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:system_availability_schedule_name")
def test_system_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'system_availability_schedule_name'] = 'OCCUPY-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'night_cycle_control'] = 'CycleOnAny'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Fan:OnOff']['Furnace DX 1-1 Supply Fan']['availability_schedule_name'])
self.assertEqual(
'OCCUPY-1',
epjson_output['AvailabilityManager:NightCycle']['Furnace DX 1-1 Availability']['fan_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_maximum_flow_rate")
def test_supply_fan_maximum_flow_rate(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1']['supply_fan_maximum_flow_rate'] = 1.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.01,
epjson_output['Sizing:System']['Furnace DX 1-1 Sizing System']['cooling_supply_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:Unitary:supply_fan_operating_mode_schedule_name")
def test_supply_fan_operating_mode_schedule_name(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'supply_fan_operating_mode_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'supply_air_fan_operating_mode_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_total_efficiency")
def test_supply_fan_total_efficiency(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1']['supply_fan_total_efficiency'] = 0.65
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.65,
epjson_output['Fan:OnOff']['Furnace DX 1-1 Supply Fan']['fan_total_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_delta_pressure")
def test_supply_fan_delta_pressure(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1']['supply_fan_delta_pressure'] = 500
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
500,
epjson_output['Fan:OnOff']['Furnace DX 1-1 Supply Fan']['pressure_rise'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_motor_efficiency")
def test_supply_fan_motor_efficiency(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1']['supply_fan_motor_efficiency'] = 0.8
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.8,
epjson_output['Fan:OnOff']['Furnace DX 1-1 Supply Fan']['motor_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_motor_in_air_stream_fraction")
def test_supply_fan_motor_in_air_stream_fraction(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'supply_fan_motor_in_air_stream_fraction'] = 0.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.9,
epjson_output['Fan:OnOff']['Furnace DX 1-1 Supply Fan']['motor_in_airstream_fraction'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:cooling_coil_type_single_speed_dx")
def test_cooling_coil_type_single_speed_dx(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_coil_type'] = 'SingleSpeedDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:DX:SingleSpeed'].get('Furnace DX 1-1 Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:cooling_coil_type_none")
def test_cooling_coil_type_none(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output['Coil:Cooling:DX:SingleSpeed'].get('Furnace DX 1-1 Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:cooling_coil_availability_schedule_name")
def test_cooling_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Cooling:DX:SingleSpeed']['Furnace DX 1-1 Cooling Coil']['availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"cooling_design_supply_air_temperature")
def test_cooling_design_supply_air_temperature(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_design_supply_air_temperature'] = 12.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
12.9,
epjson_output['Sizing:System']['Furnace DX 1-1 Sizing System'][
'central_cooling_design_supply_air_temperature'])
self.assertEqual(
12.9,
epjson_output['SetpointManager:SingleZone:Cooling']['Furnace DX 1-1 Cooling Supply Air Temp Manager'][
'minimum_supply_air_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"cooling_coil_gross_rated_total_capacity")
def test_cooling_coil_gross_rated_total_capacity(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_coil_gross_rated_total_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Cooling:DX:SingleSpeed']['Furnace DX 1-1 Cooling Coil'][
'gross_rated_total_cooling_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"cooling_coil_gross_rated_sensible_heat_ratio")
def test_cooling_coil_gross_rated_sensible_heat_ratio(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_coil_gross_rated_sensible_heat_ratio'] = 0.75
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.75,
epjson_output['Coil:Cooling:DX:SingleSpeed']['Furnace DX 1-1 Cooling Coil'][
'gross_rated_sensible_heat_ratio'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"cooling_coil_gross_rated_cop")
def test_cooling_coil_gross_rated_cop(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'cooling_coil_gross_rated_cop'] = 3.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
3.1,
epjson_output['Coil:Cooling:DX:SingleSpeed']['Furnace DX 1-1 Cooling Coil'][
'gross_rated_cooling_cop'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heating_coil_type_electric")
def test_heating_coil_type_electric(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'Electric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Electric'].get('Furnace DX 1-1 Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heating_coil_type_gas")
def test_heating_coil_type_gas(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'Gas'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Fuel'].get('Furnace DX 1-1 Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heating_coil_type_hot_water")
def test_heating_coil_type_hot_water(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=hot_water_objects)
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'HotWater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Water'].get('Furnace DX 1-1 Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heating_coil_availability_schedule_name")
def test_heating_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Heating:Fuel']['Furnace DX 1-1 Heating Coil']['availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"heating_design_supply_air_temperature")
def test_heating_design_supply_air_temperature(self):
# todo_eo: why is the SetpointManager:SingleZone:Cooling object not affected by this input
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_design_supply_air_temperature'] = 48
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
48,
epjson_output['Sizing:System']['Furnace DX 1-1 Sizing System'][
'central_heating_design_supply_air_temperature'])
# self.assertEqual(
# 48,
# epjson_output['SetpointManager:SingleZone:Cooling']['Furnace DX 1-1 Cooling Supply Air Temp Manager'][
# 'maximum_supply_air_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"heating_coil_capacity")
def test_heating_coil_capacity(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Heating:Fuel']['Furnace DX 1-1 Heating Coil'][
'nominal_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"gas_heating_coil_efficiency")
def test_gas_heating_coil_efficiency(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'gas_heating_coil_efficiency'] = 0.77
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.77,
epjson_output['Coil:Heating:Fuel']['Furnace DX 1-1 Heating Coil']['burner_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"gas_heating_coil_parasitic_electric_load")
def test_gas_heating_coil_parasitic_electric_load(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'gas_heating_coil_parasitic_electric_load'] = 1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1,
epjson_output['Coil:Heating:Fuel']['Furnace DX 1-1 Heating Coil']['parasitic_electric_load'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"outdoor_air_flow_rates")
def test_outdoor_air_flow_rates(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'maximum_outdoor_air_flow_rate'] = 0.66
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'minimum_outdoor_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.66,
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['maximum_outdoor_air_flow_rate'])
self.assertEqual(
0.1,
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['minimum_outdoor_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:minimum_outdoor_air_schedule_name")
def test_minimum_outdoor_air_schedule_name(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=schedule_objects)
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'minimum_outdoor_air_schedule_name'] = 'Always0.8'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Always0.8',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller'][
'minimum_outdoor_air_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_type_no_economizer")
def test_economizer_type_no_economizer(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'NoEconomizer'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'NoEconomizer',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_type_fixed_dry_bulb")
def test_economizer_type_fixed_dry_bulb(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'FixedDryBulb'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'FixedDryBulb',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_type_fixed_enthalpy")
def test_economizer_type_fixed_enthalpy(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'FixedEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'FixedEnthalpy',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_type_differential_dry_bulb")
def test_economizer_type_differential_dry_bulb(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'DifferentialDryBulb'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DifferentialDryBulb',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_type_differential_enthalpy")
def test_economizer_type_differential_enthalpy(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'DifferentialEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DifferentialEnthalpy',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"economizer_type_fixed_dew_point_and_dry_bulb")
def test_economizer_type_fixed_dew_point_and_dry_bulb(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'FixedDewPointAndDryBulb'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'FixedDewPointAndDryBulb',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"electronic_enthalpy")
def test_economizer_type_electronic_enthalpy(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'ElectronicEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'ElectronicEnthalpy',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_type_"
"differential_dry_bulb_and_enthalpy")
def test_economizer_type_differential_dry_bulb_and_enthalpy(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'DifferentialDryBulbAndEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DifferentialDryBulbAndEnthalpy',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_lockout_no_lockout")
def test_economizer_lockout_no_lockout(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_lockout'] = 'NoLockout'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'NoLockout',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['lockout_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_lockout_lockout_with_heating")
def test_economizer_lockout_lockout_with_heating(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_lockout'] = 'LockoutWithHeating'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'LockoutWithHeating',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['lockout_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"economizer_lockout_lockout_with_compressor")
def test_economizer_lockout_lockout_with_compressor(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_lockout'] = 'LockoutWithCompressor'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'LockoutWithCompressor',
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['lockout_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_temperature_limits")
def test_economizer_temperature_limits(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_type'] = 'FixedDryBulb'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_upper_temperature_limit'] = 18
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_lower_temperature_limit'] = 5
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
18,
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller'][
'economizer_maximum_limit_dry_bulb_temperature'])
self.assertEqual(
5,
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller'][
'economizer_minimum_limit_dry_bulb_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_upper_enthalpy_limit")
def test_economizer_upper_enthalpy_limit(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_upper_enthalpy_limit'] = 100
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
100,
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller']['economizer_maximum_limit_enthalpy'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:economizer_maximum_limit_dewpoint_temperature")
def test_economizer_maximum_limit_dewpoint_temperature(self):
# todo_eo: Notes say that limit is applied regardless of what economizer type is applied. However, EO only
# applies the value when certain economizer is selected. Figure out what is preferred method.
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'economizer_maximum_limit_dewpoint_temperature'] = 20
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
20,
epjson_output['Controller:OutdoorAir']['Furnace DX 1-1 OA Controller'][
'economizer_maximum_limit_dewpoint_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_plenum_name")
def test_supply_plenum_name(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'supply_plenum_name'] = 'PLENUM-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'PLENUM-1',
epjson_output['AirLoopHVAC:SupplyPlenum']['Furnace DX 1-1 Supply Plenum']['zone_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:return_plenum_name")
def test_return_plenum_name(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_plenum_name'] = 'PLENUM-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'PLENUM-1',
epjson_output['AirLoopHVAC:ReturnPlenum']['Furnace DX 1-1 Return Plenum']['zone_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_placement_blow_through")
def test_supply_fan_placement_blow_through(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'supply_fan_placement'] = 'BlowThrough'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'BlowThrough',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'fan_placement'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:supply_fan_placement_draw_through")
def test_supply_fan_placement_draw_through(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'supply_fan_placement'] = 'DrawThrough'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DrawThrough',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'fan_placement'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:night_cycle_control_stay_off")
def test_night_cycle_control_stay_off(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'night_cycle_control'] = 'StayOff'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'StayOff',
epjson_output['AvailabilityManager:NightCycle']['Furnace DX 1-1 Availability']['control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:night_cycle_control_cycle_on_any")
def test_night_cycle_control_cycle_on_any(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'night_cycle_control'] = 'CycleOnAny'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'CycleOnAny',
epjson_output['AvailabilityManager:NightCycle']['Furnace DX 1-1 Availability']['control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:night_cycle_control_cycle_on_control_zone")
def test_night_cycle_control_cycle_on_control_zone(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'night_cycle_control'] = 'CycleOnControlZone'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'night_cycle_control_zone_name'] = 'SPACE1-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'CycleOnControlZone',
epjson_output['AvailabilityManager:NightCycle']['Furnace DX 1-1 Availability']['control_type'])
self.assertEqual(
'SPACE1-1',
epjson_output['AvailabilityManager:NightCycle']['Furnace DX 1-1 Availability']['control_zone_or_zone_list_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heat_recovery_sensible")
def test_heat_recovery_sensible(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heat_recovery_type'] = 'Sensible'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heat_recovery_enthalpy")
def test_heat_recovery_enthalpy(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heat_recovery_type'] = 'Enthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heat_recovery_effectiveness_sensible")
def test_heat_recovery_effectiveness_sensible(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heat_recovery_type'] = 'Sensible'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'sensible_heat_recovery_effectiveness'] = 0.72
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
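        # the expanded objects appear to add 0.05 to the supplied effectiveness
        # at the 75% air-flow points (0.72 -> 0.77), presumably matching the
        # legacy ExpandObjects convention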
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_75_cooling_air_flow'])
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_75_heating_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_100_cooling_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_100_heating_air_flow'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:heat_recovery_effectiveness_enthalpy")
def test_heat_recovery_effectiveness_enthalpy(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heat_recovery_type'] = 'Enthalpy'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'sensible_heat_recovery_effectiveness'] = 0.72
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'latent_heat_recovery_effectiveness'] = 0.61
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_75_cooling_air_flow'])
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_75_heating_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_100_cooling_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'sensible_effectiveness_at_100_heating_air_flow'])
self.assertEqual(
0.61,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'latent_effectiveness_at_100_cooling_air_flow'])
self.assertEqual(
0.61,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'latent_effectiveness_at_100_heating_air_flow'])
self.assertEqual(
0.66,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'latent_effectiveness_at_75_cooling_air_flow'])
self.assertEqual(
0.66,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Furnace DX 1-1 Heat Recovery'][
'latent_effectiveness_at_75_heating_air_flow'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:dehumidification_control_type_none")
def test_dehumidification_control_type_none(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
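        # no object-level assertions here; perform_full_comparison above is the
        # effective check, and the expanded epJSON is loaded but not asserted on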
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"dehumidification_control_type_cool_reheat_gas")
def test_dehumidification_control_type_cool_reheat_heating_coil_gas(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'Gas'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_type'] = 'CoolReheatHeatingCoil'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_setpoint'] = 62
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'HVACTemplate-Always62.0',
epjson_output['ZoneControl:Humidistat']['Furnace DX 1-1 Dehumidification Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
self.assertEqual(
'Coil:Heating:Fuel',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'heating_coil_object_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"dehumidification_control_type_cool_reheat_electric")
def test_dehumidification_control_type_cool_reheat_heating_coil_electric(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'Electric'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_type'] = 'CoolReheatHeatingCoil'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_setpoint'] = 62
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'HVACTemplate-Always62.0',
epjson_output['ZoneControl:Humidistat']['Furnace DX 1-1 Dehumidification Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
self.assertEqual(
'Coil:Heating:Electric',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'heating_coil_object_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:"
"dehumidification_control_type_cool_reheat_hot_water")
def test_dehumidification_control_type_cool_reheat_heating_coil_hot_water(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=hot_water_objects)
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'HotWater'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_type'] = 'CoolReheatHeatingCoil'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_setpoint'] = 62
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'HVACTemplate-Always62.0',
epjson_output['ZoneControl:Humidistat']['Furnace DX 1-1 Dehumidification Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
self.assertEqual(
'Coil:Heating:Water',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'heating_coil_object_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:humidifier_type")
def test_humidifier_type(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_type'] = 'ElectricSteam'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_setpoint'] = 29
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['Humidifier:Steam:Electric'].get('Furnace DX 1-1 Humidifier'))
self.assertEqual(
'HVACTemplate-Always29.0',
epjson_output['ZoneControl:Humidistat']['Furnace DX 1-1 Humidification Humidistat'][
'humidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:humidifier_inputs")
def test_humidifier_inputs(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_type'] = 'ElectricSteam'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_relative_humidity_setpoint'] = 29
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_availability_schedule_name'] = 'OCCUPY-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_rated_capacity'] = 1
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_rated_electric_power'] = 1000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['Humidifier:Steam:Electric'].get('Furnace DX 1-1 Humidifier'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Humidifier:Steam:Electric']['Furnace DX 1-1 Humidifier']['availability_schedule_name'])
self.assertEqual(
1,
epjson_output['Humidifier:Steam:Electric']['Furnace DX 1-1 Humidifier']['rated_capacity'])
self.assertEqual(
1000,
epjson_output['Humidifier:Steam:Electric']['Furnace DX 1-1 Humidifier']['rated_power'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:humidifier_type")
def test_humidifier_and_dehumidifier(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_type'] = 'ElectricSteam'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'humidifier_setpoint'] = 29
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'heating_coil_type'] = 'Electric'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_type'] = 'CoolReheatHeatingCoil'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'dehumidification_setpoint'] = 62
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'HVACTemplate-Always62.0',
epjson_output['ZoneControl:Humidistat']['Furnace DX 1-1 Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
self.assertEqual(
'Coil:Heating:Electric',
epjson_output['AirLoopHVAC:Unitary:Furnace:HeatCool']['Furnace DX 1-1 Furnace with DX Cooling'][
'heating_coil_object_type'])
self.assertIsNotNone(epjson_output['Humidifier:Steam:Electric'].get('Furnace DX 1-1 Humidifier'))
self.assertEqual(
'HVACTemplate-Always29.0',
epjson_output['ZoneControl:Humidistat']['Furnace DX 1-1 Humidistat'][
'humidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:return_fan_yes")
def test_return_fan_yes(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan'] = 'Yes'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['Fan:ConstantVolume'].get('Furnace DX 1-1 Return Fan'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:return_fan_no")
def test_return_fan_no(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan'] = 'No'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(epjson_output.get('Fan:ConstantVolume'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:return_fan_inputs")
def test_return_fan_inputs(self):
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan'] = 'Yes'
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan_total_efficiency'] = 0.72
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan_delta_pressure'] = 295
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan_motor_efficiency'] = 0.85
self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
'return_fan_motor_in_air_stream_fraction'] = 0.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.72,
epjson_output['Fan:ConstantVolume']['Furnace DX 1-1 Return Fan']['fan_total_efficiency'])
self.assertEqual(
295,
epjson_output['Fan:ConstantVolume']['Furnace DX 1-1 Return Fan']['pressure_rise'])
self.assertEqual(
0.85,
epjson_output['Fan:ConstantVolume']['Furnace DX 1-1 Return Fan']['motor_efficiency'])
self.assertEqual(
0.9,
epjson_output['Fan:ConstantVolume']['Furnace DX 1-1 Return Fan']['motor_in_airstream_fraction'])
return
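
    # All of the tests above follow the same pattern: set one template field,
    # rebuild the IDF, run the comparison simulation, then assert on the
    # expanded epJSON.  A hedged sketch of a shared helper that could collapse
    # that boilerplate (method name and signature are illustrative, not part
    # of the original suite):
    def _assert_unitary_template_field(
            self, template_field, template_value,
            output_object, output_name, output_field, expected=None):
        self.base_epjson['HVACTemplate:System:Unitary']['Furnace DX 1-1'][
            template_field] = template_value
        base_file_path = self.create_idf_file_from_epjson(
            epjson=self.base_epjson, file_name='base_pre_input.epJSON')
        self.perform_full_comparison(base_idf_file_path=base_file_path)
        epjson_output = self.ej._get_json_file(test_dir.joinpath(
            '..', 'simulation', 'test', 'test_input_epjson.epJSON'))
        self.assertEqual(
            expected if expected is not None else template_value,
            epjson_output[output_object][output_name][output_field])
        return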
| 0
| 57,525
| 0
| 2,153
| 0
| 0
| 0
| 43
| 68
|
b309c952311c2ba99fc6444d9dabcacf6b2a8a7a
| 1,287
|
py
|
Python
|
apps/base/migrations/0052_fill_new_item_order.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/base/migrations/0052_fill_new_item_order.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
apps/base/migrations/0052_fill_new_item_order.py
|
KolevDarko/lifehq
|
88d92f5fe76f2fb6511f2a892e096d95a69985d8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2019-01-31 07:28
| 33.868421
| 73
| 0.62704
|
# Generated by Django 2.0 on 2019-01-31 07:28
from django.db import migrations
class Migration(migrations.Migration):
def forwards_func(apps, schema_editor):
ProjectTodoList = apps.get_model('base', 'ProjectTodoList')
PersonalTodoList = apps.get_model('base', 'PersonalTodoList')
for project_list in ProjectTodoList.objects.all():
i = 1
for todo_item in project_list.todos.order_by('id'):
todo_item.project_list_order = i
todo_item.save()
i+=1
for personal_list in PersonalTodoList.objects.all():
i = 1
for todo_item in personal_list.personal_todos.order_by('id'):
todo_item.personal_list_order = i
todo_item.save()
i+=1
def reverse_func(apps, schema_editor):
ProjectTodoItem = apps.get_model('base', 'ProjectTodoItem')
for project_item in ProjectTodoItem.objects.all():
project_item.personal_list_order = None
project_item.project_list_order = None
project_item.save()
dependencies = [
('base', '0051_create_new_order_fields'),
]
operations = [
migrations.RunPython(forwards_func, reverse_func, atomic=False)
]
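
# A hedged aside (not part of the original migration): the per-row save() in
# forwards_func issues one UPDATE per todo item.  On Django 2.2+ the same
# backfill could be batched with bulk_update, e.g.:
#
#     items = list(project_list.todos.order_by('id'))
#     for i, todo_item in enumerate(items, start=1):
#         todo_item.project_list_order = i
#     ProjectTodoItem.objects.bulk_update(items, ['project_list_order'])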
| 0
| 0
| 0
| 1,183
| 0
| 0
| 0
| 11
| 46
|
c32b35ee54e2b205d3b64c553ab89bbe78d5c853
| 7,697
|
py
|
Python
|
tests/bibkey_formatter/test_formatters/test_author_formatters.py
|
astamminger/zotero-bibtize
|
bd518c85d5ea03f952903b721e0d66e8990bd185
|
[
"MIT"
] | 2
|
2019-11-20T02:46:46.000Z
|
2022-03-08T18:32:32.000Z
|
tests/bibkey_formatter/test_formatters/test_author_formatters.py
|
astamminger/zotero-bibtize
|
bd518c85d5ea03f952903b721e0d66e8990bd185
|
[
"MIT"
] | 8
|
2019-11-20T15:31:37.000Z
|
2020-05-05T09:07:05.000Z
|
tests/bibkey_formatter/test_formatters/test_author_formatters.py
|
astamminger/zotero-bibtize
|
bd518c85d5ea03f952903b721e0d66e8990bd185
|
[
"MIT"
] | null | null | null |
"""
Test suite for BibKey formatting sequences.
Tests the generation of key contents based on the author entry
"""
from zotero_bibtize.bibkey_formatter import KeyFormatter
#
# Test lower author formatting
#
#
# Test upper author formatting
#
#
# Test capitalized author formatting
#
#
# Test abbreviated author formatting
#
def test_missing_author():
"""Test editor is used if author is missing"""
key_format = '[author]'
# check that editor is used if author not present
editors = 'Surname, Firstname and Prefix Surname, Firstname'
authors = ''
key_formatter = KeyFormatter({'author': authors, 'editor': editors})
assert key_formatter.generate_key(key_format) == 'Surname'
# check authors take precedence over editors
editors = 'Editor, Firstname and Prefix Author, Firstname'
authors = 'Author, Firstname and Prefix Author, Firstname'
key_formatter = KeyFormatter({'author': authors, 'editor': editors})
assert key_formatter.generate_key(key_format) == 'Author'
# check No Name author is used if none is present
editors = ''
authors = ''
key_formatter = KeyFormatter({'author': authors, 'editor': editors})
assert key_formatter.generate_key(key_format) == 'NoName'
def test_author_list_split_for_name_containing_and():
"""Test that author lists are only split at and that is not part of a name"""
key_format = '[author]'
authors = 'Ackland, G. J. and Bacon, D. J. and Calder, A. F.'
key_formatter = KeyFormatter({'author': authors})
assert key_formatter.generate_key(key_format) == 'Ackland'
| 36.478673
| 81
| 0.715214
|
"""
Test suite for BibKey formatting sequences.
Tests the generation of key contents based on the author entry
"""
from zotero_bibtize.bibkey_formatter import KeyFormatter
#
# Test lower author formatting
#
def test_no_author_lower():
key_formatter = KeyFormatter({})
key_format = '[author:lower]'
assert key_formatter.generate_key(key_format) == 'noname'
def test_single_author_lower():
authors = 'Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:lower]'
assert key_formatter.generate_key(key_format) == 'surname'
def test_prefixed_author_lower():
authors = 'Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:lower]'
assert key_formatter.generate_key(key_format) == 'prefixsurname'
def test_multi_author_lower():
authors = 'Surname, Firstname and Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
# default only first author
key_format = '[author:lower]'
assert key_formatter.generate_key(key_format) == 'surname'
# use only one author (i.e. the first author)
key_format = '[author:1:lower]'
assert key_formatter.generate_key(key_format) == 'surname'
# use two authors from the list
key_format = '[author:2:lower]'
assert key_formatter.generate_key(key_format) == 'surnameprefixsurname'
# use maximal three authors
key_format = '[author:3:lower]'
assert key_formatter.generate_key(key_format) == 'surnameprefixsurname'
#
# Test upper author formatting
#
def test_no_author_upper():
key_formatter = KeyFormatter({})
key_format = '[author:upper]'
assert key_formatter.generate_key(key_format) == 'NONAME'
def test_single_author_upper():
authors = 'Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:upper]'
assert key_formatter.generate_key(key_format) == 'SURNAME'
def test_prefixed_author_upper():
authors = 'Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:upper]'
assert key_formatter.generate_key(key_format) == 'PREFIXSURNAME'
def test_multi_author_upper():
authors = 'Surname, Firstname and Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
# default only first author
key_format = '[author:upper]'
assert key_formatter.generate_key(key_format) == 'SURNAME'
# use only one author (i.e. the first author)
key_format = '[author:1:upper]'
assert key_formatter.generate_key(key_format) == 'SURNAME'
# use two authors from the list
key_format = '[author:2:upper]'
assert key_formatter.generate_key(key_format) == 'SURNAMEPREFIXSURNAME'
# use maximal three authors
key_format = '[author:3:upper]'
assert key_formatter.generate_key(key_format) == 'SURNAMEPREFIXSURNAME'
#
# Test capitalized author formatting
#
def test_no_author_capitalize():
key_formatter = KeyFormatter({})
key_format = '[author:capitalize]'
assert key_formatter.generate_key(key_format) == 'NoName'
def test_single_author_capitalize():
authors = 'Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:capitalize]'
assert key_formatter.generate_key(key_format) == 'Surname'
def test_prefixed_author_capitalize():
authors = 'Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:capitalize]'
assert key_formatter.generate_key(key_format) == 'PrefixSurname'
def test_multi_author_capitalize():
authors = 'Surname, Firstname and Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
# default only first author
key_format = '[author:capitalize]'
assert key_formatter.generate_key(key_format) == 'Surname'
# use only one author (i.e. the first author)
    key_format = '[author:1:capitalize]'
assert key_formatter.generate_key(key_format) == 'Surname'
# use two authors from the list
key_format = '[author:2:capitalize]'
assert key_formatter.generate_key(key_format) == 'SurnamePrefixSurname'
# use maximal three authors
key_format = '[author:3:capitalize]'
assert key_formatter.generate_key(key_format) == 'SurnamePrefixSurname'
#
# Test abbreviated author formatting
#
def test_no_author_abbreviate():
key_formatter = KeyFormatter({})
key_format = '[author:abbreviate]'
assert key_formatter.generate_key(key_format) == 'NN'
key_formatter = KeyFormatter({})
key_format = '[author:abbr]'
assert key_formatter.generate_key(key_format) == 'NN'
def test_single_author_abbreviate():
authors = 'Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:abbreviate]'
assert key_formatter.generate_key(key_format) == 'S'
key_format = '[author:abbr]'
assert key_formatter.generate_key(key_format) == 'S'
def test_prefixed_author_abbreviate():
authors = 'Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
key_format = '[author:abbreviate]'
assert key_formatter.generate_key(key_format) == 'PS'
key_format = '[author:abbr]'
assert key_formatter.generate_key(key_format) == 'PS'
def test_multi_author_abbreviate():
authors = 'Surname, Firstname and Prefix Surname, Firstname'
key_formatter = KeyFormatter({'author': authors})
# default only first author
key_format = '[author:abbreviate]'
assert key_formatter.generate_key(key_format) == 'S'
key_format = '[author:abbr]'
assert key_formatter.generate_key(key_format) == 'S'
# use only one author (i.e. the first author)
key_format = '[author:1:abbreviate]'
assert key_formatter.generate_key(key_format) == 'S'
key_format = '[author:1:abbr]'
assert key_formatter.generate_key(key_format) == 'S'
# use two authors from the list
key_format = '[author:2:abbreviate]'
assert key_formatter.generate_key(key_format) == 'SPS'
key_format = '[author:2:abbr]'
assert key_formatter.generate_key(key_format) == 'SPS'
# use maximal three authors
key_format = '[author:3:abbreviate]'
assert key_formatter.generate_key(key_format) == 'SPS'
key_format = '[author:3:abbr]'
assert key_formatter.generate_key(key_format) == 'SPS'
def test_missing_author():
"""Test editor is used if author is missing"""
key_format = '[author]'
# check that editor is used if author not present
editors = 'Surname, Firstname and Prefix Surname, Firstname'
authors = ''
key_formatter = KeyFormatter({'author': authors, 'editor': editors})
assert key_formatter.generate_key(key_format) == 'Surname'
# check authors take precedence over editors
editors = 'Editor, Firstname and Prefix Author, Firstname'
authors = 'Author, Firstname and Prefix Author, Firstname'
key_formatter = KeyFormatter({'author': authors, 'editor': editors})
assert key_formatter.generate_key(key_format) == 'Author'
# check No Name author is used if none is present
editors = ''
authors = ''
key_formatter = KeyFormatter({'author': authors, 'editor': editors})
assert key_formatter.generate_key(key_format) == 'NoName'
def test_author_list_split_for_name_containing_and():
"""Test that author lists are only split at and that is not part of a name"""
key_format = '[author]'
authors = 'Ackland, G. J. and Bacon, D. J. and Calder, A. F.'
key_formatter = KeyFormatter({'author': authors})
assert key_formatter.generate_key(key_format) == 'Ackland'
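
# A hedged sketch (not part of the original suite): it assumes count and case
# modifiers apply to the editor-derived fallback name exactly as they do for
# author-derived names.
def test_missing_author_with_modifier():
    key_formatter = KeyFormatter({'author': '', 'editor': 'Surname, Firstname'})
    assert key_formatter.generate_key('[author:lower]') == 'surname'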
| 0
| 0
| 0
| 0
| 0
| 5,724
| 0
| 0
| 364
|
d9bdf38b8225d5bcf36515d8ff0fd7d07cd4ddee
| 2,900
|
py
|
Python
|
ontobio/io/entitywriter.py
|
alliance-genome/ontobio
|
0ec3aa6fea9d4492a9873a4b9b394c4866f741b6
|
[
"BSD-3-Clause"
] | null | null | null |
ontobio/io/entitywriter.py
|
alliance-genome/ontobio
|
0ec3aa6fea9d4492a9873a4b9b394c4866f741b6
|
[
"BSD-3-Clause"
] | null | null | null |
ontobio/io/entitywriter.py
|
alliance-genome/ontobio
|
0ec3aa6fea9d4492a9873a4b9b394c4866f741b6
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Classes for exporting entities.
So far only one implementation
"""
import re
external_taxon = re.compile("taxon:([0-9]+)")
internal_taxon = re.compile("NCBITaxon:([0-9]+)")
| 24.576271
| 80
| 0.556897
|
"""
Classes for exporting entities.
So far only one implementation
"""
import re
def stringify(s):
if s is None:
return ""
elif isinstance(s,list):
return "|".join(s)
else:
return s
external_taxon = re.compile("taxon:([0-9]+)")
internal_taxon = re.compile("NCBITaxon:([0-9]+)")
def normalize_taxon(taxon):
global internal_taxon
global external_taxon
    if external_taxon.match(taxon):
        # Already in the external "taxon:NNN" form; return it unchanged
        return taxon
match = internal_taxon.match(taxon)
if match:
taxon_id = match.group(1)
return "taxon:{num}".format(num=taxon_id)
return taxon
class EntityWriter():
"""
Abstract superclass of all association writer objects (Gpad, GAF)
"""
# TODO: add to superclass
def _split_prefix(self, ref):
id = ref['id']
[prefix, local_id] = id.split(':', maxsplit=1)
return prefix, local_id
# TODO: add to superclass
def _write_row(self, vals):
vals = [stringify(v) for v in vals]
line = "\t".join(vals)
self.file.write(line + "\n")
# TODO: add to superclass
def write_entity(self, e):
"""
Write a single entity
"""
pass ## Implemented in subclasses
def write(self, entities, meta=None):
"""
Write a complete set of entities to a file
Arguments
---------
entities: list[dict]
A list of entity dict objects
meta: Meta
metadata about association set (not yet implemented)
"""
for e in entities:
self.write_entity(e)
class GpiWriter(EntityWriter):
"""
Writes entities in GPI format
Takes an entity dictionary:
{
'id': id, (String)
'label': db_object_symbol, (String)
'full_name': db_object_name, (String)
'synonyms': synonyms, (List[str])
'type': db_object_type, (String)
'parents': parents, (List[Str])
'xrefs': xref_ids, (List[Str])
'taxon': {
'id': self._taxon_id(taxon) (String)
}
}
"""
def __init__(self, file=None):
self.file = file
if self.file:
self.file.write("!gpi-version: 2.1")
def write_entity(self, entity):
"""
Write a single entity to a line in the output file
"""
db, db_object_id = self._split_prefix(entity)
taxon = normalize_taxon(entity["taxon"]["id"])
vals = [
db,
db_object_id,
entity.get('label'),
entity.get('full_name'),
entity.get('synonyms'),
entity.get('type'),
taxon,
entity.get('parents'),
entity.get('xrefs'),
entity.get('properties')
]
self._write_row(vals)
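
# A minimal usage sketch (not part of the original module); the entity dict
# shape and identifiers below are illustrative, following the GpiWriter
# docstring above.
if __name__ == '__main__':
    import sys
    entity = {
        'id': 'UniProtKB:P12345',
        'label': 'geneA',
        'full_name': 'gene A protein',
        'synonyms': ['gA'],
        'type': 'protein',
        'parents': [],
        'xrefs': [],
        'taxon': {'id': 'NCBITaxon:9606'},
    }
    GpiWriter(file=sys.stdout).write([entity])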
| 0
| 0
| 0
| 2,150
| 0
| 478
| 0
| 0
| 92
|
7bc67dc45c88bf77bfd385e03be6efef81543692
| 101
|
py
|
Python
|
src/pagnn/training/dcn/__init__.py
|
ostrokach/protein-adjacency-net
|
fd3ad0b9034eb61b0187752c1f38f7eed1a8f1dc
|
[
"MIT"
] | 1
|
2022-01-16T12:06:13.000Z
|
2022-01-16T12:06:13.000Z
|
src/pagnn/training/dcn/__init__.py
|
ostrokach/protein-adjacency-net
|
fd3ad0b9034eb61b0187752c1f38f7eed1a8f1dc
|
[
"MIT"
] | null | null | null |
src/pagnn/training/dcn/__init__.py
|
ostrokach/protein-adjacency-net
|
fd3ad0b9034eb61b0187752c1f38f7eed1a8f1dc
|
[
"MIT"
] | null | null | null |
"""Train a network."""
| 20.2
| 29
| 0.732673
|
"""Train a network."""
from .args import Args
from .stats import Stats
from .main import main, train
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 66
|
c90a19220af8528b49927128e469ba9aff6561ab
| 291
|
py
|
Python
|
export_readiness/migrations/0064_merge_20191009_1320.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2018-03-20T11:19:07.000Z
|
2021-10-05T07:53:11.000Z
|
export_readiness/migrations/0064_merge_20191009_1320.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 802
|
2018-02-05T14:16:13.000Z
|
2022-02-10T10:59:21.000Z
|
export_readiness/migrations/0064_merge_20191009_1320.py
|
uktrade/directory-cms
|
8c8d13ce29ea74ddce7a40f3dd29c8847145d549
|
[
"MIT"
] | 6
|
2019-01-22T13:19:37.000Z
|
2019-07-01T10:35:26.000Z
|
# Generated by Django 2.2.4 on 2019-10-09 13:20
| 19.4
| 56
| 0.670103
|
# Generated by Django 2.2.4 on 2019-10-09 13:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('export_readiness', '0063_auto_20191009_1239'),
('export_readiness', '0063_auto_20191008_1307'),
]
operations = [
]
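
# Merge migrations like this one intentionally declare no operations; they
# only give the two divergent 0063 leaf migrations a common descendant so
# Django sees a single linear history again.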
| 0
| 0
| 0
| 185
| 0
| 0
| 0
| 11
| 46
|
e607fd13a8a8af9d7b0dd6db2c1d5a67f9b2d4bc
| 16,202
|
py
|
Python
|
skating_etl/skating_etl.py
|
gcp825/gcp_public
|
3208249658b227de4a3d5e054de8df42042429a5
|
[
"Apache-2.0"
] | null | null | null |
skating_etl/skating_etl.py
|
gcp825/gcp_public
|
3208249658b227de4a3d5e054de8df42042429a5
|
[
"Apache-2.0"
] | null | null | null |
skating_etl/skating_etl.py
|
gcp825/gcp_public
|
3208249658b227de4a3d5e054de8df42042429a5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
if __name__ == '__main__': run()
| 70.751092
| 146
| 0.398901
|
#!/usr/bin/python3
def run(cloud=False):
# imports
import apache_beam as ab
from apache_beam import io
from apache_beam import ToString as ts
from apache_beam.options.pipeline_options import PipelineOptions, SetupOptions, StandardOptions, GoogleCloudOptions
from gcp_tools import beam_tools as bt
from python_tools import scalar_functions as sf
# field-level transform functions
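    # Hedged reading of the custom python_tools helpers: sf.translate appears
    # to mask every lowercase letter as 'x', so find('x', x.find(' ')) locates
    # the first lowercase letter after the first space.  forename/surname
    # therefore split 'SURNAME Forename' style strings at the start of the
    # capitalised forename (index - 1 keeps its initial capital).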
def forename(x): return x[sf.translate(x,'','abcdefghijklmnopqrstuvwxyz','x').find('x',x.find(' '))-1:].strip().title()
def surname(x): return x[:sf.translate(x,'','abcdefghijklmnopqrstuvwxyz','x').find('x',x.find(' '))-1].strip().title()
def name1(x): return x.split('/')[0].strip()
def name2(x): return (x + '/').split('/')[1].strip()
def gender1(x): return 'M' if x.title().find('Men ') >= 0 else 'F'
def gender2(x): return 'M' if x.find('/') >= 0 else ''
def program(x): return x[0:x[::-1].replace(' ','|',2)[::-1].find('|')].rstrip('-').strip()
def element(x): return x[(x[::-1].replace(' ','|',2)[::-1].find('|'))+1:].strip()
def asp_type(x): return x[0:1].upper()
def str_int(x): return '0' if x == '' else str(int(float(x)))
def nullify(x): return '' if x == '0' else x
# set up pipeline arguments and variables
shards = '-SSSSS-of-NNNNN' if cloud else ''
path = 'gs://your-bucket/' if cloud else '/home/your_user_dir/'
input_path = path + 'input' + '/'
output_path = path + 'output' + '/'
opt = PipelineOptions(flags=[])
if cloud:
opt.view_as(SetupOptions).save_main_session = True
opt.view_as(SetupOptions).setup_file = './setup.py'
opt.view_as(StandardOptions).runner = 'DataflowRunner'
opt.view_as(GoogleCloudOptions).project = 'your-project'
opt.view_as(GoogleCloudOptions).job_name = 'skating-etl'
opt.view_as(GoogleCloudOptions).staging_location = path + 'temp'
opt.view_as(GoogleCloudOptions).temp_location = path + 'temp'
opt.view_as(GoogleCloudOptions).region = 'us-central1'
# run the pipeline
with ab.Pipeline(options=opt) as pipe:
# extract the data
p00 = (pipe | 'p00 Read Performance' >> io.ReadFromText(input_path + 'performances.csv', skip_header_lines=1)
| 'p00 Switch Delimiter' >> ab.ParDo(bt.SwitchDelimiters(',','|'))
| 'p00 ToList' >> ab.ParDo(bt.ConvertRecTo(list,'|')))
p01 = (pipe | 'p01 Read Judges' >> io.ReadFromText(input_path + 'judges.csv', skip_header_lines=1)
| 'p01 Switch Delimiter' >> ab.ParDo(bt.SwitchDelimiters(',','|'))
| 'p01 ToList' >> ab.ParDo(bt.ConvertRecTo(list,'|')))
p02 = (pipe | 'p02 Read Aspects' >> io.ReadFromText(input_path + 'judged-aspects.csv', skip_header_lines=1)
| 'p02 Switch Delimiter' >> ab.ParDo(bt.SwitchDelimiters(',','|'))
| 'p02 ToList' >> ab.ParDo(bt.ConvertRecTo(list,'|')))
p03 = (pipe | 'p03 Read Scores' >> io.ReadFromText(input_path + 'judge-scores.csv', skip_header_lines=1)
| 'p03 Switch Delimiter' >> ab.ParDo(bt.SwitchDelimiters(',','|'))
| 'p03 ToList' >> ab.ParDo(bt.ConvertRecTo(list,'|')))
# transform the data
p10 = (p00 | 'p10 Events: Drop Fields' >> bt.KeepFields(1,2,0) # Keep: Comp, Prog/Element, Performance ID
| 'p10 Events: Distinct' >> bt.DistinctList()
| 'p10 Events: Count' >> bt.Count(0,1)) # Outp: Comp, Prog/Element, Entries (Count(*))
p15 = (p00 | 'p15 Perf: Split Skaters' >> bt.XFormAppend(a3=name1,
b3=name2,
a2=gender1,
c3=gender2)) # Add: Sk.Name 1 & 2, Sk.Gender 1 & 2
p20 = (p15 | 'p20 Skaters: Drop Fields' >> bt.KeepFields(4,11,12,13,14) # Keep: Ctry, Sk.Name 1 & 2, Gender 1 & 2
| 'p20 Skaters: Parse Names' >> bt.XFormAppend(a1=forename,
b1=surname,
a2=forename,
b2=surname) # Add: Sk.1 Fore & Surname, Sk.2 Fore & Surname
| 'p20 Skaters: Parse' >> bt.Normalise(2,1,5,6,3,2,7,
8,4,blanks='n') # Outp: Ctry, Name, Forename, Surname, Gender
| 'p20 Skaters: Rearrange' >> bt.KeepFields(1,2,3,4,0) # Outp: Name, Forename, Surname, Gender, Ctry
| 'p20 Skaters: Distinct' >> bt.DistinctList())
p25 = (p01 | 'p25 Judges: Drop Fields' >> bt.KeepFields(2,2,2,'x',7) # Keep: Name, Name, Name, Null, Ctry
| 'p25 Judges: Distinct' >> bt.DistinctList()
| 'p25 Judges: Parse Names' >> bt.XForm(t1=forename,
t2=surname)) # Outp: Name, Forename, Surname, Null, Ctry
p30 = ((p20, p25)
| 'p30 Person: Combine' >> ab.Flatten() # Combine: Skaters & Judges
| 'p30 Person: Sort' >> bt.Sort(2,1,4,3) # Sort: Surname, Forename, Ctry, Gender
| 'p30 Person: Generate SKs' >> bt.GenerateSKs()) # Outp: PersonID, Name, Fore & Surname, Gender, Ctry
p35 = (p01 | 'p35 Events: Drop Fields' >> bt.KeepFields(5,4) # Keep: Comp, Prog/Element
| 'p35 Events: Distinct' >> bt.DistinctList()
| 'p35 Events: Sort' >> bt.Sort(0,1) # Sort: Comp, Prog/Element
| 'p35 Events: Add Entries' >> bt.Join(p10,'Left',
key='0,1',
keep='0,1,4') # Outp: Comp, Prog/Element, Entries
| 'p35 Events: Generate SKs' >> bt.GenerateSKs()) # Outp: EventID, Comp, Prog/Element, Entries
p40 = (p01 | 'p40 J.Roles: Drop Fields' >> bt.KeepFields(5,2,4,6) # Keep: Comp, Name, Prog/Element, Role
| 'p40 J.Roles: Add Person FK' >> bt.Join(p30,'Left',
key=1,
keep='3,0,2,4') # Outp: Role, Comp, Prog/Element, PersonID
| 'p40 J.Roles: Add Events FK' >> bt.Join(p35,'Left',
key='1,2',
keep='0,3,4')) # Outp: Role, PersonID, EventID
p45 = (p15 | 'p45 Perf: Drop Fields' >> bt.DropFields(3,4,7,13,14) # Keep: PerfID, Comp, Prog/Elm, Rank, Seq, El.Score
# Co.Score, Ded, Sk.1 Name, Sk.2 Name
| 'p45 Perf: Add Events FK' >> bt.Lookup(p35,'Left',
side_val=0,
key='1,2', # Outp: PerfID, Rank, Seq, El.Score, Co.Score, Ded,
keep='0,3,4,5,6,7,8,9,10') # Sk.1 Name, Sk.2 Name, EventID
| 'p45 Perf: Add Skater 1 FK' >> bt.Lookup(p30,'Left',
side_val=0,
main_key=6,side_key=1,
keep='0,1,2,3,4,5,7,8,9')
| 'p45 Perf: Add Skater 2 FK' >> bt.Lookup(p30,'Left',
side_val=0,
main_key=6,side_key=1, # Outp: PerfID, Rank, Seq, El.Score, Co.Score, Ded,
keep='0,1,2,3,4,5,7,8,9') # EventID, Sk.1 ID, Sk.2 ID
| 'p45 Perf: Distinct' >> bt.DistinctList()
| 'p45 Perf: Sort' >> bt.Sort(6.,2.,0) # Sort: EventID, Seq, PerfID
| 'p45 Perf: Generate SKs' >> bt.GenerateSKs()) # Outp: PerformanceID, PerfID, Rank, Seq, El.Score,
# Co.Score, Ded, EventID, Sk.1 ID, Sk.2 ID
p50 = (p02 | 'p50 J.Aspect: Drop Fields' >> bt.KeepFields(0,1,2,4,3,7,11) # Keep: J.AspectID, PerfID, Type, Desc, Seq,
# B.Diff, Score
| 'p50 J.Aspect: Distinct' >> bt.DistinctList()
| 'p50 J.Aspect: Transform' >> bt.XForm(t2=asp_type,
t4=str_int))
p55 = (p50 | 'p55 Aspect: Drop Fields' >> bt.KeepFields(2,3) # Keep: Type, Desc
| 'p55 Aspect: Distinct' >> bt.DistinctList()
| 'p55 Aspect: Sort' >> bt.Sort(0,1) # Sort: Type, Desc
| 'p55 Aspect: Generate SKs' >> bt.GenerateSKs()) # Outp: AspectID, Aspect Type, Aspect Desc
p60 = (p50 | 'p60 J.Aspect: Apply Perf FK' >> bt.Lookup(p45,'Left', # Keep: J.AspectID, Type, Desc, Seq, B.Diff,
key=1,side_val=0, # Score, PerformanceID
keep='0,2,3,4,5,6,7')
| 'p60 J.Aspect: Apply Asp. FK' >> bt.Lookup(p55,'Left', # Keep: J.AspectID, Seq, B.Diff, Score
key='1,2',side_val=0, # PerformanceID, AspectID
keep='0,3,4,5,6,7')
| 'p60 J.Aspect: Sort' >> bt.Sort(4.,1.,5.,0) # Sort: PerformanceID, Seq, AspectID, J.AspectID
| 'p60 J.Aspect: XForm Seq' >> bt.XForm(t1=nullify)
| 'p60 J.Aspect: Generate SKs' >> bt.GenerateSKs()) # Outp: JudgedAspectID, J.AspectID, Seq, B.Diff,
# Score, PerformanceID, AspectID
p65 = (p03 | 'p65 Scores: Distinct' >> bt.DistinctList()
| 'p65 Scores: Add J.Aspect ID' >> bt.Lookup(p60,'Left', # Outp: Role, Score, JudgedAspectID
side_key=1,side_val=0,
main_key=0,
keep='1,2,3')
| 'p65 Scores: Add Perf. ID' >> bt.Lookup(p60,'Left', # Outp: Role, Score, JudgedAspectID, PerformanceID
side_key=0,side_val=5,
main_key=2,
keep='0,1,2,3')
| 'p65 Scores: Add Event. ID' >> bt.Lookup(p45,'Left', # Outp: Role, Score, JudgedAspectID, EventID
side_key=0,side_val=7,
main_key=3,
keep='0,1,2,4')
| 'p65 Scores: Add Person ID' >> bt.Lookup(p40,'Left',
side_key='2,0',side_val=1,
main_key='3,0',
keep='2,4,1') # Outp: JudgedAspectID, PersonID, Score
| 'p65 Scores: Sort' >> bt.Sort(0.,1.) # Sort: JudgedAspectID, PersonID
| 'p65 Scores: Generate SKs' >> bt.GenerateSKs()) # Outp: ScoreID, JudgedAspectID, PersonID, Score
# load the data
p91 = (p30 | 'p91 Person: Reformat' >> bt.DropFields(1) # Outp: PersonID, Forename, Surname, Gender, Ctry
| 'p91 Person: ToStr' >> ts.Iterables(delimiter='|')
| 'p91 Person: Write File ' >> io.WriteToText
(output_path + 'person.dat',
shard_name_template=shards))
p92 = (p35 | 'p92 Event: Dupe Prog/Elem' >> bt.KeepFields(0,1,2,2,3) # Outp: EventID, Comp, Prog/Elm, Prog/Elm, Entries
| 'p92 Event: Parse Prog/Elem' >> bt.XForm(t2=program,
t3=element) # Outp: EventID, Comp, Program, Element, Entries
| 'p92 Event: ToStr' >> ts.Iterables(delimiter='|')
| 'p92 Event: Write File' >> io.WriteToText
(output_path + 'event.dat',
shard_name_template=shards))
p93 = (p45 | 'p93 Perf: Reformat' >> bt.KeepFields(0,7,3,8,9, # Outp: PerformanceID, EventID, Seq, Sk1 ID, Sk2 ID
2,5,4,6) # Rank, Co.Score, El.Score, Ded
| 'p93 Perf: ToStr' >> ts.Iterables(delimiter='|')
| 'p93 Perf: Write File' >> io.WriteToText
(output_path + 'performance.dat',
shard_name_template=shards))
p94 = (p55 | 'p94 Aspect: ToStr' >> ts.Iterables(delimiter='|')
| 'p94 Aspect: Write File' >> io.WriteToText
(output_path + 'aspect.dat',
shard_name_template=shards))
p95 = (p60 | 'p95 J.Aspect: Reformat' >> bt.KeepFields(0,5,2,6,3,4) # Outp: J.Asp.ID, Perf.ID, Seq, AspID, B.Diff, Score
| 'p95 J.Aspect: ToStr' >> ts.Iterables(delimiter='|')
| 'p95 J.Aspect: Write File' >> io.WriteToText
(output_path + 'performance_aspect.dat',
shard_name_template=shards))
p96 = (p65 | 'p96 Scores: ToStr' >> ts.Iterables(delimiter='|')
| 'p96 Scores: Write_File' >> io.WriteToText
(output_path + 'performance_scores.dat',
shard_name_template=shards))
if __name__ == '__main__': run()
| 0
| 0
| 0
| 0
| 0
| 16,126
| 0
| 0
| 23
|
0997b2bb53b3e94433d1abfed3c5673193adb7bc
| 1,393
|
py
|
Python
|
main_adco.py
|
maple-research-lab/AdCo
|
a9f25fc18c12df88c732b33700f3bb698454dd3f
|
[
"MIT"
] | 139
|
2021-03-05T01:20:26.000Z
|
2022-03-24T02:25:20.000Z
|
main_adco.py
|
maple-research-lab/AdCo
|
a9f25fc18c12df88c732b33700f3bb698454dd3f
|
[
"MIT"
] | 12
|
2021-03-09T02:59:40.000Z
|
2021-09-27T05:25:25.000Z
|
main_adco.py
|
maple-research-lab/AdCo
|
a9f25fc18c12df88c732b33700f3bb698454dd3f
|
[
"MIT"
] | 18
|
2021-03-05T02:44:52.000Z
|
2022-03-14T02:37:09.000Z
|
#Copyright (C) 2020 Xiao Wang
#License: MIT for academic use.
#Contact: Xiao Wang ([email protected], [email protected])
#Some codes adopted from https://github.com/facebookresearch/moco
from ops.argparser import argparser
if __name__ == '__main__':
#use_cuda = torch.cuda.is_available()
#print("starting check cuda status",use_cuda)
#if use_cuda:
parser = argparser()
args = parser.parse_args()
main(args)
| 36.657895
| 81
| 0.729361
|
#Copyright (C) 2020 Xiao Wang
#License: MIT for academic use.
#Contact: Xiao Wang ([email protected], [email protected])
#Some codes adopted from https://github.com/facebookresearch/moco
import os
from ops.argparser import argparser
from ops.Config_Environment import Config_Environment
import torch.multiprocessing as mp
from training.main_worker import main_worker
def main(args):
if args.choose is not None:
os.environ['CUDA_VISIBLE_DEVICES'] = args.choose
print("Current we choose gpu:%s" % args.choose)
#config environment
ngpus_per_node=Config_Environment(args)
# call training main control function
if args.multiprocessing_distributed==1:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
if __name__ == '__main__':
#use_cuda = torch.cuda.is_available()
#print("starting check cuda status",use_cuda)
#if use_cuda:
parser = argparser()
args = parser.parse_args()
main(args)
| 0
| 0
| 0
| 0
| 0
| 781
| 0
| 57
| 110
|
4c1974ce7ed5abfe14e791e0f8209d92e3dcc752
| 638
|
py
|
Python
|
yk_utils/images/image_parser.py
|
jppdpf/yk-utils-python
|
2c101feda900713c8cbb0223326031ba09cd48e9
|
[
"MIT"
] | null | null | null |
yk_utils/images/image_parser.py
|
jppdpf/yk-utils-python
|
2c101feda900713c8cbb0223326031ba09cd48e9
|
[
"MIT"
] | null | null | null |
yk_utils/images/image_parser.py
|
jppdpf/yk-utils-python
|
2c101feda900713c8cbb0223326031ba09cd48e9
|
[
"MIT"
] | 1
|
2022-02-16T19:04:33.000Z
|
2022-02-16T19:04:33.000Z
|
"""Image parser module.
"""
import os
import base64
def parse_image(image) -> str:
"""Check whether the image is a string or a file path or a file-like object.
:param image:
A base64 string or a file path or a file-like object representing an image.
:return:
Image as a base64 string.
"""
data = None
if hasattr(image, 'read'): # When image is a file-like object.
data = image.read()
elif os.path.isfile(image): # When image is a file path.
with open(image, 'rb') as file:
data = file.read()
return base64.b64encode(data).decode('utf-8') if data else image
| 29
| 83
| 0.628527
|
"""Image parser module.
"""
import os
import base64
def parse_image(image) -> str:
"""Check whether the image is a string or a file path or a file-like object.
:param image:
A base64 string or a file path or a file-like object representing an image.
:return:
Image as a base64 string.
"""
data = None
if hasattr(image, 'read'): # When image is a file-like object.
data = image.read()
elif os.path.isfile(image): # When image is a file path.
with open(image, 'rb') as file:
data = file.read()
return base64.b64encode(data).decode('utf-8') if data else image
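
# A minimal usage sketch ('photo.jpg' is a hypothetical path):
#
#     parse_image('photo.jpg')                    # file path on disk
#     parse_image(open('photo.jpg', 'rb'))        # file-like object
#     parse_image('aGVsbG8=')                     # base64 string passes through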
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3d022cac11f1e60bc2bcab537423ff5ea3705e37
| 3,983
|
py
|
Python
|
client/gym_carla/experiment_suite/experiment_suite.py
|
wielgosz-info/carla-rl
|
8841c0c7997299ed76388ad93b34834bd6b55d3e
|
[
"MIT"
] | null | null | null |
client/gym_carla/experiment_suite/experiment_suite.py
|
wielgosz-info/carla-rl
|
8841c0c7997299ed76388ad93b34834bd6b55d3e
|
[
"MIT"
] | null | null | null |
client/gym_carla/experiment_suite/experiment_suite.py
|
wielgosz-info/carla-rl
|
8841c0c7997299ed76388ad93b34834bd6b55d3e
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# -------------------------------------------------------------------------------
#
# This file is intended to provide the same functions as
# https://github.com/carla-simulator/driving-benchmarks/blob/master/version084/benchmark_tools/experiment_suites/experiment_suite.py
# but working with CARLA 0.9.11 and gym
| 32.647541
| 132
| 0.644489
|
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
# -------------------------------------------------------------------------------
#
# This file is intended to provide the same functions as
# https://github.com/carla-simulator/driving-benchmarks/blob/master/version084/benchmark_tools/experiment_suites/experiment_suite.py
# but working with CARLA 0.9.11 and gym
import abc
from collections import OrderedDict
from gym_carla.converters.observations.sensors.camera.rgb import RGBCameraSensorObservations
from carla import Transform, Location, Rotation
class ExperimentSuite(object):
def __init__(self, city_name):
self._city_name = city_name
self._experiments = self.build_experiments()
def calculate_time_out(self, path_distance):
"""
Function to return the timeout, in seconds,
that is calculated based on distance (in meters).
"""
# Originally, path distance was in map coordinates
# and I have no idea how it corresponded to meters.
# But now we will supply it in meters since that's
# what we can get from
# GlobalRoutePlanner.track_route() * waypoints resolution.
# Also, we're only really ever interested in seconds
# (not milliseconds as documented in the original file).
# So, assuming the path_distance is in meters,
# and the minimal sensible average velocity is 10km/h (~2.78 m/s),
# and we're adding 10s of "bonus" time (start/stop),
# and we want the result to be in seconds
# we get the exact same equation ;)
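        # e.g. a 500 m route: ((500 / 1000.0) / 10.0) * 3600.0 + 10.0 = 190.0 s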
return ((path_distance / 1000.0) / 10.0) * 3600.0 + 10.0
def get_number_of_poses_task(self):
"""
Get the number of poses a task have for this benchmark
"""
# Warning: assumes that all tasks have the same size
return len(self._experiments[0].poses)
def get_number_of_reps_poses(self):
"""
Get the number of poses a task have for this benchmark
"""
# Warning: assumes that all poses have the same number of repetitions
return self._experiments[0].repetitions
def get_experiments(self):
"""
Getter for the experiment set.
"""
return self._experiments
def prepare_sensors(self, blueprint_library):
sensors = OrderedDict(
rgb_camera=self._prepare_camera(blueprint_library)
)
return sensors
def _prepare_camera(self, blueprint_library):
blueprint_camera = blueprint_library.find('sensor.camera.rgb')
blueprint_camera.set_attribute('image_size_x', '800')
blueprint_camera.set_attribute('image_size_y', '600')
blueprint_camera.set_attribute('fov', '100')
blueprint_camera.set_attribute('sensor_tick', '0.1')
transform_camera = Transform(
location=Location(x=+2.0, y=0.0, z=1.4),
rotation=Rotation(-15.0, 0, 0)
)
return (blueprint_camera, transform_camera)
@property
def weathers(self):
weathers = set(self.train_weathers)
weathers.update(self.test_weathers)
return weathers
@property
def collision_as_failure(self):
return False
@property
def traffic_light_as_failure(self):
return False
@abc.abstractmethod
def build_experiments(self):
"""
Returns a set of experiments to be evaluated
Must be redefined in an inherited class.
"""
@abc.abstractproperty
def train_weathers(self):
"""
Return the weathers that are considered as training conditions
"""
@abc.abstractproperty
def test_weathers(self):
"""
Return the weathers that are considered as testing conditions
"""
| 0
| 623
| 0
| 2,612
| 0
| 0
| 0
| 100
| 112
|
96c8a4ff91d3e3ca6afe90078f997ac43327e709
| 3,675
|
py
|
Python
|
w4_tiled_converter/main.py
|
restitux/w4-tiled-converter
|
7cdee2d425c53a54d46617f9499a43dad3806594
|
[
"MIT"
] | null | null | null |
w4_tiled_converter/main.py
|
restitux/w4-tiled-converter
|
7cdee2d425c53a54d46617f9499a43dad3806594
|
[
"MIT"
] | null | null | null |
w4_tiled_converter/main.py
|
restitux/w4-tiled-converter
|
7cdee2d425c53a54d46617f9499a43dad3806594
|
[
"MIT"
] | null | null | null |
# Convert a tiled tmx tilemap to source files
if __name__ == "__main__":
main()
| 22.826087
| 88
| 0.688163
|
import argparse
import json
from os.path import basename, join, split, splitext
import sys
from w4_tiled_converter import converters
# Convert a tiled tmx tilemap to source files
def tilemap_subcommand(filename: str):
print(f"INFO: Processing tilemap {filename}")
name = basename(splitext(splitext(filename)[0])[0])
# Calculate output filenames
h_filename = splitext(filename)[0] + ".h"
c_filename = splitext(filename)[0] + ".c"
converters.convert_tilemap(filename, h_filename, c_filename, name)
def tileset_subcommand(filename: str):
print(f"INFO: Processing tileset {filename}")
# Calculate output filenames
h_filename = splitext(filename)[0] + ".h"
c_filename = splitext(filename)[0] + ".c"
# Read in JSON tileset
with open(filename) as f:
tileset_json = json.load(f)
# Validate tiles are square
tile_w = tileset_json["tilewidth"]
tile_h = tileset_json["tileheight"]
if tile_w != tile_h:
print(f"ERROR: Tiles of different h / w are not supported ({tile_w}, {tile_h})")
sys.exit(-1)
# Convert tileset to source files
png_filename = join(split(filename)[0], tileset_json["image"])
converters.convert_tileset(
png_filename, h_filename, c_filename, tile_w, tileset_json["name"]
)
def header_subcommand(filename: str):
header = """
#ifndef __TILED_H_
#define __TILED_H_
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
struct Entrance;
struct TileSet {
const uint8_t *tileset;
};
struct TileMap_MapLayer {
uint32_t width;
uint32_t height;
const uint8_t *map;
const uint8_t *map_rotations;
const struct TileSet *tileset;
};
struct TileMap_DataLayer {
uint32_t width;
uint32_t height;
const uint8_t *map;
};
struct TileMap_Entrance {
uint32_t x;
uint32_t y;
uint32_t width;
uint32_t height;
uint8_t id;
const struct TileMap *target_map;
bool is_entrance;
uint32_t target_entrance;
};
struct TileMap_BlockSpawn {
uint32_t x;
uint32_t y;
uint8_t id;
};
struct TileMap_Entrances {
struct TileMap_Entrance *entrances;
uint32_t length;
};
struct TileMap_BlockSpawns {
struct TileMap_BlockSpawn *block_spawns;
uint32_t length;
};
struct TileMap_TextTrigger {
uint8_t id;
uint32_t x;
uint32_t y;
uint32_t width;
uint32_t height;
char *string;
uint16_t length;
int8_t ability_pickup;
};
struct TileMap_TextTriggers {
struct TileMap_TextTrigger *text_triggers;
uint32_t length;
};
struct TileMap {
uint16_t id;
struct TileMap_MapLayer static_map;
struct TileMap_MapLayer overlay_map;
struct TileMap_DataLayer collision_map;
struct TileMap_DataLayer special_map;
struct TileMap_Entrances entrances;
struct TileMap_BlockSpawns block_spawns;
struct TileMap_TextTriggers text_triggers;
};
#endif // __TILED_H
"""
with open(filename, "w") as out:
out.write(header)
def main():
# exit(-1)
# print("w4 tileset converter")
parser = argparse.ArgumentParser(description="Generate sources from a tilemap")
parser.add_argument(
"filetype",
action="store",
help="tilemap, tileset or header",
choices=("tilemap", "tileset", "header"),
)
parser.add_argument("filename", action="store", help="filename")
args = parser.parse_args()
if args.filetype == "tilemap":
tilemap_subcommand(args.filename)
elif args.filetype == "tileset":
tileset_subcommand(args.filename)
elif args.filetype == "header":
header_subcommand(args.filename)
if __name__ == "__main__":
main()
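
# Hedged usage examples (file names and the module invocation below are
# illustrative, not taken from the project docs):
#
#     python -m w4_tiled_converter.main tilemap level1.tmx.json
#     python -m w4_tiled_converter.main tileset tiles.tsx.json
#     python -m w4_tiled_converter.main header src/tiled.h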
| 0
| 0
| 0
| 0
| 0
| 3,360
| 0
| 23
| 202
|
3899d52834f5585b5ae4c4435ca9a8f8025273e4
| 404
|
py
|
Python
|
rallybooking/migrations/0007_rally_site_name.py
|
DaleShipp/devoncc
|
8bec11cf363ac5a5c16eda9a6c50e9f901142211
|
[
"BSD-3-Clause"
] | null | null | null |
rallybooking/migrations/0007_rally_site_name.py
|
DaleShipp/devoncc
|
8bec11cf363ac5a5c16eda9a6c50e9f901142211
|
[
"BSD-3-Clause"
] | null | null | null |
rallybooking/migrations/0007_rally_site_name.py
|
DaleShipp/devoncc
|
8bec11cf363ac5a5c16eda9a6c50e9f901142211
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.1.1 on 2018-10-05 22:33
| 21.263158
| 63
| 0.60396
|
# Generated by Django 2.1.1 on 2018-10-05 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rallybooking', '0006_auto_20181005_2042'),
]
operations = [
migrations.AddField(
model_name='rally',
name='site_name',
field=models.CharField(default='', max_length=300),
),
]
| 0
| 0
| 0
| 290
| 0
| 0
| 0
| 19
| 46
|
cc323e6f4b8b6d89a4bc83d3a457b2b702135f01
| 154
|
py
|
Python
|
book_library_app/commands/__init__.py
|
szymcio32/flask-book-library-api
|
9406bb5ff6ec04cc7c049d416913ae084e73a9dc
|
[
"MIT"
] | 1
|
2022-02-03T17:10:03.000Z
|
2022-02-03T17:10:03.000Z
|
book_library_app/commands/__init__.py
|
szymcio32/flask-book-library-api
|
9406bb5ff6ec04cc7c049d416913ae084e73a9dc
|
[
"MIT"
] | null | null | null |
book_library_app/commands/__init__.py
|
szymcio32/flask-book-library-api
|
9406bb5ff6ec04cc7c049d416913ae084e73a9dc
|
[
"MIT"
] | 2
|
2021-04-26T20:57:24.000Z
|
2021-09-20T10:19:00.000Z
|
from flask import Blueprint
db_manage_bp = Blueprint('db_manage_cmd', __name__, cli_group=None)
| 30.8
| 67
| 0.850649
|
from flask import Blueprint
db_manage_bp = Blueprint('db_manage_cmd', __name__, cli_group=None)
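# imported after the blueprint is defined: a common way to avoid a circular import, since the commands module itself uses db_manage_bp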
from book_library_app.commands import db_manage_commands
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 35
| 23
|
fc6a61ad09b5a2873ca3d0bbf8908a1fe92aac0e
| 1,549
|
py
|
Python
|
example.py
|
ssocolow/Neural-Network-Saves
|
78870623cec9752a765d8bb43d00f0c3f0f677b0
|
[
"MIT"
] | 3
|
2019-06-22T05:53:18.000Z
|
2019-06-22T05:53:48.000Z
|
example.py
|
ssocolow/Neural-Network-Python-Lib
|
78870623cec9752a765d8bb43d00f0c3f0f677b0
|
[
"MIT"
] | 2
|
2019-06-22T06:16:19.000Z
|
2019-06-22T06:22:38.000Z
|
example.py
|
ssocolow/Neural-Network-Python-Lib
|
78870623cec9752a765d8bb43d00f0c3f0f677b0
|
[
"MIT"
] | null | null | null |
#importing the library
#nn depends on matrix2d.py plus the built-in math and random modules
import nn
import random
#create the neural network to solve the XOR problem
#takes an array of arrays as its argument
#the 2, 4 and 1 represent 2 nodes in the input layer, 4 nodes in the hidden layer and 1 node in the output layer
#you can add more layers by adding an array to the larger array with a number in it for the number of nodes you want like [[2],[3],[3],[4]]
#you can set the learning rate and the network's weights and biases after you give it its shape (0.1 is default for learning rate)
example_neural_network = nn.NeuralNetwork([[2],[4],[1]], learning_rate = 0.2)
#have your inputs and targets in arrays which match the number of inputs and outputs specified in the initialization of the neural network
#if you want to use backpropagation and gradient descent in supervised learning
inputs = [[1,0.01],[0.01,1],[1,1],[0.01,0.01]]
targets = [[0.99],[0.99],[0.01],[0.01]]
#train the network on the inputs and the targets
for i in range(20000):
index = random.randint(0,3)
example_neural_network.train(inputs[index], targets[index])
#check what the network outputs after it has been trained
#this should be close to the targets
print(example_neural_network.feedforward(inputs[0]))
print(example_neural_network.feedforward(inputs[1]))
print(example_neural_network.feedforward(inputs[2]))
print(example_neural_network.feedforward(inputs[3]))
#print out some of the information in the network
example_neural_network.print()
| 48.40625
| 141
| 0.769529
|
#importing the library
#nn depends on matrix2d.py plus the built-in math and random modules
import nn
import random
#create the neural network to solve the XOR problem
#takes an array of arrays as its argument
#the 2, 4 and 1 represent 2 nodes in the input layer, 4 nodes in the hidden layer and 1 node in the output layer
#you can add more layers by adding an array to the larger array with a number in it for the number of nodes you want like [[2],[3],[3],[4]]
#you can set the learning rate and the network's weights and biases after you give it its shape (0.1 is default for learning rate)
example_neural_network = nn.NeuralNetwork([[2],[4],[1]], learning_rate = 0.2)
#have your inputs and targets in arrays which match the number of inputs and outputs specified in the initialization of the neural network
#if you want to use backpropagation and gradient descent in supervised learning
inputs = [[1,0.01],[0.01,1],[1,1],[0.01,0.01]]
targets = [[0.99],[0.99],[0.01],[0.01]]
#train the network on the inputs and the targets
for i in range(20000):
index = random.randint(0,3)
example_neural_network.train(inputs[index], targets[index])
#check what the network outputs after it has been trained
#this should be close to the targets
print(example_neural_network.feedforward(inputs[0]))
print(example_neural_network.feedforward(inputs[1]))
print(example_neural_network.feedforward(inputs[2]))
print(example_neural_network.feedforward(inputs[3]))
#print out some of the information in the network
example_neural_network.print()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fe1792e616a3318ec0936ccae1a3979964e73622
| 5,345
|
py
|
Python
|
geeksbot_web/rcon/models.py
|
dustinpianalto/geeksbot_web
|
ee02452dd5a61b0487706782020f9647ae202238
|
[
"MIT"
] | null | null | null |
geeksbot_web/rcon/models.py
|
dustinpianalto/geeksbot_web
|
ee02452dd5a61b0487706782020f9647ae202238
|
[
"MIT"
] | null | null | null |
geeksbot_web/rcon/models.py
|
dustinpianalto/geeksbot_web
|
ee02452dd5a61b0487706782020f9647ae202238
|
[
"MIT"
] | null | null | null |
# Create your models here.
| 41.434109
| 101
| 0.636109
|
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import status
from guilds.models import Guild
from dmessages.models import Message
from users.models import User
from channels.models import Channel
from .utils import create_error_response
from .utils import create_success_response
# Create your models here.
class RconServer(models.Model):
guild = models.ForeignKey(Guild, on_delete=models.CASCADE)
name = models.CharField(max_length=50)
ip = models.GenericIPAddressField()
port = models.PositiveIntegerField()
password = models.CharField(max_length=50)
monitor_chat = models.BooleanField()
monitor_chat_channel = models.ForeignKey(
Channel, on_delete=models.DO_NOTHING, related_name="+", null=True, blank=True, default=None
)
alerts_channel = models.ForeignKey(
Channel, on_delete=models.DO_NOTHING, related_name="+", null=True, blank=True, default=None
)
info_channel = models.ForeignKey(
Channel, on_delete=models.DO_NOTHING, related_name="+", null=True, blank=True, default=None
)
info_message = models.ForeignKey(
Message, on_delete=models.DO_NOTHING, related_name="+", null=True, blank=True, default=None
)
settings_message = models.ForeignKey(
Message, on_delete=models.DO_NOTHING, related_name="+", null=True, blank=True, default=None
)
whitelist = models.ManyToManyField(User, blank=True)
def update_server(self, data):
if data.get('name'):
self.name = data.get('name')
if data.get('ip'):
self.ip = data.get('ip')
if data.get('port'):
self.port = data.get('port')
if data.get('password'):
self.password = data.get('password')
if data.get('monitor_chat'):
self.monitor_chat = data.get('monitor_chat')
if 'monitor_chat_channel' in data.keys():
self.monitor_chat_channel = Channel.get_channel_by_id(data.get('monitor_chat_channel'))
if 'alerts_channel' in data.keys():
self.alerts_channel = Channel.get_channel_by_id(data.get('alerts_channel'))
if 'info_channel' in data.keys():
            self.info_channel = Channel.get_channel_by_id(data.get('info_channel'))
if 'info_message' in data.keys():
self.info_message = Message.get_message_by_id(data.get('info_message'))
if 'settings_message' in data.keys():
self.settings_message = Message.get_message_by_id(data.get('settings_message'))
self.save()
return create_success_response(self, status.HTTP_202_ACCEPTED, many=False)
def add_whitelist(self, user_id):
user = User.get_user_by_id(user_id)
if not isinstance(user, User):
return create_error_response("User Does Not Exist",
status=status.HTTP_404_NOT_FOUND)
if not user.steam_id:
return create_error_response("User does not have a Steam 64ID attached to their account",
status=status.HTTP_406_NOT_ACCEPTABLE)
self.whitelist.add(user)
return create_error_response("User has been added to the whitelist",
status=status.HTTP_200_OK)
def remove_from_whitelist(self, user_id):
user = User.get_user_by_id(user_id)
if not isinstance(user, User):
return create_error_response("User Does Not Exist",
status=status.HTTP_404_NOT_FOUND)
self.whitelist.remove(user)
return create_error_response("User has been removed from the whitelist",
status=status.HTTP_200_OK)
@classmethod
def add_new_server(cls, data):
guild_id = data.get('guild')
name = data.get('name')
ip = data.get('ip')
port = data.get('port')
password = data.get('password')
if not (guild_id and name and ip and port and password):
return create_error_response("One or more of the required fields are missing",
status=status.HTTP_400_BAD_REQUEST)
guild = Guild.get_guild_by_id(guild_id)
if not isinstance(guild, Guild):
return create_error_response("Guild Does Not Exist",
status=status.HTTP_404_NOT_FOUND)
server = cls(
guild=guild,
name=name,
ip=ip,
port=port,
password=password,
monitor_chat=data.get('monitor_chat', False)
)
server.save()
return create_success_response(server, status.HTTP_201_CREATED, many=False)
@classmethod
def get_server(cls, guild_id, name):
guild_servers = cls.get_guild_servers(guild_id)
if guild_servers:
try:
return guild_servers.get(name=name)
except ObjectDoesNotExist:
return None
return None
@classmethod
def get_guild_servers(cls, guild_id):
guild = Guild.get_guild_by_id(guild_id)
if not isinstance(guild, Guild):
return None
return cls.objects.filter(guild=guild)
def __str__(self):
return f"{self.guild.id} | {self.name}"
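# a minimal usage sketch (all field values are illustrative):
#   response = RconServer.add_new_server({'guild': '1234', 'name': 'island', 'ip': '10.0.0.5', 'port': 27020, 'password': 'secret'})
#   server = RconServer.get_server('1234', 'island')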
| 0
| 1,436
| 0
| 3,520
| 0
| 0
| 0
| 138
| 222
|
daad75627bc6162d2ea4e3136cfa6943ffef9ecd
| 1,127
|
py
|
Python
|
post.py
|
seqcode/seqview-web-docker-public
|
56dfc222fd25a630cbc63d2841d9ce4fb1c7045c
|
[
"MIT"
] | null | null | null |
post.py
|
seqcode/seqview-web-docker-public
|
56dfc222fd25a630cbc63d2841d9ce4fb1c7045c
|
[
"MIT"
] | null | null | null |
post.py
|
seqcode/seqview-web-docker-public
|
56dfc222fd25a630cbc63d2841d9ce4fb1c7045c
|
[
"MIT"
] | null | null | null |
main()
| 40.25
| 174
| 0.721384
|
from urllib.parse import urlencode
from urllib.request import Request, urlopen
import json
import argparse
import configparser
def main():
parser = argparse.ArgumentParser(description="Post ingest tileset request to higlass")
parser.add_argument('--genome', action="store", dest="genome", default='', help="genome version")
parser.add_argument('--uuid', action="store", dest="uuid", default='', help="higlass tileset uuid")
args = parser.parse_args()
post(**vars(args))
#post(**args)
def post(genome, uuid):
config = configparser.ConfigParser()
config.read('post.ini')
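    # post.ini is expected to contain a [Credentials] section (field names taken from the lookups below):
    #   [Credentials]
    #   username = <higlass username>
    #   password = <higlass password>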
r = Request("http://127.0.0.1:9000/api/api-token-auth/", urlencode({'username': config['Credentials']['username'], 'password' : config['Credentials']['password']}).encode())
response = urlopen(r).read().decode()
response_json = json.loads(response)
postTrack = Request("http://127.0.0.1:9000/api/", urlencode({'genome': genome, 'uid' : uuid}).encode())
postTrack.add_header('Authorization', "Token " + response_json['token'])
response = urlopen(postTrack).read().decode()
response_json = json.loads(response)
print(response_json)
if __name__ == "__main__":
    main()
| 0
| 0
| 0
| 0
| 0
| 945
| 0
| 17
| 157
|
eb282e96df605c49958261d1bcdd1be576d4b1bf
| 3,574
|
py
|
Python
|
story_chain/flaskrunner.py
|
muchu1983/story_chain
|
3af4bb158be128a52c753f88eaffaed872d85880
|
[
"BSD-3-Clause"
] | null | null | null |
story_chain/flaskrunner.py
|
muchu1983/story_chain
|
3af4bb158be128a52c753f88eaffaed872d85880
|
[
"BSD-3-Clause"
] | null | null | null |
story_chain/flaskrunner.py
|
muchu1983/story_chain
|
3af4bb158be128a52c753f88eaffaed872d85880
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
from flask import Flask
app = Flask(__name__.split(".")[0])
#start the server
#build a jsonp response
#add a new story paragraph after the given one (returns the new paragraph id)
#get the content of the given paragraph
#update the given paragraph (like/dislike)
#get the list of prev or next story paragraphs (returns a list of paragraph ids)
#read a bookmark
#add a bookmark (bookmarks expire)
#= Flask examples =
#GET/POST parameter example
#template example
#post json example
if __name__ == "__main__":
start_flask_server()
| 34.038095
| 94
| 0.689144
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import json
from flask import Flask
from flask import request
from flask import render_template
from flask import jsonify
from story_chain.localdb import LocalDbForStoryChain
app = Flask(__name__.split(".")[0])
#start the server
def start_flask_server():
app.run(host="0.0.0.0", port=5000, debug=True)
#build a jsonp response
def make_jsonp_response(dicJsonObj=None):
strCallback = request.args.get("strJsonpCallback", 0, type=str)
return strCallback + "(" + json.dumps(dicJsonObj) + ")"
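#e.g. with ?strJsonpCallback=cb and dicJsonObj={"result": 3}, the response body is cb({"result": 3})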
#add a new story paragraph after the given one (returns the new paragraph id)
@app.route("/story_chain/api_post/story", methods=["GET"])
def apiPostNewStory():
db = LocalDbForStoryChain()
strStoryContent = request.args.get("str_story_content", type=str)
intPrevStoryId = request.args.get("int_prev_story_id", type=int)
intNewStoryId = db.insertNewStory(strContent=strStoryContent, intPrevId=intPrevStoryId)
return make_jsonp_response(dicJsonObj={"new_story_id":intNewStoryId})
#get the content of the given paragraph
@app.route("/story_chain/api_get/story/<int:intStoryId>", methods=["GET"])
def apiGetStoryById(intStoryId=0):
db = LocalDbForStoryChain()
(strContent, intLike, intDislike) = db.fetchStoryById(intStoryId=intStoryId)
dicJsonObj = {"str_content":strContent,
"int_like":intLike,
"int_dislike":intDislike}
return make_jsonp_response(dicJsonObj=dicJsonObj)
#update the given paragraph (like/dislike)
@app.route("/story_chain/api_put/story/<int:intStoryId>", methods=["GET"])
def apiPutStoryById(intStoryId=0):
pass
#get the list of prev or next story paragraphs (returns a list of paragraph ids)
@app.route("/story_chain/api_get/story", methods=["GET"])
def apiGetStoryList():
db = LocalDbForStoryChain()
strType = request.args.get("str_type", type=str) #"next" or "prev"
intStoryId = request.args.get("int_story_id", type=int)
lstIntStoryId = db.fetchNextOrPrevStoryId(intStoryId=intStoryId, strFetchType=strType)
dicJsonObj = None
if strType == "prev":
        #the previous paragraph is always unique
dicJsonObj = {"int_prev_story_id":(lstIntStoryId[0] if lstIntStoryId else 0)}
elif strType == "next":
        #there may be multiple choices for the next paragraph
dicJsonObj = {"lst_int_next_story_id":lstIntStoryId}
else:
dicJsonObj = {}
return make_jsonp_response(dicJsonObj)
#read a bookmark
@app.route("/story_chain/api_get/tag/<strTagName>", methods=["GET"])
def apiGetTagByName(strTagName=None):
pass
#add a bookmark (bookmarks expire)
@app.route("/story_chain/api_post/tag", methods=["GET"])
def apiPostTag(strTagName=None):
request.args.get("strTagName")
request.args.get("intStoryId")
pass
#= Flask examples =
#GET/POST parameter example
@app.route("/hello/<username>/<int:num>", methods=["GET", "POST"])
def hello(username, num):
#http://192.168.1.101:5000/hello/muchu/7?love=lunna
request.form #get form data when POST
return "Hello World! %s %d method: %s args: %s"%(username, num,
request.method, request.args.get("love"))
#template example
@app.route("/template/")
@app.route("/template/<name>")
def template(name=None):
return render_template("temp.html", name=name)
#post json example
@app.route("/jsonpapi", methods=["GET"])
def jsonpapi():
x = request.args.get("x", 0, type=int)
y = request.args.get("y", 0, type=int)
dicResultJson = {"result":x+y}
return make_jsonp_response(dicJsonObj=dicResultJson)
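#e.g. GET /jsonpapi?x=1&y=2&strJsonpCallback=cb returns cb({"result": 3})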
if __name__ == "__main__":
start_flask_server()
| 288
| 2,405
| 0
| 0
| 0
| 203
| 0
| 41
| 352
|
f0463eb840dd62de3267468df2df8a11a6d08fe8
| 2,913
|
py
|
Python
|
venv/Lib/site-packages/xero_python/accounting/models/report_fields.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 77
|
2020-02-16T03:50:18.000Z
|
2022-03-11T03:53:26.000Z
|
venv/Lib/site-packages/xero_python/accounting/models/report_fields.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 50
|
2020-04-06T10:15:52.000Z
|
2022-03-29T21:27:50.000Z
|
venv/Lib/site-packages/xero_python/accounting/models/report_fields.py
|
RobMilinski/Xero-Starter-Branched-Test
|
c82382e674b34c2336ee164f5a079d6becd1ed46
|
[
"MIT"
] | 27
|
2020-06-04T11:16:17.000Z
|
2022-03-19T06:27:36.000Z
|
# coding: utf-8
"""
Xero Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
| 24.897436
| 124
| 0.597666
|
# coding: utf-8
"""
Xero Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class ReportFields(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"field_id": "str", "description": "str", "value": "str"}
attribute_map = {
"field_id": "FieldID",
"description": "Description",
"value": "Value",
}
def __init__(self, field_id=None, description=None, value=None): # noqa: E501
"""ReportFields - a model defined in OpenAPI""" # noqa: E501
self._field_id = None
self._description = None
self._value = None
self.discriminator = None
if field_id is not None:
self.field_id = field_id
if description is not None:
self.description = description
if value is not None:
self.value = value
@property
def field_id(self):
"""Gets the field_id of this ReportFields. # noqa: E501
:return: The field_id of this ReportFields. # noqa: E501
:rtype: str
"""
return self._field_id
@field_id.setter
def field_id(self, field_id):
"""Sets the field_id of this ReportFields.
:param field_id: The field_id of this ReportFields. # noqa: E501
:type: str
"""
self._field_id = field_id
@property
def description(self):
"""Gets the description of this ReportFields. # noqa: E501
:return: The description of this ReportFields. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ReportFields.
:param description: The description of this ReportFields. # noqa: E501
:type: str
"""
self._description = description
@property
def value(self):
"""Gets the value of this ReportFields. # noqa: E501
:return: The value of this ReportFields. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ReportFields.
:param value: The value of this ReportFields. # noqa: E501
:type: str
"""
self._value = value
| 0
| 1,287
| 0
| 1,283
| 0
| 0
| 0
| 7
| 83
|
f594e48a4780469ce9c3653912d489a7c0714b11
| 1,458
|
py
|
Python
|
Python Basics/scipy_basics.py
|
python-sonchau/python-visualization
|
eb139aaabbff858663a96f8e19e30f1418e4330c
|
[
"MIT"
] | null | null | null |
Python Basics/scipy_basics.py
|
python-sonchau/python-visualization
|
eb139aaabbff858663a96f8e19e30f1418e4330c
|
[
"MIT"
] | null | null | null |
Python Basics/scipy_basics.py
|
python-sonchau/python-visualization
|
eb139aaabbff858663a96f8e19e30f1418e4330c
|
[
"MIT"
] | null | null | null |
from scipy import stats
import numpy as np
############################
# CALCULATING CORRELATIONS #
############################
array_1 = np.array([1,2,3,4,5,6]) # Create a numpy array from a list
array_2 = array_1 # Create another array with the same values
print(stats.pearsonr(array_1, array_2)) # Calculate the correlation which will be 1 since the values are the same
#######################
# NORMAL DISTRIBUTION #
#######################
x = stats.norm.rvs(loc=0, scale=10, size=10) # Generate 10 values randomly sampled from a normal distribution with mean 0 and standard deviation of 10
print(x)
################################
# PROBABILITY DENSITY FUNCTION #
################################
p1 = stats.norm.pdf(x=-100, loc=0, scale=10) # Get probability of sampling a value of -100
p2 = stats.norm.pdf(x=0, loc=0, scale=10) # Get probability of sampling a value of 0
print(p1)
print(p2)
####################################
# CUMULATIVE DISTRIBUTION FUNCTION #
####################################
p1 = stats.norm.cdf(x=0, loc=0, scale=10) # Get probability of sampling a value less than or equal to 0
print(p1)
######################################
# CALCULATING DESCRIPTIVE STATISTICS #
######################################
print(stats.describe(stats.norm.rvs(loc=0, scale=1, size=500))) # Calculate descriptive statistics for 500 data points sampled from normal distribution with mean 0 and standard deviation of 1
| 36.45
| 192
| 0.580933
|
from scipy import stats
import numpy as np
############################
# CALCULATING CORRELATIONS #
############################
array_1 = np.array([1,2,3,4,5,6]) # Create a numpy array from a list
array_2 = array_1 # Create another array with the same values
print(stats.pearsonr(array_1, array_2)) # Calculate the correlation which will be 1 since the values are the same
#######################
# NORMAL DISTRIBUTION #
#######################
x = stats.norm.rvs(loc=0, scale=10, size=10) # Generate 10 values randomly sampled from a normal distribution with mean 0 and standard deviation of 10
print(x)
################################
# PROBABILITY DENSITY FUNCTION #
################################
p1 = stats.norm.pdf(x=-100, loc=0, scale=10) # Get probability of sampling a value of -100
p2 = stats.norm.pdf(x=0, loc=0, scale=10) # Get probability of sampling a value of 0
print(p1)
print(p2)
####################################
# CUMULATIVE DISTRIBUTION FUNCTION #
####################################
p1 = stats.norm.cdf(x=0, loc=0, scale=10) # Get probability of sampling a value less than or equal to 0
print(p1)
######################################
# CALCULATING DESCRIPTIVE STATISTICS #
######################################
print(stats.describe(stats.norm.rvs(loc=0, scale=1, size=500))) # Calculate descriptive statistics for 500 data points sampled from normal distribution with mean 0 and standard deviation of 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b76ae8aebcf19bf3deee56b36c1edd3ec1852d21
| 50,717
|
py
|
Python
|
Src/Scripts/DarunGrim.py
|
fengjixuchui/DarunGrim
|
a6cbe5c064f9399423845dea0ab67355d5ac5852
|
[
"BSD-3-Clause"
] | null | null | null |
Src/Scripts/DarunGrim.py
|
fengjixuchui/DarunGrim
|
a6cbe5c064f9399423845dea0ab67355d5ac5852
|
[
"BSD-3-Clause"
] | null | null | null |
Src/Scripts/DarunGrim.py
|
fengjixuchui/DarunGrim
|
a6cbe5c064f9399423845dea0ab67355d5ac5852
|
[
"BSD-3-Clause"
] | null | null | null |
import time
RedirectStdOutErr=True
if __name__=='__main__':
multiprocessing.freeze_support()
import sys
import time
if len(sys.argv)>1:
database_name=sys.argv[1]
else:
database_name=''
app=QApplication(sys.argv)
pixmap=QPixmap('DarunGrimSplash.png')
splash=QSplashScreen(pixmap)
splash.show()
app.processEvents()
time.sleep(0.5)
window=MainWindow(database_name)
window.show()
splash.finish(window)
sys.exit(app.exec_())
| 32.242212
| 318
| 0.763354
|
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
import DarunGrimDatabase
import DiffEngine
from Graphs import *
import FlowGrapher
import FileStoreBrowser
import FileStoreDatabase
import DarunGrimEngine
import pprint
from multiprocessing import Process
from multiprocessing import Queue
import time
import os
import operator
import subprocess
from Log import *
RedirectStdOutErr=True
class FunctionMatchTable(QAbstractTableModel):
Debug=0
def __init__(self,parent, database_name='', *args):
QAbstractTableModel.__init__(self,parent,*args)
self.match_list=[]
if database_name:
database = DarunGrimDatabase.Database(database_name)
for function_match_info in database.GetFunctionMatchInfo():
if function_match_info.match_rate < 100:
if self.Debug>0:
print "%s\t%s\t%s\t%s\t%s%%\t%d\t%d\t%d\t%d\t%d\t%d" % (function_match_info.source_function_name,
function_match_info.target_function_name,
str(function_match_info.block_type),
str(function_match_info.type),
str( function_match_info.match_rate ),
function_match_info.match_count_for_the_source,
function_match_info.non_match_count_for_the_source,
function_match_info.match_count_with_modificationfor_the_source,
function_match_info.match_count_for_the_target,
function_match_info.non_match_count_for_the_target,
function_match_info.match_count_with_modification_for_the_target)
self.match_list.append([function_match_info.source_function_name,
function_match_info.target_function_name,
"%d%%" % (function_match_info.match_rate),
function_match_info])
def GetFunctionAddresses(self,index):
return [self.match_list[index][3].source_address, self.match_list[index][3].target_address]
def rowCount(self,parent):
return len(self.match_list)
def columnCount(self,parent):
return 3
def data(self,index,role):
if not index.isValid():
return None
elif role!=Qt.DisplayRole:
return None
return self.match_list[index.row()][index.column()]
def headerData(self,col,orientation,role):
if orientation==Qt.Horizontal and role==Qt.DisplayRole:
return ["Orig", "Patched", "Match"][col]
return None
def sort(self,col,order):
self.emit(SIGNAL("layoutAboutToBeChanged()"))
self.match_list=sorted(self.match_list,key=operator.itemgetter(col))
if order==Qt.DescendingOrder:
self.match_list.reverse()
self.emit(SIGNAL("layoutChanged()"))
class BBMatchTable(QAbstractTableModel):
def __init__(self,parent, database_name='', *args):
QAbstractTableModel.__init__(self,parent,*args)
self.match_list=[]
if database_name:
database = DarunGrimDatabase.Database(database_name)
[matches,source_non_matched,target_non_matched]=database.GetBBMatchInfo()
for (match_map,source_basic_block,source_function_oli,target_basic_block,target_function_oli) in matches:
source_function_name=''
if source_function_oli!=None:
source_function_name=source_function_oli.name
target_function_name=''
if target_function_oli!=None:
target_function_name=target_function_oli.name
self.match_list.append([source_basic_block.disasm_lines,
target_basic_block.disasm_lines,
source_function_name,
target_function_name,
match_map.match_rate])
for (basic_block, function_basic_block, match_function_basic_block) in source_non_matched:
function_name=''
if function_basic_block!=None:
function_name=function_basic_block.name
match_function_name=''
if match_function_basic_block!=None:
match_function_name=match_function_basic_block.name
self.match_list.append([basic_block.disasm_lines,
"",
function_name,
match_function_name,
0])
for (basic_block, function_basic_block, match_function_basic_block) in target_non_matched:
function_name=''
if function_basic_block!=None:
function_name=function_basic_block.name
match_function_name=''
if match_function_basic_block!=None:
match_function_name=match_function_basic_block.name
self.match_list.append(["",
basic_block.disasm_lines,
match_function_name,
function_name,
0])
def rowCount(self,parent):
return len(self.match_list)
def columnCount(self,parent):
return 5
def data(self,index,role):
if not index.isValid():
return None
elif role!=Qt.DisplayRole:
return None
return self.match_list[index.row()][index.column()]
def headerData(self,col,orientation,role):
if orientation==Qt.Horizontal and role==Qt.DisplayRole:
return ["Orig", "Patched", "Orig Func", "Patched Func", "Match"][col]
return None
class BlockTable(QAbstractTableModel):
def __init__(self,parent,database_name='',source_function_address=0, target_function_address=0, *args):
QAbstractTableModel.__init__(self,parent,*args)
self.match_list=[]
self.full_match_list=[]
self.ShowFullMatches=False
if database_name:
database = DarunGrimDatabase.Database(database_name)
self.SourceMatchInfo={}
self.TargetMatchInfo={}
[match_hash, source_non_matches,target_non_matches]=database.GetBlockMatches( source_function_address, target_function_address )
for ( source_address, ( target_address, match_rate ) ) in match_hash.items():
if self.ShowFullMatches or match_rate<100:
self.match_list.append([source_address, target_address, match_rate])
self.full_match_list.append([source_address, target_address, match_rate])
self.SourceMatchInfo[source_address]=[target_address, match_rate]
self.TargetMatchInfo[target_address]=[source_address, match_rate]
for non_match in source_non_matches:
self.match_list.append([non_match, 0, 0])
for non_match in target_non_matches:
self.match_list.append([0, non_match, 0])
def GetSourceMatchInfo(self):
return self.SourceMatchInfo
def GetTargetMatchInfo(self):
return self.TargetMatchInfo
def GetBlockAddresses(self,index):
return [self.match_list[index][0], self.match_list[index][1]]
def GetMatchAddresses(self,col,address):
for (addr1,addr2,match_rate) in self.full_match_list:
if col==0 and address==addr1:
return addr2
if col==1 and address==addr2:
return addr1
return None
def rowCount(self,parent):
return len(self.match_list)
def columnCount(self,parent):
return 3
def data(self,index,role):
if not index.isValid():
return None
elif role!=Qt.DisplayRole:
return None
value=self.match_list[index.row()][index.column()]
if index.column()<2:
if value==0:
return ""
return "%.8X" % value
elif index.column()==2:
if value==0:
return "Non match"
return "%d%%" % value
return value
def headerData(self,col,orientation,role):
if orientation==Qt.Horizontal and role==Qt.DisplayRole:
return ["Orig", "Patched", "Match"][col]
return None
def sort(self,col,order):
self.emit(SIGNAL("layoutAboutToBeChanged()"))
self.match_list=sorted(self.match_list,key=operator.itemgetter(col))
if order==Qt.DescendingOrder:
self.match_list.reverse()
self.emit(SIGNAL("layoutChanged()"))
class NewDiffingDialog(QDialog):
def __init__(self,parent=None):
super(NewDiffingDialog,self).__init__(parent)
self.setWindowTitle("New Diffing")
self.setWindowIcon(QIcon('DarunGrim.png'))
self.Filenames={'Orig':'','Patched':'','Result':''}
orig_button=QPushButton('Orig File:',self)
orig_button.clicked.connect(self.getOrigFilename)
self.orig_line=QLineEdit("")
self.orig_line.setAlignment(Qt.AlignLeft)
self.orig_line.setMinimumWidth(250)
patched_button=QPushButton('Patched File:',self)
patched_button.clicked.connect(self.getPatchedFilename)
self.patched_line=QLineEdit("")
self.patched_line.setAlignment(Qt.AlignLeft)
self.patched_line.setMinimumWidth(250)
result_button=QPushButton('Result:',self)
result_button.clicked.connect(self.getResultFilename)
self.result_line=QLineEdit("")
self.result_line.setAlignment(Qt.AlignLeft)
self.result_line.setMinimumWidth(250)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
main_layout=QGridLayout()
main_layout.addWidget(orig_button,0,0)
main_layout.addWidget(self.orig_line,0,1)
main_layout.addWidget(patched_button,1,0)
main_layout.addWidget(self.patched_line,1,1)
main_layout.addWidget(result_button,2,0)
main_layout.addWidget(self.result_line,2,1)
main_layout.addWidget(buttonBox,3,1)
self.setLayout(main_layout)
def keyPressEvent(self,e):
key=e.key()
if key==Qt.Key_Return or key==Qt.Key_Enter:
return
else:
super(NewDiffingDialog,self).keyPressEvent(e)
def getOrigFilename(self):
filename=self.getFilename("Orig")
self.orig_line.setText(filename)
def getPatchedFilename(self):
filename=self.getFilename("Patched")
self.patched_line.setText(filename)
def getResultFilename(self):
(filename,filter)=QFileDialog.getSaveFileName(self,"Result", filter="*.dgf")
self.Filenames['Result']=filename
self.result_line.setText(filename)
def getFilename(self,type):
(filename,filter)=QFileDialog.getOpenFileName(self,type)
if filename:
self.Filenames[type]=filename
return filename
class FileStoreBrowserDialog(QDialog):
ShowResultButton=False
def __init__(self,parent=None,database_name='',darungrim_storage_dir=''):
super(FileStoreBrowserDialog,self).__init__(parent)
self.setWindowTitle("File Store Browser")
self.setWindowIcon(QIcon('DarunGrim.png'))
self.FileStoreDir=darungrim_storage_dir
self.filesWidgetsTemplate=FileStoreBrowser.FilesWidgetsTemplate(self,database_name,qApp)
self.filesWidgetsTemplate.setDarunGrimStore(self.FileStoreDir)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
bottom_layout=QGridLayout()
bottom_layout.addWidget(buttonBox,0,3)
main_layout=QVBoxLayout()
main_layout.addWidget(self.filesWidgetsTemplate.tab_widget)
main_layout.addLayout(bottom_layout)
self.setLayout(main_layout)
self.resize(950,500)
self.setWindowFlags(self.windowFlags()|Qt.WindowSystemMenuHint|Qt.WindowMinMaxButtonsHint)
self.show()
def keyPressEvent(self,e):
key=e.key()
if key==Qt.Key_Return or key==Qt.Key_Enter:
return
else:
super(FileStoreBrowserDialog,self).keyPressEvent(e)
class NewDiffingFromFileStoreDialog(QDialog):
ShowResultButton=False
def __init__(self,parent=None,database_name='',darungrim_storage_dir=''):
super(NewDiffingFromFileStoreDialog,self).__init__(parent)
self.setWindowTitle("File Store Browser")
self.setWindowIcon(QIcon('DarunGrim.png'))
self.FileStoreDir=darungrim_storage_dir
self.InitVars()
self.filesWidgetsTemplate=FileStoreBrowser.FilesWidgetsTemplate(self,database_name,qApp)
self.filesWidgetsTemplate.setDarunGrimStore(self.FileStoreDir)
orig_button=QPushButton('Orig File >> ',self)
orig_button.clicked.connect(self.getOrigFilename)
self.orig_line=QLineEdit("")
self.orig_line.setAlignment(Qt.AlignLeft)
patched_button=QPushButton('Patched File >> ',self)
patched_button.clicked.connect(self.getPatchedFilename)
self.patched_line=QLineEdit("")
self.patched_line.setAlignment(Qt.AlignLeft)
if self.ShowResultButton:
result_button=QPushButton('Result:',self)
result_button.clicked.connect(self.getResultFilename)
self.result_line=QLineEdit("")
self.result_line.setAlignment(Qt.AlignLeft)
name_label=QLabel('Name:')
self.name_line=QLineEdit("")
self.name_line.setAlignment(Qt.AlignLeft)
description_label=QLabel('Description:')
self.description_line=QLineEdit("")
self.description_line.setAlignment(Qt.AlignLeft)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
bottom_layout=QGridLayout()
bottom_layout.addWidget(orig_button,0,0)
bottom_layout.addWidget(self.orig_line,0,1)
bottom_layout.addWidget(patched_button,0,2)
bottom_layout.addWidget(self.patched_line,0,3)
if self.ShowResultButton:
bottom_layout.addWidget(result_button,1,0)
bottom_layout.addWidget(self.result_line,2,1)
bottom_layout.addWidget(name_label,1,0)
bottom_layout.addWidget(self.name_line,1,1)
bottom_layout.addWidget(description_label,1,2)
bottom_layout.addWidget(self.description_line,1,3)
bottom_layout.addWidget(buttonBox,4,3)
main_layout=QVBoxLayout()
main_layout.addWidget(self.filesWidgetsTemplate.tab_widget)
main_layout.addLayout(bottom_layout)
self.setLayout(main_layout)
self.resize(950,500)
self.setWindowFlags(self.windowFlags()|Qt.WindowSystemMenuHint|Qt.WindowMinMaxButtonsHint)
self.show()
def keyPressEvent(self,e):
key=e.key()
if key==Qt.Key_Return or key==Qt.Key_Enter:
return
else:
super(NewDiffingFromFileStoreDialog,self).keyPressEvent(e)
def InitVars(self):
self.OrigFileID=0
self.OrigFilename=''
self.OrigFileSHA1=''
self.PatchedFileID=0
self.PatchedFilename=''
self.PatchedFileSHA1=''
self.ResultFilename=''
self.Name=''
self.Description=''
def getOrigFilename(self):
ret = self.filesWidgetsTemplate.getCurrentSelection()
if ret!=None:
self.OrigFileID=ret['id']
self.OrigFilename=os.path.join(self.FileStoreDir,ret['filename'])
self.OrigFileSHA1=ret['sha1']
self.orig_line.setText(self.OrigFilename)
def getPatchedFilename(self):
ret = self.filesWidgetsTemplate.getCurrentSelection()
if ret!=None:
self.PatchedFileID=ret['id']
self.PatchedFilename=os.path.join(self.FileStoreDir,ret['filename'])
self.PatchedFileSHA1=ret['sha1']
self.patched_line.setText(self.PatchedFilename)
def getResultFilename(self):
(filename,filter)=QFileDialog.getOpenFileName(self,"Result...")
if filename:
self.ResultFilename=str(filename.replace("/","\\"))
			if self.ResultFilename[-4:].lower()!='.dgf':
self.ResultFilename+='.dgf'
self.result_line.setText(self.ResultFilename)
class SessionTable(QAbstractTableModel):
def __init__(self,parent,database_name='',*args):
QAbstractTableModel.__init__(self,parent,*args)
self.list=[]
database=FileStoreDatabase.Database(database_name)
for (session,src_tag,dst_tag) in database.GetSessions():
src_tag_name=''
dst_tag_name=''
if src_tag!=None:
src_tag_name=src_tag.tag
if dst_tag!=None:
dst_tag_name=dst_tag.tag
src_filename=database.GetFileNameWithVersionByID(session.src)
dst_filename=database.GetFileNameWithVersionByID(session.dst)
description="%s - %s vs %s - %s" % (src_filename, src_tag_name, dst_filename, dst_tag_name)
self.list.append([session.name,
session.description,
src_filename,
src_tag_name,
dst_filename,
dst_tag_name,
session.result,
description])
def GetFilename(self,row):
return self.list[row][6]
def GetDescription(self,row):
return self.list[row][7]
def rowCount(self,parent):
return len(self.list)
def columnCount(self,parent):
return 6
def data(self,index,role):
if not index.isValid():
return None
elif role!=Qt.DisplayRole:
return None
return self.list[index.row()][index.column()]
def headerData(self,col,orientation,role):
if orientation==Qt.Horizontal and role==Qt.DisplayRole:
return ["Name", "Description", "Orig", "Tag", "Patched", "Tag"][col]
return None
def sort(self,col,order):
self.emit(SIGNAL("layoutAboutToBeChanged()"))
self.list=sorted(self.list,key=operator.itemgetter(col))
if order==Qt.DescendingOrder:
self.list.reverse()
self.emit(SIGNAL("layoutChanged()"))
class SessionsDialog(QDialog):
def __init__(self,parent=None,database_name=''):
super(SessionsDialog,self).__init__(parent)
self.setWindowTitle("Sessions")
self.setWindowIcon(QIcon('DarunGrim.png'))
self.Filename=''
view=QTableView()
vheader=QHeaderView(Qt.Orientation.Vertical)
vheader.setResizeMode(QHeaderView.ResizeToContents)
view.setVerticalHeader(vheader)
view.horizontalHeader().setResizeMode(QHeaderView.Stretch)
view.setSortingEnabled(True)
view.setSelectionBehavior(QAbstractItemView.SelectRows)
self.SessionTableView=view
self.SessionTable=SessionTable(self,database_name)
self.SessionTableView.setModel(self.SessionTable)
vlayout=QVBoxLayout()
vlayout.addWidget(view)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
vlayout.addWidget(buttonBox)
self.setLayout(vlayout)
self.resize(800,400)
self.setWindowFlags(self.windowFlags()|Qt.WindowSystemMenuHint|Qt.WindowMinMaxButtonsHint)
self.show()
def GetFilename(self):
selection=self.SessionTableView.selectionModel()
if selection!=None:
for index in selection.selection().indexes():
return self.SessionTable.GetFilename(index.row())
return ''
def GetDescription(self):
selection=self.SessionTableView.selectionModel()
if selection!=None:
for index in selection.selection().indexes():
return self.SessionTable.GetDescription(index.row())
return ''
def headerData(self,col,orientation,role):
if orientation==Qt.Horizontal and role==Qt.DisplayRole:
return ["Name", "Description", "Orig", "Patched"][col]
return None
class ServerInfoDialog(QDialog):
def __init__(self,parent=None, port=0):
super(ServerInfoDialog,self).__init__(parent)
self.setWindowTitle("Server Information")
self.setWindowIcon(QIcon('DarunGrim.png'))
port_label=QLabel('Port:',self)
if port==0:
port_text='None'
else:
port_text='%d' % port
port_number_label=QLabel(port_text, self)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok)
buttonBox.accepted.connect(self.accept)
main_layout=QGridLayout()
main_layout.addWidget(port_label,0,0)
main_layout.addWidget(port_number_label,0,1)
main_layout.addWidget(buttonBox,1,1)
self.setLayout(main_layout)
class ConfigurationDialog(QDialog):
def __init__(self,parent=None, file_store_dir='', data_files_dir='', ida_path='', ida64_path='', log_level=0):
super(ConfigurationDialog,self).__init__(parent)
self.setWindowTitle("Configuration")
self.setWindowIcon(QIcon('DarunGrim.png'))
		file_store_dir_button=QPushButton('FileStore Dir:',self)
file_store_dir_button.clicked.connect(self.getFileStoreDir)
self.file_store_dir_line=QLineEdit("")
self.file_store_dir_line.setAlignment(Qt.AlignLeft)
self.file_store_dir_line.setMinimumWidth(250)
self.file_store_dir_line.setText(file_store_dir)
data_files_dir_button=QPushButton('Data Files Dir:',self)
data_files_dir_button.clicked.connect(self.getDataFilesDir)
self.data_files_dir_line=QLineEdit("")
self.data_files_dir_line.setAlignment(Qt.AlignLeft)
self.data_files_dir_line.setMinimumWidth(250)
self.data_files_dir_line.setText(data_files_dir)
ida_path_button=QPushButton('IDA Path:',self)
ida_path_button.clicked.connect(self.getIDAPath)
self.ida_path_line=QLineEdit(ida_path)
self.ida_path_line.setAlignment(Qt.AlignLeft)
self.ida_path_line.setMinimumWidth(250)
self.ida_path_line.setText(ida_path)
self.IDAPath=ida_path
ida64_path_button=QPushButton('IDA64 Path:',self)
ida64_path_button.clicked.connect(self.getIDA64Path)
self.ida64_path_line=QLineEdit(ida64_path)
self.ida64_path_line.setAlignment(Qt.AlignLeft)
self.ida64_path_line.setMinimumWidth(250)
self.ida64_path_line.setText(ida64_path)
self.IDA64Path=ida64_path
log_level_button=QLabel('Log Level:',self)
self.log_level_line=QLineEdit("")
self.log_level_line.setAlignment(Qt.AlignLeft)
self.log_level_line.setMinimumWidth(250)
self.log_level_line.setText('%d' % log_level)
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
main_layout=QGridLayout()
main_layout.addWidget(file_store_dir_button,0,0)
main_layout.addWidget(self.file_store_dir_line,0,1)
main_layout.addWidget(data_files_dir_button,2,0)
main_layout.addWidget(self.data_files_dir_line,2,1)
main_layout.addWidget(ida_path_button,3,0)
main_layout.addWidget(self.ida_path_line,3,1)
main_layout.addWidget(ida64_path_button,4,0)
main_layout.addWidget(self.ida64_path_line,4,1)
main_layout.addWidget(log_level_button,5,0)
main_layout.addWidget(self.log_level_line,5,1)
main_layout.addWidget(buttonBox,6,1)
self.setLayout(main_layout)
def keyPressEvent(self,e):
key=e.key()
if key==Qt.Key_Return or key==Qt.Key_Enter:
return
else:
super(ConfigurationDialog,self).keyPressEvent(e)
def getFileStoreDir(self):
dir_name=QFileDialog.getExistingDirectory(self,'FileStore Dir')
if dir_name:
self.file_store_dir_line.setText(dir_name)
def getFileStoreDatabase(self):
(filename,filter)=QFileDialog.getOpenFileName(self,'FileStore Database File')
if filename:
self.file_store_database_line.setText(filename)
def getDataFilesDir(self):
dir_name=QFileDialog.getExistingDirectory(self,'Data Files Dir')
if dir_name:
self.data_files_dir_line.setText(dir_name)
def getIDAPath(self):
(filename,filter)=QFileDialog.getOpenFileName(self,'IDA Path',filter="*.exe")
if filename:
self.ida_path_line.setText(filename)
def getIDA64Path(self):
(filename,filter)=QFileDialog.getOpenFileName(self,'IDA64 Path',filter="*.exe")
if filename:
self.ida64_path_line.setText(filename)
def SendLogMessage(message,q):
q.put(message)
def PerformDiffThread(src_filename, target_filename, result_filename, log_filename='', log_level=100, dbg_storage_dir='', is_src_target_storage=False, src_ida_log_filename = 'src.log', target_ida_log_filename = 'target.log', ida_path='', ida64_path='', q=None):
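	# runs as a separate worker process; when a Queue is supplied, stdout/stderr are hooked so diff log lines reach the GUI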
if q!=None and RedirectStdOutErr:
ph_out=PrintHook(True,func=SendLogMessage,arg=q)
ph_out.Start()
ph_err=PrintHook(False,func=SendLogMessage,arg=q)
ph_err.Start()
if is_src_target_storage:
darungrim=DarunGrimEngine.DarunGrim()
darungrim.SetStorageNames(src_filename, target_filename)
else:
darungrim=DarunGrimEngine.DarunGrim(src_filename, target_filename)
darungrim.SetIDAPath(ida_path)
darungrim.SetIDAPath(ida64_path,True)
darungrim.SetDGFSotrage(dbg_storage_dir)
if log_filename:
darungrim.SetLogFile(log_filename,log_level)
darungrim.PerformDiff(result_filename,src_ida_log_filename = src_ida_log_filename, target_ida_log_filename = target_ida_log_filename)
class MainWindow(QMainWindow):
UseDock=False
ShowBBMatchTableView=False
def __init__(self,database_name):
super(MainWindow,self).__init__()
self.setWindowTitle("DarunGrim 4")
self.setWindowIcon(QIcon('DarunGrim.png'))
self.PerformDiffProcess=None
self.DatabaseName=database_name
self.LogDialog=LogTextBoxDialog()
self.LogDialog.resize(800,600)
if RedirectStdOutErr:
self.PHOut=PrintHook(True,func=self.onTextBoxDataReady)
self.PHOut.Start()
self.PHErr=PrintHook(False,func=self.onTextBoxDataReady)
self.PHErr.Start()
self.NonMaxGeometry=None
self.DarunGrimEngine=DarunGrimEngine.DarunGrim(start_ida_listener=True)
self.readSettings()
# Menu
self.createActions()
self.createMenus()
#Use dock? not yet
if not self.UseDock:
bottom_splitter=QSplitter()
self.GraphSplitter=QSplitter()
# Functions
self.FunctionMatchTableView=QTableView()
vheader=QHeaderView(Qt.Orientation.Vertical)
vheader.setResizeMode(QHeaderView.ResizeToContents)
self.FunctionMatchTableView.setVerticalHeader(vheader)
self.FunctionMatchTableView.horizontalHeader().setResizeMode(QHeaderView.Stretch)
self.FunctionMatchTableView.setSortingEnabled(True)
self.FunctionMatchTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
if self.ShowBBMatchTableView:
self.BBMatchTableView=QTableView()
vheader=QHeaderView(Qt.Orientation.Vertical)
vheader.setResizeMode(QHeaderView.ResizeToContents)
self.BBMatchTableView.setVerticalHeader(vheader)
self.BBMatchTableView.horizontalHeader().setResizeMode(QHeaderView.Stretch)
self.BBMatchTableView.setSortingEnabled(True)
self.BBMatchTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
if self.UseDock:
dock=QDockWidget("Functions",self)
dock.setObjectName("Functions")
dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
dock.setWidget(self.FunctionMatchTableView)
self.addDockWidget(Qt.BottomDockWidgetArea,dock)
else:
bottom_splitter.addWidget(self.FunctionMatchTableView)
# Blocks
self.BlockTableModel=BlockTable(self,database_name)
self.BlockTableView=QTableView()
vheader=QHeaderView(Qt.Orientation.Vertical)
vheader.setResizeMode(QHeaderView.ResizeToContents)
self.BlockTableView.setVerticalHeader(vheader)
self.BlockTableView.horizontalHeader().setResizeMode(QHeaderView.Stretch)
self.BlockTableView.setSortingEnabled(True)
self.BlockTableView.setModel(self.BlockTableModel)
self.BlockTableView.setSelectionBehavior(QAbstractItemView.SelectRows)
if self.UseDock:
dock=QDockWidget("Blocks",self)
dock.setObjectName("Blocks")
dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
dock.setWidget(self.BlockTableView)
self.addDockWidget(Qt.BottomDockWidgetArea,dock)
else:
bottom_splitter.addWidget(self.BlockTableView)
bottom_splitter.setStretchFactor(0,1)
bottom_splitter.setStretchFactor(1,0)
# Function Graph
self.OrigFunctionGraph=MyGraphicsView()
self.OrigFunctionGraph.setRenderHints(QPainter.Antialiasing)
if self.UseDock:
dock=QDockWidget("Orig",self)
dock.setObjectName("Orig")
dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
			dock.setWidget(self.OrigFunctionGraph)
self.addDockWidget(Qt.TopDockWidgetArea,dock)
else:
self.GraphSplitter.addWidget(self.OrigFunctionGraph)
# Function Graph
self.PatchedFunctionGraph=MyGraphicsView()
self.PatchedFunctionGraph.setRenderHints(QPainter.Antialiasing)
if self.UseDock:
dock=QDockWidget("Patched",self)
dock.setObjectName("Patched")
dock.setAllowedAreas(Qt.LeftDockWidgetArea|Qt.RightDockWidgetArea)
			dock.setWidget(self.PatchedFunctionGraph)
self.addDockWidget(Qt.TopDockWidgetArea,dock)
else:
self.GraphSplitter.addWidget(self.PatchedFunctionGraph)
self.RefreshGraphViews()
if not self.UseDock:
virt_splitter=QSplitter()
virt_splitter.setOrientation(Qt.Vertical)
virt_splitter.addWidget(self.GraphSplitter)
if self.ShowBBMatchTableView:
tab_widget=QTabWidget()
tab_widget.addTab(bottom_splitter,"Functions..")
tab_widget.addTab(self.BBMatchTableView,"Basic blocks...")
virt_splitter.addWidget(tab_widget)
else:
virt_splitter.addWidget(bottom_splitter)
virt_splitter.setStretchFactor(0,1)
virt_splitter.setStretchFactor(1,0)
main_widget=QWidget()
vlayout=QVBoxLayout()
vlayout.addWidget(virt_splitter)
main_widget.setLayout(vlayout)
self.setCentralWidget(main_widget)
self.show()
self.clearAreas()
if database_name:
self.OpenDatabase(database_name)
self.restoreUI()
def RefreshGraphViews(self):
if self.ShowGraphs==True:
self.OrigFunctionGraph.show()
self.PatchedFunctionGraph.show()
self.GraphSplitter.show()
else:
self.OrigFunctionGraph.hide()
self.PatchedFunctionGraph.hide()
self.GraphSplitter.hide()
def clearAreas(self):
self.OrigFunctionGraph.clear()
self.PatchedFunctionGraph.clear()
self.FunctionMatchTable=FunctionMatchTable(self)
self.FunctionMatchTableView.setModel(self.FunctionMatchTable)
if self.ShowBBMatchTableView:
self.BBMatchTable=BBMatchTable(self)
self.BBMatchTableView.setModel(self.BBMatchTable)
self.BlockTableModel=BlockTable(self)
self.BlockTableView.setModel(self.BlockTableModel)
def manageFileStore(self):
dialog=FileStoreBrowserDialog(database_name=self.FileStoreDatabase, darungrim_storage_dir=self.FileStoreDir)
dialog.exec_()
def newFromFileStore(self):
dialog=NewDiffingFromFileStoreDialog(database_name=self.FileStoreDatabase, darungrim_storage_dir=self.FileStoreDir)
if dialog.exec_():
result_filename='%s-%s.dgf' % (dialog.OrigFileSHA1, dialog.PatchedFileSHA1)
log_filename='%s-%s.log' % (dialog.OrigFileSHA1, dialog.PatchedFileSHA1)
self.StartPerformDiff(dialog.OrigFilename,
dialog.PatchedFilename,
os.path.join(self.DataFilesDir, result_filename),
os.path.join(self.DataFilesDir, log_filename),
debug=False
)
file_store_database=FileStoreDatabase.Database(self.FileStoreDatabase)
file_store_database.AddSession(dialog.name_line.text(), dialog.description_line.text(), dialog.OrigFileID, dialog.PatchedFileID, result_filename)
def openFromFileStore(self):
dialog=SessionsDialog(database_name=self.FileStoreDatabase)
if dialog.exec_():
self.OpenDatabase(os.path.join(self.DataFilesDir, dialog.GetFilename()))
self.setWindowTitle("DarunGrim 4 %s" % dialog.GetDescription())
def new(self):
dialog=NewDiffingDialog()
if dialog.exec_():
src_filename = str(dialog.Filenames['Orig'])
target_filename = str(dialog.Filenames['Patched'])
result_filename = str(dialog.Filenames['Result'])
log_filename=result_filename+'.log'
is_src_target_storage = False
if src_filename.lower()[-4:]=='.dgf' and target_filename.lower()[-4:]=='.dgf':
is_src_target_storage=True
self.StartPerformDiff(
src_filename,
target_filename,
result_filename,
log_filename,
is_src_target_storage=is_src_target_storage,
)
def reanalyze(self):
database = DarunGrimDatabase.Database(self.DatabaseName)
[src_filename,target_filename] = database.GetDGFFileLocations()
database.Close()
del database
result_filename=''
if self.DatabaseName[-4:].lower()=='.dgf':
prefix=self.DatabaseName[0:-4]
else:
prefix=self.DatabaseName
i=0
while True:
result_filename=prefix+'-%d.dgf' % i
if not os.path.isfile(result_filename):
break
i+=1
log_filename=result_filename + '.log'
self.StartPerformDiff(src_filename,
target_filename,
str(self.DatabaseName),
log_filename=log_filename,
is_src_target_storage=True,
debug=False)
def onTextBoxDataReady(self,data):
if not self.LogDialog.isVisible():
self.LogDialog.show()
self.LogDialog.addText(data)
def onDiffLogReady(self,data):
if not self.LogDialog.isVisible():
self.LogDialog.show()
self.LogDialog.addText(data)
def PerformDiffCancelled(self):
if self.PerformDiffProcess!=None:
self.PerformDiffProcess.terminate()
self.PerformDiffProcessCancelled=True
def StartPerformDiff(self,src_filename,target_filename,result_filename,log_filename='',is_src_target_storage=False, debug=False):
print "Start Diffing Process: %s vs %s -> %s" % (src_filename,target_filename,result_filename)
self.clearAreas()
if os.path.isfile(log_filename):
os.unlink(log_filename)
try:
os.makedirs(os.path.dirname(result_filename))
except:
pass
src_ida_log_filename=result_filename+'.src.log'
target_ida_log_filename=result_filename+'.target.log'
q=None
debug=False
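		# note: the debug parameter is overridden here, so the in-process code path below it is effectively disabled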
if debug:
self.PerformDiffProcess=None
PerformDiffThread(src_filename,target_filename,result_filename,log_level=self.LogLevel,dbg_storage_dir=self.DataFilesDir,is_src_target_storage=is_src_target_storage,src_ida_log_filename = src_ida_log_filename, target_ida_log_filename = target_ida_log_filename, ida_path=self.IDAPath, ida64_path=self.IDA64Path, q=q)
else:
q=Queue()
self.PerformDiffProcess=Process(target=PerformDiffThread,args=(src_filename,target_filename,result_filename,log_filename,self.LogLevel,self.DataFilesDir,is_src_target_storage,src_ida_log_filename,target_ida_log_filename,self.IDAPath,self.IDA64Path,q))
self.PerformDiffProcess.start()
self.PerformDiffProcessCancelled=False
if self.PerformDiffProcess!=None:
qlog_thread=QueReadThread(q)
self.LogDialog.SetCancelCallback(self.PerformDiffCancelled)
self.LogDialog.DisableClose()
self.LogDialog.show()
qlog_thread.data_read.connect(self.onDiffLogReady)
qlog_thread.start()
log_threads=[]
for filename in [log_filename,src_ida_log_filename,target_ida_log_filename]:
log_thread=LogThread(filename)
log_thread.data_read.connect(self.onDiffLogReady)
log_thread.start()
log_threads.append(log_thread)
while True:
time.sleep(0.01)
if not self.PerformDiffProcess.is_alive():
break
qApp.processEvents()
for log_thread in log_threads:
log_thread.end()
qlog_thread.end()
self.LogDialog.EnableClose()
if not self.PerformDiffProcessCancelled:
self.LogDialog.addText("Diffing process finished.")
else:
self.LogDialog.addText("Diffing process cancelled.")
self.LogDialog.SetCancelCallback(None)
self.PerformDiffProcess=None
if not self.PerformDiffProcessCancelled:
self.OpenDatabase(result_filename)
def open(self):
(filename,filter)=QFileDialog.getOpenFileName(self,"Open...")
if filename:
self.clearAreas()
self.OpenDatabase(filename)
def OpenFolder(self,folder):
try:
subprocess.check_call(['explorer', folder])
except:
pass
def openOriginalFilesLocation(self):
database = DarunGrimDatabase.Database(self.DatabaseName)
[src_filename,target_filename]=database.GetFilesLocation()
self.OpenFolder(os.path.dirname(src_filename))
def openPatchedFilesLocation(self):
database = DarunGrimDatabase.Database(self.DatabaseName)
[src_filename,target_filename]=database.GetFilesLocation()
self.OpenFolder(os.path.dirname(target_filename))
def OpenIDA(self,filename):
ida_filename=filename
if filename[-4:].lower()!='.idb' and filename[-4:].lower()!='.i64':
for path in [filename[0:-4] + '.idb', filename[0:-4] + '.i64']:
if os.path.isfile(path):
ida_filename=path
break
self.DarunGrimEngine.OpenIDA(ida_filename)
def synchronizeIDA(self):
if self.DatabaseName:
database = DarunGrimDatabase.Database(self.DatabaseName)
[src_filename,target_filename]=database.GetFilesLocation()
self.DarunGrimEngine.SetSourceIDASession(src_filename)
self.DarunGrimEngine.SetTargetIDASession(target_filename)
self.OpenIDA(src_filename)
self.OpenIDA(target_filename)
def captureWindow(self):
(filename,filter)=QFileDialog.getSaveFileName(self,'Save file', filter="*.png")
if filename:
pixmap=QPixmap.grabWidget(super(QMainWindow,self))
pixmap.save(filename,"png")
def saveOrigGraph(self):
(filename,filter)=QFileDialog.getSaveFileName(self,'Save file', filter="*.png")
if filename:
self.OrigFunctionGraph.SaveImg(filename)
def savePatchedGraph(self):
(filename,filter)=QFileDialog.getSaveFileName(self,'Save file', filter="*.png")
if filename:
self.PatchedFunctionGraph.SaveImg(filename)
def showLogs(self):
self.LogDialog.show()
def toggleShowGraphs(self):
if self.ShowGraphs==True:
self.ShowGraphs=False
else:
self.ShowGraphs=True
self.RefreshGraphViews()
def toggleSyncrhonizeIDAUponOpening(self):
if self.SyncrhonizeIDAUponOpening==True:
self.SyncrhonizeIDAUponOpening=False
else:
self.SyncrhonizeIDAUponOpening=True
def showConfiguration(self):
dialog=ConfigurationDialog( file_store_dir=self.FileStoreDir,
data_files_dir=self.DataFilesDir,
ida_path=self.IDAPath,
ida64_path=self.IDA64Path,
log_level=self.LogLevel
)
if dialog.exec_():
self.FileStoreDir=dialog.file_store_dir_line.text()
self.DataFilesDir=dialog.data_files_dir_line.text()
self.FileStoreDatabase=os.path.join(self.DataFilesDir,'index.db')
self.IDAPath=dialog.ida_path_line.text()
self.IDA64Path=dialog.ida64_path_line.text()
self.DarunGrimEngine.SetIDAPath(self.IDAPath)
self.DarunGrimEngine.SetIDAPath(self.IDA64Path,True)
self.LogLevel=int(dialog.log_level_line.text())
def serverInfo(self):
dialog=ServerInfoDialog(port=self.DarunGrimEngine.ListeningPort)
dialog.exec_()
def toggleStaysOnTop(self):
if self.StaysOnTop==True:
self.StaysOnTop=False
self.hide()
self.setWindowFlags(self.windowFlags()& ~Qt.WindowStaysOnTopHint)
self.show()
else:
self.StaysOnTop=True
self.hide()
self.setWindowFlags(self.windowFlags()|Qt.WindowStaysOnTopHint)
self.show()
def installIDAPlugin(self):
(ret1,message1)=self.DarunGrimEngine.InstallIDAPlugin('DarunGrimPlugin.plw')
(ret2,message2)=self.DarunGrimEngine.InstallIDAPlugin('DarunGrimPlugin.p64')
if not ret1 or not ret2:
msg_box=QMessageBox()
if message1!=message2:
message1 += '\n' + message2
msg_box.setText('Try to run the program with an Administrator privilege\n' + message1)
msg_box.exec_()
return False
else:
msg_box=QMessageBox()
msg_box.setText('Installation successful\n'+message1 + '\n' + message2)
msg_box.exec_()
return True
def createActions(self):
self.newAct = QAction("New Diffing...",
self,
shortcut=QKeySequence.New,
statusTip="Create new diffing output",
triggered=self.new
)
self.openAct = QAction("Open...",
self,
shortcut=QKeySequence.Open,
statusTip="Open a dgf database",
triggered=self.open
)
self.manageFileStoreAct = QAction("Manage FileStore...",
self,
statusTip="Manage FileStore",
triggered=self.manageFileStore
)
self.newFromFileStoreAct = QAction("New Diffing (FileStore)...",
self,
statusTip="Create new diffing output",
triggered=self.newFromFileStore
)
self.openFromFileStoreAct = QAction("Open Diffing (FileStore)...",
self,
statusTip="Open diffing output",
triggered=self.openFromFileStore
)
self.reanalyzeAct = QAction("Reanalyze...",
self,
statusTip="Reanalyze current files",
triggered=self.reanalyze
)
self.synchronizeIDAAct = QAction("Synchronize IDA",
self,
statusTip="Synchronize IDA",
triggered=self.synchronizeIDA
)
self.openOriginalFilesLocationAct = QAction("Open Original Files Location",
self,
statusTip="Open original file location",
triggered=self.openOriginalFilesLocation
)
self.openPatchedFilesLocationAct = QAction("Open Patched Files Location",
self,
statusTip="Open patched file location",
triggered=self.openPatchedFilesLocation
)
self.captureWindowAct = QAction("Capture...",
self,
statusTip="Capture the window to an image file",
triggered=self.captureWindow
)
self.saveOrigGraphAct = QAction("Save orig graph...",
self,
statusTip="Save original graph",
triggered=self.saveOrigGraph
)
self.savePatchedGraphAct = QAction("Save patched graph...",
self,
statusTip="Save patched graph",
triggered=self.savePatchedGraph
)
self.showLogsAct = QAction("Show logs...",
self,
statusTip="Show logs",
triggered=self.showLogs
)
self.showGraphsAct = QAction("Show graphs...",
self,
statusTip="Show graphs",
triggered=self.toggleShowGraphs,
checkable=True
)
self.showGraphsAct.setChecked(self.ShowGraphs)
self.syncrhonizeIDAUponOpeningAct = QAction("Synchronize IDA upon opening...",
self,
statusTip="Synchronize IDA upon opening",
triggered=self.toggleSyncrhonizeIDAUponOpening,
checkable=True
)
self.syncrhonizeIDAUponOpeningAct.setChecked(self.SyncrhonizeIDAUponOpening)
self.configurationAct = QAction("Configuration...",
self,
statusTip="Configuration",
triggered=self.showConfiguration
)
self.serverInfoAct = QAction("Server...",
self,
statusTip="Server Info",
triggered=self.serverInfo
)
self.staysOnTopAct = QAction("Stays on top...",
self,
statusTip="Keep the window above all others",
triggered=self.toggleStaysOnTop,
checkable=True
)
self.staysOnTopAct.setChecked(self.StaysOnTop)
self.installIDAPluginAct = QAction("Install IDA Plugin...",
self,
statusTip="Install the DarunGrim IDA plugin",
triggered=self.installIDAPlugin
)
def createMenus(self):
self.fileMenu = self.menuBar().addMenu("&File")
self.fileMenu.addAction(self.newAct)
self.fileMenu.addAction(self.openAct)
self.fileMenu.addAction(self.manageFileStoreAct)
self.fileMenu.addAction(self.newFromFileStoreAct)
self.fileMenu.addAction(self.openFromFileStoreAct)
self.fileMenu.addAction(self.reanalyzeAct)
self.analysisMenu = self.menuBar().addMenu("&Analysis")
self.analysisMenu.addAction(self.synchronizeIDAAct)
self.analysisMenu.addAction(self.openOriginalFilesLocationAct)
self.analysisMenu.addAction(self.openPatchedFilesLocationAct)
self.analysisMenu.addAction(self.captureWindowAct)
self.analysisMenu.addAction(self.saveOrigGraphAct)
self.analysisMenu.addAction(self.savePatchedGraphAct)
self.analysisMenu.addAction(self.showLogsAct)
self.optionsMenu = self.menuBar().addMenu("&Options")
self.optionsMenu.addAction(self.showGraphsAct)
self.optionsMenu.addAction(self.syncrhonizeIDAUponOpeningAct)
self.optionsMenu.addAction(self.staysOnTopAct)
self.optionsMenu.addAction(self.configurationAct)
self.optionsMenu.addAction(self.serverInfoAct)
self.optionsMenu.addAction(self.installIDAPluginAct)
def OpenDatabase(self,databasename):
self.DatabaseName=databasename
self.FunctionMatchTable=FunctionMatchTable(self,self.DatabaseName)
self.FunctionMatchTableView.setModel(self.FunctionMatchTable)
selection=self.FunctionMatchTableView.selectionModel()
if selection!=None:
selection.selectionChanged.connect(self.handleFunctionMatchTableChanged)
if self.ShowBBMatchTableView:
self.BBMatchTable=BBMatchTable(self,self.DatabaseName)
self.BBMatchTableView.setModel(self.BBMatchTable)
selection=self.BBMatchTableView.selectionModel()
if selection!=None:
selection.selectionChanged.connect(self.handleBBMatchTableChanged)
database = DarunGrimDatabase.Database(self.DatabaseName)
self.setWindowTitle("DarunGrim 4 - %s" % (database.GetDescription()))
if self.SyncrhonizeIDAUponOpening:
self.synchronizeIDA()
def ColorController(self, type, disasms, match_info):
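# Color basic blocks in IDA (values are BGR): 0x0000FF = red for unmatched blocks, 0x00FFFF = yellow for imperfect matches.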
for (address,[end_address,disasm]) in disasms.items():
if address not in match_info:
#Red block
self.DarunGrimEngine.ColorAddress(type, address, end_address+1, 0x0000FF)
elif match_info[address][1]!=100:
#Yellow block
self.DarunGrimEngine.ColorAddress(type, address, end_address+1, 0x00FFFF)
def handleFunctionMatchTableChanged(self,selected,dselected):
for item in selected:
for index in item.indexes():
[source_function_address, target_function_address] = self.FunctionMatchTable.GetFunctionAddresses(index.row())
self.BlockTableModel=BlockTable(self,self.DatabaseName,source_function_address, target_function_address)
self.BlockTableView.setModel(self.BlockTableModel)
selection=self.BlockTableView.selectionModel()
if selection!=None:
selection.selectionChanged.connect(self.handleBlockTableChanged)
database=DarunGrimDatabase.Database(self.DatabaseName)
(source_disasms, source_links) = database.GetFunctionDisasmLines("Source", source_function_address)
(target_disasms, target_links) = database.GetFunctionDisasmLines("Target", target_function_address)
source_match_info=self.BlockTableModel.GetSourceMatchInfo()
target_match_info=self.BlockTableModel.GetTargetMatchInfo()
#IDA Sync
self.ColorController(0, source_disasms, source_match_info )
self.ColorController(1, target_disasms, target_match_info )
self.DarunGrimEngine.JumpToAddresses(source_function_address, target_function_address)
if self.ShowGraphs:
# Draw graphs
self.OrigFunctionGraph.SetDatabaseName(self.DatabaseName)
self.OrigFunctionGraph.DrawFunctionGraph("Source", source_function_address, source_disasms, source_links, source_match_info)
self.OrigFunctionGraph.SetSelectBlockCallback(self.SelectedBlock)
self.OrigFunctionGraph.HilightAddress(source_function_address)
self.PatchedFunctionGraph.SetDatabaseName(self.DatabaseName)
self.PatchedFunctionGraph.DrawFunctionGraph("Target", target_function_address, target_disasms, target_links, target_match_info)
self.PatchedFunctionGraph.SetSelectBlockCallback(self.SelectedBlock)
self.PatchedFunctionGraph.HilightAddress(target_function_address)
break
def handleBBMatchTableChanged(self,selected,dselected):
pass
def handleBlockTableChanged(self,selected,dselected):
for item in selected:
for index in item.indexes():
[orig_address,patched_address]=self.BlockTableModel.GetBlockAddresses(index.row())
if self.ShowGraphs:
if orig_address!=0:
self.OrigFunctionGraph.HilightAddress(orig_address)
if patched_address!=0:
self.PatchedFunctionGraph.HilightAddress(patched_address)
self.DarunGrimEngine.JumpToAddresses(orig_address, patched_address)
break
def SelectedBlock(self,graph,address):
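# Clicking a block in one function graph highlights its matched counterpart in the other graph and jumps both IDA sessions to it.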
if graph==self.OrigFunctionGraph:
matched_address=self.BlockTableModel.GetMatchAddresses(0,address)
if matched_address!=None:
self.PatchedFunctionGraph.HilightAddress(matched_address)
self.DarunGrimEngine.JumpToAddresses(0, matched_address)
elif graph==self.PatchedFunctionGraph:
matched_address=self.BlockTableModel.GetMatchAddresses(1,address)
if matched_address!=None:
self.OrigFunctionGraph.HilightAddress(matched_address)
self.DarunGrimEngine.JumpToAddresses(matched_address, 0)
def changeEvent(self,event):
if event.type()==QEvent.WindowStateChange:
if (self.windowState()&Qt.WindowMinimized)==0 and \
(self.windowState()&Qt.WindowMaximized)==0 and \
(self.windowState()&Qt.WindowFullScreen)==0 and \
(self.windowState()&Qt.WindowActive)==0:
pass
def resizeEvent(self,event):
if not self.isMaximized():
self.NonMaxGeometry=self.saveGeometry()
def restoreUI(self):
settings=QSettings("DarunGrim LLC", "DarunGrim")
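# Restore the last window geometry and state; fall back to 800x600 on a fresh profile.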
if settings.contains("geometry/non_max"):
self.NonMaxGeometry=settings.value("geometry/non_max")
self.restoreGeometry(self.NonMaxGeometry)
else:
self.resize(800,600)
self.NonMaxGeometry=self.saveGeometry()
if settings.contains("isMaximized"):
if settings.value("isMaximized")=="true":
self.setWindowState(self.windowState()|Qt.WindowMaximized)
self.restoreState(settings.value("windowState"))
self.FirstConfigured=False
if not settings.contains("General/FirstConfigured"):
self.showConfiguration()
if self.installIDAPlugin():
self.FirstConfigured=True
else:
self.FirstConfigured=True
def readSettings(self):
settings=QSettings("DarunGrim LLC", "DarunGrim")
self.ShowGraphs=True
if settings.contains("General/ShowGraphs"):
if settings.value("General/ShowGraphs")=='true':
self.ShowGraphs=True
else:
self.ShowGraphs=False
self.SyncrhonizeIDAUponOpening=False
if settings.contains("General/SyncrhonizeIDAUponOpening"):
if settings.value("General/SyncrhonizeIDAUponOpening")=='true':
self.SyncrhonizeIDAUponOpening=True
else:
self.SyncrhonizeIDAUponOpening=False
self.StaysOnTop=False
if settings.contains("General/StaysOnTop"):
if settings.value("General/StaysOnTop")=='true':
self.StaysOnTop=True
else:
self.StaysOnTop=False
if self.StaysOnTop==True:
self.setWindowFlags(self.windowFlags()|Qt.WindowStaysOnTopHint)
else:
self.setWindowFlags(self.windowFlags()& ~Qt.WindowStaysOnTopHint)
self.FileStoreDir = os.path.join(os.getcwd(), "DarunGrimStore")
if settings.contains("General/FileStoreDir"):
self.FileStoreDir=settings.value("General/FileStoreDir")
if not os.path.isdir(self.FileStoreDir):
try:
os.makedirs(self.FileStoreDir)
except:
import traceback
traceback.print_exc()
self.FileStoreDatabase='index.db'
if settings.contains("General/FileStoreDatabase"):
self.FileStoreDatabase=settings.value("General/FileStoreDatabase")
self.DataFilesDir=os.path.join(os.getcwd(), "DarunGrimData")
if settings.contains("General/DataFilesDir"):
self.DataFilesDir=settings.value("General/DataFilesDir")
if not os.path.isdir(self.DataFilesDir):
try:
os.makedirs(self.DataFilesDir)
except:
import traceback
traceback.print_exc()
self.IDAPath=''
if settings.contains("General/IDAPath"):
self.IDAPath=settings.value("General/IDAPath")
else:
files=self.DarunGrimEngine.LocateIDAExecutables()
if len(files)>0:
self.IDAPath=files[0][0]
self.DarunGrimEngine.SetIDAPath(self.IDAPath)
if not self.DarunGrimEngine.CheckIDAPlugin():
#print 'DarunGrim plugin is missing'
pass
self.IDA64Path=''
if settings.contains("General/IDA64Path"):
self.IDA64Path=settings.value("General/IDA64Path")
else:
files=self.DarunGrimEngine.LocateIDAExecutables(is_64=True)
if len(files)>0:
self.IDA64Path=files[0][0]
self.DarunGrimEngine.SetIDAPath(self.IDA64Path,is_64=True)
self.LogLevel=10
if settings.contains("General/LogLevel"):
self.LogLevel=int(settings.value("General/LogLevel"))
def saveSettings(self):
settings = QSettings("DarunGrim LLC", "DarunGrim")
settings.setValue("General/ShowGraphs", self.ShowGraphs)
settings.setValue("General/SyncrhonizeIDAUponOpening", self.SyncrhonizeIDAUponOpening)
settings.setValue("General/StaysOnTop", self.StaysOnTop)
settings.setValue("General/FileStoreDir", self.FileStoreDir)
settings.setValue("General/FileStoreDatabase", self.FileStoreDatabase)
settings.setValue("General/DataFilesDir", self.DataFilesDir)
settings.setValue("General/LogLevel", self.LogLevel)
if self.FirstConfigured==True:
settings.setValue("General/FirstConfigured", self.FirstConfigured)
if self.NonMaxGeometry!=None:
settings.setValue("geometry/non_max", self.NonMaxGeometry)
settings.setValue("isMaximized", self.isMaximized())
settings.setValue("windowState", self.saveState())
def closeEvent(self, event):
self.PerformDiffCancelled()
self.saveSettings()
QMainWindow.closeEvent(self, event)
if __name__=='__main__':
multiprocessing.freeze_support()
import sys
import time
if len(sys.argv)>1:
database_name=sys.argv[1]
else:
database_name=''
app=QApplication(sys.argv)
pixmap=QPixmap('DarunGrimSplash.png')
splash=QSplashScreen(pixmap)
splash.show()
app.processEvents()
time.sleep(0.5)
window=MainWindow(database_name)
window.show()
splash.finish(window)
sys.exit(app.exec_())
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=48634 | remove_generators=0 | remove_function_no_docstring=955 | remove_class_no_docstring=0 | remove_unused_imports=9 | remove_delete_markers=676

hexsha: 4fb3719d6c98f0ef30141c1f977dfa7d852da269 | size: 570 | ext: py | lang: Python
path: test.py | repo: FlyingKiwis/Esipraisal | head: 47b19baab7f59fd6a0a0f84b85708d017e7ce011 | licenses: ["MIT"]
stars: 1 (2020-06-02 to 2020-06-02) | issues: null | forks: null
import asyncio
import logging
from Esipraisal.Esipraisal import Esipraisal
ep_log = logging.getLogger("Esipraisal")
ep_log.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
ep_log.addHandler(ch)
ep = Esipraisal()
region_ids=[10000002, 10000043, 10000032, 10000016, 10000042, 10000030, 10000064, 10000033, 10000068, 10000020, 10000040, 10000013, 10000039, 10000058]
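# Appraise a single EVE Online type ID across the listed market regions (10000002 is The Forge, the region containing Jita).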
app = asyncio.run(ep.appraise(29988, region_ids))
print(app)
avg_line_length: 31.666667 | max_line_length: 151 | alphanum_fraction: 0.777193
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=0 | remove_delete_markers=0

hexsha: 69656441d9c0bf3b5e0c5066856ee54e3a84cc09 | size: 147 | ext: py | lang: Python
path: reddit2telegram/channels/r_goodanimemes/app.py | repo: mainyordle/reddit2telegram | head: 1163e15aed3b6ff0fba65b222d3d9798f644c386 | licenses: ["MIT"]
stars: 187 (2016-09-20 to 2022-03-29) | issues: 84 (2016-09-22 to 2022-03-19) | forks: 172 (2016-09-21 to 2022-03-16)
#encoding:utf-8
subreddit = 'goodanimemes'
t_channel = '@r_goodanimemes'
avg_line_length: 16.333333 | max_line_length: 38 | alphanum_fraction: 0.755102
#encoding:utf-8
subreddit = 'goodanimemes'
t_channel = '@r_goodanimemes'
def send_post(submission, r2t):
return r2t.send_simple(submission)
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=49 | remove_class_no_docstring=0 | remove_unused_imports=0 | remove_delete_markers=23

hexsha: ee9a6c5456efb857c26a2d7a258bbd6b0bf0d4f4 | size: 1369 | ext: py | lang: Python
path: pymoo/learning/part_1.py | repo: ubeydtalha/bitirme-projesi | head: 71601eb04a5e8a0aa93357ddf8b978d68eae6cdc | licenses: ["MIT"]
stars: null | issues: null | forks: null
import numpy as np
import matplotlib.pyplot as plt
plt.rc('font', family='serif')
X1 , X2 = np.meshgrid(np.linspace(-2,2,500),np.linspace(-2,2,500))
F1 = X1**2 + X2**2
F2 = (X1-1)**2+X2**2
G = X1**2 - X1 + 3/16
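# NOTE: G is computed here but never plotted below; only G1 and G2 are drawn.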
G1 = 2 * (X1[0] - 0.1) * (X1[0] - 0.9)
G2 = 20 * (X1[0] - 0.4) * (X1[0] - 0.6)
levels = [0.02, 0.1 , 0.25 , 0.5 , 0.8]
plt.figure(figsize=(7,5))
CS = plt.contour(X1,X2,F1,levels,linestyles="dashed",color="black", alpha = 0.5)
CS.collections[0].set_label("$f_1(x)$")
CS = plt.contour(X1, X2, F2, levels, linestyles="dashed", colors='black', alpha=0.5)
CS.collections[0].set_label("$f_2(x)$")
plt.plot(X1[0], G1, linewidth=2.0, color="green", linestyle='dotted')
plt.plot(X1[0][G1<0], G1[G1<0], label="$g_1(x)$", linewidth=2.0, color="green")
plt.plot(X1[0], G2, linewidth=2.0, color="blue", linestyle='dotted')
plt.plot(X1[0][X1[0]>0.6], G2[X1[0]>0.6], label="$g_2(x)$",linewidth=2.0, color="blue")
plt.plot(X1[0][X1[0]<0.4], G2[X1[0]<0.4], linewidth=2.0, color="blue")
plt.plot(np.linspace(0.1,0.4,100), np.zeros(100),linewidth=3.0, color="orange")
plt.plot(np.linspace(0.6,0.9,100), np.zeros(100),linewidth=3.0, color="orange")
plt.xlim(-0.5, 1.5)
plt.ylim(-0.5, 1)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),
ncol=4, fancybox=True, shadow=False)
plt.tight_layout()
plt.show()
avg_line_length: 29.76087 | max_line_length: 87 | alphanum_fraction: 0.620161
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=0 | remove_delete_markers=0

hexsha: 11e5788ea89ce816b5cc94614412f19c35457815 | size: 4923 | ext: py | lang: Python
path: tests/test_models.py | repo: zbohm/aldryn-translation-tools | head: 3e86f575fea12124a25a6c2d28e10324347d6c5e | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: 1 (2020-09-10 to 2020-09-10)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
avg_line_length: 32.82 | max_line_length: 71 | alphanum_fraction: 0.639651
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TransactionTestCase
from django.utils.translation import ugettext_lazy as _
from test_addon.models import Complex, Simple, Unconventional
class TestTranslatableAutoSlugifyMixin(TransactionTestCase):
def test_simple_slug(self):
simple = Simple()
simple.set_current_language('en')
simple.name = 'Simple'
simple.save()
self.assertEquals(simple.slug, 'simple')
def test_unconventional_slug(self):
unconventional = Unconventional()
unconventional.set_current_language('en')
unconventional.title = 'Unconventional'
unconventional.save()
self.assertEquals('unconventional', unconventional.unique_slug)
def test_complex_slug(self):
complex1 = Complex()
complex1.set_current_language('en')
complex1.name = 'one'
complex1.object_type = 'complex'
complex1.save()
self.assertEquals('complex-one', complex1.slug)
def test_existing_object(self):
simple = Simple()
simple.set_current_language('en')
simple.save()
# slug is now the default
simple.name = 'A new name'
simple.slug = None
simple.save()
self.assertEquals('a-new-name', simple.slug)
def test_limited_length(self):
Simple.slug_max_length = 6
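# With only six characters available, repeated saves are guaranteed to collide, exercising the slug uniquifier.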
try:
for r in range(0, 101):
simple = Simple()
simple.set_current_language('en')
simple.name = 'Simple'
simple.save()
except Exception:
self.fail()
Simple.slug_max_length = None
def test_slug_unique_global(self):
Simple.slug_globally_unique = True
simple_en = Simple()
simple_en.set_current_language('en')
simple_en.name = 'SimpleOne'
simple_en.save()
simple_fr = Simple()
simple_fr.set_current_language('fr')
simple_fr.name = 'SimpleOne'
simple_fr.save()
self.assertNotEquals(simple_en.slug, simple_fr.slug)
Simple.slug_globally_unique = None # default is False
simple_en = Simple()
simple_en.set_current_language('en')
simple_en.name = 'SimpleTwo'
simple_en.save()
simple_fr = Simple()
simple_fr.set_current_language('fr')
simple_fr.name = 'SimpleTwo'
simple_fr.save()
self.assertEquals(simple_en.slug, simple_fr.slug)
def test_slug_unique_for_language(self):
simple_en_1 = Simple()
simple_en_1.set_current_language('en')
simple_en_1.name = 'SimpleOne'
simple_en_1.save()
# make another instance with same name
simple_en_2 = Simple()
simple_en_2.set_current_language('en')
simple_en_2.name = 'SimpleOne'
simple_en_2.save()
# slugs should not be same.
self.assertNotEquals(simple_en_1.slug, simple_en_2.slug)
def test_slug_unique_for_language_if_slug_is_the_same(self):
simple_en_1 = Simple()
simple_en_1.set_current_language('en')
simple_en_1.name = 'SimpleOne'
simple_en_1.slug = 'simpleone'
simple_en_1.save()
# make another instance with same name
simple_en_2 = Simple()
simple_en_2.set_current_language('en')
simple_en_2.name = 'SimpleOne'
simple_en_2.slug = 'simpleone'
simple_en_2.save()
# slugs should not be same.
self.assertNotEquals(simple_en_1.slug, simple_en_2.slug)
def test_simple_slug_default(self):
# First test that the default works
simple = Simple()
simple.set_current_language('en')
simple.save()
self.assertEquals(
'simple-without-name', simple.get_slug_default())
# Also test without explicit language
self.assertEquals(
'simple-without-name', simple.get_slug_default())
# Now test that a default would be used if available
Simple.slug_default = _('unnamed-simple-object')
simple = Simple()
simple.set_current_language('en')
simple.save()
self.assertEquals(
'unnamed-simple-object', simple.get_slug_default())
# Also test without explicit language
self.assertEquals(
'unnamed-simple-object', simple.get_slug_default())
def test_unconventional_slug_default(self):
unconventional = Unconventional()
unconventional.set_current_language('en')
unconventional.save()
self.assertEquals(
'unconventional-model-without-short-title',
unconventional.get_slug_default()
)
def test_complex_slug_default(self):
complex1 = Complex()
complex1.set_current_language('en')
complex1.save()
self.assertEquals('complex-without-name', complex1.slug)
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=4670 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=96 | remove_delete_markers=91

hexsha: 6571d06b26e9d55f7b954eee07d36c375ace0e0f | size: 1527 | ext: py | lang: Python
path: api/predict.py | repo: xuhdev/MAX-Breast-Cancer-Mitosis-Detector | head: c7e777311da070994466f1bf45541451e18b8034 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
from flask_restplus import fields
from werkzeug.datastructures import FileStorage
from maxfw.core import MAX_API
input_parser = MAX_API.parser()
input_parser.add_argument('image', type=FileStorage, location='files', required=True,
help='An image file encoded as PNG with the size 64*64')
label_prediction = MAX_API.model('LabelPrediction', {
'probability': fields.Float(required=True, description='Probability of the image containing mitosis')
})
predict_response = MAX_API.model('ModelPredictResponse', {
'status': fields.String(required=True, description='Response status message'),
'predictions': fields.List(fields.Nested(label_prediction), description='Predicted labels and probabilities')
})
avg_line_length: 34.704545 | max_line_length: 113 | alphanum_fraction: 0.683039
from core.model import ModelWrapper
from flask_restplus import fields, abort
from werkzeug.datastructures import FileStorage
from maxfw.core import MAX_API, PredictAPI
input_parser = MAX_API.parser()
input_parser.add_argument('image', type=FileStorage, location='files', required=True,
help='An image file encoded as PNG with the size 64*64')
label_prediction = MAX_API.model('LabelPrediction', {
'probability': fields.Float(required=True, description='Probability of the image containing mitosis')
})
predict_response = MAX_API.model('ModelPredictResponse', {
'status': fields.String(required=True, description='Response status message'),
'predictions': fields.List(fields.Nested(label_prediction), description='Predicted labels and probabilities')
})
class ModelPredictAPI(PredictAPI):
model_wrapper = ModelWrapper()
@MAX_API.doc('predict')
@MAX_API.expect(input_parser)
@MAX_API.marshal_with(predict_response)
def post(self):
"""Make a prediction given input data"""
result = {'status': 'error'}
args = input_parser.parse_args()
try:
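# 'image' arrives as a Werkzeug FileStorage; read its raw bytes before decoding.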
image_data = args['image'].read()
image = self.model_wrapper._read_image(image_data)
preds = self.model_wrapper.predict(image)
label_preds = [{'probability': float(preds)}]
result['predictions'] = label_preds
result['status'] = 'ok'
except ValueError as e:
abort(400, str(e))
return result
filtered: remove_non_ascii=0 | remove_decorators=633 | remove_async=0 | remove_classes=76 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=33 | remove_delete_markers=45

hexsha: 13a86095619b618630a1898ef9c8fa97e0d04df6 | size: 608 | ext: py | lang: Python
path: examples/03-image/04-quad.py | repo: mcjocobe/drawExploration | head: 2c50526ef14dea5bc3802b7fda08871919d62ac4 | licenses: ["BSD-3-Clause"]
stars: 76 (2015-01-21 to 2022-02-04) | issues: 8 (2015-11-12 to 2020-06-09) | forks: 23 (2015-01-12 to 2020-04-13)
# Add the upper directory (where the nodebox module is) to the search path.
import os, sys; sys.path.insert(0, os.path.join("..",".."))
img = Image("creature.png")
# The image.quad property describes the four-sided polygon
# on which an image texture is "mounted".
# This is not necessarily a rectangle, the corners can be distorted:
img.quad.dx1 = 200
img.quad.dy1 = 100
img.quad.dx2 = 100
img.quad.dy2 = -100
# This flushes the image cache, so it is a costly operation.
canvas.size = 500, 500
canvas.run(draw)
avg_line_length: 26.434783 | max_line_length: 75 | alphanum_fraction: 0.703947
# Add the upper directory (where the nodebox module is) to the search path.
import os, sys; sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics import *
img = Image("creature.png")
# The image.quad property describes the four-sided polygon
# on which an image texture is "mounted".
# This is not necessarily a rectangle, the corners can be distorted:
img.quad.dx1 = 200
img.quad.dy1 = 100
img.quad.dx2 = 100
img.quad.dy2 = -100
# This flushes the image cache, so it is a costly operation.
def draw(canvas):
canvas.clear()
image(img)
canvas.size = 500, 500
canvas.run(draw)
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=30 | remove_class_no_docstring=0 | remove_unused_imports=9 | remove_delete_markers=46

hexsha: 0b20183397bcfd04f62df163fded2c064174a562 | size: 4874 | ext: py | lang: Python
path: src/five/grok/meta.py | repo: zopefoundation/five.grok | head: d5fcbe2e081a4cec96d8ec658498f9fc33963bf9 | licenses: ["ZPL-2.1"]
stars: 1 (2016-10-26 to 2016-10-26) | issues: 2 (2021-01-05 to 2021-03-25) | forks: 2 (2015-04-03 to 2019-08-20)
#############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from five.grok import interfaces
import grokcore.component
import grokcore.security
import grokcore.view
if interfaces.HAVE_FORMLIB:
if interfaces.HAVE_LAYOUT:
avg_line_length: 34.567376 | max_line_length: 78 | alphanum_fraction: 0.659007
#############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from five.grok import components, interfaces
from grokcore.view.meta.directoryresource import _get_resource_path
from zope import interface
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
import five.grok
import grokcore.component
import grokcore.security
import grokcore.view
import martian
from AccessControl.security import protectClass, protectName
from App.class_init import InitializeClass as initializeClass
if interfaces.HAVE_FORMLIB:
from five.grok import formlib
class FormGrokker(martian.ClassGrokker):
martian.component(components.GrokForm)
martian.directive(grokcore.component.context)
martian.priority(800) # Must be run before real formlib grokker.
def execute(self, factory, config, context, **kw):
# Set up form_fields from context class if they haven't been
# configured manually already using our version of get_auto_fields
if getattr(factory, 'form_fields', None) is None:
factory.form_fields = formlib.get_auto_fields(context)
return True
class ViewSecurityGrokker(martian.ClassGrokker):
martian.component(five.grok.View)
martian.directive(grokcore.security.require, name='permission')
def execute(self, factory, config, permission, **kw):
if permission is None:
permission = 'zope.Public'
config.action(
discriminator = ('five:protectClass', factory),
callable = protectClass,
args = (factory, permission)
)
# Protect the class
config.action(
discriminator = ('five:initialize:class', factory),
callable = initializeClass,
args = (factory,)
)
return True
if interfaces.HAVE_LAYOUT:
import grokcore.layout
class PageSecurityGrokker(ViewSecurityGrokker):
martian.component(grokcore.layout.Page)
def _register_resource(config, resource_path, name, layer):
resource_factory = components.ZopeTwoDirectoryResourceFactory(
name, resource_path)
adapts = (layer,)
provides = interface.Interface
config.action(
discriminator=('adapter', adapts, provides, name),
callable=grokcore.component.util.provideAdapter,
args=(resource_factory, adapts, provides, name),
)
return True
class DirectoryResourceGrokker(martian.ClassGrokker):
martian.component(components.ZopeTwoDirectoryResource)
martian.directive(grokcore.view.name, default=None)
martian.directive(grokcore.view.path)
martian.directive(grokcore.view.layer, default=IDefaultBrowserLayer)
def grok(self, name, factory, module_info, **kw):
# Need to store the module info object on the directory resource
# class so that it can look up the actual directory.
factory.module_info = module_info
return super(DirectoryResourceGrokker, self).grok(
name, factory, module_info, **kw)
def execute(self, factory, config, name, path, layer, **kw):
resource_path = _get_resource_path(factory.module_info, path)
name = name or factory.module_info.dotted_name
return _register_resource(config, resource_path, name, layer)
class ViewletSecurityGrokker(martian.ClassGrokker):
martian.component(five.grok.Viewlet)
martian.directive(grokcore.security.require, name='permission')
def execute(self, factory, config, permission, **kw):
if permission is None:
permission = 'zope.Public'
attributes = ['update', 'render',]
config.action(
discriminator = ('five:protectClass', factory),
callable = protectClass,
args = (factory, permission)
)
for attribute in attributes:
config.action(
discriminator = ('five:protectName', factory, attribute),
callable = protectName,
args = (factory, attribute, permission)
)
# Protect the class
config.action(
discriminator = ('five:initialize:class', factory),
callable = initializeClass,
args = (factory,)
)
return True
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=3121 | remove_generators=0 | remove_function_no_docstring=410 | remove_class_no_docstring=0 | remove_unused_imports=184 | remove_delete_markers=354

hexsha: 699b48ffb0f7def74fa65d53a33552fec5ab2029 | size: 283 | ext: py | lang: Python
path: utils/jax_utils/jax_startup.py | repo: Jakob-Unfried/msc-legacy | head: 2c41f3f714936c25dd534bd66da802c26176fcfa | licenses: ["MIT"]
stars: 1 (2021-03-22 to 2021-03-22) | issues: null | forks: null
"""
configure jax at startup
"""
avg_line_length: 17.6875 | max_line_length: 56 | alphanum_fraction: 0.699647
"""
configure jax at startup
"""
from jax.config import config
def startup():
# Jax config (needs to be executed right at startup)
config.update("jax_enable_x64", True)
def debugging():
config.update("jax_enable_x64", True)
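# jax_debug_nans re-runs suspect operations eagerly and raises as soon as a NaN appears: slow, but it pinpoints the source.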
config.update("jax_debug_nans", True)
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=171 | remove_class_no_docstring=0 | remove_unused_imports=8 | remove_delete_markers=69

hexsha: 2c5adae6fbb8596c949c80584ff15841235c705d | size: 968 | ext: py | lang: Python
path: renew/advancedclass/enumclass.py | repo: ianzhengnan/learnpy | head: ed1736ac976d56253183399466a167fb9319f869 | licenses: ["Apache-2.0"]
stars: 1 (2017-06-12 to 2017-06-12) | issues: null | forks: null
from enum import Enum
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
for name, member in Month.__members__.items():
print(name, '=>', member, ',', member.value)
# Jan => Month.Jan , 1
# Feb => Month.Feb , 2
# Mar => Month.Mar , 3
# Apr => Month.Apr , 4
# May => Month.May , 5
# Jun => Month.Jun , 6
# Jul => Month.Jul , 7
# Aug => Month.Aug , 8
# Sep => Month.Sep , 9
# Oct => Month.Oct , 10
# Nov => Month.Nov , 11
# Dec => Month.Dec , 12
# Custom enum class
day1 = Weekday.Mon
print(day1)
print(Weekday.Tue)
print(Weekday['Tue'])
print(Weekday.Tue.value)
print(day1 == Weekday.Mon)
for name, member in Weekday.__members__.items():
print(name, '=>', member)
# Sun => Weekday.Sun
# Mon => Weekday.Mon
# Tue => Weekday.Tue
# Wed => Weekday.Wed
# Thu => Weekday.Thu
# Fri => Weekday.Fri
# Sat => Weekday.Sat
avg_line_length: 18.615385 | max_line_length: 107 | alphanum_fraction: 0.594008
from enum import Enum, unique
Month = Enum('Month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
for name, member in Month.__members__.items():
print(name, '=>', member, ',', member.value)
# Jan => Month.Jan , 1
# Feb => Month.Feb , 2
# Mar => Month.Mar , 3
# Apr => Month.Apr , 4
# May => Month.May , 5
# Jun => Month.Jun , 6
# Jul => Month.Jul , 7
# Aug => Month.Aug , 8
# Sep => Month.Sep , 9
# Oct => Month.Oct , 10
# Nov => Month.Nov , 11
# Dec => Month.Dec , 12
# Custom enum class
@unique
class Weekday(Enum):
Sun = 0
Mon = 1
Tue = 2
Wed = 3
Thu = 4
Fri = 5
Sat = 6
day1 = Weekday.Mon
print(day1)
print(Weekday.Tue)
print(Weekday['Tue'])
print(Weekday.Tue.value)
print(day1 == Weekday.Mon)
for name, member in Weekday.__members__.items():
print(name, '=>', member)
# Sun => Weekday.Sun
# Mon => Weekday.Mon
# Tue => Weekday.Tue
# Wed => Weekday.Wed
# Thu => Weekday.Thu
# Fri => Weekday.Fri
# Sat => Weekday.Sat
filtered: remove_non_ascii=18 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=8 | remove_delete_markers=22

hexsha: cab45f526ea303c3c63b0a5eb65fac294ce0e7d0 | size: 1279 | ext: py | lang: Python
path: force_wfmanager/tests/dummy_classes/dummy_wfmanager.py | repo: force-h2020/force-wfmanager | head: bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | licenses: ["BSD-2-Clause"]
stars: 1 (2019-08-19 to 2019-08-19) | issues: 396 (2017-07-18 to 2021-05-03) | forks: 2 (2019-03-05 to 2020-04-16)
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
avg_line_length: 25.078431 | max_line_length: 76 | alphanum_fraction: 0.692729
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
from envisage.core_plugin import CorePlugin
from envisage.ui.tasks.tasks_plugin import TasksPlugin
from force_wfmanager.tests.dummy_classes.dummy_data_view import (
DummyExtensionPluginWithDataView
)
from force_wfmanager.tests.dummy_classes.dummy_contributed_ui import (
DummyUIPlugin, DummyUIPluginOld
)
from force_wfmanager.wfmanager import WfManager
class DummyWfManager(WfManager):
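# Loads only the minimal Envisage plugins and stubs out run(), so tests never start a real GUI event loop.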
def __init__(self):
plugins = [CorePlugin(), TasksPlugin()]
super(DummyWfManager, self).__init__(plugins=plugins)
def run(self):
pass
class DummyWfManagerWithPlugins(WfManager):
def __init__(self):
plugins = [
CorePlugin(),
TasksPlugin(),
DummyExtensionPluginWithDataView()
]
super(DummyWfManagerWithPlugins, self).__init__(plugins=plugins)
def run(self):
pass
class DummyUIWfManager(WfManager):
"""A workflow manager with a plugin contributing a UI"""
def __init__(self):
plugins = [
CorePlugin(), TasksPlugin(), DummyUIPlugin(), DummyUIPluginOld()
]
super(DummyUIWfManager, self).__init__(plugins=plugins)
def run(self):
pass
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=765 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=251 | remove_delete_markers=181

hexsha: ddff0c4a23bdd4d569ad29d522544468116098ff | size: 446 | ext: py | lang: Python
path: tests/py/test_currencies.py | repo: Scrumplex/liberapay.com | head: 2ef3477ceebc6e85b5db8c5a4a447195889cd4e9 | licenses: ["PostgreSQL", "CC0-1.0"]
stars: null | issues: null | forks: null
from __future__ import division, print_function, unicode_literals
avg_line_length: 29.733333 | max_line_length: 85 | alphanum_fraction: 0.674888
from __future__ import division, print_function, unicode_literals
from liberapay.testing import EUR, USD, Harness
class TestCurrencies(Harness):
def test_convert(self):
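# Checks that the SQL convert() function and Money.convert() agree on the same fixed test exchange rate.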
original = EUR('1.00')
expected = USD('1.20')
actual = self.db.one("SELECT convert(%s, %s)", (original, expected.currency))
assert expected == actual
actual = original.convert(expected.currency)
assert expected == actual
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=307 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=26 | remove_delete_markers=46

hexsha: 303f1277d22a2f01641fe04d7dedfbc973da5b6d | size: 2565 | ext: py | lang: Python
path: models/modelos.py | repo: Patataman/StudentApi | head: 42ff5651cdef2aeda8c012924db39554ab7762ce | licenses: ["MIT"]
stars: 1 (2016-03-11 to 2016-03-11) | issues: null | forks: null
from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
#Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
avg_line_length: 29.147727 | max_line_length: 185 | alphanum_fraction: 0.728265
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import ForeignKey
from sqlalchemy.sql import select
from sqlalchemy.orm import relationship
from sqlalchemy.dialects.postgresql import JSON, TEXT
db = SQLAlchemy()
#Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
class Persona(db.Model):
__tablename__ = 'personas'
id = db.Column(db.Integer, primary_key=True)
nia = db.Column(db.Integer)
nombre = db.Column(db.String(200))
apellido1 = db.Column(db.String(200))
apellido2 = db.Column(db.String(200))
curso = db.Column(db.Integer)
id_titulacion = db.Column(db.Integer)
id_centro = db.Column(db.Integer)
def __init__(self, nia, nombre, apellido1, apellido2, curso, id_titulacion):
self.nia = nia
self.nombre = nombre
self.apellido1 = apellido1
self.apellido2 = apellido2
self.curso = curso
self.id_titulacion = id_titulacion
def __repr__(self):
return 'id: {}, NIA: {}, Nombre: {}, Apellidos: {} {}, Curso: {}, Titulacion {}'.format(self.id, self.nia, self.nombre, self.apellido1, self.apellido2, self.curso, self.id_titulacion)
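# NOTE: the query helpers below are classmethods; their first parameter is named 'self' but is actually bound to the class.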
@classmethod
def search(self, nia):
return db.session.query(Persona).filter_by(nia = nia)
@classmethod
def getPermisos(self, app_id, id):
return db.session.query(Permisos).filter_by(id = id, app_id = app_id)
@classmethod
def isDelegado(self, id):
return db.session.query(DelCurso).filter_by(id = id)
@classmethod
def isDelegadoTitulacion(self, id):
return db.session.query(DelTitulacion).filter_by(id = id)
@classmethod
def isDelegadoCentro(self, id):
return db.session.query(DelCentro).filter_by(id = id)
class Permisos(db.Model):
__tablename__ = 'permisos'
def __repr__(self):
return 'id: {}, app_id: {}, rol: {}'.format(self.id, self.app_id, self.rol)
id = db.Column(db.Integer, ForeignKey("Persona.id"), primary_key=True)
app_id = db.Column(db.Integer, primary_key=True)
rol = db.Column(db.Integer)
class DelCurso(db.Model):
__tablename__ = 'delegadoscurso'
def __repr__(self):
return 'id: {}'.format(self.id)
id = db.Column(db.Integer, ForeignKey("Persona.id"), primary_key=True)
class DelTitulacion(db.Model):
__tablename__ = 'delegadostitulacion'
def __repr__(self):
return 'id: {}'.format(self.id)
id = db.Column(db.Integer, ForeignKey("Persona.id"), primary_key=True)
class DelCentro(db.Model):
__tablename__ = 'delegadoscentro'
def __repr__(self):
return 'id: {}, cargo: {}'.format(self.id, self.cargo)
id = db.Column(db.Integer, ForeignKey("Persona.id"), primary_key=True)
cargo = db.Column(db.Integer)
filtered: remove_non_ascii=0 | remove_decorators=411 | remove_async=0 | remove_classes=1740 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=74 | remove_delete_markers=203

hexsha: c06efbae0b2f960ea1ad2726615abc68ecf719f4 | size: 637 | ext: py | lang: Python
path: problema1.py | repo: enzoyoshio/Problemas | head: 1e2ce20d9931ef28e57aa54af3fe1708927ebab9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
# given list of dictionaries
listDict = [
{1 : 1, 2 : "oi", "nome" : "obrigado"},
{"Bolo" : "Cenoura", "Camarão" : "Verde", "nome" : "Sagrado"},
{1 : 10, "nome" : "oi", "caracol" : "obrigado"},
{"nome":"obrigado"}
]
# the key to search for
nome = "nome"
# initialize the empty list
lista = []
# check each dictionary for the key
for dict1 in listDict:
# if the key is in the dictionary
# and its value has not been added to the list yet, append it
if nome in dict1 and dict1[nome] not in lista:
lista.append(dict1[nome])
# print the list
print(lista)
avg_line_length: 27.695652 | max_line_length: 77 | alphanum_fraction: 0.634223
# given list of dictionaries
listDict = [
{1 : 1, 2 : "oi", "nome" : "obrigado"},
{"Bolo" : "Cenoura", "Camarão" : "Verde", "nome" : "Sagrado"},
{1 : 10, "nome" : "oi", "caracol" : "obrigado"},
{"nome":"obrigado"}
]
# the key to search for
nome = "nome"
# initialize the empty list
lista = []
# check each dictionary for the key
for dict1 in listDict:
# if the key is in the dictionary
# and its value has not been added to the list yet, append it
if nome in dict1 and dict1[nome] not in lista:
lista.append(dict1[nome])
# print the list
print(lista)
filtered: remove_non_ascii=16 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=0 | remove_delete_markers=0

hexsha: 7c97c1d349a7035b8c108e98916ffaaa384b38ef | size: 12469 | ext: py | lang: Python
path: authors/apps/authentication/views.py | repo: andela/ah-backend-realers | head: f4b0dbde16fed5e95ab3b1b60e365515e1fe6697 | licenses: ["BSD-3-Clause"]
stars: null | issues: 20 (2019-05-27 to 2021-06-10) | forks: 6 (2019-06-29 to 2020-03-02)
from rest_framework import exceptions
from .social_auth import ValidateSocialUser
check_user = ValidateSocialUser()
avg_line_length: 38.248466 | max_line_length: 121 | alphanum_fraction: 0.678242
from rest_framework import status, exceptions
from rest_framework.generics import RetrieveUpdateAPIView
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.conf import settings
from .models import User
from itsdangerous import URLSafeTimedSerializer, exc
from django.core.mail import send_mail
import os, re
from rest_framework import exceptions
from .renderers import UserJSONRenderer
from .serializers import (
LoginSerializer, RegistrationSerializer, UserSerializer,
ResetPasswordSerializer, SetNewPasswordSerializer,
FacebookAndGoogleSerializer, TwitterSerializer
)
import facebook
import twitter
from google.auth.transport import requests
from google.oauth2 import id_token
from drf_yasg.utils import swagger_auto_schema
from .backends import (
AccountVerification
)
from authors.apps.profiles.models import Profile
from .social_auth import ValidateSocialUser
check_user = ValidateSocialUser()
class RegistrationAPIView(APIView):
# Allow any user (authenticated or not) to hit this endpoint.
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = RegistrationSerializer
@swagger_auto_schema(
operation_description='Register a new user.',
operation_id='Sign up as a new user',
request_body=serializer_class,
responses={201: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def post(self, request):
user = request.data.get('user', {})
# The create serializer, validate serializer, save serializer pattern
# below is common and you will see it a lot throughout this course and
# your own work later on. Get familiar with it.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
serializer.save()
AccountVerification().send_verification_email(user.get('email'), request)
return Response(serializer.data, status=status.HTTP_201_CREATED)
class LoginAPIView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = LoginSerializer
@swagger_auto_schema(
operation_description='Login User.',
operation_id='login as a user',
request_body=serializer_class,
responses={201: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def post(self, request):
user = request.data.get('user', {})
# Notice here that we do not call `serializer.save()` like we did for
# the registration endpoint. This is because we don't actually have
# anything to save. Instead, the `validate` method on our serializer
# handles everything we need.
serializer = self.serializer_class(data=user)
serializer.is_valid(raise_exception=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class UserRetrieveUpdateAPIView(RetrieveUpdateAPIView):
"""
retrieve: Get User Details
Update: Update User Details
"""
permission_classes = (IsAuthenticated,)
renderer_classes = (UserJSONRenderer,)
serializer_class = UserSerializer
@swagger_auto_schema(
operation_id='Retrieve User Details',
request_body=serializer_class,
responses={201: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def retrieve(self, request, *args, **kwargs):
# There is nothing to validate or save here. Instead, we just want the
# serializer to handle turning our `User` object into something that
# can be JSONified and sent to the client.
serializer = self.serializer_class(request.user)
return Response(serializer.data, status=status.HTTP_200_OK)
@swagger_auto_schema(
operation_id='Update User Details',
request_body=serializer_class,
responses={201: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def update(self, request, *args, **kwargs):
serializer_data = request.data.get('user', {})
serializer = self.serializer_class(
request.user, data=serializer_data, partial=True
)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
class AccountActivation(APIView):
def get(self, request, **kwargs):
activation_key = kwargs.get('token')
user = AccountVerification().verify_token(activation_key)
response = AccountVerification.verify_user(user)
return Response(response)
class PasswordResetView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = ResetPasswordSerializer
@classmethod
def check_email(cls, email):
not_email = not email
invalid_email = not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email)
if not_email or invalid_email:
msg = "The email field can not be blank"
raise exceptions.ValidationError(msg) if not_email else \
exceptions.ValidationError("Provide a valid email address")
return None
def email_verification(self, email):
user = User.objects.filter(email = email).first() \
if not PasswordResetView.check_email(email) else None
if not user:
raise exceptions.ValidationError("This Email Address is not attached to any account")
@swagger_auto_schema(
operation_description='Reset Password.',
operation_id='reset password via email',
request_body=serializer_class,
responses={200: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def post(self, request):
if 'email' not in request.data:
raise exceptions.ValidationError("Please provide an Email Address")
email=request.data["email"]
self.email_verification(email)
serializer = URLSafeTimedSerializer(os.environ.get("SECRET_KEY"))
token = serializer.dumps(email, salt=os.environ.get("SECURITY_PASSWORD_SALT"))
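# The timed token embeds the email address; CreateNewPasswordView later recovers it via loads(..., max_age=...) with the same salt.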
reset_link = "https://authors-frontend-staging.herokuapp.com/change/{}".format(token)
recipient = [email]
sender = os.getenv('EMAIL_HOST_USER')
subject = 'Author\'s Haven Password Reset'
body = "You requested to change your account password.\n\
Click on the link below to complete changing your password.\n\n{}\n\n\
Ignore and Delete this email if you did not make this request.\n\n\t\
Author\'s Haven by The Realers.".format(reset_link)
send_mail(subject, body, sender, recipient, fail_silently=True)
data = {
"message": "Please check your email inbox for the Password Reset link we've sent",
"status": status.HTTP_200_OK
}
return Response(data, status=status.HTTP_200_OK)
class CreateNewPasswordView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = SetNewPasswordSerializer
def password_verification(self, password, confirm_password):
if (not password) or (not confirm_password):
raise exceptions.ValidationError("Provide both Password and Confirm_Password fields")
if len(password) < 8:
raise exceptions.ValidationError("Password length must be 8 or more characters")
if password != confirm_password:
raise exceptions.ValidationError("Password is not macthing with Confirm_password!")
@swagger_auto_schema(
operation_description='Set new Password.',
operation_id='Set new password using link sent in email',
request_body=serializer_class,
responses={201: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def patch(self, request, token):
try:
new_password = request.data.get("password")
confirm_new_password = request.data.get("confirm_password")
self.password_verification(new_password,confirm_new_password)
serializer = URLSafeTimedSerializer(os.environ.get("SECRET_KEY"))
email = serializer.loads(token, salt=os.environ.get("SECURITY_PASSWORD_SALT"),
max_age=3600*12*365)
user = User.objects.filter(email = email).first()
user.set_password(new_password)
user.save()
return Response({
"message": "You have succesfully reset your password",
"status": status.HTTP_201_CREATED
})
except exc.BadSignature:
raise exceptions.ValidationError("This is an invalidated link")
class FacebookAPIView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = FacebookAndGoogleSerializer
@swagger_auto_schema(
operation_description='Social Auth with Facebook',
operation_id='Login in a user using their Facebook credentials',
request_body=serializer_class,
responses={200: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def post(self, request):
user_data = request.data.get("user", {})
get_token = user_data.get("access_token")
# get the token
try:
facebook_acct_user = facebook.GraphAPI(access_token=get_token)
user_details = facebook_acct_user.get_object(
id='me', fields='id, name, email')
facebook_user = check_user.validate_system_user(user_details)
return Response(facebook_user, status=status.HTTP_200_OK)
except Exception:
return Response(
{"error": "Facebook login failed. Token is expired or invalid"}, status=status.HTTP_400_BAD_REQUEST)
class GoogleAPIView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = FacebookAndGoogleSerializer
@swagger_auto_schema(
operation_description='Social Auth with Google',
operation_id='Login in a user using their google credentials',
request_body=serializer_class,
responses={200: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def post(self, request):
user_data = request.data.get("user", {})
googl_auth_token = user_data.get("access_token")
# get the token
try:
user_cred = id_token.verify_oauth2_token(
googl_auth_token, requests.Request())
verified_user = check_user.validate_system_user(user_cred)
return Response(verified_user, status=status.HTTP_200_OK)
except Exception:
return Response(
{"error": "Google login failed. Token is either invalid or expired"}, status=status.HTTP_400_BAD_REQUEST)
class TwitterAPIView(APIView):
permission_classes = (AllowAny,)
renderer_classes = (UserJSONRenderer,)
serializer_class = TwitterSerializer
@swagger_auto_schema(
operation_description='Social Auth with Twitter',
operation_id='Authenticate user using Twitter',
request_body=serializer_class,
responses={200: serializer_class(many=False), 400: 'BAD REQUEST'},
)
def post(self, request): # pragma: no cover
twitter_token = request.GET.get("access_token")
twitter_token_secret = request.GET.get("access_token_secret")
# get the token and related twitter stuff
try:
from_twitter_api = twitter.Api(
consumer_key=os.getenv("TWITTER_CONSUMER_KEY", ""),
consumer_secret=os.getenv("TWITTER_CONSUMER_SECRET", ""),
access_token_key=twitter_token,
access_token_secret=twitter_token_secret
)
user_details = from_twitter_api.VerifyCredentials(include_email=True)
# get user details as a dictionary/ json format
user_details = user_details.__dict__
twitter_user_exist = check_user.validate_system_user(user_details)
return Response(twitter_user_exist, status=status.HTTP_200_OK)
except Exception:
return Response(
{"error": "Twitter login failed. Token either expired or invalid"}, status=status.HTTP_400_BAD_REQUEST)
filtered: remove_non_ascii=0 | remove_decorators=8645 | remove_async=0 | remove_classes=2551 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=492 | remove_delete_markers=630

hexsha: fc88e5944833ded64fa3cddeace07e5e0ee6b6f4 | size: 12152 | ext: py | lang: Python
path: visual_phenomics_py/calculate.py | repo: SeBassTian23/Visual-Phenomics-Python | head: 1ce9f2fff6bf47a7a4a2c9059eb534348b65b2b6 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""
Calculate additional parameters or recalculate parameters.
"""
def calculate(df=None, param='', *, fm='fm', f0='f0', fmp='fmp', f0p='f0p', fs='fs', fmpp='fmpp', f0pp='f0pp', fmf0=4.88, alias=None):
"""Calculate photosynthetic parameters
Calculate photosynthetic parameters from basic fluorescence parameters
:param df: The DataFrame to add the calculated parameters to.
    :param param: Parameter to calculate ('Fvfm', 'NPQ', 'NPQt', 'Phi2', 'PhiNO', 'PhiNOt', 'PhiNPQ', 'PhiNPQt', 'qE', 'qEsv', 'qEt', 'qI', 'qIt', 'qL', 'qP')
:param fm: fm column name (default 'fm')
:param f0: f0 column name (default 'f0')
:param fmp: fmp column name (default 'fmp')
:param f0p: f0p column name (default 'f0p')
:param fs: fs column name (default 'fs')
:param fmpp: fmpp column name (default 'fmpp')
:param f0pp: f0pp column name (default 'f0pp')
:param fmf0: Fm/F0 for t parameter (default 4.88)
:param alias: rename the selected parameter (default None)
:returns: a dataframe column for the calculated parameter
"""
# Parameter Names
parameters = ['Fvfm', 'NPQ', 'NPQt', 'Phi2', 'PhiNO', 'PhiNOt',
'PhiNPQ', 'PhiNPQt', 'qE', 'qEsv', 'qEt', 'qI', 'qIt', 'qL', 'qP']
if df is None:
raise Exception('No DataFrame selected.')
if (param in parameters):
alias_txt = ""
if alias is not None:
alias_txt = " as {0}".format(alias)
print('Calculating {0}{1}'.format(param, alias_txt))
for row in df.sort_values(by=['sample', 'time'], ascending=True).fillna(method="ffill").itertuples():
if param == 'Fvfm':
if {fm, f0}.issubset(df.columns):
df.at[row.Index, alias or param] = fvfm(
getattr(row, fm), getattr(row, f0))
else:
raise Exception(
'Missing parameter(s). Define columns for fm and f0')
elif param == 'NPQ':
if {fm, fmp}.issubset(df.columns):
df.at[row.Index, alias or param] = npq(
getattr(row, fm), getattr(row, fmp))
else:
raise Exception(
'Missing parameter(s). Define columns for fm and fmp')
elif param == 'NPQt':
if {fmp, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = npqt(
getattr(row, fmp), getattr(row, f0p), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp and f0p')
elif param == 'Phi2':
if {fmp, fs}.issubset(df.columns):
df.at[row.Index, alias or param] = phi2(
getattr(row, fmp), getattr(row, fs))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp and fs')
elif param == 'PhiNO':
if {fmp, fs, f0p, fm, f0}.issubset(df.columns):
df.at[row.Index, alias or param] = phino(getattr(row, fmp), getattr(
row, fs), getattr(row, f0p), getattr(row, fm), getattr(row, f0))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, fm, and f0')
elif param == 'PhiNOt':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = phinot(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
elif param == 'PhiNPQ':
if {fmp, fs, f0p, fm, f0}.issubset(df.columns):
df.at[row.Index, alias or param] = phinpq(getattr(row, fmp), getattr(
row, fs), getattr(row, f0p), getattr(row, fm), getattr(row, f0))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, f0p, fm, and f0')
elif param == 'PhiNPQt':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = phinpqt(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
elif param == 'qE':
if {fmpp, fmp}.issubset(df.columns):
df.at[row.Index, alias or param] = qe(
getattr(row, fmpp), getattr(row, fmp))
else:
raise Exception(
'Missing parameter(s). Define columns for fmpp and fmp')
elif param == 'qEsv':
if {fm, fmp, fmpp}.issubset(df.columns):
df.at[row.Index, alias or param] = qesv(
getattr(row, fm), getattr(row, fmp), getattr(row, fmpp))
else:
raise Exception(
'Missing parameter(s). Define columns for fm, fmp, and fmpp')
elif param == 'qEt':
if {fmp, f0p, fmpp, f0pp}.issubset(df.columns):
df.at[row.Index, alias or param] = qet(getattr(row, fmp), getattr(
row, f0p), getattr(row, fmpp), getattr(row, f0pp), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, f0p, fmpp, and f0pp')
elif param == 'qI':
if {fm, fmpp}.issubset(df.columns):
df.at[row.Index, alias or param] = qi(
getattr(row, fm), getattr(row, fmpp))
else:
raise Exception(
'Missing parameter(s). Define columns for fm and fmpp')
elif param == 'qIt':
if {fmpp, f0pp}.issubset(df.columns):
df.at[row.Index, alias or param] = qit(
getattr(row, fmpp), getattr(row, f0pp), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmpp and f0pp')
elif param == 'qL':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = ql(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
elif param == 'qP':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = qp(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
else:
raise Exception("No matching parameter found.")
else:
raise Exception('Unknown parameter. Available parameters are: {0}'.format(
", ".join(parameters)))
def calculate_additional(df=None, param='', *, v_phino='PhiNOt', v_phi2='Phi2', v_ql='qL', v_par='light_intensity', phinoopt=0.2, absorptivity=0.5, fmf0=4.88, alias=None):
"""Calculate additional Parameters
Calculate additional photosynthetic parameters based on calculated standard parameters
:param df: The DataFrame to add the calculated parameters to.
:param param: Parameter to calculate ('LEF', 'Vx', 'SPhi2', 'SNPQ', 'deltaNPQ')
:param v_phino: PhiNO column name (default 'PhiNOt')
:param v_phi2: Phi2 column name (default 'Phi2')
    :param v_ql: qL column name (default 'qL')
    :param v_par: light intensity (PAR) column name (default 'light_intensity')
    :param phinoopt: Optimal PhiNO (default 0.2)
:param absorptivity: Absorptivity for Vx parameter (default 0.5)
:param fmf0: Fm/F0 for t parameter (default 4.88)
:param alias: rename the selected parameter (default None)
:returns: a dataframe column for the calculated parameter
"""
# Parameter Names
parameters = ['LEF', 'Vx', 'SPhi2', 'SNPQ', 'deltaNPQ']
if df is None:
raise Exception('No DataFrame selected.')
if (param in parameters):
alias_txt = ""
if alias is not None:
alias_txt = " as {0}".format(alias)
print('Calculating {0}{1}'.format(param, alias_txt))
for row in df.sort_values(by=['sample', 'time'], ascending=True).fillna(method="ffill").itertuples():
if param == 'LEF':
if {v_phi2, v_par}.issubset(df.columns):
df.at[row.Index, alias or param] = lef(
getattr(row, v_phi2), getattr(row, v_par), absorptivity)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phi2 and v_par')
elif param == 'Vx':
if {v_phino, v_phi2, v_par}.issubset(df.columns):
df.at[row.Index, alias or param] = vx(
getattr(row, v_phino), getattr(row, v_phi2), getattr(row, v_par), absorptivity)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phino, v_phi2, and v_par')
elif param == 'SPhi2':
if {v_phino, v_phi2, v_ql}.issubset(df.columns):
df.at[row.Index, alias or param] = sphi2(
getattr(row, v_phi2), getattr(row, v_phino), getattr(row, v_ql), phinoopt, fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phino, v_phi2, and v_ql')
elif param == 'SNPQ':
                if {v_phino, v_phi2, v_ql}.issubset(df.columns):
df.at[row.Index, alias or param] = sphinpq(
getattr(row, v_phi2), getattr(row, v_phino), getattr(row, v_ql), phinoopt, fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phino, v_phi2, and v_ql')
elif param == 'deltaNPQ':
if {v_phino}.issubset(df.columns):
df.at[row.Index, alias or param] = deltanpq(
getattr(row, v_phino), phinoopt)
else:
                    raise Exception(
                        'Missing parameter(s). Define a column for v_phino')
else:
raise Exception("No matching parameter found.")
else:
raise Exception('Unknown parameter. Available parameters are: {0}'.format(
", ".join(parameters)))
def calculate_custom(df=None, name='', fn=None, *, cols=[], params={}):
"""Calculate additional Parameters
Use a custom function to calculate a custom parameter.
:param df: The DataFrame to add the calculated parameters to.
:param name: Parameter name
:param fn: Function name for the calculation
:param cols: Column names for parameters passed to function. (*args)
:param params: Parameters passed on to the function (**kwargs)
:returns: a dataframe column for the custom calculated parameter
"""
if df is None:
raise Exception('No DataFrame selected.')
if name == '' or name is None:
raise Exception('No parameter name defined.')
if (fn is None):
raise Exception('No function defined.')
if hasattr(fn, '__call__'):
for row in df.sort_values(by=['sample', 'time'], ascending=True).fillna(method="ffill").itertuples():
df.at[row.Index, name] = fn(
*[getattr(row, n) for n in cols], **params)
else:
raise Exception('No function defined.')
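
# --- Usage sketch (added for illustration; not from the original module) ---
# Any callable works: values from *cols* are passed positionally, *params*
# as keyword arguments.
def _example_calculate_custom():
    import pandas as pd
    df = pd.DataFrame({
        'sample': ['a', 'a'],
        'time': [0.0, 1.0],
        'fm': [3000.0, 2900.0],
        'f0': [600.0, 590.0],
    })
    calculate_custom(df, 'fv', fn=lambda fm, f0, scale=1.0: (fm - f0) * scale,
                     cols=['fm', 'f0'], params={'scale': 1.0})
    return df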
avg_line_length: 44.028986 | max_line_length: 171 | alphanum_fraction: 0.526991
original_content:
"""
Calculate additional parameters or recalculate parameters.
"""
from visual_phenomics_py.parameters import *
from visual_phenomics_py.parameters_additional import *
def calculate(df=None, param='', *, fm='fm', f0='f0', fmp='fmp', f0p='f0p', fs='fs', fmpp='fmpp', f0pp='f0pp', fmf0=4.88, alias=None):
"""Calculate photosynthetic parameters
Calculate photosynthetic parameters from basic fluorescence parameters
:param df: The DataFrame to add the calculated parameters to.
    :param param: Parameter to calculate ('Fvfm', 'NPQ', 'NPQt', 'Phi2', 'PhiNO', 'PhiNOt', 'PhiNPQ', 'PhiNPQt', 'qE', 'qEsv', 'qEt', 'qI', 'qIt', 'qL', 'qP')
:param fm: fm column name (default 'fm')
:param f0: f0 column name (default 'f0')
:param fmp: fmp column name (default 'fmp')
:param f0p: f0p column name (default 'f0p')
:param fs: fs column name (default 'fs')
:param fmpp: fmpp column name (default 'fmpp')
:param f0pp: f0pp column name (default 'f0pp')
:param fmf0: Fm/F0 for t parameter (default 4.88)
:param alias: rename the selected parameter (default None)
:returns: a dataframe column for the calculated parameter
"""
# Parameter Names
parameters = ['Fvfm', 'NPQ', 'NPQt', 'Phi2', 'PhiNO', 'PhiNOt',
'PhiNPQ', 'PhiNPQt', 'qE', 'qEsv', 'qEt', 'qI', 'qIt', 'qL', 'qP']
if df is None:
raise Exception('No DataFrame selected.')
if (param in parameters):
alias_txt = ""
if alias is not None:
alias_txt = " as {0}".format(alias)
print('Calculating {0}{1}'.format(param, alias_txt))
for row in df.sort_values(by=['sample', 'time'], ascending=True).fillna(method="ffill").itertuples():
if param == 'Fvfm':
if {fm, f0}.issubset(df.columns):
df.at[row.Index, alias or param] = fvfm(
getattr(row, fm), getattr(row, f0))
else:
raise Exception(
'Missing parameter(s). Define columns for fm and f0')
elif param == 'NPQ':
if {fm, fmp}.issubset(df.columns):
df.at[row.Index, alias or param] = npq(
getattr(row, fm), getattr(row, fmp))
else:
raise Exception(
'Missing parameter(s). Define columns for fm and fmp')
elif param == 'NPQt':
if {fmp, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = npqt(
getattr(row, fmp), getattr(row, f0p), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp and f0p')
elif param == 'Phi2':
if {fmp, fs}.issubset(df.columns):
df.at[row.Index, alias or param] = phi2(
getattr(row, fmp), getattr(row, fs))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp and fs')
elif param == 'PhiNO':
if {fmp, fs, f0p, fm, f0}.issubset(df.columns):
df.at[row.Index, alias or param] = phino(getattr(row, fmp), getattr(
row, fs), getattr(row, f0p), getattr(row, fm), getattr(row, f0))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, fm, and f0')
elif param == 'PhiNOt':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = phinot(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
elif param == 'PhiNPQ':
if {fmp, fs, f0p, fm, f0}.issubset(df.columns):
df.at[row.Index, alias or param] = phinpq(getattr(row, fmp), getattr(
row, fs), getattr(row, f0p), getattr(row, fm), getattr(row, f0))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, f0p, fm, and f0')
elif param == 'PhiNPQt':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = phinpqt(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
elif param == 'qE':
if {fmpp, fmp}.issubset(df.columns):
df.at[row.Index, alias or param] = qe(
getattr(row, fmpp), getattr(row, fmp))
else:
raise Exception(
'Missing parameter(s). Define columns for fmpp and fmp')
elif param == 'qEsv':
if {fm, fmp, fmpp}.issubset(df.columns):
df.at[row.Index, alias or param] = qesv(
getattr(row, fm), getattr(row, fmp), getattr(row, fmpp))
else:
raise Exception(
'Missing parameter(s). Define columns for fm, fmp, and fmpp')
elif param == 'qEt':
if {fmp, f0p, fmpp, f0pp}.issubset(df.columns):
df.at[row.Index, alias or param] = qet(getattr(row, fmp), getattr(
row, f0p), getattr(row, fmpp), getattr(row, f0pp), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, f0p, fmpp, and f0pp')
elif param == 'qI':
if {fm, fmpp}.issubset(df.columns):
df.at[row.Index, alias or param] = qi(
getattr(row, fm), getattr(row, fmpp))
else:
raise Exception(
'Missing parameter(s). Define columns for fm and fmpp')
elif param == 'qIt':
if {fmpp, f0pp}.issubset(df.columns):
df.at[row.Index, alias or param] = qit(
getattr(row, fmpp), getattr(row, f0pp), fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for fmpp and f0pp')
elif param == 'qL':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = ql(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
elif param == 'qP':
if {fmp, fs, f0p}.issubset(df.columns):
df.at[row.Index, alias or param] = qp(
getattr(row, fmp), getattr(row, fs), getattr(row, f0p))
else:
raise Exception(
'Missing parameter(s). Define columns for fmp, fs, and f0p')
else:
raise Exception("No matching parameter found.")
else:
raise Exception('Unknown parameter. Available parameters are: {0}'.format(
", ".join(parameters)))
def calculate_additional(df=None, param='', *, v_phino='PhiNOt', v_phi2='Phi2', v_ql='qL', v_par='light_intensity', phinoopt=0.2, absorptivity=0.5, fmf0=4.88, alias=None):
"""Calculate additional Parameters
Calculate additional photosynthetic parameters based on calculated standard parameters
:param df: The DataFrame to add the calculated parameters to.
:param param: Parameter to calculate ('LEF', 'Vx', 'SPhi2', 'SNPQ', 'deltaNPQ')
:param v_phino: PhiNO column name (default 'PhiNOt')
:param v_phi2: Phi2 column name (default 'Phi2')
    :param v_ql: qL column name (default 'qL')
    :param v_par: light intensity (PAR) column name (default 'light_intensity')
    :param phinoopt: Optimal PhiNO (default 0.2)
:param absorptivity: Absorptivity for Vx parameter (default 0.5)
:param fmf0: Fm/F0 for t parameter (default 4.88)
:param alias: rename the selected parameter (default None)
:returns: a dataframe column for the calculated parameter
"""
# Parameter Names
parameters = ['LEF', 'Vx', 'SPhi2', 'SNPQ', 'deltaNPQ']
if df is None:
raise Exception('No DataFrame selected.')
if (param in parameters):
alias_txt = ""
if alias is not None:
alias_txt = " as {0}".format(alias)
print('Calculating {0}{1}'.format(param, alias_txt))
for row in df.sort_values(by=['sample', 'time'], ascending=True).fillna(method="ffill").itertuples():
if param == 'LEF':
if {v_phi2, v_par}.issubset(df.columns):
df.at[row.Index, alias or param] = lef(
getattr(row, v_phi2), getattr(row, v_par), absorptivity)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phi2 and v_par')
elif param == 'Vx':
if {v_phino, v_phi2, v_par}.issubset(df.columns):
df.at[row.Index, alias or param] = vx(
getattr(row, v_phino), getattr(row, v_phi2), getattr(row, v_par), absorptivity)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phino, v_phi2, and v_par')
elif param == 'SPhi2':
if {v_phino, v_phi2, v_ql}.issubset(df.columns):
df.at[row.Index, alias or param] = sphi2(
getattr(row, v_phi2), getattr(row, v_phino), getattr(row, v_ql), phinoopt, fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phino, v_phi2, and v_ql')
elif param == 'SNPQ':
                if {v_phino, v_phi2, v_ql}.issubset(df.columns):
df.at[row.Index, alias or param] = sphinpq(
getattr(row, v_phi2), getattr(row, v_phino), getattr(row, v_ql), phinoopt, fmf0)
else:
raise Exception(
'Missing parameter(s). Define columns for v_phino, v_phi2, and v_ql')
elif param == 'deltaNPQ':
if {v_phino}.issubset(df.columns):
df.at[row.Index, alias or param] = deltanpq(
getattr(row, v_phino), phinoopt)
else:
                    raise Exception(
                        'Missing parameter(s). Define a column for v_phino')
else:
raise Exception("No matching parameter found.")
else:
raise Exception('Unknown parameter. Available parameters are: {0}'.format(
", ".join(parameters)))
def calculate_custom(df=None, name='', fn=None, *, cols=[], params={}):
"""Calculate additional Parameters
Use a custom function to calculate a custom parameter.
:param df: The DataFrame to add the calculated parameters to.
:param name: Parameter name
:param fn: Function name for the calculation
:param cols: Column names for parameters passed to function. (*args)
:param params: Parameters passed on to the function (**kwargs)
:returns: a dataframe column for the custom calculated parameter
"""
if df is None:
raise Exception('No DataFrame selected.')
if name == '' or name is None:
raise Exception('No parameter name defined.')
if (fn is None):
raise Exception('No function defined.')
if hasattr(fn, '__call__'):
for row in df.sort_values(by=['sample', 'time'], ascending=True).fillna(method="ffill").itertuples():
df.at[row.Index, name] = fn(
*[getattr(row, n) for n in cols], **params)
else:
raise Exception('No function defined.')
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=57 | remove_delete_markers=45

hexsha: 8650240fd28ccf9c0925b49b1840e0cf02b48178 | size: 1,489 | ext: py | lang: Python
repo_path: examples/adjacency_list.py | repo_name: enpaul/peewee | head_hexsha: 177f04cbce02851b0420e2002a72fa91ea9b309b | licenses: ["MIT"]
max_stars_count: 8,289 (2015-01-01T17:10:34.000Z to 2022-03-30T23:18:33.000Z)
max_issues_count: 2,015 (2015-01-02T16:59:35.000Z to 2022-03-31T02:41:24.000Z)
max_forks_count: 1,740 (2015-01-04T09:48:38.000Z to 2022-03-31T13:44:48.000Z)
content:
db = SqliteDatabase(':memory:')
db.create_tables([Node])
tree = ('root', (
('n1', (
('c11', ()),
('c12', ()))),
('n2', (
('c21', ()),
('c22', (
('g221', ()),
('g222', ()))),
('c23', ()),
('c24', (
('g241', ()),
('g242', ()),
('g243', ())))))))
stack = [(None, tree)]
while stack:
parent, (name, children) = stack.pop()
node = Node.create(name=name, parent=parent)
for child_tree in children:
stack.insert(0, (node, child_tree))
# Now that we have created the stack, let's eagerly load 4 levels of children.
# To show that it works, we'll turn on the query debugger so you can see which
# queries are executed.
import logging
logger = logging.getLogger('peewee')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
C = Node.alias('c')
G = Node.alias('g')
GG = Node.alias('gg')
GGG = Node.alias('ggg')
roots = Node.select().where(Node.parent.is_null())
pf = prefetch(roots, C, (G, C), (GG, G), (GGG, GG))
for root in pf:
print(root.dump())
avg_line_length: 25.237288 | max_line_length: 78 | alphanum_fraction: 0.552048
original_content:
from peewee import *
db = SqliteDatabase(':memory:')
class Node(Model):
name = TextField()
parent = ForeignKeyField('self', backref='children', null=True)
class Meta:
database = db
def __str__(self):
return self.name
def dump(self, _indent=0):
return (' ' * _indent + self.name + '\n' +
''.join(child.dump(_indent + 1) for child in self.children))
db.create_tables([Node])
tree = ('root', (
('n1', (
('c11', ()),
('c12', ()))),
('n2', (
('c21', ()),
('c22', (
('g221', ()),
('g222', ()))),
('c23', ()),
('c24', (
('g241', ()),
('g242', ()),
('g243', ())))))))
stack = [(None, tree)]
while stack:
parent, (name, children) = stack.pop()
node = Node.create(name=name, parent=parent)
for child_tree in children:
stack.insert(0, (node, child_tree))
# Now that we have created the stack, let's eagerly load 4 levels of children.
# To show that it works, we'll turn on the query debugger so you can see which
# queries are executed.
import logging
logger = logging.getLogger('peewee')
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
C = Node.alias('c')
G = Node.alias('g')
GG = Node.alias('gg')
GGG = Node.alias('ggg')
roots = Node.select().where(Node.parent.is_null())
pf = prefetch(roots, C, (G, C), (GG, G), (GGG, GG))
for root in pf:
print(root.dump())
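
# Rough shape of the queries prefetch() issues here, one per level (added for
# illustration; the exact SQL varies across peewee versions):
#   SELECT ... FROM "node" WHERE ("parent_id" IS NULL)
#   SELECT ... FROM "node" AS "c" WHERE ("c"."parent_id" IN (<root ids>))
#   ... and likewise down through the "g", "gg" and "ggg" aliases.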
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=337 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=-1 | remove_delete_markers=45

hexsha: 4567d33898daf9c73323f91f151e6d9eeb0b2e78 | size: 4,159 | ext: py | lang: Python
repo_path: ihna/kozhukhov/imageanalysis/gui/dataprocessing/spatialfilterdlg.py | repo_name: serik1987/ihna_kozhuhov_image_analysis | head_hexsha: ccfb3b48cbf6b351acb10f8b99315c65281f8ab8 | licenses: ["Unlicense"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
content:
# -*- coding: utf-8
avg_line_length: 38.509259 | max_line_length: 100 | alphanum_fraction: 0.675884
original_content:
# -*- coding: utf-8
import numpy as np
import wx
from ihna.kozhukhov.imageanalysis import ImagingMap
from ihna.kozhukhov.imageanalysis.mapprocessing import spatial_filter
from ihna.kozhukhov.imageanalysis.gui.complexmapviewerdlg import ComplexMapViewerDlg
from .datatodataprocessor import DataToDataProcessor
class SpatialFilterDlg(DataToDataProcessor):
__radius_box = None
__radius_big_box = None
__radius_checkbox = None
__radius_big_checkbox = None
def _get_processor_title(self):
return "Spatial filter"
def _check_input_data(self):
if not isinstance(self._input_data, ImagingMap):
raise ValueError("The input shall be complex imaging map")
if self._input_data.get_data().dtype != np.complex:
raise ValueError("The input map shall be complex imaging map")
def _get_default_minor_name(self):
return "mapfilt"
def _place_additional_options(self, parent):
additional_options = wx.BoxSizer(wx.VERTICAL)
radius_layout = wx.BoxSizer(wx.HORIZONTAL)
radius_caption = wx.StaticText(parent, label="Inner radius, px")
radius_layout.Add(radius_caption, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
self.__radius_box = wx.TextCtrl(parent)
radius_layout.Add(self.__radius_box, 1, wx.EXPAND | wx.RIGHT, 5)
self.__radius_checkbox = wx.CheckBox(parent, label="Off")
self.__radius_checkbox.Bind(wx.EVT_CHECKBOX, lambda event: self.__switch_inner_radius())
radius_layout.Add(self.__radius_checkbox, 0, wx.ALIGN_CENTER_VERTICAL)
additional_options.Add(radius_layout, 0, wx.EXPAND | wx.BOTTOM, 5)
radius_big_layout = wx.BoxSizer(wx.HORIZONTAL)
radius_big_caption = wx.StaticText(parent, label="Outer radius, px")
radius_big_layout.Add(radius_big_caption, 0, wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 5)
self.__radius_big_box = wx.TextCtrl(parent)
radius_big_layout.Add(self.__radius_big_box, 1, wx.EXPAND | wx.RIGHT, 5)
self.__radius_big_checkbox = wx.CheckBox(parent, label="Off")
self.__radius_big_checkbox.Bind(wx.EVT_CHECKBOX, lambda event: self.__switch_outer_radius())
radius_big_layout.Add(self.__radius_big_checkbox, 0, wx.ALIGN_CENTER_VERTICAL)
additional_options.Add(radius_big_layout, 0, wx.EXPAND)
return additional_options
def __switch_inner_radius(self):
if not self.__radius_checkbox.IsChecked():
self.__radius_box.Enable(True)
else:
self.__radius_box.Enable(False)
self.__radius_box.SetValue("")
def __switch_outer_radius(self):
if not self.__radius_big_checkbox.IsChecked():
self.__radius_big_box.Enable(True)
else:
self.__radius_big_box.Enable(False)
self.__radius_big_box.SetValue("")
def get_inner_radius(self):
if self.__radius_checkbox.IsChecked():
radius = 0
else:
try:
radius = int(self.__radius_box.GetValue())
if radius <= 0:
raise ValueError("The inner radius must be positive")
except ValueError:
raise ValueError("Please, enter a correct name of an inner radius")
return radius
def get_outer_radius(self):
if self.__radius_big_checkbox.IsChecked():
radius_big = 0
else:
try:
radius_big = int(self.__radius_big_box.GetValue())
if radius_big <= 0:
raise ValueError("The outer radius must be positive")
except ValueError:
raise ValueError("Please, enter a correct value of the outer radius")
return radius_big
def _process(self):
radius = self.get_inner_radius()
radius_big = self.get_outer_radius()
if radius > 0 and 0 < radius_big <= radius:
raise ValueError("The outer radius shall be greater than the inner radius")
self._output_data = spatial_filter(self._input_data, radius, radius_big)
def _get_result_viewer(self):
return ComplexMapViewerDlg
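
# Note (added for clarity, not from the original file): checking either "Off"
# box maps that radius to 0 before it reaches spatial_filter(); when both are
# set, _process() requires the outer radius to exceed the inner radius.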
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=3,825 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=157 | remove_delete_markers=156

hexsha: 0a0eccb3c3b514b534fa9214a71cc84c4df6c16c | size: 6,017 | ext: py | lang: Python
repo_path: pyalgotrade/technical/ma.py | repo_name: tibkiss/pyalgotrade | head_hexsha: 4979315281c362dcba2e6d53da27dc4a7377ebec | licenses: ["Apache-2.0"]
max_stars_count: 2 (2015-04-03T10:29:14.000Z to 2017-01-21T05:55:00.000Z)
max_issues_count: null | max_forks_count: null
content:
# PyAlgoTrade
#
# Copyright 2011 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
# This is the formula I'm using to calculate the averages based on previous ones.
# 1 2 3 4
# x x x
# x x x
#
# avg0 = (a + b + c) / 3
# avg1 = (b + c + d) / 3
#
# avg0 = avg1 + x
# (a + b + c) / 3 = (b + c + d) / 3 + x
# a/3 + b/3 + c/3 = b/3 + c/3 + d/3 + x
# a/3 = d/3 + x
# x = a/3 - d/3
# avg1 = avg0 - x
# avg1 = avg0 + d/3 - a/3
avg_line_length: 33.803371 | max_line_length: 116 | alphanum_fraction: 0.651321
original_content:
# PyAlgoTrade
#
# Copyright 2011 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <[email protected]>
"""
from pyalgotrade import technical
def calculate_sma(filterDS, firstPos, lastPos):
accum = 0
    for i in range(firstPos, lastPos+1):
value = filterDS.getValueAbsolute(i)
if value is None:
return None
accum += value
ret = accum / float(lastPos - firstPos + 1)
return ret
# This is the formula I'm using to calculate the averages based on previous ones.
# 1 2 3 4
# x x x
# x x x
#
# avg0 = (a + b + c) / 3
# avg1 = (b + c + d) / 3
#
# avg0 = avg1 + x
# (a + b + c) / 3 = (b + c + d) / 3 + x
# a/3 + b/3 + c/3 = b/3 + c/3 + d/3 + x
# a/3 = d/3 + x
# x = a/3 - d/3
# avg1 = avg0 - x
# avg1 = avg0 + d/3 - a/3
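#
# Quick numeric check of the update rule (illustrative, not from the original
# file): with a window of 3 over [a, b, c, d] = [3, 6, 9, 12]:
#   avg0 = (3 + 6 + 9) / 3 = 6
#   avg1 = avg0 + d/3 - a/3 = 6 + 4 - 1 = 9 = (6 + 9 + 12) / 3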
class SMA(technical.DataSeriesFilter):
"""Simple Moving Average filter.
:param dataSeries: The DataSeries instance being filtered.
:type dataSeries: :class:`pyalgotrade.dataseries.DataSeries`.
:param period: The number of values to use to calculate the SMA.
:type period: int.
"""
def __init__(self, dataSeries, period):
technical.DataSeriesFilter.__init__(self, dataSeries, period)
self.__prevAvg = None
self.__prevAvgPos = None
def __calculateFastSMA(self, firstPos, lastPos):
assert(firstPos > 0)
firstValue = self.getDataSeries().getValueAbsolute(firstPos-1)
lastValue = self.getDataSeries().getValueAbsolute(lastPos)
if lastValue is None:
return None
self.__prevAvg = self.__prevAvg + lastValue / float(self.getPeriod()) - firstValue / float(self.getPeriod())
self.__prevAvgPos = lastPos
return self.__prevAvg
def __calculateSMA(self, firstPos, lastPos):
ret = calculate_sma(self.getDataSeries(), firstPos, lastPos)
self.__prevAvg = ret
self.__prevAvgPos = lastPos
return ret
def getPeriod(self):
return self.getWindowSize()
def calculateValue(self, firstPos, lastPos):
        if self.__prevAvgPos is not None and self.__prevAvgPos == lastPos - 1:
ret = self.__calculateFastSMA(firstPos, lastPos)
else:
ret = self.__calculateSMA(firstPos, lastPos)
return ret
class EMA(technical.DataSeriesFilter):
"""Exponential Moving Average filter.
:param dataSeries: The DataSeries instance being filtered.
:type dataSeries: :class:`pyalgotrade.dataseries.DataSeries`.
:param period: The number of values to use to calculate the EMA.
:type period: int.
"""
def __init__(self, dataSeries, period):
technical.DataSeriesFilter.__init__(self, dataSeries, period)
self.__multiplier = (2.0 / (self.getWindowSize() + 1))
self.__values = {}
def getPeriod(self):
return self.getWindowSize()
# Finds the last available (value, position) starting from pos.
def __findPrevValue(self, pos):
ret = None
        while pos >= self.getFirstValidPos() and ret is None:
            ret = self.__values.get(pos)
            if ret is None:
pos -= 1
return (ret, pos)
def __calculateFirstValue(self):
# Calculate the first value, which is a SMA of the first X values of the wrapped data series.
smaEnd = self.getFirstValidPos()
smaBegin = smaEnd - (self.getWindowSize() - 1)
ret = calculate_sma(self.getDataSeries(), smaBegin, smaEnd)
self.__values[self.getFirstValidPos()] = ret
return ret
def __calculateEMA(self, startingValue, fromPos, toPos):
ret = startingValue
while fromPos <= toPos:
currValue = self.getDataSeries().getValueAbsolute(fromPos)
ret = (currValue - ret) * self.__multiplier + ret
self.__values[fromPos] = ret
fromPos += 1
return ret
def calculateValue(self, firstPos, lastPos):
# Formula from http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:moving_averages
lastValue, lastValuePos = self.__findPrevValue(lastPos-1)
        if lastValue is None:
# If we don't have any previous value, we need to start from scratch.
lastValue = self.__calculateFirstValue()
lastValuePos = self.getFirstValidPos()
# Calculate the EMA starting from the last one we have.
return self.__calculateEMA(lastValue, lastValuePos+1, lastPos)
class WMA(technical.DataSeriesFilter):
"""Weighted Moving Average filter.
:param dataSeries: The DataSeries instance being filtered.
:type dataSeries: :class:`pyalgotrade.dataseries.DataSeries`.
:param weights: A list of int/float with the weights.
:type weights: list.
"""
def __init__(self, dataSeries, weights):
technical.DataSeriesFilter.__init__(self, dataSeries, len(weights))
self.__weights = weights
def getPeriod(self):
return self.getWindowSize()
def getWeights(self):
return self.__weights
def calculateValue(self, firstPos, lastPos):
accum = 0
weightSum = 0
        for i in range(firstPos, lastPos+1):
value = self.getDataSeries().getValueAbsolute(i)
if value is None:
return None
weight = self.__weights[i - firstPos]
accum += value * weight
weightSum += weight
return accum / float(weightSum)
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=4,589 | remove_generators=0 | remove_function_no_docstring=264 | remove_class_no_docstring=0 | remove_unused_imports=12 | remove_delete_markers=115

hexsha: 8a5f497112103293d27e66f42bcc18eee1df1536 | size: 3,137 | ext: py | lang: Python
repo_path: src/nninst/backend/tensorflow/trace/resnet_18_cifar10_class_trace.py | repo_name: uchuhimo/Ptolemy | head_hexsha: 5c8ae188af30ee49d38f27d54c67af2eab9489e7 | licenses: ["Apache-2.0"]
max_stars_count: 15 (2020-08-24T07:11:20.000Z to 2021-09-13T08:03:42.000Z)
max_issues_count: 5 (2021-02-28T17:30:26.000Z to 2021-06-15T09:33:00.000Z)
max_forks_count: 3 (2020-10-22T09:11:11.000Z to 2021-01-16T14:49:34.000Z)
content:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from nninst import mode
from nninst.backend.tensorflow.dataset.config import (CIFAR10_TRAIN)
from nninst.backend.tensorflow.model.config import RESNET_18_CIFAR10
from nninst.backend.tensorflow.trace.common import (class_trace, class_trace_compact, class_trace_growth, full_trace, save_class_traces, save_class_traces_low_latency, save_full_trace_growth, self_similarity)
from nninst.utils.ray import ray_init
__all__ = ["resnet_18_cifar10_class_trace", "resnet_18_cifar10_self_similarity"]
name = "resnet_18_cifar10"
resnet_18_cifar10_class_trace = class_trace(
name=name, model_config=RESNET_18_CIFAR10, data_config=CIFAR10_TRAIN
)
resnet_18_cifar10_class_trace_growth = class_trace_growth(
name=name, model_config=RESNET_18_CIFAR10, data_config=CIFAR10_TRAIN
)
resnet_18_cifar10_class_trace_compact = class_trace_compact(
resnet_18_cifar10_class_trace, name=name, model_config=RESNET_18_CIFAR10
)
save_resnet_18_cifar10_class_traces_low_latency = save_class_traces_low_latency(
name=name, model_config=RESNET_18_CIFAR10, data_config=CIFAR10_TRAIN
)
resnet_18_cifar10_trace = full_trace(
name=name, class_trace_fn=resnet_18_cifar10_class_trace
)
save_resnet_18_cifar10_trace_growth = save_full_trace_growth(
name=name, class_trace_fn=resnet_18_cifar10_class_trace
)
resnet_18_cifar10_self_similarity = self_similarity(
name=name, trace_fn=resnet_18_cifar10_class_trace, class_ids=range(0, 10)
)
if __name__ == "__main__":
# mode.check(False)
# mode.debug()
# mode.local()
mode.distributed()
# ray_init("dell")
# ray_init("gpu")
ray_init()
threshold = 0.5
# threshold = 1
# threshold = 0.8
label = None
# label = "train_50"
# label = "train_start"
# label = "train_start_more"
# save_class_traces(resnet_18_cifar10_class_trace, range(0, 10), threshold=threshold, label=label,
# example_num=5000, example_upperbound=5000,
# )
save_resnet_18_cifar10_class_traces_low_latency(
range(0, 10), threshold=threshold, label=label, example_num=5000, batch_size=8
)
save_class_traces(
resnet_18_cifar10_class_trace_compact,
range(0, 10),
threshold=threshold,
label=label,
)
resnet_18_cifar10_self_similarity(threshold=threshold, label=label).save()
avg_line_length: 31.059406 | max_line_length: 102 | alphanum_fraction: 0.75263
original_content:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from nninst import mode
from nninst.backend.tensorflow.dataset.config import (
CIFAR10_TRAIN,
IMAGENET_RAW_TRAIN,
IMAGENET_TRAIN,
)
from nninst.backend.tensorflow.model.config import RESNET_18_CIFAR10, RESNET_50
from nninst.backend.tensorflow.trace.common import (
class_trace,
class_trace_compact,
class_trace_growth,
full_trace,
save_class_traces,
save_class_traces_low_latency,
save_full_trace_growth,
self_similarity,
)
from nninst.utils.ray import ray_init
__all__ = ["resnet_18_cifar10_class_trace", "resnet_18_cifar10_self_similarity"]
name = "resnet_18_cifar10"
resnet_18_cifar10_class_trace = class_trace(
name=name, model_config=RESNET_18_CIFAR10, data_config=CIFAR10_TRAIN
)
resnet_18_cifar10_class_trace_growth = class_trace_growth(
name=name, model_config=RESNET_18_CIFAR10, data_config=CIFAR10_TRAIN
)
resnet_18_cifar10_class_trace_compact = class_trace_compact(
resnet_18_cifar10_class_trace, name=name, model_config=RESNET_18_CIFAR10
)
save_resnet_18_cifar10_class_traces_low_latency = save_class_traces_low_latency(
name=name, model_config=RESNET_18_CIFAR10, data_config=CIFAR10_TRAIN
)
resnet_18_cifar10_trace = full_trace(
name=name, class_trace_fn=resnet_18_cifar10_class_trace
)
save_resnet_18_cifar10_trace_growth = save_full_trace_growth(
name=name, class_trace_fn=resnet_18_cifar10_class_trace
)
resnet_18_cifar10_self_similarity = self_similarity(
name=name, trace_fn=resnet_18_cifar10_class_trace, class_ids=range(0, 10)
)
if __name__ == "__main__":
# mode.check(False)
# mode.debug()
# mode.local()
mode.distributed()
# ray_init("dell")
# ray_init("gpu")
ray_init()
threshold = 0.5
# threshold = 1
# threshold = 0.8
label = None
# label = "train_50"
# label = "train_start"
# label = "train_start_more"
# save_class_traces(resnet_18_cifar10_class_trace, range(0, 10), threshold=threshold, label=label,
# example_num=5000, example_upperbound=5000,
# )
save_resnet_18_cifar10_class_traces_low_latency(
range(0, 10), threshold=threshold, label=label, example_num=5000, batch_size=8
)
save_class_traces(
resnet_18_cifar10_class_trace_compact,
range(0, 10),
threshold=threshold,
label=label,
)
resnet_18_cifar10_self_similarity(threshold=threshold, label=label).save()
filtered: remove_non_ascii=0 | remove_decorators=0 | remove_async=0 | remove_classes=0 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=97 | remove_delete_markers=0

hexsha: f77ea59bf822539953a84da90bca3bded5b1d71d | size: 4,051 | ext: py | lang: Python
repo_path: spider/wenkubaidu/wenku.py | repo_name: JackyYuanjie/python-scripts | head_hexsha: 490eb9668bda6db004ae87d204588fb6ffe56051 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2021-07-08T05:09:38.000Z to 2021-07-08T05:09:38.000Z)
max_issues_count: null | max_forks_count: 1 (2020-01-09T07:29:17.000Z to 2020-01-09T07:29:17.000Z)
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=22&rn=1&type=ppt&callback=bd__cbs__s5lw72
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=23&rn=1&type=ppt&callback=bd__cbs__coo5j5
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=21&rn=1&type=ppt&callback=bd__cbs__2hc9ds
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=5&rn=1&type=ppt&callback=bd__cbs__nh2gao
"""
linkfiles = "F:\\PythonProject\\python-scripts\\spider\\wenkubaidu\\odnimages\\"
if __name__=="__main__":
wk = WK()
for pn in range(1,26):
url = 'https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn={}&rn=1&type=ppt&callback=bd__cbs__nh2gao'.format(pn)
print(url,"")
wk.spyder(url)
"""
with open(linkfiles + "wenkulink.txt",'a+') as fw:
    # fw.write(url)  # these are the per-page request links; the image URLs can be extracted from them
# fw.write("\n")
"""
# wk.spyder(wk.baseUrl)
"""
Note: this URL can be opened in a browser as-is, but the code raises an error unless the \ characters are replaced.
https:\/\/wkretype.bdimg.com\/retype\/zoom\/6a30bde2f8c75fbfc77db23c?pn=4&raww=1080&rawh=810&o=jpg_6&md5sum=f9ace759cd13bfd0f9ad186d77af05fa&sign=0756077547&png=41164-280359&jpg=227559-365825
"""
avg_line_length: 44.032609 | max_line_length: 883 | alphanum_fraction: 0.689459
original_content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib
import requests
from bs4 import BeautifulSoup
"""
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=22&rn=1&type=ppt&callback=bd__cbs__s5lw72
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=23&rn=1&type=ppt&callback=bd__cbs__coo5j5
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=21&rn=1&type=ppt&callback=bd__cbs__2hc9ds
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=5&rn=1&type=ppt&callback=bd__cbs__nh2gao
"""
linkfiles = "F:\\PythonProject\\python-scripts\\spider\\wenkubaidu\\odnimages\\"
class WK():
'''
    Baidu Wenku (Baidu document library) downloader
'''
def __init__(self):
self.baseUrl = "https://wenku.baidu.com/view/564fc70a77a20029bd64783e0912a21615797ff7.html"
self.header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
def getResponse(self,url):
try:
req = urllib.request.Request(url,headers = self.header)
response = urllib.request.urlopen(req,timeout = 10)
        except Exception:
            print("Page request failed")
else:
return response.read().decode('gb2312')
def spyder(self,url):
html = self.getResponse(url)
# print(html)
start_index = html.find("https:")
# print(start_index)
print('-'*30)
end_index = html.find('","')
# print(end_index)
print(html[start_index:end_index])
"""
with open(linkfiles + "wenkucontent.txt",'a+') as fa:
fa.write(html)
fa.write("\n")
"""
header = self.header
header['Cookie'] = 'BAIDUID=2CC737B4D3E3D51EA7529F8065A8B708:FG=1; PSTM=1553749648; BIDUPSID=36D49C7DE8F84F920A6D6ADE0E719043; _click_param_pc_rec_doc_2017_testid=4; ZD_ENTRY=bing; cflag=13%3A3; session_name=cn.bing.com; isJiaoyuVip=1; wk_shifen_pop_window=7765_1_1567070315751; Hm_lvt_d8bfb560f8d03bbefc9bdecafc4a4bf6=1566318226,1566571568,1567070267,1567070708; session_id=1567070708094; BCLID=11327784929476180808; BDSFRCVID=aD0OJeC624LjSNrwjvtqhFVMiLK2tRQTH6055tzl7cu_UIsP_XwLEG0PDM8g0Ku-5SOpogKK0mOTHv-F_2uxOjjg8UtVJeC6EG0P3J; H_BDCLCKID_SF=JJ-qVCPbtDvbfP0kb-r_bPk0hNLHJK62aKDs3l-MBhcqEIL4jMv80UCX5U6q-no33HcuBlRcttbCVfbSj60hjJ0hhaJ2-lRPW67TMMn5Bp5nhMJeXj7JDMP0qHogWbOy523ion6vQpn-KqQ3DRoWXPIqbN7P-p5Z5mAqKl0MLIOkbRO4-TFaejOQDfK; userFirstTime=true; ___wk_scode_token=XdTTTDexiuWKJhoY9dcpx3hQOGs%2Bniyz9YrLayUnQsQ%3D; Hm_lpvt_d8bfb560f8d03bbefc9bdecafc4a4bf6=1567072063'
# print(header)
urlrep = html[start_index:end_index].replace('\\','')
# print(urlrep)
# req = requests.get('https://wkretype.bdimg.com//retype//zoom//6a30bde2f8c75fbfc77db23c?pn=4&raww=1080&rawh=810&o=jpg_6&md5sum=f9ace759cd13bfd0f9ad186d77af05fa&sign=0756077547&png=41164-280359&jpg=227559-365825')
req = requests.get(urlrep,headers = header)
"""
with open(linkfiles + "b.png",'wb') as fb:
fb.write(req.content)
"""
p_index = html.find('"page":')
p_end = html.find('}]')
pag = html[p_index+7:p_end]
with open(linkfiles + pag + ".png",'wb') as fb:
fb.write(req.content)
if __name__=="__main__":
wk = WK()
for pn in range(1,26):
url = 'https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn={}&rn=1&type=ppt&callback=bd__cbs__nh2gao'.format(pn)
print(url,"下载完成")
wk.spyder(url)
"""
with open(linkfiles + "wenkulink.txt",'a+') as fw:
    # fw.write(url)  # these are the per-page request links; the image URLs can be extracted from them
# fw.write("\n")
"""
# wk.spyder(wk.baseUrl)
"""
Note: this URL can be opened in a browser as-is, but the code raises an error unless the \ characters are replaced.
https:\/\/wkretype.bdimg.com\/retype\/zoom\/6a30bde2f8c75fbfc77db23c?pn=4&raww=1080&rawh=810&o=jpg_6&md5sum=f9ace759cd13bfd0f9ad186d77af05fa&sign=0756077547&png=41164-280359&jpg=227559-365825
"""
filtered: remove_non_ascii=210 | remove_decorators=0 | remove_async=0 | remove_classes=2,636 | remove_generators=0 | remove_function_no_docstring=0 | remove_class_no_docstring=0 | remove_unused_imports=-6 | remove_delete_markers=90

hexsha: dc824dbc29f0b42ffaa3b7d3fe8147c1f7a32031 | size: 18,983 | ext: py | lang: Python
repo_path: source/xgm_mod_options.py | repo_name: Omni-9/warband_mod_source | head_hexsha: c9737d7793ccdb185d8d3caedda0da915104e405 | licenses: ["BSD-Source-Code"]
max_stars_count: 14 (2018-09-20T23:01:27.000Z to 2021-05-25T11:05:09.000Z)
max_issues_count: 44 (2018-09-15T03:05:50.000Z to 2022-03-22T02:46:24.000Z)
max_forks_count: 13 (2018-10-02T11:45:24.000Z to 2021-08-22T18:41:44.000Z)
content:
#import string
############################################################################
## 0) overlay id (not used atm, but can allow searches in future. just put something unique)
## 1) overlay type (defined in xgm_mod_options_header)
## 2) overlay type specific parameters (e.g. for number box, it can be lower/upper range, for cbobox, it would be the cbo items etc)
## a) xgm_ov_numberbox : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## b) xgm_ov_combolabel/xgm_ov_combobutton : list of combo items. e.g. ["option1", "option2", "option3"]
## c) xgm_ov_slider : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## d) xgm_ov_checkbox : not used fttb. just leave empty. e.g. []
## 3) text label
## 4) reserved for text label flags
## 5) description (unused for now. may be used for stuff like tooltip in future)
## 6) reserved for description flags
## 7) initialization op block. Used for updating the overlay values from game values. Must assign the desired value to reg1.
## 8) update op block. Used for updating game values from overlay values. The overlay value is in reg1.
## 9) optional. reserved for option page id. unused for now. leave out for options using general page.
############################################################################
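## Minimal annotated example entry (illustrative only; "my_option_id" and
## "$g_my_option" are made-up names, not options that exist in this mod):
## ( "my_option_id", xgm_ov_checkbox, [],   # 0) id  1) overlay type  2) type params
##   "My Option:", 0,                       # 3) label  4) label flags
##   "What this option toggles.", 0,        # 5) description  6) description flags
##   [(assign, reg1, "$g_my_option")],      # 7) init: game value -> reg1
##   [(assign, "$g_my_option", reg1)],      # 8) update: reg1 -> game value
## ),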
mod_options = [
("camp_fuck_setting", xgm_ov_combolabel, ["Disabled", "Consensual Only", "All Enabled"], "Sexual Content:", 0,
"Settings for sexual content in game.", 0,
[(try_begin),
(eq, "$g_sexual_content", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_sexual_content", 1),
(assign, reg1, 1),
(else_try),
(eq, "$g_sexual_content", 2),
(assign, reg1, 2),
(try_end),],
[(try_begin),
(eq, reg1, 0),
(assign, "$g_sexual_content", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_sexual_content", 1),
(else_try),
(eq, reg1, 2),
(assign, "$g_sexual_content", 2),
(try_end),
],
),
("dplmc_woman_prejudice", xgm_ov_combolabel, ["Historical", "Tolerant", "Utopian"], "Diplomacy - Prejudice:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_disable_condescending_comments"),
],
[
(assign, "$g_disable_condescending_comments", reg1),
],
),
("camp_polygamy", xgm_ov_checkbox, [], "Polygamy:", 0,
"Toggles polygamy settings", 0,
[(try_begin),
(eq, "$g_polygamy", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_polygamy", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_polygamy", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_polygamy", 1),
(try_end),
],
),
( "camp_nohomobro", xgm_ov_checkbox , [],
"Disable Gay:", 0,
"Disables gay scenes.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_nohomo"),
],
[ # update block (value is in reg1)
(assign, "$g_nohomo", reg1),
],
),
( "camp_no_dancers", xgm_ov_checkbox , [],
"Feast Dancers:", 0,
"Toggles dancers during feasts.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_feast_dancers"),
],
[ # update block (value is in reg1)
(assign, "$g_feast_dancers", reg1),
],
),
("camp_dark_hunters", xgm_ov_checkbox, [], "Black Khergits and Dark Hunters:", 0,
"Settings for Dark Hunters and Black Khergits.", 0,
[
(try_begin),
(eq, "$g_dark_hunters_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_dark_hunters_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dark_hunters_enabled", 0),
(assign, ":removed", 0),
(try_for_parties, ":party_no"),
(party_get_template_id, ":ptid", ":party_no"),
(this_or_next|eq, ":ptid", "pt_dark_hunters"),
(eq, ":ptid", "pt_black_khergit_raiders"),
(remove_party, ":party_no"),
(val_add, ":removed", 1),
(try_end),
(assign, reg0, ":removed"),
(display_message, "@{reg0} parties removed from the map."),
(else_try),
(eq, reg1, 1),
(assign, "$g_dark_hunters_enabled", 1),
(try_end),
],
),
( "keep_companions", xgm_ov_checkbox , [],
"Keep Companions:", 0,
"Setting for keeping companions after defeat", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_keep_companions"),
],
[ # update block (value is in reg1)
(assign, "$g_keep_companions", reg1),
],
),
( "disable_complaints", xgm_ov_checkbox , [],
"Disable Complaints:", 0,
"Setting for disabling companion complaints", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_npc_complaints"),
],
[ # update block (value is in reg1)
(assign, "$disable_npc_complaints", reg1),
],
),
( "disable_bodyguard", xgm_ov_checkbox , [],
"Disable Bodyguards:", 0,
"Setting for disabling companions as bodyguards", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_bodyguards"),
],
[ # update block (value is in reg1)
(assign, "$disable_bodyguards", reg1),
],
),
("camp_realistic_wounding", xgm_ov_checkbox, [], "Realistic Casualties:", 0,
"Toggles realistic wounding for other damage types", 0,
[(try_begin),
(eq, "$g_realistic_wounding", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_realistic_wounding", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_realistic_wounding", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_realistic_wounding", 1),
(try_end),
],
),
("enable_shield_bash", xgm_ov_combolabel, ["Disabled", "Player Only", "All Combatants"], "Shield Bash:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_enable_shield_bash"),
],
[
(assign, "$g_enable_shield_bash", reg1),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "dplmc_horsespeed", xgm_ov_checkbox , [],
"Diplomacy - Horse Speed:", 0,
"Setting for Diplomacy's horse speed changes", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_horse_speed"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_horse_speed",1,reg1),
],
),
( "dplmc_battlecontinue", xgm_ov_checkbox , [],
"Diplomacy - Battle Continuation:", 0,
"Setting for Diplomacy's battle continuation", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_battle_continuation"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_battle_continuation",1,reg1),
],
),
( "dplmc_disguise", xgm_ov_checkbox , [],
"Diplomacy - Disguise System:", 0,
"Setting for Diplomacy's disguise system", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_dplmc_player_disguise"),
],
[ # update block (value is in reg1)
(assign, "$g_dplmc_player_disguise", reg1),
],
),
( "dplmc_terrain_advantage", xgm_ov_checkbox , [],
"Diplomacy - Autocalc Terrain Advantage:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(try_end),
],
),
( "dplmc_lord_recycling", xgm_ov_checkbox , [],
"Diplomacy - Returning From Exile:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(try_end),
],
),
("dplmc_ai_changes_a", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - AI Changes:", 0,
"Setting for Diplomacy's AI changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(try_end),
],
),
("dplmc_gold_changes", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - Economy Changes:", 0,
"Setting for Diplomacy's economy changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
("minimap_setting", xgm_ov_combolabel, ["Compass Style", "Small Minimap", "Medium Minimap", "Large Minimap", "Disabled"], "Battle Minimap Overlay:", 0,
"Setting for the minimap.", 0,
[
(try_begin),
(eq, "$g_minimap_style", -1),
(assign, reg1, 4),
(else_try),
(assign, reg1, "$g_minimap_style"),
(try_end),
],
[
(try_begin),
(eq, reg1, 4),
(assign, "$g_minimap_style", -1),
(else_try),
(assign, "$g_minimap_style", reg1),
(try_end),
],
),
("minimap_setting", xgm_ov_combolabel, ["Disabled", "Only Allies", "Only Enemies", "All Troops"], "Troop HP Bars:", 0,
"Setting for troop HP bars.", 0,
[
(try_begin), # Ally
(eq, "$g_hp_bar_enemy", 0),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 1),
(else_try), # Enemy
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 0),
(assign, reg1, 2),
(else_try), # Both
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 3),
(else_try), # None
(assign, reg1, 0),
(try_end),
],
[
(try_begin), # Ally
(eq, reg1, 1),
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 1),
(else_try), # Enemy
(eq, reg1, 2),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 0),
(else_try), # Both
(eq, reg1, 3),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 1),
(else_try), # None
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 0),
(try_end),
],
),
("minimap_setting", xgm_ov_numberbox, [3,81], "HP Bar Distance Limit:", 0,
"Setting for the HP Bars.", 0,
[
(assign, reg1, "$g_hp_bar_dis_limit"),
],
[
(assign, "$g_hp_bar_dis_limit", reg1),
],
),
("camp_troop_ratio_bar", xgm_ov_checkbox, [], "Troop ratio bar:", 0,
"Toggles troop ratio bar", 0,
[(try_begin),
(eq, "$g_troop_ratio_bar", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_troop_ratio_bar", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_troop_ratio_bar", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_troop_ratio_bar", 1),
(try_end),
],
),
("camp_decapitation", xgm_ov_checkbox, [], "Decapitation:", 0,
"Toggles Decapitation", 0,
[(try_begin),
(eq, "$g_decapitation_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_decapitation_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_decapitation_enabled", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_decapitation_enabled", 1),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "op_cheatmode", xgm_ov_checkbox , [],
"Cheat mode:", 0,
"This sets the in-game cheat mode", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$cheat_mode"),
],
[ # update block (value is in reg1)
(assign, "$cheat_mode", reg1),
],
),
] # mod_options
# TODO: add option pages here
# collation of all *_mod_options.py from active mods
# import and merge related variables from all {active_mod}_mod_options.py for all active mods
#try:
# from modmerger_options import options, mods_active
# from modmerger import mod_get_process_order, mod_is_active
# from util_common import add_objects
# modcomp_name = "mod_options"
# var_list = ["mod_options",]
#from modmerger import modmerge
#modmerge(var_set)
# mod_process_order = mod_get_process_order(modcomp_name)
# vars_to_import= ["mod_options"]
# for x in mod_process_order:
# if(mod_is_active(x) and x <> "xgm_mod_options"): # must exclude this file since we are using this file as base
# try:
#mergefn_name = "modmerge_%s"%(modcomp_name)
# target_module_name = "%s_%s"%(x,modcomp_name)
# _temp = __import__( target_module_name , globals(), locals(), vars_to_import,-1)
# logger.info("Merging objects for component \"%s\" from mod \"%s\"..."%(modcomp_name,x))
#
# add_objects(mod_options, _temp.mod_options) # import from target module.
#
# # TODO: collect option pages
# except ImportError:
# errstring = "Failed importing for component \"%s\" for mod \"%s\"." % (modcomp_name, x)
# logger.debug(errstring)
# else:
# errstring = "Mod \"%s\" not active for Component \"%s\"." % (x, modcomp_name)
# logger.debug(errstring)
#except:
# raise
# collation end
# At this point, mod_options will contain the list of all mod_options specified.
## utility functions
# helper wrapper to access mod_options
## class ModOptionWrapper
# this function will compute the total height required for a list of mod_options.
## mod_options_get_total_height
| 30.716828
| 155
| 0.566928
|
from header_common import *
from header_presentations import *
from header_mission_templates import *
from ID_meshes import *
from header_operations import *
from header_triggers import *
from module_constants import *
#import string
from xgm_mod_options_header import *
############################################################################
## 0) overlay id (not used atm, but can allow searches in future. just put something unique)
## 1) overlay type (defined in xgm_mod_options_header)
## 2) overlay type specific parameters (e.g. for number box, it can be lower/upper range, for cbobox, it would be the cbo items etc)
## a) xgm_ov_numberbox : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## b) xgm_ov_combolabel/xgm_ov_combobutton : list of combo items. e.g. ["option1", "option2", "option3"]
## c) xgm_ov_slider : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## d) xgm_ov_checkbox : not used fttb. just leave empty. e.g. []
## 3) text label
## 4) reserved for text label flags
## 5) description (unused for now. may be used for stuff like tooltip in future)
## 6) reserved for description flags
## 7) initialization op block. Used for updating the overlay values from game values. Must assign the desired value to reg1.
## 8) update op block. Used for updating game values from overlay values. The overlay value is in reg1.
## 9) optional. reserved for option page id. unused for now. leave out for options using general page.
############################################################################
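## Illustrative example (not part of the original list): a minimal checkbox
## entry in the format documented above. The "$g_example_flag" game variable
## and the "example_flag" id are hypothetical.
##
##   ("example_flag", xgm_ov_checkbox, [], "Example flag:", 0,
##    "Toggles the hypothetical example flag.", 0,
##    [ (assign, reg1, "$g_example_flag") ],    # 7) init: overlay <- game value
##    [ (assign, "$g_example_flag", reg1) ],    # 8) update: game value <- overlay
##   ),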
mod_options = [
("camp_fuck_setting", xgm_ov_combolabel, ["Disabled", "Consensual Only", "All Enabled"], "Sexual Content:", 0,
"Settings for sexual content in game.", 0,
[(try_begin),
(eq, "$g_sexual_content", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_sexual_content", 1),
(assign, reg1, 1),
(else_try),
(eq, "$g_sexual_content", 2),
(assign, reg1, 2),
(try_end),],
[(try_begin),
(eq, reg1, 0),
(assign, "$g_sexual_content", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_sexual_content", 1),
(else_try),
(eq, reg1, 2),
(assign, "$g_sexual_content", 2),
(try_end),
],
),
("dplmc_woman_prejudice", xgm_ov_combolabel, ["Historical", "Tolerant", "Utopian"], "Diplomacy - Prejudice:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_disable_condescending_comments"),
],
[
(assign, "$g_disable_condescending_comments", reg1),
],
),
("camp_polygamy", xgm_ov_checkbox, [], "Polygamy:", 0,
"Toggles polygamy settings", 0,
[(try_begin),
(eq, "$g_polygamy", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_polygamy", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_polygamy", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_polygamy", 1),
(try_end),
],
),
( "camp_nohomobro", xgm_ov_checkbox , [],
"Disable Gay:", 0,
"Disables gay scenes.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_nohomo"),
],
[ # update block (value is in reg1)
(assign, "$g_nohomo", reg1),
],
),
( "camp_no_dancers", xgm_ov_checkbox , [],
"Feast Dancers:", 0,
"Toggles dancers during feasts.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_feast_dancers"),
],
[ # update block (value is in reg1)
(assign, "$g_feast_dancers", reg1),
],
),
("camp_dark_hunters", xgm_ov_checkbox, [], "Black Khergits and Dark Hunters:", 0,
"Settings for Dark Hunters and Black Khergits.", 0,
[
(try_begin),
(eq, "$g_dark_hunters_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_dark_hunters_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dark_hunters_enabled", 0),
(assign, ":removed", 0),
(try_for_parties, ":party_no"),
(party_get_template_id, ":ptid", ":party_no"),
(this_or_next|eq, ":ptid", "pt_dark_hunters"),
(eq, ":ptid", "pt_black_khergit_raiders"),
(remove_party, ":party_no"),
(val_add, ":removed", 1),
(try_end),
(assign, reg0, ":removed"),
(display_message, "@{reg0} parties removed from the map."),
(else_try),
(eq, reg1, 1),
(assign, "$g_dark_hunters_enabled", 1),
(try_end),
],
),
( "keep_companions", xgm_ov_checkbox , [],
"Keep Companions:", 0,
"Setting for keeping companions after defeat", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_keep_companions"),
],
[ # update block (value is in reg1)
(assign, "$g_keep_companions", reg1),
],
),
( "disable_complaints", xgm_ov_checkbox , [],
"Disable Complaints:", 0,
"Setting for disabling companion complaints", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_npc_complaints"),
],
[ # update block (value is in reg1)
(assign, "$disable_npc_complaints", reg1),
],
),
( "disable_bodyguard", xgm_ov_checkbox , [],
"Disable Bodyguards:", 0,
"Setting for disabling companions as bodyguards", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_bodyguards"),
],
[ # update block (value is in reg1)
(assign, "$disable_bodyguards", reg1),
],
),
("camp_realistic_wounding", xgm_ov_checkbox, [], "Realistic Casualties:", 0,
"Toggles realistic wounding for other damage types", 0,
[(try_begin),
(eq, "$g_realistic_wounding", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_realistic_wounding", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_realistic_wounding", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_realistic_wounding", 1),
(try_end),
],
),
("enable_shield_bash", xgm_ov_combolabel, ["Disabled", "Player Only", "All Combatants"], "Shield Bash:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_enable_shield_bash"),
],
[
(assign, "$g_enable_shield_bash", reg1),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "dplmc_horsespeed", xgm_ov_checkbox , [],
"Diplomacy - Horse Speed:", 0,
"Setting for Diplomacy's horse speed changes", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_horse_speed"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_horse_speed",1,reg1),
],
),
( "dplmc_battlecontinue", xgm_ov_checkbox , [],
"Diplomacy - Battle Continuation:", 0,
"Setting for Diplomacy's battle continuation", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_battle_continuation"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_battle_continuation",1,reg1),
],
),
( "dplmc_disguise", xgm_ov_checkbox , [],
"Diplomacy - Disguise System:", 0,
"Setting for Diplomacy's disguise system", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_dplmc_player_disguise"),
],
[ # update block (value is in reg1)
(assign, "$g_dplmc_player_disguise", reg1),
],
),
( "dplmc_terrain_advantage", xgm_ov_checkbox , [],
"Diplomacy - Autocalc Terrain Advantage:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(try_end),
],
),
( "dplmc_lord_recycling", xgm_ov_checkbox , [],
"Diplomacy - Returning From Exile:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(try_end),
],
),
("dplmc_ai_changes_a", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - AI Changes:", 0,
"Setting for Diplomacy's AI changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(try_end),
],
),
("dplmc_gold_changes", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - Economy Changes:", 0,
"Setting for Diplomacy's economy changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
("minimap_setting", xgm_ov_combolabel, ["Compass Style", "Small Minimap", "Medium Minimap", "Large Minimap", "Disabled"], "Battle Minimap Overlay:", 0,
"Setting for the minimap.", 0,
[
(try_begin),
(eq, "$g_minimap_style", -1),
(assign, reg1, 4),
(else_try),
(assign, reg1, "$g_minimap_style"),
(try_end),
],
[
(try_begin),
(eq, reg1, 4),
(assign, "$g_minimap_style", -1),
(else_try),
(assign, "$g_minimap_style", reg1),
(try_end),
],
),
("minimap_setting", xgm_ov_combolabel, ["Disabled", "Only Allies", "Only Enemies", "All Troops"], "Troop HP Bars:", 0,
"Setting for troop HP bars.", 0,
[
(try_begin), # Ally
(eq, "$g_hp_bar_enemy", 0),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 1),
(else_try), # Enemy
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 0),
(assign, reg1, 2),
(else_try), # Both
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 3),
(else_try), # None
(assign, reg1, 0),
(try_end),
],
[
(try_begin), # Ally
(eq, reg1, 1),
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 1),
(else_try), # Enemy
(eq, reg1, 2),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 0),
(else_try), # Both
(eq, reg1, 3),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 1),
(else_try), # None
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 0),
(try_end),
],
),
("minimap_setting", xgm_ov_numberbox, [3,81], "HP Bar Distance Limit:", 0,
"Setting for the HP Bars.", 0,
[
(assign, reg1, "$g_hp_bar_dis_limit"),
],
[
(assign, "$g_hp_bar_dis_limit", reg1),
],
),
("camp_troop_ratio_bar", xgm_ov_checkbox, [], "Troop ratio bar:", 0,
"Toggles troop ratio bar", 0,
[(try_begin),
(eq, "$g_troop_ratio_bar", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_troop_ratio_bar", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_troop_ratio_bar", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_troop_ratio_bar", 1),
(try_end),
],
),
("camp_decapitation", xgm_ov_checkbox, [], "Decapitation:", 0,
"Toggles Decapitation", 0,
[(try_begin),
(eq, "$g_decapitation_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_decapitation_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_decapitation_enabled", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_decapitation_enabled", 1),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "op_cheatmode", xgm_ov_checkbox , [],
"Cheat mode:", 0,
"This sets the in-game cheat mode", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$cheat_mode"),
],
[ # update block (value is in reg1)
(assign, "$cheat_mode", reg1),
],
),
] # mod_options
# TODO: add option pages here
# collation of all *_mod_options.py from active mods
# import and merge related variables from all {active_mod}_mod_options.py for all active mods
#try:
# from modmerger_options import options, mods_active
# from modmerger import mod_get_process_order, mod_is_active
# from util_common import add_objects
# modcomp_name = "mod_options"
# var_list = ["mod_options",]
#from modmerger import modmerge
#modmerge(var_set)
# mod_process_order = mod_get_process_order(modcomp_name)
# vars_to_import= ["mod_options"]
# for x in mod_process_order:
    # if(mod_is_active(x) and x != "xgm_mod_options"): # must exclude this file since we are using this file as base
# try:
#mergefn_name = "modmerge_%s"%(modcomp_name)
# target_module_name = "%s_%s"%(x,modcomp_name)
# _temp = __import__( target_module_name , globals(), locals(), vars_to_import,-1)
# logger.info("Merging objects for component \"%s\" from mod \"%s\"..."%(modcomp_name,x))
#
# add_objects(mod_options, _temp.mod_options) # import from target module.
#
# # TODO: collect option pages
# except ImportError:
# errstring = "Failed importing for component \"%s\" for mod \"%s\"." % (modcomp_name, x)
# logger.debug(errstring)
# else:
# errstring = "Mod \"%s\" not active for Component \"%s\"." % (x, modcomp_name)
# logger.debug(errstring)
#except:
# raise
# collation end
# At this point, mod_options will contain the list of all mod_options specified.
## utility functions
from util_wrappers import *
# helper wrapper to access mod_options
class ModOptionWrapper(BaseWrapper):
def __init__(self, _data):
# verify _data
        if not isinstance(_data, tuple) or len(_data) < 2:
            raise ValueError("ModOptionWrapper: wrapped data must be a tuple of length >= 2.")
        BaseWrapper.__init__(self, _data)
def GetId(self):
return self.data[0]
def GetType(self):
return self.data[1]
    def GetParameters(self):
        if len(self.data) > 2:
            return self.data[2]
        return None
    def GetParameter(self, i):
        if len(self.data) > 2:
            return self.data[2][i]
        return None
    def GetTextLabel(self):
        if len(self.data) > 3:
            return self.data[3]
        return None
    def GetTextLabelFlags(self):
        if len(self.data) > 4:
            return self.data[4]
        return None
    def GetDescription(self):
        if len(self.data) > 5:
            return self.data[5]
        return None
    def GetDescriptionFlags(self):
        if len(self.data) > 6:
            return self.data[6]
        return None
    def GetInitializeBlock(self):
        if len(self.data) > 7:
            return OpBlockWrapper(self.data[7])
        return None
    def GetUpdateBlock(self):
        if len(self.data) > 8:
            return OpBlockWrapper(self.data[8])
        return None
def GetHeight(self):
if self.GetType() == xgm_ov_line:
return xgm_mod_options_line_height
elif self.GetType() in [xgm_ov_checkbox, xgm_ov_numberbox, xgm_ov_combolabel]:
return xgm_mod_options_property_height
return 0 # no other types supported
## class ModOptionWrapper
# this function will compute the total height required for a list of mod_options.
def mod_options_get_total_height(_mod_options = mod_options):
height = 0
for x in _mod_options:
aModOption = ModOptionWrapper(x)
height += aModOption.GetHeight()
# for x in _mod_options:
    return height
## mod_options_get_total_height
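## Illustrative usage (not part of the original file): size a presentation
## box from the collated options via the wrapper accessors above. Plain
## Python 2 print statements, matching the module system's dialect.
## for x in mod_options:
##     opt = ModOptionWrapper(x)
##     print "%s: height %d" % (opt.GetId(), opt.GetHeight())
## print "total height:", mod_options_get_total_height()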
| 0
| 0
| 0
| 1,597
| 0
| 212
| 0
| 86
| 244
|
8abd70df157d14db679e659f636c0cd688861cb3
| 6,182
|
py
|
Python
|
examples/pde/utilities3.py
|
mkhodak/relax
|
f6b5a318d74fc1209ba67ec95d2118698194f9c5
|
[
"MIT"
] | 11
|
2021-10-01T17:23:18.000Z
|
2022-03-31T22:10:36.000Z
|
examples/pde/utilities3.py
|
mkhodak/relax
|
f6b5a318d74fc1209ba67ec95d2118698194f9c5
|
[
"MIT"
] | null | null | null |
examples/pde/utilities3.py
|
mkhodak/relax
|
f6b5a318d74fc1209ba67ec95d2118698194f9c5
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
# normalization, pointwise gaussian
# normalization, Gaussian
# normalization, scaling by range
#loss function with rel/abs Lp loss
# A simple feedforward neural network
| 26.761905
| 113
| 0.550793
|
import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
    def _load_file(self):
        try:
            self.data = scipy.io.loadmat(self.file_path)
            self.old_mat = True
        except (ValueError, NotImplementedError):
            # MATLAB v7.3 files are HDF5 containers that scipy cannot read;
            # fall back to h5py, opened read-only
            self.data = h5py.File(self.file_path, 'r')
            self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
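            # h5py returns MATLAB arrays with reversed (column-major) axis
            # order, so flip the axes back to the MATLAB orientation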
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
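# Illustrative usage (not part of the original file; the path and field name
# are hypothetical):
#   reader = MatReader('data/burgers_data.mat')
#   a = reader.read_field('a')   # float32 torch tensor by default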
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
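# Illustrative usage (not part of the original file): fit on training data,
# encode inputs, then decode model outputs back to physical units.
#   y_normalizer = UnitGaussianNormalizer(y_train)
#   y_train_enc = y_normalizer.encode(y_train)
#   y_pred = y_normalizer.decode(model_output)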
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        #Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
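# Illustrative usage (not part of the original file): calling the object
# computes the mean relative Lp error over the batch.
#   myloss = LpLoss(d=2, p=2, size_average=True)
#   err = myloss(prediction, target)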
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
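# Illustrative usage (not part of the original file): a small MLP with two
# hidden layers of width 128 and ReLU nonlinearities.
#   net = DenseNet([64, 128, 128, 1], nn.ReLU)
#   out = net(torch.randn(16, 64))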
| 0
| 0
| 0
| 5,588
| 0
| 0
| 0
| -19
| 198
|
15b531407df3e093f666b046edd03aed1f14e76a
| 4,874
|
py
|
Python
|
book_search/models.py
|
drogers141/book-search
|
0745eb3b25023a44da4c6e7fc4d96de086549f04
|
[
"MIT"
] | null | null | null |
book_search/models.py
|
drogers141/book-search
|
0745eb3b25023a44da4c6e7fc4d96de086549f04
|
[
"MIT"
] | null | null | null |
book_search/models.py
|
drogers141/book-search
|
0745eb3b25023a44da4c6e7fc4d96de086549f04
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
def extract_author_and_title(metadata: dict) -> (str, str):
"""Try to get the author and title from the metadata.
Return empty strings if not found."""
author, title = '', ''
for key in ('Author', 'author', 'dc:creator', 'creator', 'meta:author'):
if key in metadata:
author = metadata[key]
break
for key in ('Title', 'title', 'dc:title', 'meta:title'):
if key in metadata:
title = metadata[key]
break
return author, title
| 38.078125
| 115
| 0.649569
|
from io import StringIO
import re
from pathlib import Path
import logging
from django.db import models
from django.conf import settings
from bs4 import BeautifulSoup
from tika import parser
logger = logging.getLogger(__name__)
class TikaParseError(RuntimeError):
"""Raised when the conversion of a document into html by Tika fails."""
def extract_author_and_title(metadata: dict) -> (str, str):
"""Try to get the author and title from the metadata.
Return empty strings if not found."""
author, title = '', ''
for key in ('Author', 'author', 'dc:creator', 'creator', 'meta:author'):
if key in metadata:
author = metadata[key]
break
for key in ('Title', 'title', 'dc:title', 'meta:title'):
if key in metadata:
title = metadata[key]
break
return author, title
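# Illustrative behaviour (not part of the original file): metadata key names
# vary by document format, which is why several candidates are probed, e.g.
#   extract_author_and_title({'dc:creator': 'A. Author', 'dc:title': 'A Title'})
#   returns ('A. Author', 'A Title').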
class ParentDocument(models.Model):
"""Each book/file is represented here.
"""
# source document's full path
filepath = models.CharField(unique=True, max_length=1024)
# try to get the author and title from the document metadata
# but it's not always there
author = models.CharField(max_length=512, blank=True, default='')
title = models.CharField(max_length=512, blank=True, default='')
def __str__(self):
return f"id: {self.id} {Path(self.filepath).name}"
def convert_to_html_child_pages(self, clean=True):
"""Convert book/file at filepath to html pages.
This constructs a ChildPage object for each page of the document.
Pages are determined by Tika's parsing.
Populates author and title if available in the metadata.
:param clean - if True clean non-ascii whitespace
"""
        try_count, successful_parse = 0, False
        while try_count < settings.TIKA_PARSE_MAX_RETRY:
            if settings.TIKA_CONFIG_FILE:
                data = parser.from_file(str(self.filepath), xmlContent=True, config_path=settings.TIKA_CONFIG_FILE)
            else:
                data = parser.from_file(str(self.filepath), xmlContent=True)
            if data['status'] == 200:
                successful_parse = True
                break
            try_count += 1
        if not successful_parse:
            logger.error('Failed to parse file: %s', self.filepath)
            raise TikaParseError(f'Tika returned a non-200 status for {self.filepath}')
author, title = extract_author_and_title(data['metadata'])
self.author, self.title = author, title
self.save()
soup = BeautifulSoup(data['content'], features='lxml')
# convert all pages successfully before creating children
pages = []
for i, content in enumerate(soup.find_all('div', attrs={'class': 'page'})):
_buffer = StringIO()
_buffer.write(str(content))
parsed_content = parser.from_buffer(_buffer.getvalue(), xmlContent=True)
text = parsed_content['content'].strip()
if clean:
text = re.sub(r' +\n', '\n', parsed_content['content'].strip().replace('\xa0', ' '))
# remove the html head from the doc so it doesn't cause any garbage in ES highlights
page_soup = BeautifulSoup(text, features='lxml')
page_soup.head.extract()
pages.append(page_soup.prettify())
for i, html in enumerate(pages):
child = ChildPage(parent=self, page_number=i+1, html_content=html,
author=self.author, title=self.title,
parent_doc_id=self.id)
if i == len(pages) - 1:
child.is_last_page = True
child.save()
class ChildPage(models.Model):
"""Each page of a book/file is represented by a ChildPage.
With the initial implementation, this model will also have the html_content
field filled with the full text of the page. This is very inefficient
space-wise as you are storing the full text in the database as well as in
Elasticsearch. But it allows reading the text online and being able to
navigate directly from the search to the location in the text.
The reason that it is mandatory now is due to using django-elasticsearch-dsl.
In the future, we can get rid of django-es-dsl and then allow an option to
not store the full text to save space.
"""
parent = models.ForeignKey(ParentDocument, on_delete=models.CASCADE)
page_number = models.IntegerField()
html_content = models.TextField()
is_last_page = models.BooleanField(default=False)
# need to duplicate keys from parent so django-elasticsearch-dsl can access them
author = models.CharField(max_length=512)
title = models.CharField(max_length=512)
parent_doc_id = models.IntegerField()
def url(self):
return f"/{self.parent_doc_id}/{self.page_number}/"
def __str__(self):
return (f"{self.author} - {self.title} - page {self.page_number}")
| 0
| 0
| 0
| 4,060
| 0
| 0
| 0
| 21
| 224
|
7b9c55eaa5d05bc09b14fe1a2ce8e97213b9c0ef
| 2,284
|
py
|
Python
|
bminf/core/context.py
|
AdamBear/BMInf
|
8e650dc30e3ed9d7d628153b0a4dbd76d97ea948
|
[
"Apache-2.0"
] | 206
|
2021-09-23T08:55:29.000Z
|
2022-03-26T13:15:41.000Z
|
bminf/core/context.py
|
AdamBear/BMInf
|
8e650dc30e3ed9d7d628153b0a4dbd76d97ea948
|
[
"Apache-2.0"
] | 24
|
2021-09-24T05:54:39.000Z
|
2022-03-25T01:44:49.000Z
|
bminf/core/context.py
|
AdamBear/BMInf
|
8e650dc30e3ed9d7d628153b0a4dbd76d97ea948
|
[
"Apache-2.0"
] | 34
|
2021-09-26T02:17:29.000Z
|
2022-03-28T07:01:54.000Z
|
import logging
logger = logging.getLogger(__name__)
| 30.453333
| 91
| 0.612522
|
from typing import List, Tuple, Type
from .tensor import Tensor
from .device import Device
from .allocator import Allocator
from cpm_kernels.library import cudart
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Context:
def __init__(self,
device_idx : List[int],
allocators : List[Allocator]
) -> None:
assert len(device_idx) > 0, "device_idx must be a non-empty list"
assert len(device_idx) == len(allocators)
self.__devices = [
Device(idx) for idx in device_idx
]
self.__calc_streams = {}
for d in self.__devices:
with d:
self.__calc_streams[d.idx] = cudart.cudaStreamCreate().value
self.__allocators = {
device_idx : allocator for device_idx, allocator in zip(device_idx, allocators)
}
    def allocate(self, shape : List[int], dtype : np.dtype) -> Tensor:
device = Device(cudart.cudaGetDevice())
allocator = self.__allocators[device.idx]
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
nbytes = int(np.prod(shape) * itemsize)
mem = allocator.allocate(nbytes, self.__calc_streams[device.idx])
return Tensor(mem, shape, dtype)
def free(self, tensor : Tensor):
allocator = self.__allocators[tensor.device_id]
tensor._released = True
allocator.free(tensor._memory)
def device(self, device_idx : int) -> Device:
return self.__devices[device_idx]
@property
def current_stream(self):
device_idx = cudart.cudaGetDevice()
return self.__calc_streams[device_idx]
def memory_stats(self):
ret = {}
for device_idx, allocator in self.__allocators.items():
ret[device_idx] = allocator.memory_stats()
return ret
def free_all(self):
for _, allocator in self.__allocators.items():
allocator.free_all()
def __del__(self):
try:
self.free_all()
for stream in self.__calc_streams.values():
cudart.cudaStreamDestroy(stream)
except Exception:
# logger.exception("Exception in Context.__del__")
pass
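# Illustrative usage (not part of the original file): requires a working CUDA
# device, and MyAllocator is a hypothetical implementation of the Allocator
# interface imported above.
#   ctx = Context([0], [MyAllocator()])
#   t = ctx.allocate((1024, 1024), np.float16)
#   ctx.free(t)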
| 0
| 109
| 0
| 1,919
| 0
| 0
| 0
| 50
| 155
|
d79f6521598d0b35ad0abac23c970dfac3a65db6
| 3,999
|
py
|
Python
|
code/python_scripts/dlinked_list.py
|
lukaschoebel/LUMOS
|
5d084e487d937957896a58ef3ab719f86074fa9a
|
[
"MIT"
] | null | null | null |
code/python_scripts/dlinked_list.py
|
lukaschoebel/LUMOS
|
5d084e487d937957896a58ef3ab719f86074fa9a
|
[
"MIT"
] | null | null | null |
code/python_scripts/dlinked_list.py
|
lukaschoebel/LUMOS
|
5d084e487d937957896a58ef3ab719f86074fa9a
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
dlinkedList = DoublyLinkedList(10)
dlinkedList.append(20)
dlinkedList.append(30)
dlinkedList.prepend(-5)
dlinkedList.prepend(-8)
dlinkedList.insert(value=12, index=2)
dlinkedList.print_list()
dlinkedList.remove(index=5)
dlinkedList.insert(value=30, index=4)
dlinkedList.append(55)
dlinkedList.print_list()
dlinkedList.print_head()
dlinkedList.print_tail()
| 26.483444
| 85
| 0.523631
|
class Node:
def __init__(self, value):
self.value = value
self.prev = None
self.next = None
class DoublyLinkedList:
def __init__(self, value):
self.head = Node(value)
self.tail = self.head
self.length = 1
def append(self, value):
''' Adds a value to the end of a doubly linked list
type: value
'''
self.length += 1
postNode = Node(value)
# Wire the postNode
self.tail.next = postNode
postNode.prev = self.tail
# Sets new tail node
self.tail = postNode
def prepend(self, value):
''' Adds a value to the beginning of a doubly linked list
type: value
'''
self.length += 1
preNode = Node(value)
# Wire the preNode
preNode.next = self.head
self.head.prev = preNode
# Sets new head node
self.head = preNode
def insert(self, value, index):
''' Inserts a value in the DLL at a provided index position
type: value
type: index: str
'''
        if index not in range(self.length):
print("ERROR! This index does not exist!")
return
elif index == 0:
self.prepend(value)
else:
self.length += 1
insertNode = Node(value)
currentNode = self.head
for position in range(self.length - 1):
if position == index - 1:
insertNode.next = currentNode.next
currentNode.next.prev = insertNode
insertNode.prev = currentNode
currentNode.next = insertNode
break
currentNode = currentNode.next
def remove(self, index):
''' Removes a node from a given index
type: index: int
'''
        if index not in range(self.length):
print("ERROR! This index does not exist!")
return
if index == 0:
# Remove head of the DLL
self.head = self.head.next
self.head.prev = None
elif index == self.length - 1:
# Remove tail of the DLL
self.tail = self.tail.prev
self.tail.next = None
else:
# Introduce a temporary node for
# traversing through the list
currentNode = self.head
for position in range(self.length - 1):
if position == index:
currentNode.prev.next = currentNode.next
currentNode.next.prev = currentNode.prev
break
currentNode = currentNode.next
# Decrease length of the list
self.length -= 1
def print_list(self):
'''
Print the linked list
'''
currentNode = self.head
print(f"<<<<<<< {self.length} >>>>>>>")
for index in range(self.length):
nextValue = currentNode.next.value if currentNode.next else 'None'
print(f"{index}: {currentNode.value} <-> {nextValue}")
currentNode = currentNode.next
print(f"<<<<<<<<.>>>>>>>>")
def print_head(self):
print(f">> head: {self.head.value}") if self.head else print(">> head: None")
def print_tail(self):
print(f">> tail: {self.tail.value}") if self.tail else print(">> tail: None")
if __name__ == "__main__":
dlinkedList = DoublyLinkedList(10)
dlinkedList.append(20)
dlinkedList.append(30)
dlinkedList.prepend(-5)
dlinkedList.prepend(-8)
dlinkedList.insert(value=12, index=2)
dlinkedList.print_list()
dlinkedList.remove(index=5)
dlinkedList.insert(value=30, index=4)
dlinkedList.append(55)
dlinkedList.print_list()
dlinkedList.print_head()
dlinkedList.print_tail()
| 0
| 0
| 0
| 3,502
| 0
| 0
| 0
| 0
| 45
|
7893b475e4bb1bb6f28c83e8b1af171635285c0f
| 843
|
py
|
Python
|
setup.py
|
BOLD-lab/abbreviator
|
aca379362f04033c7cd1c62ca50b68280f3799c7
|
[
"MIT"
] | null | null | null |
setup.py
|
BOLD-lab/abbreviator
|
aca379362f04033c7cd1c62ca50b68280f3799c7
|
[
"MIT"
] | null | null | null |
setup.py
|
BOLD-lab/abbreviator
|
aca379362f04033c7cd1c62ca50b68280f3799c7
|
[
"MIT"
] | null | null | null |
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
if os.environ.get('CI_COMMIT_TAG'):
version = os.environ['CI_COMMIT_TAG']
else:
version = "0.0.4"
setuptools.setup(
name="abbreviator",
version=version,
author="Stephanie Wagenaar",
author_email="[email protected]",
description="Abbreviate Long Sentences/Names based on hyphenation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/BOLD-lab/abbreviator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['pyphen>=0.11.0']
)
| 28.1
| 71
| 0.679715
|
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
if os.environ.get('CI_COMMIT_TAG'):
version = os.environ['CI_COMMIT_TAG']
else:
version = "0.0.4"
setuptools.setup(
name="abbreviator",
version=version,
author="Stephanie Wagenaar",
author_email="[email protected]",
description="Abbreviate Long Sentences/Names based on hyphenation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/BOLD-lab/abbreviator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['pyphen>=0.11.0']
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9c86682e5fb8a773190f40daabb31d80b79ab5ec
| 750
|
py
|
Python
|
week3/array_partition1.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
week3/array_partition1.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
week3/array_partition1.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
'''
QUESTION:
561. Array Partition I
Given an array of 2n integers, your task is to group these integers into n pairs of integer, say (a1, b1), (a2, b2), ..., (an, bn) which makes sum of min(ai, bi) for all i from 1 to n as large as possible.
Example 1:
Input: [1,4,3,2]
Output: 4
Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).
Note:
n is a positive integer, which is in the range of [1, 10000].
All the integers in the array will be in the range of [-10000, 10000].
'''
'''
Ideas/thoughts:
sort and return even nums
'''
| 25.862069
| 205
| 0.64
|
'''
QUESTION:
561. Array Partition I
Given an array of 2n integers, your task is to group these integers into n pairs of integer, say (a1, b1), (a2, b2), ..., (an, bn) which makes sum of min(ai, bi) for all i from 1 to n as large as possible.
Example 1:
Input: [1,4,3,2]
Output: 4
Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).
Note:
n is a positive integer, which is in the range of [1, 10000].
All the integers in the array will be in the range of [-10000, 10000].
'''
class Solution(object):
def arrayPairSum(self, nums):
        total = 0
        nums = sorted(nums)
        for i in range(0, len(nums), 2):
            total += nums[i]
        return total
'''
Ideas/thoughts:
sort and return even nums
'''
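# Worked example: [1,4,3,2] -> sorted [1,2,3,4]; the pairs are (1,2) and
# (3,4), and summing the even indices 0 and 2 gives 1 + 3 = 4, i.e.
# min(1,2) + min(3,4), matching Example 1 above.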
| 0
| 0
| 0
| 169
| 0
| 0
| 0
| 0
| 23
|
96c66bbd32ce6b5cd183eb7717b9022db143812a
| 4,881
|
py
|
Python
|
cisco_dnac_mac_lookup_runner.py
|
sarar0sa/Cisco_Mac_Lookup
|
b657b9ed0ecc60df008e02b6e008b09914cf07bf
|
[
"Apache-2.0"
] | null | null | null |
cisco_dnac_mac_lookup_runner.py
|
sarar0sa/Cisco_Mac_Lookup
|
b657b9ed0ecc60df008e02b6e008b09914cf07bf
|
[
"Apache-2.0"
] | null | null | null |
cisco_dnac_mac_lookup_runner.py
|
sarar0sa/Cisco_Mac_Lookup
|
b657b9ed0ecc60df008e02b6e008b09914cf07bf
|
[
"Apache-2.0"
] | null | null | null |
if __name__ == "__main__":
# Cool banner ofc
print("""
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKKNMMMMMMMMMMMMMMMMMMMMWWWMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMXl,co0NWMMMMMMMMMMMMMMXxc:xWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNd''',;cdkKNNNNNNWNKko,...oWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMO;''.....';ccllc:,. ...'kMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMWXOxdllllldxOXWMMMMMMMWNd'........ .... ..lNMMMMMMMMMMMMMMMMMMM
MMMMMMMMMN0o:,;;:clllc:;,';oONMMMMMWd'',,,'. ..... .dWMMMMMMMMMMMMMMMMMMM
MMMMMWWWO:,cdOO0K0O0K0K0klc:';dXMMMXl,'',;;. .'''''.lXMMMMMMMMMMMMMMMMMMM
MMMMMMXo;oKWM0dkkdddoo0xddkW0o',kWM0c...,lol;. . .ccoc..;cdXMMMMMMMMMMMMMMMMMMM
MMMMMXo:0MMMMWK0KXXKKKKX00NMMWK:'dWO,....';;' .. .;::,'',,lKMMMMMMMMMMMMMMMMMMM
MMMMWxc0MMMMWW0kOxxkKkk0OXWWWMMNl'kO:'........,:'........,,cKMMMMMMMMMMMMMMMMMMM
MMMMNdxWMMMMMWOxkdddxxdxkKNWWWWMK;cXd'........,,'''.....',,:kXMMMMMMMMMMMMMMMMMM
MMMMXokMMMMMMMNXXXNNXNX0KXWWWWWWNlcXXd,.'......'..'.','.'',;:oKWMMMMMMMMMMMMMMMM
MMMMXoxWMMMMMMM0olxkoxxkXWMMMMMMNloNWNd... ..................:0WMMMMMMMMMMMMMMM
MMMMNxcOWMMMMMMKkkkOOkOOXWMMMMMMO:kMMNl.. .. .l0WMMMMMMMMMMMMMM
MMMMM0:;kNWXXNKO0K0000KKXK0OONWKlcOWNd' .,oKWMMMMMMMMMMMMM
MMMMMWO;'lOxxOddooddlcdxxxlox0Oolo0W0,. .,;oKMMMMMMMMMMMMM
MMMMMMWKc..';dkOKX0KXXXK00Oxdl:;,,oOo. .'',oKWMMMMMMMMMMM
MMMMMMMMWOl,..';coddxxdol:,..,;:;..':;.. .. ..''';dKWWMMMMMMMM
MMMMMMMMMMMN0dl:;''.'',:cokO0KNWW0l..''. ... ..,,'':xXWMMMMMMM
MMMMMMMMMMMMMMMWWNXKKXXWMMMMMMMMMMNl... . ..,'',,:xNWMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0;.. .. .,;::,'cKMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWx' .,;'. ....... ..','.lXMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMK:. . .',. .. .. ....dWMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMk. .. ...cXMMM
""")
print("Starting script..")
CiscoDnacMacLookupRunner().main()
| 59.52439
| 110
| 0.562795
|
from time import sleep
import csv
from datetime import datetime
import mac_vendor_lookup
import cisco_service
class CiscoDnacMacLookupRunner():
headers = {'Content-Type': 'application/json'}
def __init__(self):
self.cisco = cisco_service.CiscoService()
self.mac_lookup = mac_vendor_lookup.MacLookup()
self.today = datetime.now()
self.filename = "mac_address_lookup_{}T{}Z.csv".format(str(self.today.date()), str(self.today.time()))
def main(self):
print("Obtaining token..")
token = self.cisco.get_dnac_jwt_token()
self.headers["X-Auth-Token"] = token
print("Fetching network devices..")
devices = self.cisco.get_network_devices(self.headers)
with open(self.filename, 'w') as csvfile:
print("MAC lookup as begun. This may take a while..")
print("Estimated run time: {} min".format(int(363/5)))
csvwriter = csv.writer(csvfile)
counter_rate_limit = 0
for item in devices:
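                # throttle: the enrichment endpoint is rate limited
                # (presumably ~5 requests/minute), so pause after every 5 calls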
if(counter_rate_limit == 5):
sleep(60)
counter_rate_limit = 0
details = self.cisco.get_device_enrichment_details(self.headers, item['macAddress'])
counter_rate_limit += 1
if 'links' in details['deviceDetails']['neighborTopology'][0]:
for detail in details['deviceDetails']['neighborTopology'][0]['links']:
if 'interfaceDetails' in detail and detail['id'] == "CLIENTS":
for client in detail['interfaceDetails']:
mac_address = client['clientMacAddress']
manufacturer = self.mac_lookup.lookup_mac_vendor(mac_address)
csvwriter.writerow([mac_address,manufacturer])
print("Ending script..")
print("See the result in {}".format(self.filename))
if __name__ == "__main__":
# Cool banner ofc
print("""
╔═╗╦╔═╗╔═╗╔═╗ ╔╦╗╔╗╔╔═╗╔═╗ ╔╦╗╔═╗╔═╗ ╦ ╔═╗╔═╗╦╔═╦ ╦╔═╗
║ ║╚═╗║ ║ ║ ║║║║║╠═╣║ ║║║╠═╣║ ║ ║ ║║ ║╠╩╗║ ║╠═╝
╚═╝╩╚═╝╚═╝╚═╝ ═╩╝╝╚╝╩ ╩╚═╝ ╩ ╩╩ ╩╚═╝ ╩═╝╚═╝╚═╝╩ ╩╚═╝╩
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKKNMMMMMMMMMMMMMMMMMMMMWWWMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMXl,co0NWMMMMMMMMMMMMMMXxc:xWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNd''',;cdkKNNNNNNWNKko,...oWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMO;''.....';ccllc:,. ...'kMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMWXOxdllllldxOXWMMMMMMMWNd'........ .... ..lNMMMMMMMMMMMMMMMMMMM
MMMMMMMMMN0o:,;;:clllc:;,';oONMMMMMWd'',,,'. ..... .dWMMMMMMMMMMMMMMMMMMM
MMMMMWWWO:,cdOO0K0O0K0K0klc:';dXMMMXl,'',;;. .'''''.lXMMMMMMMMMMMMMMMMMMM
MMMMMMXo;oKWM0dkkdddoo0xddkW0o',kWM0c...,lol;. . .ccoc..;cdXMMMMMMMMMMMMMMMMMMM
MMMMMXo:0MMMMWK0KXXKKKKX00NMMWK:'dWO,....';;' .. .;::,'',,lKMMMMMMMMMMMMMMMMMMM
MMMMWxc0MMMMWW0kOxxkKkk0OXWWWMMNl'kO:'........,:'........,,cKMMMMMMMMMMMMMMMMMMM
MMMMNdxWMMMMMWOxkdddxxdxkKNWWWWMK;cXd'........,,'''.....',,:kXMMMMMMMMMMMMMMMMMM
MMMMXokMMMMMMMNXXXNNXNX0KXWWWWWWNlcXXd,.'......'..'.','.'',;:oKWMMMMMMMMMMMMMMMM
MMMMXoxWMMMMMMM0olxkoxxkXWMMMMMMNloNWNd... ..................:0WMMMMMMMMMMMMMMM
MMMMNxcOWMMMMMMKkkkOOkOOXWMMMMMMO:kMMNl.. .. .l0WMMMMMMMMMMMMMM
MMMMM0:;kNWXXNKO0K0000KKXK0OONWKlcOWNd' .,oKWMMMMMMMMMMMMM
MMMMMWO;'lOxxOddooddlcdxxxlox0Oolo0W0,. .,;oKMMMMMMMMMMMMM
MMMMMMWKc..';dkOKX0KXXXK00Oxdl:;,,oOo. .'',oKWMMMMMMMMMMM
MMMMMMMMWOl,..';coddxxdol:,..,;:;..':;.. .. ..''';dKWWMMMMMMMM
MMMMMMMMMMMN0dl:;''.'',:cokO0KNWW0l..''. ... ..,,'':xXWMMMMMMM
MMMMMMMMMMMMMMMWWNXKKXXWMMMMMMMMMMNl... . ..,'',,:xNWMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0;.. .. .,;::,'cKMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWx' .,;'. ....... ..','.lXMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMK:. . .',. .. .. ....dWMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMk. .. ...cXMMM
""")
print("Starting script..")
CiscoDnacMacLookupRunner().main()
| 396
| 0
| 0
| 1,837
| 0
| 0
| 0
| 0
| 133
|
5c0946952b71037bb1f97ce65af023f47196a25c
| 35,474
|
py
|
Python
|
files/runs_small/cores_2/ocean.cont/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | 1
|
2021-03-08T03:39:23.000Z
|
2021-03-08T03:39:23.000Z
|
files/runs_small/cores_2/ocean.cont/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | null | null | null |
files/runs_small/cores_2/ocean.cont/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | null | null | null |
power = {'BUSES': {'Area': 1.08752,
'Bus/Area': 1.08752,
'Bus/Gate Leakage': 0.00541455,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0564625,
'Bus/Subthreshold Leakage with power gating': 0.0211734,
'Gate Leakage': 0.00541455,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0564625,
'Subthreshold Leakage with power gating': 0.0211734},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955308,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.852868,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679223,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337297,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584077,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377493,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.29887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202647,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319665,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59121,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161125,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122273,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110483,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0904283,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271608,
'Execution Unit/Register Files/Runtime Dynamic': 0.102656,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293143,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.835198,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.51333,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000675581,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000258971,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129901,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00353661,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076553,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0869311,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.52956,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.213019,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295257,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02054,
'Instruction Fetch Unit/Runtime Dynamic': 0.606399,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152811,
'L2/Runtime Dynamic': 0.0364529,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.72689,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.24846,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.08055,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0805499,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.10881,
'Load Store Unit/Runtime Dynamic': 1.72626,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198623,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397245,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0704918,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0727723,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.343808,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0347031,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624173,
'Memory Management Unit/Runtime Dynamic': 0.107475,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0592,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562129,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.163866,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750006,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.73993,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955837,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277764,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.853885,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679669,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337724,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584816,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377927,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.30047,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202914,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319906,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59314,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161317,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122428,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110585,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0905428,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271902,
'Execution Unit/Register Files/Runtime Dynamic': 0.102786,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293405,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.836102,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.5167,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000676533,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000259327,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00130065,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00354144,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076668,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0870411,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.53656,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.212864,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295631,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02789,
'Instruction Fetch Unit/Runtime Dynamic': 0.606744,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152816,
'L2/Runtime Dynamic': 0.036542,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.73104,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.25059,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1136,
'Load Store Unit/Runtime Dynamic': 1.72918,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198953,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397907,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0706092,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0728902,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.344243,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0346814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624811,
'Memory Management Unit/Runtime Dynamic': 0.107572,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0739,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562798,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240417,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.164074,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750914,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.74765,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.72117540729286,
'Runtime Dynamic': 3.72117540729286,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.569896,
'Runtime Dynamic': 0.377251,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 128.211,
'Gate Leakage': 0.799822,
'Peak Dynamic': 48.7031,
'Peak Power': 68.7978,
'Runtime Dynamic': 13.8648,
'Subthreshold Leakage': 19.2949,
'Subthreshold Leakage with power gating': 8.76959,
'Total Cores/Area': 65.2164,
'Total Cores/Gate Leakage': 0.745993,
'Total Cores/Peak Dynamic': 48.1332,
'Total Cores/Runtime Dynamic': 13.4876,
'Total Cores/Subthreshold Leakage': 12.4375,
'Total Cores/Subthreshold Leakage with power gating': 5.16621,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.569896,
'Total L3s/Runtime Dynamic': 0.377251,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 20.0947,
'Total NoCs/Area': 1.08752,
'Total NoCs/Gate Leakage': 0.00541455,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0564625,
'Total NoCs/Subthreshold Leakage with power gating': 0.0211734}}
| 73.59751
| 124
| 0.677398
|
power = {'BUSES': {'Area': 1.08752,
'Bus/Area': 1.08752,
'Bus/Gate Leakage': 0.00541455,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0564625,
'Bus/Subthreshold Leakage with power gating': 0.0211734,
'Gate Leakage': 0.00541455,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0564625,
'Subthreshold Leakage with power gating': 0.0211734},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955308,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.852868,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679223,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337297,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584077,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377493,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.29887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202647,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319665,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59121,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161125,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122273,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110483,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0904283,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271608,
'Execution Unit/Register Files/Runtime Dynamic': 0.102656,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293143,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.835198,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.51333,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000675581,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000258971,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129901,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00353661,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076553,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0869311,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.52956,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.213019,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295257,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02054,
'Instruction Fetch Unit/Runtime Dynamic': 0.606399,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152811,
'L2/Runtime Dynamic': 0.0364529,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.72689,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.24846,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.08055,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0805499,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.10881,
'Load Store Unit/Runtime Dynamic': 1.72626,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198623,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397245,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0704918,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0727723,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.343808,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0347031,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624173,
'Memory Management Unit/Runtime Dynamic': 0.107475,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0592,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562129,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.163866,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750006,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.73993,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955837,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277764,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.853885,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679669,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337724,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584816,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377927,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.30047,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202914,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319906,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59314,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161317,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122428,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110585,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0905428,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271902,
'Execution Unit/Register Files/Runtime Dynamic': 0.102786,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293405,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.836102,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.5167,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000676533,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000259327,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00130065,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00354144,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076668,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0870411,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.53656,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.212864,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295631,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02789,
'Instruction Fetch Unit/Runtime Dynamic': 0.606744,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152816,
'L2/Runtime Dynamic': 0.036542,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.73104,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.25059,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1136,
'Load Store Unit/Runtime Dynamic': 1.72918,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198953,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397907,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0706092,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0728902,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.344243,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0346814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624811,
'Memory Management Unit/Runtime Dynamic': 0.107572,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0739,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562798,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240417,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.164074,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750914,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.74765,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.72117540729286,
'Runtime Dynamic': 3.72117540729286,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.569896,
'Runtime Dynamic': 0.377251,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 128.211,
'Gate Leakage': 0.799822,
'Peak Dynamic': 48.7031,
'Peak Power': 68.7978,
'Runtime Dynamic': 13.8648,
'Subthreshold Leakage': 19.2949,
'Subthreshold Leakage with power gating': 8.76959,
'Total Cores/Area': 65.2164,
'Total Cores/Gate Leakage': 0.745993,
'Total Cores/Peak Dynamic': 48.1332,
'Total Cores/Runtime Dynamic': 13.4876,
'Total Cores/Subthreshold Leakage': 12.4375,
'Total Cores/Subthreshold Leakage with power gating': 5.16621,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.569896,
'Total L3s/Runtime Dynamic': 0.377251,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 20.0947,
'Total NoCs/Area': 1.08752,
'Total NoCs/Gate Leakage': 0.00541455,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0564625,
'Total NoCs/Subthreshold Leakage with power gating': 0.0211734}}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
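The row above embeds a McPAT power report as a nested Python dict: 'Core' is a list of per-core dicts, and 'Processor' carries chip-level rollups under flat 'Path/Metric' keys. A minimal sketch of querying it, assuming the dict literal shown is bound to power as in its source file (the helper name below is ours, not part of the row):

def total_core_runtime_dynamic(power):
    # sum the top-level 'Runtime Dynamic' total of each core dict
    return sum(core['Runtime Dynamic'] for core in power['Core'])

# Cross-check against the rollup McPAT already computed:
# total_core_runtime_dynamic(power)                   # 6.73993 + 6.74765 = 13.48758
# power['Processor']['Total Cores/Runtime Dynamic']   # 13.4876 (same value, rounded)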
e9a2f6e36e21d2f812a566f6b88b2d9f4025924d
| 1,890
|
py
|
Python
|
app/main.py
|
alf1e/CHUM-Package-manager
|
814290e344c82a8e0fb48435a745b15ae178eefb
|
[
"MIT"
] | null | null | null |
app/main.py
|
alf1e/CHUM-Package-manager
|
814290e344c82a8e0fb48435a745b15ae178eefb
|
[
"MIT"
] | null | null | null |
app/main.py
|
alf1e/CHUM-Package-manager
|
814290e344c82a8e0fb48435a745b15ae178eefb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#########
#LICENSE#
#########
'''
MIT License
Copyright (c) 2021 ItsMeAlfie0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#########
#IMPORTS#
#########
import os
import sys
import urllib.request
import json
######
#CODE#
######
arg = sys.argv
if arg[1] == "--add-host":
with open("conf/hosts.json", "r") as f: data = json.load(f)
data[arg[2]] = arg[3]
with open("conf/hosts.json", "w") as e: json.dump(e)
print(f"Added host '{arg[2]}' '{arg[3]}'")
elif arg[1] == "install":
with open("conf/hosts.json", "r") as f: data = json.load(f)
host = data[arg[2]]
setup_sh = urllib.request.urlopen(f"{host}?repo={arg[3]}").read()
os.system(f"mkdir /etc/chum/{arg[3]}")
with open(f"/etc/chum/{arg[3]}/setup.sh", "w")as f:
f.write(setup_sh)
f.close()
os.system(f"sh /etc/chumj/{arg[3]}/setup.sh")
print("Package installed!")
| 29.076923
| 78
| 0.691534
|
#!/usr/bin/env python
#########
#LICENSE#
#########
'''
MIT License
Copyright (c) 2021 ItsMeAlfie0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#########
#IMPORTS#
#########
import os
import sys
import urllib.request
import json
######
#CODE#
######
arg = sys.argv
if arg[1] == "--add-host":
with open("conf/hosts.json", "r") as f: data = json.load(f)
data[arg[2]] = arg[3]
with open("conf/hosts.json", "w") as e: json.dump(e)
print(f"Added host '{arg[2]}' '{arg[3]}'")
elif arg[1] == "install":
with open("conf/hosts.json", "r") as f: data = json.load(f)
host = data[arg[2]]
setup_sh = urllib.request.urlopen(f"{host}?repo={arg[3]}").read()
os.system(f"mkdir /etc/chum/{arg[3]}")
with open(f"/etc/chum/{arg[3]}/setup.sh", "w")as f:
f.write(setup_sh)
f.close()
os.system(f"sh /etc/chumj/{arg[3]}/setup.sh")
print("Package installed!")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
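The CHUM script above dispatches on sys.argv without checking its length, so running it with no arguments raises IndexError before either branch is reached. A minimal guard sketch (the usage string below is ours, not part of the script):

import sys

arg = sys.argv
if len(arg) < 2:
    sys.exit("usage: main.py --add-host <name> <url> | install <host> <repo>")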
10235f4c22917028f59e78a277404007dacc9d74
| 1,058
|
py
|
Python
|
pin ponge.py
|
glebyad/ping-pong
|
2fabfa00b51f5c50686f8c6de10864722f3d3968
|
[
"CC0-1.0"
] | null | null | null |
pin ponge.py
|
glebyad/ping-pong
|
2fabfa00b51f5c50686f8c6de10864722f3d3968
|
[
"CC0-1.0"
] | null | null | null |
pin ponge.py
|
glebyad/ping-pong
|
2fabfa00b51f5c50686f8c6de10864722f3d3968
|
[
"CC0-1.0"
] | null | null | null |
from pygame import *
# create the game window
window = display.set_mode((1000, 700))
display.set_caption('tag')
# set the scene background
background = transform.scale(image.load('ping.jpg'), (1000, 700))
# create 2 sprites and place them on the scene
x1 = 0
y1 = 300
x2 = 900
y2 = 300
sprite1 = transform.scale(image.load('raketka1.png'), (100, 100))
sprite2 = transform.scale(image.load('raketka2.jpg'), (100, 100))
run = True
clock = time.Clock()
FPS = 60
while run:
window.blit(background,(0, 0))
window.blit(sprite1, (x1, y1))
window.blit(sprite2, (x2, y2))
for e in event.get():
if e.type == QUIT:
run = False
speed = 4
keys_pressed = key.get_pressed()
if keys_pressed[K_w] and y1 > 5:
y1 -= speed
if keys_pressed[K_s] and y1 < 600:
y1 += speed
if keys_pressed[K_UP] and y2 > 5:
y2 -= speed
if keys_pressed[K_DOWN] and y2 < 600:
y2 += speed
display.update()
clock.tick(FPS)
| 19.592593
| 66
| 0.571834
|
from pygame import *
# create the game window
window = display.set_mode((1000, 700))
display.set_caption('tag')
# set the scene background
background = transform.scale(image.load('ping.jpg'), (1000, 700))
# create 2 sprites and place them on the scene
x1 = 0
y1 = 300
x2 = 900
y2 = 300
sprite1 = transform.scale(image.load('raketka1.png'), (100, 100))
sprite2 = transform.scale(image.load('raketka2.jpg'), (100, 100))
run = True
clock = time.Clock()
FPS = 60
while run:
window.blit(background,(0, 0))
window.blit(sprite1, (x1, y1))
window.blit(sprite2, (x2, y2))
for e in event.get():
if e.type == QUIT:
run = False
speed = 4
keys_pressed = key.get_pressed()
if keys_pressed[K_w] and y1 > 5:
y1 -= speed
if keys_pressed[K_s] and y1 < 600:
y1 += speed
if keys_pressed[K_UP] and y2 > 5:
y2 -= speed
if keys_pressed[K_DOWN] and y2 < 600:
y2 += speed
display.update()
clock.tick(FPS)
| 134
| 0
| 0
| 0
| 0
| 0
| 0
| -1
| 23
|
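The paddle movement in the game loop above bounds each racket with paired if-checks (y > 5 going up, y < 600 going down), which can overshoot an edge by up to speed - 1 pixels. A sketch that clamps exactly to the same bounds, shown for the left racket only:

if keys_pressed[K_w]:
    y1 = max(5, y1 - speed)    # stop exactly at the top bound
if keys_pressed[K_s]:
    y1 = min(600, y1 + speed)  # stop exactly at the bottom bound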
4095a34c413d03e43c4c7d0136819b20e9686d8b
| 3,010
|
py
|
Python
|
containerchaos/measure_response_time.py
|
containerchaos/containerchaos
|
3e44c9587542678d6563b3f07299fb33c88a1f3e
|
[
"MIT"
] | null | null | null |
containerchaos/measure_response_time.py
|
containerchaos/containerchaos
|
3e44c9587542678d6563b3f07299fb33c88a1f3e
|
[
"MIT"
] | 9
|
2019-02-15T16:59:39.000Z
|
2019-02-26T22:42:10.000Z
|
containerchaos/measure_response_time.py
|
containerchaos/containerchaos
|
3e44c9587542678d6563b3f07299fb33c88a1f3e
|
[
"MIT"
] | 1
|
2019-07-31T13:38:51.000Z
|
2019-07-31T13:38:51.000Z
|
import csv
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
def measure_response_time(url, criteria, write=True):
'''
Measures and saves an API request's response time to a CSV file
:param url: The URL for API request
:param criteria: The criteria in effect
:return: Path to a CSV file with response time in seconds with its timestamp as columns
'''
response = requests.get(url)
response_time = response.elapsed.total_seconds()
date_time = datetime.datetime.now()
fieldnames = ['timestamp', 'responseTime', 'criteria'] # Headers of the CSV file
out_path = 'Response-Times.csv'
if write:
with open(out_path, 'a') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
if csvFile.tell() == 0:
writer.writeheader()
writer.writerow({'timestamp': date_time, 'responseTime': response_time, 'criteria': criteria})
return out_path
def generate_histogram(path, title):
'''
Saves a histogram with average response time per number of requests
:param path: Path to a csv file
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
criteria_keys = list(criteria_dict.keys())
criteria_values = list(criteria_dict.values())
plt.title(title)
plt.style.use("seaborn-deep")
plt.hist(x=criteria_values, bins=30, label=criteria_keys)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Number of Requests")
plt.savefig(title + " Histogram")
plt.show()
def generate_density_plot(path, title):
'''
Saves a density plot with density of requests per second
:param path: Path to a csv file
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
criteria_keys = list(criteria_dict.keys())
for criteria in criteria_keys:
subset = response_times[response_times["criteria"] == criteria]
sns.distplot(subset["responseTime"], hist=False, kde=True, kde_kws={"linewidth": 3}, label=criteria)
plt.title(title)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Density")
plt.savefig(title + " Density Plot")
plt.show()
local_simple_csv = "output/local/simple/Response-Times.csv"
local_complex_csv = "output/local/complex/Response-Times.csv"
cloud_simple_csv = "output/gcloud/simple/Response-Times.csv"
cloud_complex_csv = "output/gcloud/complex/Response-Times.csv"
generate_histogram(local_simple_csv, "Local Machine Simple Task")
generate_density_plot(local_complex_csv, "Local Machine Complex Task")
generate_density_plot(cloud_simple_csv, "Cloud Simple Task")
generate_histogram(cloud_complex_csv, "Cloud Complex Task")
| 32.021277
| 108
| 0.707641
|
import csv
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
def measure_response_time(url, criteria, write=True):
'''
Measures and saves an API request's response time to a CSV file
:param url: The URL for API request
:param criteria: The criteria in effect
    :return: Path to the CSV file; its columns are timestamp, response time in seconds, and criteria
'''
response = requests.get(url)
response_time = response.elapsed.total_seconds()
date_time = datetime.datetime.now()
fieldnames = ['timestamp', 'responseTime', 'criteria'] # Headers of the CSV file
out_path = 'Response-Times.csv'
if write:
with open(out_path, 'a') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
if csvFile.tell() == 0:
writer.writeheader()
writer.writerow({'timestamp': date_time, 'responseTime': response_time, 'criteria': criteria})
return out_path
def generate_histogram(path, title):
'''
    Saves a histogram of response times for each criteria group
:param path: Path to a csv file
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
    criteria_keys = list(criteria_dict.keys())
criteria_values = list(criteria_dict.values())
plt.title(title)
plt.style.use("seaborn-deep")
    plt.hist(x=criteria_values, bins=30, label=criteria_keys)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Number of Requests")
plt.savefig(title + " Histogram")
plt.show()
def generate_density_plot(path, title):
'''
    Saves a kernel density plot of response times for each criteria group
:param path: Path to a csv file
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
    criteria_keys = list(criteria_dict.keys())
    for criteria in criteria_keys:
subset = response_times[response_times["criteria"] == criteria]
sns.distplot(subset["responseTime"], hist=False, kde=True, kde_kws={"linewidth": 3}, label=criteria)
plt.title(title)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Density")
plt.savefig(title + " Density Plot")
plt.show()
local_simple_csv = "output/local/simple/Response-Times.csv"
local_complex_csv = "output/local/complex/Response-Times.csv"
cloud_simple_csv = "output/gcloud/simple/Response-Times.csv"
cloud_complex_csv = "output/gcloud/complex/Response-Times.csv"
generate_histogram(local_simple_csv, "Local Machine Simple Task")
generate_density_plot(local_complex_csv, "Local Machine Complex Task")
generate_density_plot(cloud_simple_csv, "Cloud Simple Task")
generate_histogram(cloud_complex_csv, "Cloud Complex Task")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
821041c230e611989e036de3de8d4f9ba908a39e
| 1,620
|
py
|
Python
|
tracking/main.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
tracking/main.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
tracking/main.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
api_key = "AIzaSyAedPSTmyoW1ejPtwG_cSu7fEjLxOOUrXg"
# Uses the Geocode API
import requests
from urllib.parse import urlencode
# Resolve a street address to (lat, lng) coordinates via the Geocode API
def extract_lat_lng(address_or_postalcode, data_type = 'json'):
    endpoint = f"https://maps.googleapis.com/maps/api/geocode/{data_type}"
    params = {"address": address_or_postalcode, "key": api_key}
    url_params = urlencode(params)
    url = f"{endpoint}?{url_params}"
    r = requests.get(url)
    if r.status_code not in range(200, 299):
        return None, None  # return a 2-tuple so the unpacking below does not fail
    latlng = {}
    try:
        latlng = r.json()['results'][0]['geometry']['location']
    except:
        pass
    return latlng.get("lat"), latlng.get("lng")
# Input address here!
lat, lng = extract_lat_lng("1600 Amphitheatre Parkway, Mountain View, CA")
places_endpoint_2 = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params_2 = {
"key": api_key,
"location": f"{lat},{lng}",
"radius": "1500",
"keyword": "pharmacy"
}
params_2_encoded = urlencode(params_2)
places_url = f"{places_endpoint_2}?{params_2_encoded}"
r2 = requests.get(places_url)
# Stores the name and vicinity of up to three nearby pharmacies (within a 1,500-meter radius)
try:
nameVicinity0 = r2.json()['results'][0]
name0 = nameVicinity0.get('name')
vicinity0 = nameVicinity0.get('vicinity')
except:
pass
try:
nameVicinity1 = r2.json()['results'][1]
name1 = nameVicinity1.get('name')
vicinity1 = nameVicinity1.get('vicinity')
except:
pass
try:
nameVicinity2 = r2.json()['results'][2]
name2 = nameVicinity2.get('name')
vicinity2 = nameVicinity2.get('vicinity')
except:
pass
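# A hedged alternative sketch (an addition, not part of the original script):
# the three try/except blocks above can be collapsed into one comprehension
# that keeps up to three (name, vicinity) pairs. "nearby_pharmacies" is a
# name introduced here for illustration only.
nearby_pharmacies = [
    (place.get('name'), place.get('vicinity'))
    for place in r2.json().get('results', [])[:3]
]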
| 27.931034
| 93
| 0.683951
|
api_key = "AIzaSyAedPSTmyoW1ejPtwG_cSu7fEjLxOOUrXg"
# Uses the Geocode API
import requests
from urllib.parse import urlencode
def extract_lat_lng(address_or_postalcode, data_type = 'json'):
endpoint = f"https://maps.googleapis.com/maps/api/geocode/{data_type}"
params = {"address": address_or_postalcode, "key": api_key}
url_params = urlencode(params)
url = f"{endpoint}?{url_params}"
r = requests.get(url)
if r.status_code not in range(200, 299):
        return None, None  # return a 2-tuple so the caller's unpacking does not fail
latlng = {}
try:
latlng = r.json()['results'][0]['geometry']['location']
except:
pass
return latlng.get("lat"), latlng.get("lng")
#Input address here!
lat, lng = extract_lat_lng("1600 Amphitheatre Parkway, Mountain View, CA")
places_endpoint_2 = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params_2 = {
"key": api_key,
"location": f"{lat},{lng}",
"radius": "1500",
"keyword": "pharmacy"
}
params_2_encoded = urlencode(params_2)
places_url = f"{places_endpoint_2}?{params_2_encoded}"
r2 = requests.get(places_url)
# Stores the name and vicinity of up to three nearby pharmacies (within a 1,500-meter radius)
try:
nameVicinity0 = r2.json()['results'][0]
name0 = nameVicinity0.get('name')
vicinity0 = nameVicinity0.get('vicinity')
except:
pass
try:
nameVicinity1 = r2.json()['results'][1]
name1 = nameVicinity1.get('name')
vicinity1 = nameVicinity1.get('vicinity')
except:
pass
try:
nameVicinity2 = r2.json()['results'][2]
name2 = nameVicinity2.get('name')
vicinity2 = nameVicinity2.get('vicinity')
except:
pass
| 0
| 0
| 0
| 0
| 0
| 504
| 0
| 0
| 23
|
f74c328b4e8be5db4ab0478db22db83a43dfc36e
| 38,645
|
py
|
Python
|
petitions/migrations/01000_add_counties_subcounties_courts_prisons_offences.py
|
DavidWaichari/pomac
|
79273c34dc54a301ed9fd802b0c2c487b2ac5d92
|
[
"MIT"
] | null | null | null |
petitions/migrations/01000_add_counties_subcounties_courts_prisons_offences.py
|
DavidWaichari/pomac
|
79273c34dc54a301ed9fd802b0c2c487b2ac5d92
|
[
"MIT"
] | null | null | null |
petitions/migrations/01000_add_counties_subcounties_courts_prisons_offences.py
|
DavidWaichari/pomac
|
79273c34dc54a301ed9fd802b0c2c487b2ac5d92
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-01-28 19:30
| 56.25182
| 112
| 0.73712
|
# Generated by Django 2.0.1 on 2018-01-28 19:30
from django.db import migrations
def add_initial_data(apps, schema_editor):
County = apps.get_model('petitions', 'County')
Court = apps.get_model('petitions', 'Court')
SubCounty = apps.get_model('petitions', 'SubCounty')
Prison = apps.get_model('petitions', 'Prison')
Offence = apps.get_model('petitions', 'Offence')
baringo = County.objects.create(name='BARINGO')
SubCounty.objects.create(name='BARINGO EAST', county=baringo)
SubCounty.objects.create(name='BARINGO WEST', county=baringo)
SubCounty.objects.create(name='BARINGO CENTRAL', county=baringo)
SubCounty.objects.create(name='MOCHONGOI', county=baringo)
SubCounty.objects.create(name='MOGOTIO', county=baringo)
SubCounty.objects.create(name='ELDAMA RAVINE', county=baringo)
bomet = County.objects.create(name='BOMET')
SubCounty.objects.create(name='SOTIK', county=bomet)
SubCounty.objects.create(name='CHEPALUNGU', county=bomet)
SubCounty.objects.create(name='BOMET EAST', county=bomet)
SubCounty.objects.create(name='BOMET CENTRAL', county=bomet)
SubCounty.objects.create(name='KONOIN', county=bomet)
bungoma = County.objects.create(name='BUNGOMA')
SubCounty.objects.create(name='MT ELGON', county=bungoma)
SubCounty.objects.create(name='SIRISIA', county=bungoma)
SubCounty.objects.create(name='KABUCHIA', county=bungoma)
SubCounty.objects.create(name='BUMULA', county=bungoma)
SubCounty.objects.create(name='KANDUNYI', county=bungoma)
SubCounty.objects.create(name='WEBUYE', county=bungoma)
SubCounty.objects.create(name='BOKOLI', county=bungoma)
SubCounty.objects.create(name='KIMILILI', county=bungoma)
SubCounty.objects.create(name='TONGAREN', county=bungoma)
busia = County.objects.create(name='BUSIA')
SubCounty.objects.create(name='TESO NORTH', county=busia)
SubCounty.objects.create(name='TESO SOUTH', county=busia)
SubCounty.objects.create(name='NAMBALE', county=busia)
SubCounty.objects.create(name='MATAYOS', county=busia)
SubCounty.objects.create(name='BUTULA', county=busia)
SubCounty.objects.create(name='FUNYULA', county=busia)
SubCounty.objects.create(name='BUDALANGI', county=busia)
elgeiyomarakwet = County.objects.create(name='ELGEYO MARAKWET')
SubCounty.objects.create(name='MARAKWET EAST', county=elgeiyomarakwet)
SubCounty.objects.create(name='MARAKWET WEST', county=elgeiyomarakwet)
SubCounty.objects.create(name='KEIYO EAST', county=elgeiyomarakwet)
SubCounty.objects.create(name='KEIYO SOUTH', county=elgeiyomarakwet)
embu = County.objects.create(name='EMBU')
SubCounty.objects.create(name='MANYATTA', county=embu)
SubCounty.objects.create(name='RUNYENJES', county=embu)
SubCounty.objects.create(name='GACHOKA', county=embu)
SubCounty.objects.create(name='SIAKAGO', county=embu)
garissa = County.objects.create(name='GARISSA')
SubCounty.objects.create(name='TAVEDUJIS', county=garissa)
SubCounty.objects.create(name='BALAMBALA', county=garissa)
SubCounty.objects.create(name='LAGDERA', county=garissa)
SubCounty.objects.create(name='DADAAB', county=garissa)
SubCounty.objects.create(name='FAFI', county=garissa)
SubCounty.objects.create(name='IJARA', county=garissa)
homabay = County.objects.create(name='HOMA BAY')
SubCounty.objects.create(name='KASIPUL', county=homabay)
SubCounty.objects.create(name='KABONDO', county=homabay)
SubCounty.objects.create(name='KARACHUONYO', county=homabay)
SubCounty.objects.create(name='RANGWE', county=homabay)
SubCounty.objects.create(name='HOMABAY TOWN', county=homabay)
SubCounty.objects.create(name='NDHIWA', county=homabay)
SubCounty.objects.create(name='MBITA', county=homabay)
SubCounty.objects.create(name='GWASSI', county=homabay)
isiolo = County.objects.create(name='ISIOLO')
SubCounty.objects.create(name='ISIOLO NORTH', county=isiolo)
SubCounty.objects.create(name='ISIOLO SOUTH', county=isiolo)
kajiado = County.objects.create(name='KAJIADO')
SubCounty.objects.create(name='KAJIADO CENTRAL', county=kajiado)
SubCounty.objects.create(name='KAJIADO NORTH', county=kajiado)
SubCounty.objects.create(name='KAJIADO SOUTH', county=kajiado)
kakamega = County.objects.create(name='KAKAMEGA')
SubCounty.objects.create(name='LUGARI', county=kakamega)
SubCounty.objects.create(name='LIKUYANI', county=kakamega)
SubCounty.objects.create(name='MALAVA', county=kakamega)
SubCounty.objects.create(name='LURAMBI', county=kakamega)
SubCounty.objects.create(name='MAKHOLO', county=kakamega)
SubCounty.objects.create(name='MUMIAS', county=kakamega)
SubCounty.objects.create(name='MUMIAS EAST', county=kakamega)
SubCounty.objects.create(name='MATUNGU', county=kakamega)
SubCounty.objects.create(name='BUTERE', county=kakamega)
SubCounty.objects.create(name='KHWISERO', county=kakamega)
SubCounty.objects.create(name='SHINYALU', county=kakamega)
SubCounty.objects.create(name='IKOLOMANI', county=kakamega)
kericho = County.objects.create(name='KERICHO')
SubCounty.objects.create(name='AINAMOI', county=kericho)
SubCounty.objects.create(name='BELGUT', county=kericho)
SubCounty.objects.create(name='KIPKELION', county=kericho)
kiambu = County.objects.create(name='KIAMBU')
SubCounty.objects.create(name='GATUNDU SOUTH', county=kiambu)
SubCounty.objects.create(name='GATUNDU NORTH', county=kiambu)
SubCounty.objects.create(name='JUJA', county=kiambu)
SubCounty.objects.create(name='THIKA TOWN', county=kiambu)
SubCounty.objects.create(name='RUIRU GITHUNGURI', county=kiambu)
SubCounty.objects.create(name='KIAMBU', county=kiambu)
SubCounty.objects.create(name='KIAMBAA', county=kiambu)
SubCounty.objects.create(name='KABETE', county=kiambu)
SubCounty.objects.create(name='KIKUYU', county=kiambu)
SubCounty.objects.create(name='LIMURU', county=kiambu)
SubCounty.objects.create(name='LARI', county=kiambu)
kilifi = County.objects.create(name='KILIFI')
SubCounty.objects.create(name='KILIFI NORTH', county=kilifi)
SubCounty.objects.create(name='KILIFI SOUTH', county=kilifi)
SubCounty.objects.create(name='KALOLENI', county=kilifi)
SubCounty.objects.create(name='RABAI', county=kilifi)
SubCounty.objects.create(name='GANZE', county=kilifi)
SubCounty.objects.create(name='MALINDI', county=kilifi)
SubCounty.objects.create(name='MAGARINI', county=kilifi)
kirinyaga = County.objects.create(name='KIRINYAGA')
SubCounty.objects.create(name='MWEA', county=kirinyaga)
SubCounty.objects.create(name='GICHUGU', county=kirinyaga)
SubCounty.objects.create(name='NDIA', county=kirinyaga)
SubCounty.objects.create(name='KIRINYAGA CENTRAL', county=kirinyaga)
kisii = County.objects.create(name='KISII')
SubCounty.objects.create(name='BONCHARI', county=kisii)
SubCounty.objects.create(name='SOUTH MUGIRANGO', county=kisii)
SubCounty.objects.create(name='BOMACHOGE', county=kisii)
SubCounty.objects.create(name='BOBASI', county=kisii)
SubCounty.objects.create(name='GUCHA', county=kisii)
SubCounty.objects.create(name='NYARIBARI MASABA', county=kisii)
SubCounty.objects.create(name='NYARIBARI CHACHE', county=kisii)
SubCounty.objects.create(name='MATRANI', county=kisii)
SubCounty.objects.create(name='MOSOCHO', county=kisii)
kisumu = County.objects.create(name='KISUMU')
SubCounty.objects.create(name='KISUMU EAST', county=kisumu)
SubCounty.objects.create(name='KISUMU WEST', county=kisumu)
SubCounty.objects.create(name='KISUMU CENTRAL', county=kisumu)
SubCounty.objects.create(name='SEME', county=kisumu)
SubCounty.objects.create(name='NYANDO', county=kisumu)
SubCounty.objects.create(name='MUHORONI', county=kisumu)
SubCounty.objects.create(name='NYAKACH', county=kisumu)
kitui = County.objects.create(name='KITUI')
SubCounty.objects.create(name='MWINGI NORTH', county=kitui)
SubCounty.objects.create(name='MWINGI CENTRAL', county=kitui)
SubCounty.objects.create(name='MWINGI SOUTH', county=kitui)
SubCounty.objects.create(name='KITUI WEST', county=kitui)
SubCounty.objects.create(name='KITUI RURAL', county=kitui)
SubCounty.objects.create(name='KITUI TOWN', county=kitui)
SubCounty.objects.create(name='MUTITU', county=kitui)
SubCounty.objects.create(name='KITUI SOUTH', county=kitui)
kwale = County.objects.create(name='KWALE')
SubCounty.objects.create(name='MSAMBWENI', county=kwale)
SubCounty.objects.create(name='LUNGA LUNGA', county=kwale)
SubCounty.objects.create(name='MATUGA', county=kwale)
SubCounty.objects.create(name='KINANGO', county=kwale)
laikipia = County.objects.create(name='LAIKIPIA')
SubCounty.objects.create(name='LAIKIPIA WEST', county=laikipia)
SubCounty.objects.create(name='LAIKIPIA EAST', county=laikipia)
SubCounty.objects.create(name='LAIKIPIA NORTH', county=laikipia)
lamu = County.objects.create(name='LAMU')
SubCounty.objects.create(name='LAMU EAST', county=lamu)
SubCounty.objects.create(name='LAMU WEST', county=lamu)
machakos = County.objects.create(name='MACHAKOS')
SubCounty.objects.create(name='MASINGA', county=machakos)
SubCounty.objects.create(name='YATTA', county=machakos)
SubCounty.objects.create(name='KANGUNDO', county=machakos)
SubCounty.objects.create(name='MATUNGULU', county=machakos)
SubCounty.objects.create(name='KATHIANI', county=machakos)
SubCounty.objects.create(name='MAVOKO', county=machakos)
SubCounty.objects.create(name='MACHAKOS TOWN', county=machakos)
SubCounty.objects.create(name='MWALA', county=machakos)
makueni = County.objects.create(name='MAKUENI')
SubCounty.objects.create(name='MBOONI', county=makueni)
SubCounty.objects.create(name='KILOME', county=makueni)
SubCounty.objects.create(name='KAITI', county=makueni)
SubCounty.objects.create(name='MAKUENI', county=makueni)
SubCounty.objects.create(name='KIBWEZI WEST', county=makueni)
SubCounty.objects.create(name='KIBWEZI EAST', county=makueni)
mandera = County.objects.create(name='MANDERA')
SubCounty.objects.create(name='MANDERA WEST', county=mandera)
SubCounty.objects.create(name='BANISA', county=mandera)
SubCounty.objects.create(name='MANDERA NORTH', county=mandera)
SubCounty.objects.create(name='MANDERA EAST', county=mandera)
SubCounty.objects.create(name='LAFEY', county=mandera)
marsabit = County.objects.create(name='MARSABIT')
SubCounty.objects.create(name='MOYALE', county=marsabit)
SubCounty.objects.create(name='NORTH HORR', county=marsabit)
SubCounty.objects.create(name='SAKU', county=marsabit)
SubCounty.objects.create(name='LAISAMIS', county=marsabit)
meru = County.objects.create(name='MERU')
SubCounty.objects.create(name='IGEMBE SOUTH', county=meru)
SubCounty.objects.create(name='IGEMBE CENTRAL', county=meru)
SubCounty.objects.create(name='IGEMBE NORTH', county=meru)
SubCounty.objects.create(name='TIGANIA WEST', county=meru)
SubCounty.objects.create(name='TIGANIA EAST', county=meru)
SubCounty.objects.create(name='NORTH IMENTI', county=meru)
SubCounty.objects.create(name='BUURI', county=meru)
SubCounty.objects.create(name='CENTRAL IMENTI', county=meru)
SubCounty.objects.create(name='SOUTH IMENTI', county=meru)
migori = County.objects.create(name='MIGORI')
SubCounty.objects.create(name='RONGO', county=migori)
SubCounty.objects.create(name='AWENDO', county=migori)
SubCounty.objects.create(name='MIGORI EAST', county=migori)
SubCounty.objects.create(name='MIGORI WEST', county=migori)
SubCounty.objects.create(name='URIRI', county=migori)
SubCounty.objects.create(name='NYATIKE', county=migori)
SubCounty.objects.create(name='KURIA EAST', county=migori)
SubCounty.objects.create(name='KURIA WEST', county=migori)
mombasa = County.objects.create(name='MOMBASA')
SubCounty.objects.create(name='CHANGAMWE', county=mombasa)
SubCounty.objects.create(name='JOMVU', county=mombasa)
SubCounty.objects.create(name='KISAUNI', county=mombasa)
SubCounty.objects.create(name='NYALI', county=mombasa)
SubCounty.objects.create(name='LIKONI', county=mombasa)
SubCounty.objects.create(name='MVITA', county=mombasa)
muranga = County.objects.create(name='MURANGA')
SubCounty.objects.create(name='KANGEMA', county=muranga)
SubCounty.objects.create(name='MATHIOYA', county=muranga)
SubCounty.objects.create(name='KIHARU', county=muranga)
SubCounty.objects.create(name='KIGUMO', county=muranga)
SubCounty.objects.create(name='MARAGWA', county=muranga)
SubCounty.objects.create(name='KANDARA', county=muranga)
SubCounty.objects.create(name='GATANGA', county=muranga)
nairobi = County.objects.create(name='NAIROBI')
SubCounty.objects.create(name='WESTLANDS', county=nairobi)
SubCounty.objects.create(name='PARKLANDS', county=nairobi)
SubCounty.objects.create(name='DAGORETTI', county=nairobi)
SubCounty.objects.create(name='KAREN / LANGATA', county=nairobi)
SubCounty.objects.create(name='KIBIRA', county=nairobi)
SubCounty.objects.create(name='ROYSAMBU', county=nairobi)
SubCounty.objects.create(name='KASARANI', county=nairobi)
SubCounty.objects.create(name='RUARAKA', county=nairobi)
SubCounty.objects.create(name='KARIOBANGI', county=nairobi)
SubCounty.objects.create(name='KAYOLE', county=nairobi)
SubCounty.objects.create(name='EMBAKASI', county=nairobi)
SubCounty.objects.create(name='MIHANG’O', county=nairobi)
SubCounty.objects.create(name='NAIROBI WEST', county=nairobi)
SubCounty.objects.create(name='MAKADARA', county=nairobi)
SubCounty.objects.create(name='KAMUKUNJI', county=nairobi)
SubCounty.objects.create(name='STAREHE', county=nairobi)
SubCounty.objects.create(name='MATHARE', county=nairobi)
nakuru = County.objects.create(name='NAKURU')
SubCounty.objects.create(name='MOLO', county=nakuru)
SubCounty.objects.create(name='NJORO', county=nakuru)
SubCounty.objects.create(name='NAIVASHA', county=nakuru)
SubCounty.objects.create(name='GILGIL', county=nakuru)
SubCounty.objects.create(name='KURESOI SOUTH', county=nakuru)
SubCounty.objects.create(name='KURESOI NORTH', county=nakuru)
SubCounty.objects.create(name='SUBUKIA', county=nakuru)
SubCounty.objects.create(name='RONGAI', county=nakuru)
SubCounty.objects.create(name='BAHATI', county=nakuru)
SubCounty.objects.create(name='NAKURU TOWN WEST', county=nakuru)
SubCounty.objects.create(name='NAKURU TOWN EAST', county=nakuru)
nandi = County.objects.create(name='NANDI')
SubCounty.objects.create(name='TINDERET', county=nandi)
SubCounty.objects.create(name='ALDAI', county=nandi)
SubCounty.objects.create(name='NANDI HILLS', county=nandi)
SubCounty.objects.create(name='EMGWEN NORTH', county=nandi)
SubCounty.objects.create(name='EMGWEN SOUTH', county=nandi)
SubCounty.objects.create(name='MOSOP', county=nandi)
narok = County.objects.create(name='NAROK')
SubCounty.objects.create(name='KILGORIS', county=narok)
SubCounty.objects.create(name='EMURUA DIKIRR', county=narok)
SubCounty.objects.create(name='NAROK NORTH', county=narok)
SubCounty.objects.create(name='KAJIADO EAST', county=narok)
SubCounty.objects.create(name='KAJIADO WEST', county=narok)
nyamira = County.objects.create(name='NYAMIRA')
SubCounty.objects.create(name='KITUTU MASABA', county=nyamira)
SubCounty.objects.create(name='NORTH MUGIRANGO', county=nyamira)
SubCounty.objects.create(name='WEST MUGIRANGO', county=nyamira)
nyandarua = County.objects.create(name='NYANDARUA')
SubCounty.objects.create(name='KINANGOP', county=nyandarua)
SubCounty.objects.create(name='KIPIPIRI', county=nyandarua)
SubCounty.objects.create(name='OL-KALOU', county=nyandarua)
SubCounty.objects.create(name='OL-JOROK', county=nyandarua)
SubCounty.objects.create(name='NDARAGWA', county=nyandarua)
nyeri = County.objects.create(name='NYERI')
SubCounty.objects.create(name='TETU', county=nyeri)
SubCounty.objects.create(name='KIENI', county=nyeri)
SubCounty.objects.create(name='MATHIRA', county=nyeri)
SubCounty.objects.create(name='OTHAYA', county=nyeri)
SubCounty.objects.create(name='MUKUWE-INI', county=nyeri)
SubCounty.objects.create(name='NYERI TOWN', county=nyeri)
samburu = County.objects.create(name='SAMBURU')
SubCounty.objects.create(name='SAMBURU WEST', county=samburu)
SubCounty.objects.create(name='SAMBURU NORTH', county=samburu)
SubCounty.objects.create(name='SAMBURU EAST', county=samburu)
siaya = County.objects.create(name='SIAYA')
SubCounty.objects.create(name='UGENYA', county=siaya)
SubCounty.objects.create(name='UGUNJA', county=siaya)
SubCounty.objects.create(name='ALEGO USONGA', county=siaya)
SubCounty.objects.create(name='GEM', county=siaya)
SubCounty.objects.create(name='BONDO', county=siaya)
SubCounty.objects.create(name='RARIEDA', county=siaya)
taitataveta = County.objects.create(name='TAITA TAVETA')
SubCounty.objects.create(name='TAVETA', county=taitataveta)
SubCounty.objects.create(name='WUNDANYI', county=taitataveta)
SubCounty.objects.create(name='MWATATE', county=taitataveta)
SubCounty.objects.create(name='VOI', county=taitataveta)
tanariver = County.objects.create(name='TANA RIVER')
SubCounty.objects.create(name='GARSEN', county=tanariver)
SubCounty.objects.create(name='GALOLE', county=tanariver)
SubCounty.objects.create(name='BURA', county=tanariver)
tharakanithi = County.objects.create(name='THARAKA NITHI')
SubCounty.objects.create(name='NITHI', county=tharakanithi)
SubCounty.objects.create(name='MAARA', county=tharakanithi)
SubCounty.objects.create(name='THARAKA', county=tharakanithi)
transnzoia = County.objects.create(name='TRANS NZOIA')
SubCounty.objects.create(name='KWANZA', county=transnzoia)
SubCounty.objects.create(name='ENDEBESS', county=transnzoia)
SubCounty.objects.create(name='SABOTI', county=transnzoia)
SubCounty.objects.create(name='KIMININI', county=transnzoia)
SubCounty.objects.create(name='CHERENGANYI', county=transnzoia)
turkana = County.objects.create(name='TURKANA')
SubCounty.objects.create(name='TURKANA NORTH', county=turkana)
SubCounty.objects.create(name='TURKANA WEST', county=turkana)
SubCounty.objects.create(name='TURKANA CENTRAL', county=turkana)
SubCounty.objects.create(name='LOIMA', county=turkana)
SubCounty.objects.create(name='TURKANA SOUTH', county=turkana)
SubCounty.objects.create(name='TURKANA EAST', county=turkana)
uasingishu = County.objects.create(name='UASIN GISHU')
SubCounty.objects.create(name='ELDORET EAST', county=uasingishu)
    SubCounty.objects.create(name='ELDORET NORTH', county=uasingishu)
SubCounty.objects.create(name='ELDORET SOUTH', county=uasingishu)
vihiga = County.objects.create(name='VIHIGA')
SubCounty.objects.create(name='VIHIGA', county=vihiga)
SubCounty.objects.create(name='SABATIA', county=vihiga)
SubCounty.objects.create(name='HAMISI', county=vihiga)
SubCounty.objects.create(name='EMUHAYA', county=vihiga)
SubCounty.objects.create(name='LUANDA', county=vihiga)
wajir = County.objects.create(name='WAJIR')
SubCounty.objects.create(name='WAJIR NORTH', county=wajir)
SubCounty.objects.create(name='WAJIR EAST', county=wajir)
SubCounty.objects.create(name='TARBAJ', county=wajir)
SubCounty.objects.create(name='WAJIR WEST', county=wajir)
SubCounty.objects.create(name='ELDAS', county=wajir)
SubCounty.objects.create(name='WAJIR SOUTH', county=wajir)
westpokot = County.objects.create(name='WEST POKOT')
SubCounty.objects.create(name='KAPENGURIA ', county=westpokot)
SubCounty.objects.create(name='SIGOR ', county=westpokot)
SubCounty.objects.create(name='KACHELIBA', county=westpokot)
SubCounty.objects.create(name='POKOT SOUTH ', county=westpokot)
#courts
instance = Court.objects.create(name='BARICHO MAGISTRATES\' COURT')
instance = Court.objects.create(name='BOMET LAW COURT')
instance = Court.objects.create(name='BOMET MAGISTRATES\' COURT')
instance = Court.objects.create(name='BONDO MAGISTRATES\' COURT')
instance = Court.objects.create(name='BUNGOMA LAW COURT')
instance = Court.objects.create(name='BUSIA LAW COURT')
instance = Court.objects.create(name='BUTALI MAGISTRATES\' COURT')
instance = Court.objects.create(name='BUTERE MAGISTRATES\' COURT')
instance = Court.objects.create(name='CHILDREN’S COURT NAIROBI MAGISTRATES\' COURT')
instance = Court.objects.create(name='CHUKA LAW COURT')
instance = Court.objects.create(name='CHUKA MAGISTRATES\' COURT')
instance = Court.objects.create(name='CITY COURT MAGISTRATES\' COURT')
instance = Court.objects.create(name='ELDAMA RAVINE MAGISTRATES\' COURT')
instance = Court.objects.create(name='ELDORET LAW COURT')
instance = Court.objects.create(name='ELDORET MAGISTRATES\' COURT')
instance = Court.objects.create(name='EMBU LAW COURT')
instance = Court.objects.create(name='EMBU MAGISTRATES\' COURT')
instance = Court.objects.create(name='ENGINEER MAGISTRATES\' COURT')
instance = Court.objects.create(name='GARISSA LAW COURT')
instance = Court.objects.create(name='GARISSA MAGISTRATES\' COURT')
instance = Court.objects.create(name='GARSEN LAW COURT')
instance = Court.objects.create(name='GATUNDU MAGISTRATES\' COURT')
instance = Court.objects.create(name='GICHUGU MAGISTRATES\' COURT')
instance = Court.objects.create(name='GITHUNGURI MAGISTRATES\' COURT')
instance = Court.objects.create(name='HAMISI MAGISTRATES\' COURT')
instance = Court.objects.create(name='HOLA MAGISTRATES\' COURT')
instance = Court.objects.create(name='HOMA-BAY LAW COURT')
instance = Court.objects.create(name='HOMABAY MAGISTRATES\' COURT')
instance = Court.objects.create(name='ISIOLO MAGISTRATES\' COURT')
instance = Court.objects.create(name='ITEN MAGISTRATES\' COURT')
instance = Court.objects.create(name='KABARNET LAW COURT')
    instance = Court.objects.create(name='KABARNET MAGISTRATES\' COURT')
instance = Court.objects.create(name='KADHI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAJIADO LAW COURT')
instance = Court.objects.create(name='KAJIADO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAKAMEGA LAW COURT')
instance = Court.objects.create(name='KAKAMEGA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KALOLENI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KANDARA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KANGEMA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KANGUNDO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAPENGURIA LAW COURT')
instance = Court.objects.create(name='KAPENGURIA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAPSABET MAGISTRATES\' COURT')
instance = Court.objects.create(name='KARATINA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KEHANCHA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KERICHO LAW COURT')
instance = Court.objects.create(name='KERICHO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KEROKA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KERUGOYA LAW COURT')
instance = Court.objects.create(name='KERUGOYA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIAMBU LAW COURT')
    instance = Court.objects.create(name='KIAMBU MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIBERA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIGUMO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIKUYU MAGISTRATES\' COURT')
instance = Court.objects.create(name='KILGORIS MAGISTRATES\' COURT')
instance = Court.objects.create(name='KILIFI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KILUNGU/NUNGUNI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIMILILI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KISII LAW COURT')
instance = Court.objects.create(name='KISII MAGISTRATES\' COURT')
instance = Court.objects.create(name='KISUMU LAW COURT')
instance = Court.objects.create(name='KISUMU MAGISTRATES\' COURT')
instance = Court.objects.create(name='KITALE LAW COURT')
instance = Court.objects.create(name='KITALE MAGISTRATES\' COURT')
instance = Court.objects.create(name='KITHIMANI/YATTA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KITUI LAW COURT')
instance = Court.objects.create(name='KITUI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KWALE MAGISTRATES\' COURT')
instance = Court.objects.create(name='KYUSO MAGISTRATES\' COURT')
instance = Court.objects.create(name='LAMU MAGISTRATES\' COURT')
instance = Court.objects.create(name='LIMURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='LODWAR LAW COURT')
instance = Court.objects.create(name='LODWAR MAGISTRATES\' COURT')
instance = Court.objects.create(name='MACHAKOS LAW COURT')
instance = Court.objects.create(name='MACHAKOS MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAKADARA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAKINDU MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAKUENI LAW COURT')
instance = Court.objects.create(name='MAKUENI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MALINDI LAW COURT')
instance = Court.objects.create(name='MALINDI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MANDERA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARALAL MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARIAKANI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARIMANTI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARSABIT LAW COURT')
instance = Court.objects.create(name='MARSABIT MAGISTRATES\' COURT')
instance = Court.objects.create(name='MASENO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAUA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAVOKO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MERU LAW COURT')
instance = Court.objects.create(name='MERU MAGISTRATES\' COURT')
instance = Court.objects.create(name='MIGORI LAW COURT')
instance = Court.objects.create(name='MIGORI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MILIMANI COMMERCIAL COURT MAGISTRATES\' COURT')
instance = Court.objects.create(name='MILIMANI LAW COURT')
instance = Court.objects.create(name='MILIMANI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MOLO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MOMBASA LAW COURT')
instance = Court.objects.create(name='MOMBASA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MOYALE MAGISTRATES\' COURT')
instance = Court.objects.create(name='MUKURWEINI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MUMIAS MAGISTRATES\' COURT')
instance = Court.objects.create(name='MURANG’A LAW COURT')
instance = Court.objects.create(name='MURANG’A MAGISTRATES\' COURT')
instance = Court.objects.create(name='MUTOMO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MWINGI MAGISTRATES\' COURT')
instance = Court.objects.create(name='NAIVASHA LAW COURT')
instance = Court.objects.create(name='NAIVASHA MAGISTRATES\' COURT')
instance = Court.objects.create(name='NAKURU LAW COURT')
instance = Court.objects.create(name='NAKURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='NANYUKI LAW COURT')
instance = Court.objects.create(name='NANYUKI MAGISTRATES\' COURT')
instance = Court.objects.create(name='NAROK LAW COURT')
instance = Court.objects.create(name='NAROK MAGISTRATES\' COURT')
instance = Court.objects.create(name='NDHIWA MAGISTRATES\' COURT')
instance = Court.objects.create(name='NKUBU MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYAHURURU LAW COURT')
instance = Court.objects.create(name='NYAHURURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYAMIRA LAW COURT')
instance = Court.objects.create(name='NYAMIRA MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYANDO MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYERI LAW COURT')
instance = Court.objects.create(name='NYERI MAGISTRATES\' COURT')
instance = Court.objects.create(name='OGEMBO MAGISTRATES\' COURT')
instance = Court.objects.create(name='OTHAYA MAGISTRATES\' COURT')
instance = Court.objects.create(name='OYUGIS MAGISTRATES\' COURT')
instance = Court.objects.create(name='RONGO MAGISTRATES\' COURT')
instance = Court.objects.create(name='RUNYENJES MAGISTRATES\' COURT')
instance = Court.objects.create(name='SHANZU MAGISTRATES\' COURT')
instance = Court.objects.create(name='SIAKAGO MAGISTRATES\' COURT')
instance = Court.objects.create(name='SIAYA LAW COURT')
instance = Court.objects.create(name='SIAYA MAGISTRATES\' COURT')
instance = Court.objects.create(name='SIRISIA MAGISTRATES\' COURT')
instance = Court.objects.create(name='SOTIK MAGISTRATES\' COURT')
instance = Court.objects.create(name='TAMU MAGISTRATES\' COURT')
instance = Court.objects.create(name='TAVETA MAGISTRATES\' COURT')
instance = Court.objects.create(name='TAWA MAGISTRATES\' COURT')
instance = Court.objects.create(name='THIKA MAGISTRATES\' COURT')
instance = Court.objects.create(name='TIGANIA MAGISTRATES\' COURT')
instance = Court.objects.create(name='UKWALA MAGISTRATES\' COURT')
instance = Court.objects.create(name='VIHIGA MAGISTRATES\' COURT')
instance = Court.objects.create(name='VOI LAW COURT')
instance = Court.objects.create(name='VOI MAGISTRATES\' COURT')
instance = Court.objects.create(name='WAJIR MAGISTRATES\' COURT')
instance = Court.objects.create(name='WANGURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='WINAM MAGISTRATES\' COURT')
instance = Court.objects.create(name='WUNDANYI MAGISTRATES\' COURT')
#prisons
instance = Prison.objects.create(name='ATHI RIVER PRISON')
instance = Prison.objects.create(name='BOMET PRISON')
instance = Prison.objects.create(name='BUNGOMA')
instance = Prison.objects.create(name='BUSIA MAIN')
instance = Prison.objects.create(name='CHUKA')
instance = Prison.objects.create(name='ELDAMA RAVINE')
instance = Prison.objects.create(name='ELDORET MAIN PRISON')
instance = Prison.objects.create(name='ELDORET WOMEN PRISON')
instance = Prison.objects.create(name='EMBU MAIN')
instance = Prison.objects.create(name='EMBU WOMEN')
instance = Prison.objects.create(name='GARISSA MAIN')
instance = Prison.objects.create(name='GARISSA MEDIUM')
instance = Prison.objects.create(name='HINDI')
instance = Prison.objects.create(name='HOLA')
instance = Prison.objects.create(name='HOMABAY')
instance = Prison.objects.create(name='ISIOLO')
instance = Prison.objects.create(name='JAMUHURI PRISON')
instance = Prison.objects.create(name='KABARNET')
instance = Prison.objects.create(name='KAJIADO MAIN PRISON')
instance = Prison.objects.create(name='KAKAMEGA MAIN')
instance = Prison.objects.create(name='KAKAMEGA WOMEN')
instance = Prison.objects.create(name='KALOLENI')
instance = Prison.objects.create(name='KAMAE GIRLS PRISON')
instance = Prison.objects.create(name='KAMITI MAXIMUM SECURITY PRISON')
instance = Prison.objects.create(name='KAMITI MEDIUM PRISON')
instance = Prison.objects.create(name='KAMITI YCTC')
instance = Prison.objects.create(name='KANGETA')
instance = Prison.objects.create(name='KAPENGURIA PRISON')
instance = Prison.objects.create(name='KAPSABET')
instance = Prison.objects.create(name='KEHANCHA')
instance = Prison.objects.create(name='KERICHO MAIN')
instance = Prison.objects.create(name='KERICHO MEDIUM')
instance = Prison.objects.create(name='KERICHO WOMEN')
instance = Prison.objects.create(name='KERUGOYA PRISON')
instance = Prison.objects.create(name='KIAMBU PRISON')
instance = Prison.objects.create(name='KIBOS MAIN')
instance = Prison.objects.create(name='KIBOS MEDIUM')
instance = Prison.objects.create(name='KILGORIS')
instance = Prison.objects.create(name='KILIFI')
instance = Prison.objects.create(name='KING\'ORANI')
instance = Prison.objects.create(name='KISII MAIN')
instance = Prison.objects.create(name='KISII WOMEN')
instance = Prison.objects.create(name='KISUMU MAIN')
instance = Prison.objects.create(name='KISUMU MEDIUM')
instance = Prison.objects.create(name='KISUMU WOMEN')
instance = Prison.objects.create(name='KITALE ANNEXE')
instance = Prison.objects.create(name='KITALE MAIN')
instance = Prison.objects.create(name='KITALE MEDIUM')
instance = Prison.objects.create(name='KITALE WOMEN')
instance = Prison.objects.create(name='KITUI MAIN')
instance = Prison.objects.create(name='KITUI WOMEN')
instance = Prison.objects.create(name='KWALE MAIN')
instance = Prison.objects.create(name='KWALE WOMEN')
instance = Prison.objects.create(name='LANGATA WOMEN MAXIMUM PRISON')
instance = Prison.objects.create(name='LODWAR')
instance = Prison.objects.create(name='LOITOKTOK PRISON')
instance = Prison.objects.create(name='MACHAKOS MAIN')
instance = Prison.objects.create(name='MACHAKOS WOMEN')
instance = Prison.objects.create(name='MAKUENI REMAND')
instance = Prison.objects.create(name='MALINDI MAIN')
instance = Prison.objects.create(name='MALINDI WOMEN')
instance = Prison.objects.create(name='MANDERA')
instance = Prison.objects.create(name='MANYANI')
instance = Prison.objects.create(name='MARA')
instance = Prison.objects.create(name='MARALAL')
instance = Prison.objects.create(name='MARANJAU PRISON')
instance = Prison.objects.create(name='MARIMATI')
instance = Prison.objects.create(name='MARSABIT')
    instance = Prison.objects.create(name='MAKUENI MAIN')
instance = Prison.objects.create(name='MERU MAIN')
instance = Prison.objects.create(name='MERU WOMEN')
instance = Prison.objects.create(name='MIGORI MAIN')
instance = Prison.objects.create(name='MIGORI WOMEN')
instance = Prison.objects.create(name='MOYALE')
    instance = Prison.objects.create(name='MURANGA MAIN PRISON')
instance = Prison.objects.create(name='MURANGA WOMEN PRISON')
instance = Prison.objects.create(name='MUTOMO')
instance = Prison.objects.create(name='MWEA MAIN PRISON')
instance = Prison.objects.create(name='MWINGI')
instance = Prison.objects.create(name='NAIROBI MEDIUM PRISON')
instance = Prison.objects.create(name='NAIROBI REMAND AND ALLOCATION MAXIMUM PRISON')
instance = Prison.objects.create(name='NAIROBI WEST PRISON')
instance = Prison.objects.create(name='NAIVASHA MAXIMUM PRISON')
instance = Prison.objects.create(name='NAIVASHA MEDIUM PRISON')
instance = Prison.objects.create(name='NAIVASHA WOMEN PRISON')
instance = Prison.objects.create(name='NAKURU MAIN PRISON')
instance = Prison.objects.create(name='NAKURU WOMEN PRISON')
instance = Prison.objects.create(name='NANYUKI')
instance = Prison.objects.create(name='NAROK')
instance = Prison.objects.create(name='NGERIA FARM')
instance = Prison.objects.create(name='NYAMIRA')
instance = Prison.objects.create(name='NYANDARUA MAIN PRISON')
instance = Prison.objects.create(name='NYERI MAIN MAXIMUM PRISON')
instance = Prison.objects.create(name='NYERI MEDIUM PRISON')
instance = Prison.objects.create(name='NYERI WOMEN PRISON')
instance = Prison.objects.create(name='RACHUONYO')
instance = Prison.objects.create(name='RC EASTERN')
instance = Prison.objects.create(name='RUIRU PRISON')
instance = Prison.objects.create(name='RUMURUTI')
instance = Prison.objects.create(name='SHIKUSA B.I')
instance = Prison.objects.create(name='SHIKUSA FARM')
instance = Prison.objects.create(name='SHIMO B.I')
instance = Prison.objects.create(name='SHIMO MAIN')
instance = Prison.objects.create(name='SHIMO MEDIUM')
instance = Prison.objects.create(name='SHIMO WOMEN')
instance = Prison.objects.create(name='SIAYA')
instance = Prison.objects.create(name='SOTIK')
instance = Prison.objects.create(name='T/FALL WOMEN PRISON')
instance = Prison.objects.create(name='T/FALLS MAIN PRISON')
instance = Prison.objects.create(name='TAMBACH')
instance = Prison.objects.create(name='TAVETA')
instance = Prison.objects.create(name='THIKA MAIN PRISON')
instance = Prison.objects.create(name='THIKA WOMEN PRISON')
instance = Prison.objects.create(name='URUKU')
instance = Prison.objects.create(name='VIHIGA')
instance = Prison.objects.create(name='VOI')
instance = Prison.objects.create(name='WAJIR')
instance = Prison.objects.create(name='WUNDANYI')
instance = Prison.objects.create(name='YATTA')
#add few offences
instance = Offence.objects.create(name='Assault')
instance = Offence.objects.create(name='Handling of stolen goods')
    instance = Offence.objects.create(name='Grievous harm')
instance = Offence.objects.create(name='Attempted defilement')
instance = Offence.objects.create(name='Robbery with violence contrary to section 296(2) of the Penal Code')
instance = Offence.objects.create(name='Murder')
instance = Offence.objects.create(name='Robbery')
instance = Offence.objects.create(name='Manslaughter')
instance = Offence.objects.create(name='Defilement')
instance = Offence.objects.create(name='Rape')
instance = Offence.objects.create(name='Attempted Rape')
instance = Offence.objects.create(name='Attempted Robbery With Violence')
class Migration(migrations.Migration):
dependencies = [
('petitions', '0001_initial'),
]
operations = [
migrations.RunPython(add_initial_data),
]
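# A hedged note, not part of the generated migration: RunPython without a
# reverse callable makes this migration irreversible. If rollbacks are
# wanted, Django's built-in no-op reverse can be supplied instead:
#
#     operations = [
#         migrations.RunPython(add_initial_data, migrations.RunPython.noop),
#     ]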
| 12
| 0
| 0
| 158
| 0
| 38,352
| 0
| 11
| 69
|