Dataset columns (name: type, min to max; ⌀ marks columns that contain nulls):
hexsha: stringlengths 40 to 40
size: int64 6 to 1.04M
ext: stringclasses 10 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 4 to 247
max_stars_repo_name: stringlengths 4 to 130
max_stars_repo_head_hexsha: stringlengths 40 to 78
max_stars_repo_licenses: listlengths 1 to 10
max_stars_count: int64 1 to 368k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24 to 24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24 to 24 ⌀
max_issues_repo_path: stringlengths 4 to 247
max_issues_repo_name: stringlengths 4 to 130
max_issues_repo_head_hexsha: stringlengths 40 to 78
max_issues_repo_licenses: listlengths 1 to 10
max_issues_count: int64 1 to 116k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24 to 24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24 to 24 ⌀
max_forks_repo_path: stringlengths 4 to 247
max_forks_repo_name: stringlengths 4 to 130
max_forks_repo_head_hexsha: stringlengths 40 to 78
max_forks_repo_licenses: listlengths 1 to 10
max_forks_count: int64 1 to 105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24 to 24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24 to 24 ⌀
content: stringlengths 1 to 1.04M
avg_line_length: float64 1.53 to 618k
max_line_length: int64 1 to 1.02M
alphanum_fraction: float64 0 to 1
original_content: stringlengths 6 to 1.04M
filtered:remove_non_ascii: int64 0 to 538k
filtered:remove_decorators: int64 0 to 917k
filtered:remove_async: int64 0 to 722k
filtered:remove_classes: int64 -45 to 1M
filtered:remove_generators: int64 0 to 814k
filtered:remove_function_no_docstring: int64 -102 to 850k
filtered:remove_class_no_docstring: int64 -3 to 5.46k
filtered:remove_unused_imports: int64 -1,350 to 52.4k
filtered:remove_delete_markers: int64 0 to 59.6k
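
The records below follow this schema, one source file per record. As a rough illustration only, here is a minimal Python sketch of how a single record could be inspected if it were exposed as a plain dict keyed by the column names above. The dict layout, the helper name summarize_record, and the reading of the filtered:* values as character counts removed by each filter are assumptions made for this sketch, not facts stated by the dump itself.

# Minimal sketch (assumptions noted above): summarise one record of this dataset.
def summarize_record(record: dict) -> str:
    licenses = ", ".join(record["max_stars_repo_licenses"])
    # Assumption: each filtered:* field counts characters removed by that filter.
    filters = {k: v for k, v in record.items() if k.startswith("filtered:")}
    removed = sum(filters.values())
    active = sum(1 for v in filters.values() if v)
    return (f"{record['max_stars_repo_path']} (size {record['size']}, licenses: {licenses}): "
            f"{removed} characters removed by {active} filters")

# Illustrative call with values abbreviated from Row 1 below:
print(summarize_record({
    "max_stars_repo_path": "cornflow-server/cornflow/tests/integration/test_cornflowclient.py",
    "size": 16321,
    "max_stars_repo_licenses": ["Apache-2.0"],
    "filtered:remove_classes": 15600,
    "filtered:remove_function_no_docstring": 71,
    "filtered:remove_unused_imports": 231,
    "filtered:remove_delete_markers": 335,
}))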

Row 1:
hexsha: 64939646e87c224c58ffcfd8cf705c67c41ee7e3 | size: 16,321 | ext: py | lang: Python
max_stars_repo_path: cornflow-server/cornflow/tests/integration/test_cornflowclient.py | max_stars_repo_name: baobabsoluciones/cornflow | max_stars_repo_head_hexsha: bd7cae22107e5fe148704d5f41d4f58f9c410b40 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2022-03-30T16:44:50.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-30T16:44:50.000Z
max_issues_repo_path: cornflow-server/cornflow/tests/integration/test_cornflowclient.py | max_issues_repo_name: baobabsoluciones/cornflow | max_issues_repo_head_hexsha: bd7cae22107e5fe148704d5f41d4f58f9c410b40 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2022-03-31T08:42:10.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-31T12:05:23.000Z
max_forks_repo_path: cornflow-server/cornflow/tests/integration/test_cornflowclient.py | max_forks_repo_name: baobabsoluciones/cornflow | max_forks_repo_head_hexsha: bd7cae22107e5fe148704d5f41d4f58f9c410b40 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
"""
# Full imports
# Imports from environment
# Import internal modules
avg_line_length: 40.298765 | max_line_length: 85 | alphanum_fraction: 0.658477
original_content:
"""
"""
# Full imports
import json
import pulp
import logging as log
import time
# Imports from environment
from cornflow_client import CornFlowApiError
from cornflow_client.constants import INSTANCE_SCHEMA, SOLUTION_SCHEMA
# Import internal modules
from cornflow.app import create_app
from cornflow.shared.const import (
EXEC_STATE_CORRECT,
EXEC_STATE_STOPPED,
EXEC_STATE_RUNNING,
STATUS_HEALTHY,
)
from cornflow.tests.const import INSTANCE_PATH
from cornflow.tests.custom_liveServer import CustomTestCaseLive
def load_file(_file):
with open(_file) as f:
temp = json.load(f)
return temp
class TestCornflowClientBasic(CustomTestCaseLive):
def setUp(self, create_all=False):
super().setUp()
self.items_to_check = ["name", "description"]
def create_new_instance_file(self, mps_file):
name = "test_instance1"
description = "description123"
response = self.client.create_instance_file(
filename=mps_file, name=name, description=description, minimize=True
)
self.assertTrue("id" in response)
instance = self.client.get_one_instance(response["id"])
log.debug("Got instance with id: {}".format(instance["id"]))
# row = InstanceModel.query.get(response['id'])
self.assertEqual(instance["id"], response["id"])
self.assertEqual(instance["name"], name)
self.assertEqual(instance["description"], description)
payload = pulp.LpProblem.fromMPS(mps_file, sense=1)[1].toDict()
instance_data = self.client.get_api_for_id(
"instance", response["id"], "data"
).json()
self.assertEqual(instance_data["data"], payload)
log.debug("validated instance data")
return instance
def create_new_instance(self, mps_file):
name = "test_instance1"
description = "description123"
data = pulp.LpProblem.fromMPS(mps_file, sense=1)[1].toDict()
schema = "solve_model_dag"
payload = dict(data=data, name=name, description=description, schema=schema)
return self.create_new_instance_payload(payload)
def create_new_instance_payload(self, payload):
response = self.client.create_instance(**payload)
log.debug("Created instance with id: {}".format(response["id"]))
self.assertTrue("id" in response)
instance = self.client.get_one_instance(response["id"])
log.debug("Instance with id={} exists in server".format(instance["id"]))
self.assertEqual(instance["id"], response["id"])
self.assertEqual(instance["name"], payload["name"])
self.assertEqual(instance["description"], payload["description"])
instance_data = self.client.get_api_for_id(
"instance", response["id"], "data"
).json()
self.assertEqual(instance_data["data"], payload["data"])
return instance
def create_new_case_payload(self, payload):
response = self.client.create_case(**payload)
self.assertTrue("id" in response)
log.debug("Created case with id: {}".format(response["id"]))
case = self.client.get_one_case(response["id"])
log.debug("Case with id={} exists in server".format(case["id"]))
self.assertEqual(case["id"], response["id"])
self.assertEqual(case["name"], payload["name"])
self.assertEqual(case["description"], payload["description"])
case_data = self.client.get_api_for_id("case", response["id"], "data").json()
self.assertEqual(case_data["data"], payload["data"])
return case
def create_new_execution(self, payload):
response = self.client.create_execution(**payload)
log.debug("Created execution with id={}".format(response["id"]))
self.assertTrue("id" in response)
execution = self.client.get_results(response["id"])
log.debug("Execution with id={} exists in server".format(execution["id"]))
self.assertEqual(execution["id"], response["id"])
for item in self.items_to_check:
self.assertEqual(execution[item], payload[item])
response = self.client.get_status(response["id"])
self.assertTrue("state" in response)
log.debug("Execution has state={} in server".format(response["state"]))
return execution
def create_instance_and_execution(self):
one_instance = self.create_new_instance("./cornflow/tests/data/test_mps.mps")
name = "test_execution_name_123"
description = "test_execution_description_123"
schema = "solve_model_dag"
payload = dict(
instance_id=one_instance["id"],
config=dict(solver="PULP_CBC_CMD", timeLimit=10),
description=description,
name=name,
schema=schema,
)
return self.create_new_execution(payload)
def create_timer_instance_and_execution(self, seconds=5):
payload = dict(
data=dict(seconds=seconds),
name="timer_instance",
schema="timer",
description="timer_description",
)
one_instance = self.create_new_instance_payload(payload)
payload = dict(
instance_id=one_instance["id"],
config=dict(timeLimit=seconds, solver="default"),
name="timer_execution",
description="timer_exec_description",
schema="timer",
)
return self.create_new_execution(payload)
class TestCornflowClientOpen(TestCornflowClientBasic):
# TODO: user management
# TODO: infeasible execution
def test_new_instance_file(self):
self.create_new_instance_file("./cornflow/tests/data/test_mps.mps")
def test_new_instance(self):
return self.create_new_instance("./cornflow/tests/data/test_mps.mps")
# TODO: reactivate test with new version of cornflow client which allows to pass
# optional arguments for the headers of the request
# def test_get_instance__data(self):
# instance = self.create_new_instance("./cornflow/tests/data/test_mps.mps")
# response = self.client.get_api_for_id(
# "instance", instance["id"], "data", encoding="gzip"
# )
# self.assertEqual(response.headers["Content-Encoding"], "gzip")
def test_delete_instance(self):
instance = self.test_new_instance()
response = self.client.get_api_for_id("instance", instance["id"])
self.assertEqual(200, response.status_code)
response = self.client.delete_api_for_id("instance", instance["id"])
self.assertEqual(200, response.status_code)
response = self.client.get_api_for_id("instance", instance["id"])
self.assertEqual(404, response.status_code)
def test_new_execution(self):
return self.create_instance_and_execution()
def test_delete_execution(self):
execution = self.test_new_execution()
response = self.client.get_api_for_id("execution/", execution["id"])
self.assertEqual(200, response.status_code)
response = self.client.delete_api_for_id("execution/", execution["id"])
self.assertEqual(200, response.status_code)
response = self.client.get_api_for_id("execution/", execution["id"])
self.assertEqual(404, response.status_code)
def test_get_dag_schema_good(self):
response = self.client.get_schema("solve_model_dag")
for sch in [INSTANCE_SCHEMA, SOLUTION_SCHEMA]:
content = response[sch]
self.assertTrue("properties" in content)
def test_get_all_schemas(self):
response = self.client.get_all_schemas()
self.assertIn({"name": "solve_model_dag"}, response)
def test_get_dag_schema_no_schema(self):
response = self.client.get_schema("this_dag_does_not_exist")
self.assertTrue("error" in response)
def test_new_execution_bad_dag_name(self):
one_instance = self.create_new_instance("./cornflow/tests/data/test_mps.mps")
name = "test_execution_name_123"
description = "test_execution_description_123"
payload = dict(
instance_id=one_instance["id"],
config=dict(solver="PULP_CBC_CMD", timeLimit=10),
description=description,
name=name,
schema="solve_model_dag_bad_this_does_not_exist",
)
_bad_func = lambda: self.client.create_execution(**payload)
self.assertRaises(CornFlowApiError, _bad_func)
def test_new_execution_with_schema(self):
one_instance = self.create_new_instance("./cornflow/tests/data/test_mps.mps")
name = "test_execution_name_123"
description = "test_execution_description_123"
payload = dict(
instance_id=one_instance["id"],
config=dict(solver="PULP_CBC_CMD", timeLimit=10),
description=description,
name=name,
schema="solve_model_dag",
)
return self.create_new_execution(payload)
def test_new_instance_with_default_schema_bad(self):
payload = load_file(INSTANCE_PATH)
payload["data"].pop("objective")
_error_fun = lambda: self.client.create_instance(**payload)
self.assertRaises(CornFlowApiError, _error_fun)
def test_new_instance_with_schema_bad(self):
payload = load_file(INSTANCE_PATH)
payload["data"].pop("objective")
payload["schema"] = "solve_model_dag"
_error_fun = lambda: self.client.create_instance(**payload)
self.assertRaises(CornFlowApiError, _error_fun)
def test_new_instance_with_schema_additional_data(self):
payload = load_file(INSTANCE_PATH)
payload["data"]["objective"]["inexistant_property"] = 1
payload["schema"] = "solve_model_dag"
self.client.create_instance(**payload)
def test_new_instance_with_schema_good(self):
payload = load_file(INSTANCE_PATH)
payload["schema"] = "solve_model_dag"
self.create_new_instance_payload(payload)
def test_new_case_without_parent(self):
payload = load_file(INSTANCE_PATH)
self.create_new_case_payload(payload)
def test_new_case_with_parent(self):
payload = load_file(INSTANCE_PATH)
payload_dir = dict(payload)
payload_dir.pop("data")
response = self.client.create_case(**payload_dir)
payload["parent_id"] = response["id"]
case2 = self.create_new_case_payload(payload)
self.assertEqual(case2["path"], "{}/".format(response["id"]))
def test_server_alive(self):
data = self.client.is_alive()
cf_status = data["cornflow_status"]
af_status = data["airflow_status"]
self.assertEqual(str, type(cf_status))
self.assertEqual(str, type(af_status))
self.assertEqual(cf_status, STATUS_HEALTHY)
self.assertEqual(af_status, STATUS_HEALTHY)
class TestCornflowClientNotOpen(TestCornflowClientBasic):
def create_app(self):
app = create_app("testing")
app.config["LIVESERVER_PORT"] = 5050
app.config["OPEN_DEPLOYMENT"] = 0
return app
def test_get_all_schemas(self):
response = self.client.get_all_schemas()
self.assertEqual([], response)
def test_get_one_schema(self):
response = self.client.get_schema("solve_model_dag")
self.assertEqual(
{"error": "User does not have permission to access this dag"}, response
)
class TestCornflowClientAdmin(TestCornflowClientBasic):
def setUp(self, create_all=False):
super().setUp()
# we create a service user:
self.create_service_user(
dict(username="airflow", pwd="Airflow_test_password1", email="[email protected]")
)
self.create_service_user(
dict(
username="[email protected]",
pwd="Serviceuser_1234",
email="[email protected]",
)
)
# we create an admin user
# we guarantee that the admin is there for airflow
self.client.token = self.create_admin(
dict(
username="[email protected]",
email="[email protected]",
pwd="Airflow_test_password1",
)
)
def test_solve_and_wait(self):
execution = self.create_instance_and_execution()
time.sleep(15)
status = self.client.get_status(execution["id"])
results = self.client.get_results(execution["id"])
self.assertEqual(status["state"], EXEC_STATE_CORRECT)
self.assertEqual(results["state"], EXEC_STATE_CORRECT)
def test_interrupt(self):
execution = self.create_timer_instance_and_execution(5)
self.client.stop_execution(execution_id=execution["id"])
time.sleep(2)
status = self.client.get_status(execution["id"])
results = self.client.get_results(execution["id"])
self.assertEqual(status["state"], EXEC_STATE_STOPPED)
self.assertEqual(results["state"], EXEC_STATE_STOPPED)
def test_status_solving(self):
execution = self.create_instance_and_execution()
time.sleep(2)
status = self.client.get_status(execution["id"])
self.assertEqual(status["state"], EXEC_STATE_RUNNING)
def test_status_solving_timer(self):
execution = self.create_timer_instance_and_execution(10)
time.sleep(5)
status = self.client.get_status(execution["id"])
self.assertEqual(status["state"], EXEC_STATE_RUNNING)
def test_manual_execution(self):
instance_payload = load_file(INSTANCE_PATH)
one_instance = self.create_new_instance_payload(instance_payload)
name = "test_execution_name_123"
description = "test_execution_description_123"
# for the solution we can use the same standard than the instance data
payload = dict(
instance_id=one_instance["id"],
config=dict(solver="PULP_CBC_CMD", timeLimit=10),
description=description,
name=name,
data=instance_payload["data"],
schema="solve_model_dag",
)
response = self.client.manual_execution(**payload)
execution = self.client.get_results(response["id"])
self.assertEqual(execution["id"], response["id"])
for item in ["config", "description", "name"]:
self.assertEqual(execution[item], payload[item])
response = self.client.get_status(response["id"])
self.assertTrue("state" in response)
execution_data = self.client.get_solution(response["id"])
self.assertEqual(execution_data["data"], payload["data"])
def test_manual_execution_2(self):
instance_payload = load_file(INSTANCE_PATH)
one_instance = self.create_new_instance_payload(instance_payload)
name = "test_execution_name_123"
description = "test_execution_description_123"
payload = dict(
instance_id=one_instance["id"],
config=dict(solver="PULP_CBC_CMD", timeLimit=10),
description=description,
name=name,
schema="solve_model_dag",
)
response = self.client.manual_execution(**payload)
execution = self.client.get_results(response["id"])
self.assertEqual(execution["id"], response["id"])
for item in ["config", "description", "name"]:
self.assertEqual(execution[item], payload[item])
response = self.client.get_status(response["id"])
self.assertTrue("state" in response)
execution_data = self.client.get_solution(response["id"])
self.assertIsNone(execution_data["data"])
def test_edit_one_execution(self):
one_instance = self.create_new_instance("./cornflow/tests/data/test_mps.mps")
payload = dict(
name="bla",
config=dict(solver="CBC"),
instance_id=one_instance["id"],
schema="solve_model_dag",
)
execution = self.client.create_api("execution/?run=0", json=payload)
payload = dict(log_text="")
response = self.client.put_api_for_id(
api="dag/", id=execution.json()["id"], payload=payload
)
self.assertEqual(response.status_code, 200)
filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 15,600 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 71 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 231 | filtered:remove_delete_markers: 335

Row 2:
hexsha: c99d93e1bf1ed54873350ee1c6e4595ebffd5172 | size: 594 | ext: py | lang: Python
max_stars_repo_path: code/lufangxiao/contrastive_methods/DiResNet_model/__init__.py | max_stars_repo_name: xueruoyao/FCN-pytorch | max_stars_repo_head_hexsha: a5019da3943f47fa4f7baed3640cdbfeae2d677e | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2021-12-20T07:20:25.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-20T07:20:25.000Z
max_issues_repo_path: code/lufangxiao/contrastive_methods/DiResNet_model/__init__.py | max_issues_repo_name: xueruoyao/FCN-pytorch | max_issues_repo_head_hexsha: a5019da3943f47fa4f7baed3640cdbfeae2d677e | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: code/lufangxiao/contrastive_methods/DiResNet_model/__init__.py | max_forks_repo_name: xueruoyao/FCN-pytorch | max_forks_repo_head_hexsha: a5019da3943f47fa4f7baed3640cdbfeae2d677e | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import torch.nn as nn
avg_line_length: 42.428571 | max_line_length: 110 | alphanum_fraction: 0.80303
original_content:
from torchvision.models import resnet
from .DirectionNet import DirectionNet
from .DiResNet import FCN_Ref
import torch
import torch.nn as nn
def build_model(in_channels=3, num_classes=1, pretrained=True):
return FCN_Ref(in_channels=in_channels, num_classes=num_classes, pretrained=pretrained)
def build_aux_part(in_channels=1, rwidth=7, range_detect=9, rescale=False):
dir_net = DirectionNet(in_channels=in_channels, rwidth=rwidth, range_detect=range_detect, rescale=rescale)
struc_loss = nn.MSELoss()
dir_loss = nn.CrossEntropyLoss()
return dir_net, struc_loss, dir_loss
filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 407 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 32 | filtered:remove_delete_markers: 134

Row 3:
hexsha: 32c510321206b11b0e0f4d12dc5852d212e9a734 | size: 4,668 | ext: py | lang: Python
max_stars_repo_path: pysnmp/INTEL-IP-MULTICAST-ROUTER-MIB.py | max_stars_repo_name: agustinhenze/mibs.snmplabs.com | max_stars_repo_head_hexsha: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 11 | max_stars_repo_stars_event_min_datetime: 2021-02-02T16:27:16.000Z | max_stars_repo_stars_event_max_datetime: 2021-08-31T06:22:49.000Z
max_issues_repo_path: pysnmp/INTEL-IP-MULTICAST-ROUTER-MIB.py | max_issues_repo_name: agustinhenze/mibs.snmplabs.com | max_issues_repo_head_hexsha: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 75 | max_issues_repo_issues_event_min_datetime: 2021-02-24T17:30:31.000Z | max_issues_repo_issues_event_max_datetime: 2021-12-08T00:01:18.000Z
max_forks_repo_path: pysnmp/INTEL-IP-MULTICAST-ROUTER-MIB.py | max_forks_repo_name: agustinhenze/mibs.snmplabs.com | max_forks_repo_head_hexsha: 1fc5c07860542b89212f4c8ab807057d9a9206c7 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 10 | max_forks_repo_forks_event_min_datetime: 2019-04-30T05:51:36.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-16T03:33:41.000Z
content:
#
# PySNMP MIB module INTEL-IP-MULTICAST-ROUTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INTEL-IP-MULTICAST-ROUTER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:43:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint")
mib2ext, = mibBuilder.importSymbols("INTEL-GEN-MIB", "mib2ext")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Bits, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, ModuleIdentity, IpAddress, Gauge32, Unsigned32, Counter32, TimeTicks, NotificationType, iso, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "ModuleIdentity", "IpAddress", "Gauge32", "Unsigned32", "Counter32", "TimeTicks", "NotificationType", "iso", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ipmrouter = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 32))
conf = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 32, 1))
confMaxDvmrpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confMaxDvmrpRoutes.setStatus('mandatory')
confIfTable = MibTable((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2), )
if mibBuilder.loadTexts: confIfTable.setStatus('mandatory')
confIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1), ).setIndexNames((0, "INTEL-IP-MULTICAST-ROUTER-MIB", "confIfIndex"))
if mibBuilder.loadTexts: confIfEntry.setStatus('mandatory')
confIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: confIfIndex.setStatus('mandatory')
confIfMCRouteProto = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfMCRouteProto.setStatus('mandatory')
confIfIgmpQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfIgmpQueryInterval.setStatus('mandatory')
confIfIgmpRobustness = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfIgmpRobustness.setStatus('mandatory')
confIfDvmrpMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfDvmrpMetric.setStatus('mandatory')
confIfDvmrpUnreachableMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfDvmrpUnreachableMetric.setStatus('mandatory')
confIfCreateObj = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(11, 11)).setFixedLength(11)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfCreateObj.setStatus('mandatory')
confIfDeleteObj = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("delete", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfDeleteObj.setStatus('mandatory')
mibBuilder.exportSymbols("INTEL-IP-MULTICAST-ROUTER-MIB", confIfDvmrpUnreachableMetric=confIfDvmrpUnreachableMetric, confIfCreateObj=confIfCreateObj, confIfMCRouteProto=confIfMCRouteProto, confIfDeleteObj=confIfDeleteObj, ipmrouter=ipmrouter, confIfTable=confIfTable, confMaxDvmrpRoutes=confMaxDvmrpRoutes, conf=conf, confIfEntry=confIfEntry, confIfIgmpQueryInterval=confIfIgmpQueryInterval, confIfIgmpRobustness=confIfIgmpRobustness, confIfDvmrpMetric=confIfDvmrpMetric, confIfIndex=confIfIndex)
avg_line_length: 116.7 | max_line_length: 496 | alphanum_fraction: 0.76928
original_content:
#
# PySNMP MIB module INTEL-IP-MULTICAST-ROUTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INTEL-IP-MULTICAST-ROUTER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:43:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint")
mib2ext, = mibBuilder.importSymbols("INTEL-GEN-MIB", "mib2ext")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Bits, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, ModuleIdentity, IpAddress, Gauge32, Unsigned32, Counter32, TimeTicks, NotificationType, iso, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "ModuleIdentity", "IpAddress", "Gauge32", "Unsigned32", "Counter32", "TimeTicks", "NotificationType", "iso", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ipmrouter = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 32))
conf = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 32, 1))
confMaxDvmrpRoutes = MibScalar((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confMaxDvmrpRoutes.setStatus('mandatory')
confIfTable = MibTable((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2), )
if mibBuilder.loadTexts: confIfTable.setStatus('mandatory')
confIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1), ).setIndexNames((0, "INTEL-IP-MULTICAST-ROUTER-MIB", "confIfIndex"))
if mibBuilder.loadTexts: confIfEntry.setStatus('mandatory')
confIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: confIfIndex.setStatus('mandatory')
confIfMCRouteProto = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfMCRouteProto.setStatus('mandatory')
confIfIgmpQueryInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfIgmpQueryInterval.setStatus('mandatory')
confIfIgmpRobustness = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfIgmpRobustness.setStatus('mandatory')
confIfDvmrpMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfDvmrpMetric.setStatus('mandatory')
confIfDvmrpUnreachableMetric = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfDvmrpUnreachableMetric.setStatus('mandatory')
confIfCreateObj = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(11, 11)).setFixedLength(11)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfCreateObj.setStatus('mandatory')
confIfDeleteObj = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 32, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1))).clone(namedValues=NamedValues(("delete", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: confIfDeleteObj.setStatus('mandatory')
mibBuilder.exportSymbols("INTEL-IP-MULTICAST-ROUTER-MIB", confIfDvmrpUnreachableMetric=confIfDvmrpUnreachableMetric, confIfCreateObj=confIfCreateObj, confIfMCRouteProto=confIfMCRouteProto, confIfDeleteObj=confIfDeleteObj, ipmrouter=ipmrouter, confIfTable=confIfTable, confMaxDvmrpRoutes=confMaxDvmrpRoutes, conf=conf, confIfEntry=confIfEntry, confIfIgmpQueryInterval=confIfIgmpQueryInterval, confIfIgmpRobustness=confIfIgmpRobustness, confIfDvmrpMetric=confIfDvmrpMetric, confIfIndex=confIfIndex)
filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 0 | filtered:remove_delete_markers: 0

Row 4:
hexsha: adc2c97cbe4ef08ab5968eeb58293ab3cbed3fc5 | size: 1,836 | ext: py | lang: Python
max_stars_repo_path: python/day04a.py | max_stars_repo_name: hurryabit/adventofcode-2021 | max_stars_repo_head_hexsha: 3f9949895fc5541eb050b427f1432a6c91b6297e | max_stars_repo_licenses: ["Unlicense"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: python/day04a.py | max_issues_repo_name: hurryabit/adventofcode-2021 | max_issues_repo_head_hexsha: 3f9949895fc5541eb050b427f1432a6c91b6297e | max_issues_repo_licenses: ["Unlicense"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: python/day04a.py | max_forks_repo_name: hurryabit/adventofcode-2021 | max_forks_repo_head_hexsha: 3f9949895fc5541eb050b427f1432a6c91b6297e | max_forks_repo_licenses: ["Unlicense"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import io
from typing import Optional
EXAMPLE = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7"""
Board = list[list[Optional[int]]]
assert solve(io.StringIO(EXAMPLE)) == 4512
if __name__ == "__main__":
main()
avg_line_length: 23.240506 | max_line_length: 83 | alphanum_fraction: 0.601307
original_content:
import io
from collections.abc import Iterable
from typing import Optional
EXAMPLE = """7,4,9,5,11,17,23,2,0,14,21,24,10,16,13,6,15,25,12,22,18,20,8,19,3,26,1
22 13 17 11 0
8 2 23 4 24
21 9 14 16 7
6 10 3 18 5
1 12 20 15 19
3 15 0 2 22
9 18 13 17 5
19 8 7 25 23
20 11 10 24 4
14 21 16 12 6
14 21 17 24 4
10 16 15 9 19
18 8 23 26 20
22 11 13 6 5
2 0 12 3 7"""
Board = list[list[Optional[int]]]
def parse(reader: io.TextIOBase) -> tuple[list[int], list[Board]]:
numbers = list(map(int, reader.readline().strip().split(",")))
boards = []
while True:
line0 = reader.readline()
if not line0:
break
board: Board = []
for _ in range(5):
board.append(list(map(int, reader.readline().strip().split())))
boards.append(board)
return (numbers, boards)
def call(board: Board, number: int) -> bool:
def has_complete_row(iter: Iterable[Iterable[Optional[int]]]) -> bool:
return any(all(cell is None for cell in row) for row in iter)
for (i, row) in enumerate(board):
for (j, cell) in enumerate(row):
if cell == number:
board[i][j] = None
return has_complete_row(board) or has_complete_row(zip(*board))
def score(board: Board) -> int:
return sum(cell for row in board for cell in row if cell is not None)
def solve(reader: io.TextIOBase) -> Optional[int]:
(numbers, boards) = parse(reader)
for number in numbers:
for board in boards:
if call(board, number):
return score(board) * number
return None
assert solve(io.StringIO(EXAMPLE)) == 4512
def main():
with open("input/day04.txt") as file:
result = solve(file)
print(f"The final score will be {result}")
if __name__ == "__main__":
main()
filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 1,204 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 15 | filtered:remove_delete_markers: 137

Row 5:
hexsha: d8adf4335ffbc8c602a605d3ce3a80a4d9fb7c26 | size: 1,854 | ext: py | lang: Python
max_stars_repo_path: src/main/python/validate_endpoint/source.py | max_stars_repo_name: cwilloughby-bw/vra-ipam-racktables | max_stars_repo_head_hexsha: f21654030dd51e4f007fef762c0156a53e568715 | max_stars_repo_licenses: ["ECL-2.0", "Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/main/python/validate_endpoint/source.py | max_issues_repo_name: cwilloughby-bw/vra-ipam-racktables | max_issues_repo_head_hexsha: f21654030dd51e4f007fef762c0156a53e568715 | max_issues_repo_licenses: ["ECL-2.0", "Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/main/python/validate_endpoint/source.py | max_forks_repo_name: cwilloughby-bw/vra-ipam-racktables | max_forks_repo_head_hexsha: f21654030dd51e4f007fef762c0156a53e568715 | max_forks_repo_licenses: ["ECL-2.0", "Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
"""
Copyright (c) 2020 VMware, Inc.
This product is licensed to you under the Apache License, Version 2.0 (the "License").
You may not use this product except in compliance with the License.
This product may include a number of subcomponents with separate copyright notices
and license terms. Your use of these subcomponents is subject to the terms and
conditions of the subcomponent's license, as noted in the LICENSE file.
"""
'''
Example payload:
"inputs": {
"authCredentialsLink": "/core/auth/credentials/13c9cbade08950755898c4b89c4a0",
"endpointProperties": {
"hostName": "sampleipam.sof-mbu.eng.vmware.com"
}
}
'''
avg_line_length: 31.423729 | max_line_length: 100 | alphanum_fraction: 0.705502
original_content:
"""
Copyright (c) 2020 VMware, Inc.
This product is licensed to you under the Apache License, Version 2.0 (the "License").
You may not use this product except in compliance with the License.
This product may include a number of subcomponents with separate copyright notices
and license terms. Your use of these subcomponents is subject to the terms and
conditions of the subcomponent's license, as noted in the LICENSE file.
"""
import requests
from vra_ipam_utils.ipam import IPAM
from vra_ipam_utils.exceptions import InvalidCertificateException
import logging
import pymysql
'''
Example payload:
"inputs": {
"authCredentialsLink": "/core/auth/credentials/13c9cbade08950755898c4b89c4a0",
"endpointProperties": {
"hostName": "sampleipam.sof-mbu.eng.vmware.com"
}
}
'''
def handler(context, inputs):
ipam = IPAM(context, inputs)
IPAM.do_validate_endpoint = do_validate_endpoint
return ipam.validate_endpoint()
def do_validate_endpoint(self, auth_credentials, cert):
username = auth_credentials["privateKeyId"]
password = auth_credentials["privateKey"]
hostname = self.inputs["endpointProperties"]["hostName"]
databasename = self.inputs["endpointProperties"]["databaseName"]
try:
db = pymysql.connect(host=hostname,user=username,password=password,database=databasename)
cursor = db.cursor()
cursor.execute("SELECT VERSION()")
response = cursor.fetchone()
if response is not None:
return {
"message": "Validated successfully",
}
else:
raise Exception(f"Invalid response to SELECT VERSION: {str(response)}")
except Exception as e:
if "Unknown database" in str(e):
raise Exception(f"Couldn't find database {str(databasename)} on server {str(hostname)}")
raise e
filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 1,013 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 39 | filtered:remove_delete_markers: 156

Row 6:
hexsha: 95553f90167c4cf047272b85e24f8a3ed2b62aaa | size: 51 | ext: py | lang: Python
max_stars_repo_path: SPI/__init__.py | max_stars_repo_name: philwil/RPi.GPIO-PineA64 | max_stars_repo_head_hexsha: dd22ca81bd979c31e749e017962cfefd4d0e6ac4 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 58 | max_stars_repo_stars_event_min_datetime: 2016-04-22T08:13:43.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-31T05:27:10.000Z
max_issues_repo_path: SPI/__init__.py | max_issues_repo_name: philwil/RPi.GPIO-PineA64 | max_issues_repo_head_hexsha: dd22ca81bd979c31e749e017962cfefd4d0e6ac4 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 10 | max_issues_repo_issues_event_min_datetime: 2016-05-11T11:05:30.000Z | max_issues_repo_issues_event_max_datetime: 2021-08-01T12:02:17.000Z
max_forks_repo_path: SPI/__init__.py | max_forks_repo_name: philwil/RPi.GPIO-PineA64 | max_forks_repo_head_hexsha: dd22ca81bd979c31e749e017962cfefd4d0e6ac4 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 21 | max_forks_repo_forks_event_min_datetime: 2016-04-25T05:12:42.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-14T09:04:11.000Z
content:
"""
"""
VERSION = '0.6.2'
avg_line_length: 7.285714 | max_line_length: 22 | alphanum_fraction: 0.529412
original_content:
"""
"""
from RPi._SPI import *
VERSION = '0.6.2'
filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 1 | filtered:remove_delete_markers: 23

Row 7:
hexsha: 8ff67be84d9fcd15146add087969fc8f612e50cc | size: 21,656 | ext: py | lang: Python
max_stars_repo_path: qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | max_stars_repo_name: nelimee/qtoolkit | max_stars_repo_head_hexsha: 1e99bd7d3a143a327c3bb92595ea88ec12dbdb89 | max_stars_repo_licenses: ["CECILL-B"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2018-12-30T04:50:44.000Z | max_stars_repo_stars_event_max_datetime: 2019-12-25T12:26:02.000Z
max_issues_repo_path: qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | max_issues_repo_name: nelimee/qtoolkit | max_issues_repo_head_hexsha: 1e99bd7d3a143a327c3bb92595ea88ec12dbdb89 | max_issues_repo_licenses: ["CECILL-B"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: qtoolkit/data_structures/quantum_circuit/quantum_circuit.py | max_forks_repo_name: nelimee/qtoolkit | max_forks_repo_head_hexsha: 1e99bd7d3a143a327c3bb92595ea88ec12dbdb89 | max_forks_repo_licenses: ["CECILL-B"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-08-08T15:59:46.000Z | max_forks_repo_forks_event_max_datetime: 2021-08-08T15:59:46.000Z
content:
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: Adrien Suau ([email protected])
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Implementation of the :py:class:`~.QuantumCircuit` class.
The :py:class:`~.QuantumCircuit` class represents a general quantum circuit as a
Directed Acyclic Graph with possibly some multi-edges (2 edges can share the
same source **and** the same target).
"""
import typing
CircuitCostFunction = typing.Callable[[QuantumCircuit], float]
avg_line_length: 40.478505 | max_line_length: 88 | alphanum_fraction: 0.626478
original_content:
# ======================================================================
# Copyright CERFACS (October 2018)
# Contributor: Adrien Suau ([email protected])
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the
# CeCILL-B license as circulated by CEA, CNRS and INRIA at the following
# URL "http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided
# only with a limited warranty and the software's author, the holder of
# the economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards
# their requirements in conditions enabling the security of their
# systems and/or data to be ensured and, more generally, to use and
# operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
# ======================================================================
"""Implementation of the :py:class:`~.QuantumCircuit` class.
The :py:class:`~.QuantumCircuit` class represents a general quantum circuit as a
Directed Acyclic Graph with possibly some multi-edges (2 edges can share the
same source **and** the same target).
"""
import copy
import typing
import networkx as nx
import numpy
import qtoolkit.data_structures.quantum_circuit.gate_hierarchy as qgate
import qtoolkit.data_structures.quantum_circuit.quantum_operation as qop
class QuantumCircuit:
def __init__(self, qubit_number: int, cache_matrix: bool = True) -> None:
"""Initialise the :py:class:`~.QuantumCircuit` instance.
For documentation about the :py:class:`~.QuantumCircuit` internals see
the :py:mod:`.quantum_circuit.quantum_circuit` documentation.
:param qubit_number: The number of qubits the instance will acts on.
:param cache_matrix: A boolean flag indicating if the instance should
keep in memory the current value of its representing matrix or if it
should recompute this matrix at each call to
:py:attr:`.QuantumCircuit.matrix`.
"""
assert qubit_number > 0, (
"A circuit with less than 1 qubit cannot be " "created."
)
self._qubit_number = qubit_number
self._graph = nx.MultiDiGraph()
self._node_counter = 0
for qubit_id in range(qubit_number):
self._graph.add_node(self._node_counter, type="input", key=qubit_id)
self._node_counter += 1
self._last_inserted_operations = numpy.arange(qubit_number)
self._cache_matrix = cache_matrix
self._matrix = None
if self._cache_matrix:
self._matrix = numpy.identity(2 ** self._qubit_number)
def add_operation(self, operation: qop.QuantumOperation) -> None:
"""Add an operation to the circuit.
:param operation: The operation to add to the
:py:class:`~.QuantumCircuit` instance.
"""
self._check_operation(operation)
current_node_id = self._node_counter
self._graph.add_node(self._node_counter, type="op", op=operation)
self._node_counter += 1
# Create the target wire
self._create_edge(
self._last_inserted_operations[operation.target],
current_node_id,
operation.target,
)
self._last_inserted_operations[operation.target] = current_node_id
# Create the control wires
for ctrl in operation.controls:
self._create_edge(
self._last_inserted_operations[ctrl], current_node_id, ctrl
)
self._last_inserted_operations[ctrl] = current_node_id
# Compute the new matrix if needed and possible.
if self._cache_matrix:
self._matrix = self._matrix @ operation.matrix(self._qubit_number)
def apply(
self, gate: qgate.QuantumGate, target: int, controls: typing.Sequence[int] = ()
) -> None:
"""Apply a quantum operation to the circuit.
:param gate: The quantum gate to apply.
:param target: The target qubit. The quantum gate will be applied on
this qubit.
:param controls: The control qubit(s).
"""
self.add_operation(qop.QuantumOperation(gate, target, controls))
def _check_operation(self, operation: qop.QuantumOperation) -> None:
"""Check if the operation is valid. If not, raise an exception.
:param operation: The operation to check for validity.
:raise IndexError: if the qubits of the operation (target or control(s))
are not within the range of the current instance.
:raise RuntimeError: if one of the qubits on the operation (target or
control(s)) is None or if the target qubit is also listed in the
control qubit(s).
"""
if operation.target is None or any(
(ctrl is None for ctrl in operation.controls)
):
raise RuntimeError(
"At least one of the target or control qubit is None. Generic "
"QuantumOperations are not supported in a QuantumCircuit "
"instance."
)
if operation.target in operation.controls:
raise RuntimeError(
"The target qubit cannot be in the list of control qubits."
)
if operation.target >= self._qubit_number or operation.target < 0:
raise IndexError(
f"The operation's target ({operation.target}) is not valid "
f"for the current quantum circuit with {self._qubit_number} "
f"qubits."
)
for ctrl in operation.controls:
if ctrl >= self._qubit_number or ctrl < 0:
raise IndexError(
"One of the control qubit is not valid for the current "
"quantum circuit."
)
def pop(self) -> qop.QuantumOperation:
"""Deletes the last inserted operation from the instance and returns it.
:return: The last inserted operation.
"""
if self._node_counter <= self._qubit_number:
raise RuntimeError(
"Attempting to pop a QuantumOperation from an empty " "QuantumCircuit."
)
# Recover the last operation performed.
op = self.last
# Update the last_inserted structure
for pred, _, key in self._graph.in_edges(
nbunch=self._node_counter - 1, keys=True
):
self._last_inserted_operations[key] = pred
# Remove the node (and the edges associated to it).
self._graph.remove_node(self._node_counter - 1)
self._node_counter -= 1
# Compute the new matrix if needed and possible.
if self._cache_matrix:
self._matrix = self._matrix @ op.matrix(self._qubit_number).T.conj()
return op
def _create_edge(self, from_id: int, to_id: int, qubit_id: int) -> None:
"""Create an edge between `from_id` and `to_id`.
:param from_id: Source of the edge.
:param to_id: Target of the edge.
:param qubit_id: Identifier of the qubit concerned by the target
operation.
"""
self._graph.add_edge(from_id, to_id, key=qubit_id)
def get_n_last_operations_on_qubit_reversed(
self, n: int, qubit_id: int
) -> typing.Iterable[qop.QuantumOperation]:
"""Get the `n` last inserted operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param n: Number of quantum operation to retrieve.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over the `n` last quantum operations involving
`qubit_id` in the reverse order of insertion.
:raise IndexError: if `qubit_id` is involved in less than `n`
operations.
"""
try:
all_ops_gen = self.get_operations_on_qubit_reversed(qubit_id)
for op_id in range(n):
yield next(all_ops_gen)
except StopIteration:
raise IndexError(
f"Cannot retrieve {n} operations on qubit n°{qubit_id}: only "
f"{op_id} operation are available."
)
def get_n_last_operations_on_qubit(
self, n: int, qubit_id: int
) -> typing.Iterable[qop.QuantumOperation]:
"""Get the `n` last inserted operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param n: Number of quantum operation to retrieve.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over the `n` last quantum operations involving
`qubit_id` in the order of insertion.
:raise IndexError: if `qubit_id` is involved in less than `n`
operations.
"""
return list(self.get_n_last_operations_on_qubit_reversed(n, qubit_id))[::-1]
def get_operations_on_qubit_reversed(self, qubit_id: int):
"""Get all the operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over all the quantum operations involving
`qubit_id` in the reverse order of insertion.
"""
current = self._last_inserted_operations[qubit_id]
while current >= self.qubit_number:
yield self._graph.nodes[current]["op"]
# Update the current node.
current = next(
filter(
lambda node_id: qubit_id
in self._graph.get_edge_data(node_id, current),
self._graph.predecessors(current),
)
)
def get_operations_on_qubit(self, qubit_id: int):
"""Get all the operations involving `qubit_id`.
The returned operations can have the qubit `qubit_id` either as target
or control qubit.
:param qubit_id: Identifier of the qubit we are interested in.
:return: an iterable over all the quantum operations involving
`qubit_id` in the order of insertion.
"""
return list(self.get_operations_on_qubit_reversed(qubit_id))[::-1]
def __getitem__(self, idx: int) -> qop.QuantumOperation:
"""Method used when []-indexing is used.
:param idx: The position of the operation we want to retrieve.
:return: The idx-th inserted operation.
"""
return self._graph.nodes[idx + self._qubit_number]["op"]
@property
def last(self) -> qop.QuantumOperation:
"""Getter for the last inserted operation.
:return: the last inserted operation.
:raise IndexError: if the circuit is empty.
"""
if self._node_counter == self._qubit_number:
raise IndexError(
"Trying to recover the last operation of an " "empty QuantumCircuit."
)
return self._graph.nodes[self._node_counter - 1]["op"]
@property
def operations(self) -> typing.Iterable[qop.QuantumOperation]:
"""Getter on the operations performed in this quantum circuit.
:return: a generator that generates all the operations of the circuit.
"""
return (
self._graph.nodes[i]["op"]
for i in range(self._qubit_number, self._node_counter)
)
def gates_on_qubit(self, qubit_index: int) -> typing.Iterable[qop.QuantumOperation]:
"""Getter for the gates applied on the qubit at the given index.
:param qubit_index: the qubit we are interested in.
:return: a generator yielding all the quantum gates in the circuit
that involve the specified qubit.
"""
return (op.gate for op in self.get_operations_on_qubit(qubit_index))
@property
def matrix(self) -> numpy.ndarray:
"""Getter on the unitary matrix representing the circuit.
Depending on the value of `cache_matrix` given at initialisation, this
method will either return the cached matrix or compute it.
:return: the unitary matrix representing the current quantum circuit.
"""
if self._cache_matrix:
return self._matrix
ret = numpy.identity(2 ** self._qubit_number)
for operation in self.operations:
ret = ret @ operation.matrix(self._qubit_number)
return ret
@property
def qubit_number(self) -> int:
"""Getter on the number of qubits of the current instance."""
return self._qubit_number
@property
def size(self) -> int:
"""Getter on the number of quantum gates in the current instance."""
return self._node_counter - self._qubit_number
def __iadd__(self, other: "QuantumCircuit") -> "QuantumCircuit":
"""Add all the operations contained in `other` to the current instance.
:param other: the quantum circuit containing the operations to append
to the current instance. `other` and the instance
:py:meth:`~.__iadd__` is called on should have the same number of
qubits.
:return: The union of self and other.
:raise RuntimeError: if `self` and `other` have a different number of
qubits.
"""
# 1. Checks
if self.qubit_number != other.qubit_number:
raise RuntimeError(
f"The number of qubits of the first circuit "
f"({self.qubit_number}) does not match the "
f"number of qubits of the second circuit "
f"({other.qubit_number})."
)
# 2. Update the graph
# 2.1. First remove the "input" nodes from the other graph. We don't
# want to change or copy the other graph so we take a view of the other
# graph without the "input" nodes.
other_subgraph = other._graph.subgraph(
range(other.qubit_number, other._node_counter)
)
# 2.2. Regroup the two graphs into one graph.
self._graph = nx.disjoint_union(self._graph, other_subgraph)
# 2.3. Join the nodes if possible.
for qubit_index in range(self.qubit_number):
old_neighbor = list(other._graph.neighbors(qubit_index))
if old_neighbor:
new_neighbor = old_neighbor[0] - other.qubit_number + self._node_counter
self._graph.add_edge(
self._last_inserted_operations[qubit_index], new_neighbor
)
# Only change the last inserted index if we joined the nodes.
self._last_inserted_operations[qubit_index] = new_neighbor
# 3. Update the other attributes:
self._node_counter += other._node_counter - other.qubit_number
if self._cache_matrix and other._matrix is not None:
self._matrix = self.matrix @ other.matrix
return self
def __matmul__(self: "QuantumCircuit", other: "QuantumCircuit") -> "QuantumCircuit":
"""Wrapper around __iadd__ for the new '@' operator."""
cpy = copy.copy(self)
return cpy.__iadd__(other)
def __copy__(self) -> "QuantumCircuit":
"""Override the default copy behaviour."""
cpy = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
if self.compressed:
cpy._compressed_graph = copy.copy(self._compressed_graph)
else:
cpy._graph = self._graph.copy()
cpy._node_counter = self._node_counter
cpy._last_inserted_operations = self._last_inserted_operations.copy()
if self._cache_matrix:
cpy._matrix = self._matrix
return cpy
def compress(self) -> "QuantumCircuit":
"""Compress the instance to save some memory.
This method is useful when a large number of small circuits needs to be
stored in memory.
.. warning:: Several methods of the :py:class:`~.QuantumCircuit` class
will not work as expected (or will raise an exception) if called on
a compressed circuit.
"""
if not self.compressed:
self._compressed_graph = CompressedMultiDiGraph(self._graph)
del self._graph
return self
def uncompress(self) -> "QuantumCircuit":
"""Uncompress the instance."""
if self.compressed:
self._graph = self._compressed_graph.uncompress()
del self._compressed_graph
return self
@property
def compressed(self) -> bool:
"""Return True if the instance is compressed, else False."""
return hasattr(self, "_compressed_graph")
def inverse(self) -> "QuantumCircuit":
"""Create the inverse of the instance it is called on.
This method will create a new :py:class:`~.QuantumCircuit` and construct
in this new circuit the inverse of `self`.
"""
inv = QuantumCircuit(self._qubit_number, cache_matrix=self._cache_matrix)
for op in reversed(list(self.operations)):
inv.add_operation(op.inverse())
return inv
def __str__(self) -> str:
"""Textual representation of the circuit.
The representation used is very similar to OpenQASM.
"""
return "\n".join(
(
"{Cs}{opname} {controls}{commaornot}{target}".format(
Cs="C" * len(op.controls),
opname=op.gate.name,
controls=",".join(map(str, op.controls)),
commaornot=(", " if op.controls else ""),
target=op.target,
)
for op in self.operations
)
)
class CompressedMultiDiGraph:
def __init__(self, graph: nx.MultiDiGraph = None) -> None:
"""Initialise the :py:class:`~.CompressedMultiDiGraph` instance.
Instances of :py:class:`~.CompressedMultiDiGraph` are just storing
a :py:class:`networkx.MultiDiGraph` in a more memory efficient format.
:param graph: The graph to compress.
"""
if graph is None:
self._qubit_number = 0
return
node_number = len(graph.nodes)
edge_number = len(graph.edges)
if node_number < 2 ** 8:
data_type = numpy.uint8
elif node_number < 2 ** 16:
data_type = numpy.uint16
else:
data_type = numpy.uint32
# We keep each edge with its corresponding qubit ID.
self._from_arr = numpy.zeros((edge_number,), dtype=data_type)
self._to_arr = numpy.zeros((edge_number,), dtype=data_type)
self._data_arr = numpy.zeros((edge_number,), dtype=data_type)
for idx, (u, v, qubit_id) in enumerate(graph.edges):
self._from_arr[idx] = u
self._to_arr[idx] = v
self._data_arr[idx] = qubit_id
# And the we keep each node
self._qubit_number = 0
self._is_op_node = numpy.zeros((node_number,), dtype=numpy.bool)
self._operations = list()
for node_id, node_data in graph.nodes.items():
if node_data["type"] == "op":
self._is_op_node[node_id] = True
self._operations.append(node_data["op"])
else:
self._qubit_number += 1
def __copy__(self) -> "CompressedMultiDiGraph":
"""Override the default copy behaviour."""
cpy = CompressedMultiDiGraph()
cpy._qubit_number = self._qubit_number
cpy._from_arr = self._from_arr.copy()
cpy._to_arr = self._to_arr.copy()
cpy._data_arr = self._data_arr.copy()
cpy._is_op_node = self._is_op_node.copy()
cpy._operations = copy.copy(self._operations)
return cpy
def uncompress(self) -> nx.MultiDiGraph:
"""Uncompress the stored :py:class:`networkx.MultiDiGraph`.
:return: the uncompressed :py:class:`networkx.MultiDiGraph`.
"""
graph = nx.MultiDiGraph()
if self._qubit_number == 0:
return graph
# Re-create the nodes.
for i in range(self._qubit_number):
graph.add_node(i, type="input", key=i)
for node_id in range(self._qubit_number, len(self._is_op_node)):
graph.add_node(
node_id, type="op", op=self._operations[node_id - self._qubit_number]
)
# Re-create the edges
for u, v, qubit_id in zip(self._from_arr, self._to_arr, self._data_arr):
graph.add_edge(u, v, key=qubit_id)
return graph
CircuitCostFunction = typing.Callable[[QuantumCircuit], float]
filtered:remove_non_ascii: 2 | filtered:remove_decorators: 1,776 | filtered:remove_async: 0 | filtered:remove_classes: 17,570 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 82 | filtered:remove_delete_markers: 159

Row 8:
hexsha: 343ff326a8f186dde320d062ee1820bd51033851 | size: 1,784 | ext: py | lang: Python
max_stars_repo_path: ImageToCode.py | max_stars_repo_name: SL-RU/sfam_generator | max_stars_repo_head_hexsha: eed23f8c6322a5dfe328851c85d24cb20f344613 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 10 | max_stars_repo_stars_event_min_datetime: 2017-03-15T16:48:19.000Z | max_stars_repo_stars_event_max_datetime: 2021-12-07T06:55:18.000Z
max_issues_repo_path: ImageToCode.py | max_issues_repo_name: SL-RU/sfam_generator | max_issues_repo_head_hexsha: eed23f8c6322a5dfe328851c85d24cb20f344613 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ImageToCode.py | max_forks_repo_name: SL-RU/sfam_generator | max_forks_repo_head_hexsha: eed23f8c6322a5dfe328851c85d24cb20f344613 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 5 | max_forks_repo_forks_event_min_datetime: 2018-01-19T17:28:32.000Z | max_forks_repo_forks_event_max_datetime: 2021-12-26T09:01:37.000Z
content:
from PIL import Image
import sys
#This script converts 1bit image to special byte file and code for using in your projects.
png = "pinkie_pie_by_cellularsp-d4j7sj2.gif" #Image file
out_f = png[:-4] + ".img"
print(out_f)
max_ll = 8
im = Image.open(png)
if im.width > 255 or im.height > 255:
print("Image max width and heigh must be less then 256")
sys.exit()
s = ""
bits = list()
frame = 0
bits_len = 0
try:
while 1:
frame+=1
ouu = im.convert("L")
ouu = ouu.point(lambda x: 0 if x<128 else 255, '1')
#ouu.save("out_gif" + str(frame) + ".png")
sn, ln = frame_to_code(ouu, bits)
s += "\n//Frame: " + str(frame - 1)
s += sn
bits_len = ln
im.seek(im.tell()+1)
except EOFError:
pass
s = "uint8_t png[] = { " + str(im.width) + ", " + str(im.height) + ", " + str(frame) + ", " + str(bits_len & 0xFF) + ", " + str((bits_len >> 8) & 0xFF) + ", //Width, height, frame_count, frame_size_low_byte, frame_size_high_byte" + s + "\n};"
#print(frame)
with open(out_f, "wb") as fl:
fl.write(bytes([im.width, im.height, frame, bits_len & 0xFF, (bits_len >> 8) & 0xFF]))
fl.write(bytes(int(b[::-1], 2) for b in bits))
print(s)
avg_line_length: 21.756098 | max_line_length: 243 | alphanum_fraction: 0.57287
original_content:
import PIL
from PIL import Image, ImageFont, ImageDraw
import math, sys
#This script converts 1bit image to special byte file and code for using in your projects.
png = "pinkie_pie_by_cellularsp-d4j7sj2.gif" #Image file
out_f = png[:-4] + ".img"
print(out_f)
max_ll = 8
im = Image.open(png)
if im.width > 255 or im.height > 255:
print("Image max width and heigh must be less then 256")
sys.exit()
s = ""
bits = list()
def frame_to_code(im, ext_bits):
s = ""
i = 0
sg = ""
ou = ""
bits = list()
for i in range(im.width*im.height):
if(i % im.width == 0):
ou += ("\n")
if i%8 == 0 and i != 0:
bits.append(sg)
sg = ""
rgb = im.getpixel((i%im.width, i/im.width))
if rgb > 100:
ou += ("*")
else:
ou += (" ")
sg += "1" if rgb > 100 else "0"
#print(ou)
if i%8 != 0 and i%8 != 7:
while i%8 != 0:
i += 1
sg += "0"
bits.append(sg)
i = 0
for b in bits:
if i % max_ll == 0:
s+="\n"
i+=1
s += ("0x%02x" % int(b[::-1], 2)) + ", "
ext_bits.extend(bits)
return (s, len(bits))
frame = 0
bits_len = 0
try:
while 1:
frame+=1
ouu = im.convert("L")
ouu = ouu.point(lambda x: 0 if x<128 else 255, '1')
#ouu.save("out_gif" + str(frame) + ".png")
sn, ln = frame_to_code(ouu, bits)
s += "\n//Frame: " + str(frame - 1)
s += sn
bits_len = ln
im.seek(im.tell()+1)
except EOFError:
pass
s = "uint8_t png[] = { " + str(im.width) + ", " + str(im.height) + ", " + str(frame) + ", " + str(bits_len & 0xFF) + ", " + str((bits_len >> 8) & 0xFF) + ", //Width, height, frame_count, frame_size_low_byte, frame_size_high_byte" + s + "\n};"
#print(frame)
with open(out_f, "wb") as fl:
fl.write(bytes([im.width, im.height, frame, bits_len & 0xFF, (bits_len >> 8) & 0xFF]))
fl.write(bytes(int(b[::-1], 2) for b in bits))
print(s)
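A small sketch of reading the header back from the file written above; the byte order mirrors the fl.write call (width, height, frame count, then the frame size split into low and high bytes):
with open(out_f, "rb") as fl:
    header = fl.read(5)
width, height, frames = header[0], header[1], header[2]
frame_size = header[3] | (header[4] << 8)  # low byte first, then high byte
print(width, height, frames, frame_size)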
| 0
| 0
| 0
| 0
| 0
| 580
| 0
| 17
| 45
|
5b492c1bb5a26e044019d775af24ca4dc00c554b
| 688
|
py
|
Python
|
realtoxicityprompts/scripts/helper_dataparallel_merge_generations.py
|
ml-research/MoRT_NMI
|
98dc14f42714b1b794d685507c01b593cde5638c
|
[
"MIT"
] | 4
|
2021-04-04T13:42:34.000Z
|
2021-11-29T15:38:50.000Z
|
realtoxicityprompts/scripts/helper_dataparallel_merge_generations.py
|
ml-research/MoRT_NMI
|
98dc14f42714b1b794d685507c01b593cde5638c
|
[
"MIT"
] | null | null | null |
realtoxicityprompts/scripts/helper_dataparallel_merge_generations.py
|
ml-research/MoRT_NMI
|
98dc14f42714b1b794d685507c01b593cde5638c
|
[
"MIT"
] | null | null | null |
base_dir = './resultsMCM/prompted/gpt2mcm-k50-keepmin5-t00'
base_path = base_dir+'/{}-{}/generations.jsonl'
outfile = base_dir+'/all/generations.jsonl'
batch_size = 4000
filenames = [(i*batch_size, i*batch_size+batch_size) for i in range(25)]
print(filenames)
cnt = 0
with open(outfile, 'w') as outfile:
for i, fname_ in enumerate(filenames):
start = fname_[0]
end = fname_[1]
if i + 1 == len(filenames):
end = 'end'
fname = base_path.format(start, end)
with open(fname) as infile:
for line in infile:
outfile.write(line)
cnt += 1
print("Finished merging generations: #{}".format(cnt))
| 31.272727
| 72
| 0.614826
|
base_dir = './resultsMCM/prompted/gpt2mcm-k50-keepmin5-t00'
base_path = base_dir+'/{}-{}/generations.jsonl'
outfile = base_dir+'/all/generations.jsonl'
batch_size = 4000
filenames = [(i*batch_size, i*batch_size+batch_size) for i in range(25)]
print(filenames)
cnt = 0
with open(outfile, 'w') as outfile:
for i, fname_ in enumerate(filenames):
start = fname_[0]
end = fname_[1]
if i + 1 == len(filenames):
end = 'end'
fname = base_path.format(start, end)
with open(fname) as infile:
for line in infile:
outfile.write(line)
cnt += 1
print("Finished merging generations: #{}".format(cnt))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
495408fc1dc0cfbec8a05472b485424bce359f08
| 1,064
|
py
|
Python
|
dexp/processing/color/cairo_utils.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 16
|
2021-04-21T14:09:19.000Z
|
2022-03-22T02:30:59.000Z
|
dexp/processing/color/cairo_utils.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 28
|
2021-04-15T17:43:08.000Z
|
2022-03-29T16:08:35.000Z
|
dexp/processing/color/cairo_utils.py
|
haesleinhuepf/dexp
|
2ea84f3db323724588fac565fae56f0d522bc5ca
|
[
"BSD-3-Clause"
] | 3
|
2022-02-08T17:41:30.000Z
|
2022-03-18T15:32:27.000Z
|
import numpy
from dexp.utils.backends import Backend
def get_array_for_cairo_surface(surface: "ImageSurface"): # noqa: F821
"""
    Returns an array given an ImageSurface from PyCairo.
Parameters
----------
surface : surface
Returns
-------
RGBA numpy array of shape: (...,4)
"""
from cairocffi import ImageSurface
surface: ImageSurface
xp = Backend.get_xp_module()
width = surface.get_width()
height = surface.get_height()
# Get pycairo surface buffer:
buffer = surface.get_data()
# Reshape array to get an extra uint8 axis:
surface_array = numpy.ndarray(shape=(height, width, 4), dtype=xp.uint8, buffer=buffer)
# Move to backend:
surface_array = Backend.to_backend(surface_array)
# We have now: BGRA, we need to flip color axis because of endianness to ARGB:
surface_array = xp.flip(surface_array, axis=surface_array.ndim - 1)
# Convert ARGB to RGBA:
surface_array = xp.roll(surface_array, shift=-1, axis=surface_array.ndim - 1)
return surface_array
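A hedged usage sketch, assuming cairocffi is installed and dexp's default NumPy backend is active; the surface size and fill colour are illustrative only:
import cairocffi
if __name__ == "__main__":
    surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 64, 64)
    ctx = cairocffi.Context(surface)
    ctx.set_source_rgb(1.0, 0.0, 0.0)  # plain red fill, just to have pixel data
    ctx.paint()
    rgba = get_array_for_cairo_surface(surface)
    print(rgba.shape)  # expected: (64, 64, 4)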
| 23.644444
| 90
| 0.678571
|
import numpy
from dexp.utils.backends import Backend
def get_array_for_cairo_surface(surface: "ImageSurface"): # noqa: F821
"""
    Returns an array given an ImageSurface from PyCairo.
Parameters
----------
surface : surface
Returns
-------
RGBA numpy array of shape: (...,4)
"""
from cairocffi import ImageSurface
surface: ImageSurface
xp = Backend.get_xp_module()
width = surface.get_width()
height = surface.get_height()
# Get pycairo surface buffer:
buffer = surface.get_data()
# Reshape array to get an extra uint8 axis:
surface_array = numpy.ndarray(shape=(height, width, 4), dtype=xp.uint8, buffer=buffer)
# Move to backend:
surface_array = Backend.to_backend(surface_array)
# We have now: BGRA, we need to flip color axis because of endianness to ARGB:
surface_array = xp.flip(surface_array, axis=surface_array.ndim - 1)
# Convert ARGB to RGBA:
surface_array = xp.roll(surface_array, shift=-1, axis=surface_array.ndim - 1)
return surface_array
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
45c2e6179e1f48ab5b034e304716466d80f630a3
| 3,979
|
py
|
Python
|
scripts/animateGridData.py
|
sayanadhikari207/picsp
|
dc6315a11b4dd3d567f2d51673807e82e5378921
|
[
"MIT"
] | 3
|
2021-02-22T13:23:08.000Z
|
2021-05-27T21:08:32.000Z
|
scripts/animateGridData.py
|
sayanadhikari207/picsp
|
dc6315a11b4dd3d567f2d51673807e82e5378921
|
[
"MIT"
] | 8
|
2021-03-23T15:47:15.000Z
|
2021-04-05T12:55:54.000Z
|
scripts/animateGridData.py
|
sayanadhikari207/picsp
|
dc6315a11b4dd3d567f2d51673807e82e5378921
|
[
"MIT"
] | 1
|
2021-02-22T13:36:53.000Z
|
2021-02-22T13:36:53.000Z
|
#!/usr/bin/env python3
import numpy as np
import h5py
import matplotlib as mp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits import mplot3d
from mpl_toolkits.axes_grid1 import make_axes_locatable
import argparse
import os
# import plotly.graph_objects as go
#========= Configuration ===========
parser = argparse.ArgumentParser(description='Grid Data Animator PICSP')
parser.add_argument('-p', default="phi", type=str, help='Name of the parameter (phi/den.e/den.i)')
parser.add_argument('-a', default=True, type=bool, help='Show Animation (True/False)')
parser.add_argument('-s', default=False, type=bool, help='Save Animation (True/False)')
parser.add_argument('-d', default=False, type=bool, help='3D Animation (True/False)')
args = parser.parse_args()
param = args.p
show_anim = args.a
save_anim = args.s
Vis3D = args.d
interval = 100 #in mseconds
DIR ="../output/"
file_name = "data"#"rhoNeutral" #"P"
#========== Figure Directory Setup =============
figPath = "figures" # DO NOT CHANGE THE PATH
if os.path.exists(figPath):
print("figure directory exists. Existing figures will be replaced.")
else:
os.mkdir(figPath)
h5 = h5py.File(DIR+file_name+'.h5','r')
Lx = h5.attrs["Lx"]
Ly = h5.attrs["Ly"]
Nx = int(h5.attrs["Nx"])
Ny = int(h5.attrs["Ny"])
dp = int(h5.attrs["dp"])
Nt = int(h5.attrs["Nt"])
x = np.linspace(0,Lx,Nx)
y = np.linspace(0,Ly,Ny)
X, Y = np.meshgrid(x, y)
# dataset index
data_num = np.arange(start=0, stop=Nt, step=dp, dtype=int)
maxP = np.max(h5[param+"/%d"%Nt]);
minP = np.min(h5[param+"/%d"%Nt]);
if (show_anim == True):
##### FIG SIZE CALC ############
figsize = np.array([150,150/1.618]) #Figure size in mm
dpi = 300 #Print resolution
ppi = np.sqrt(1920**2+1200**2)/24 #Screen resolution
mp.rc('text', usetex=True)
mp.rc('font', family='sans-serif', size=10, serif='Computer Modern Roman')
mp.rc('axes', titlesize=10)
mp.rc('axes', labelsize=10)
mp.rc('xtick', labelsize=10)
mp.rc('ytick', labelsize=10)
mp.rc('legend', fontsize=10)
if (show_anim == True):
fig,ax1 = plt.subplots(figsize=figsize/25.4,constrained_layout=False,dpi=ppi)
div = make_axes_locatable(ax1)
cax = div.append_axes('right', '4%', '4%')
data = h5[param+"/%d"%data_num[0]]
if Vis3D == True:
fig = plt.figure(figsize=figsize/25.4,constrained_layout=True,dpi=ppi)
ax1 = plt.axes(projection ="3d")
img1 = ax1.plot_surface(X,Y,data)
else:
img1 = ax1.contourf(X,Y,data)
cbar = fig.colorbar(img1,cax=cax)
ani = animation.FuncAnimation(fig,animate,frames=len(data_num),interval=interval,blit=False)
# ani.save('phase_space.gif',writer='imagemagick')
plt.show()
if(save_anim == True):
try:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=(1/interval), metadata=dict(artist='Me'), bitrate=1800)
except RuntimeError:
print("ffmpeg not available trying ImageMagickWriter")
writer = animation.ImageMagickWriter(fps=(1/interval))
print("Saving movie to "+figPath+"/. Please wait .....")
ani.save(figPath+"/"+param+'_animation_PICSP.mp4')
| 32.08871
| 98
| 0.620508
|
#!/usr/bin/env python3
import numpy as np
import h5py
import matplotlib as mp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits import mplot3d
from mpl_toolkits.axes_grid1 import make_axes_locatable
import argparse
import os
# import plotly.graph_objects as go
#========= Configuration ===========
parser = argparse.ArgumentParser(description='Grid Data Animator PICSP')
parser.add_argument('-p', default="phi", type=str, help='Name of the parameter (phi/den.e/den.i)')
parser.add_argument('-a', default=True, type=bool, help='Show Animation (True/False)')
parser.add_argument('-s', default=False, type=bool, help='Save Animation (True/False)')
parser.add_argument('-d', default=False, type=bool, help='3D Animation (True/False)')
args = parser.parse_args()
param = args.p
show_anim = args.a
save_anim = args.s
Vis3D = args.d
interval = 100 #in mseconds
DIR ="../output/"
file_name = "data"#"rhoNeutral" #"P"
#========== Figure Directory Setup =============
figPath = "figures" # DO NOT CHANGE THE PATH
if os.path.exists(figPath):
print("figure directory exists. Existing figures will be replaced.")
else:
os.mkdir(figPath)
h5 = h5py.File(DIR+file_name+'.h5','r')
Lx = h5.attrs["Lx"]
Ly = h5.attrs["Ly"]
Nx = int(h5.attrs["Nx"])
Ny = int(h5.attrs["Ny"])
dp = int(h5.attrs["dp"])
Nt = int(h5.attrs["Nt"])
x = np.linspace(0,Lx,Nx)
y = np.linspace(0,Ly,Ny)
X, Y = np.meshgrid(x, y)
# dataset index
data_num = np.arange(start=0, stop=Nt, step=dp, dtype=int)
maxP = np.max(h5[param+"/%d"%Nt]);
minP = np.min(h5[param+"/%d"%Nt]);
if (show_anim == True):
def animate(i):
#======Potential Data=========
data = h5[param+"/%d"%data_num[i]]
ax1.cla()
if Vis3D == True:
img1 = ax1.plot_surface(X,Y,data)
ax1.set_zlim([minP, maxP])
else:
img1 = ax1.contourf(X,Y,data)
if ("phi" in param ):
ax1.set_title('Potential (TimeSteps = %d'%(i*dp)+')')
elif ("den" in param ):
if ("i" in param ):
ax1.set_title('Ion Density (TimeSteps = %d'%(i*dp)+')')
else:
ax1.set_title('Electron Density (TimeSteps = %d'%(i*dp)+')')
ax1.set_xlabel("$x$")
ax1.set_ylabel("$y$")
ax1.set_xlim([0, Lx])
ax1.set_ylim([0, Ly])
cax.cla()
fig.colorbar(img1, cax=cax)
##### FIG SIZE CALC ############
figsize = np.array([150,150/1.618]) #Figure size in mm
dpi = 300 #Print resolution
ppi = np.sqrt(1920**2+1200**2)/24 #Screen resolution
mp.rc('text', usetex=True)
mp.rc('font', family='sans-serif', size=10, serif='Computer Modern Roman')
mp.rc('axes', titlesize=10)
mp.rc('axes', labelsize=10)
mp.rc('xtick', labelsize=10)
mp.rc('ytick', labelsize=10)
mp.rc('legend', fontsize=10)
if (show_anim == True):
fig,ax1 = plt.subplots(figsize=figsize/25.4,constrained_layout=False,dpi=ppi)
div = make_axes_locatable(ax1)
cax = div.append_axes('right', '4%', '4%')
data = h5[param+"/%d"%data_num[0]]
if Vis3D == True:
fig = plt.figure(figsize=figsize/25.4,constrained_layout=True,dpi=ppi)
ax1 = plt.axes(projection ="3d")
img1 = ax1.plot_surface(X,Y,data)
else:
img1 = ax1.contourf(X,Y,data)
cbar = fig.colorbar(img1,cax=cax)
ani = animation.FuncAnimation(fig,animate,frames=len(data_num),interval=interval,blit=False)
# ani.save('phase_space.gif',writer='imagemagick')
plt.show()
if(save_anim == True):
try:
Writer = animation.writers['ffmpeg']
writer = Writer(fps=(1/interval), metadata=dict(artist='Me'), bitrate=1800)
except RuntimeError:
print("ffmpeg not available trying ImageMagickWriter")
writer = animation.ImageMagickWriter(fps=(1/interval))
print("Saving movie to "+figPath+"/. Please wait .....")
ani.save(figPath+"/"+param+'_animation_PICSP.mp4')
| 0
| 0
| 0
| 0
| 0
| 764
| 0
| 0
| 26
|
2a4d363b4d24f4f58e77f8bb01b4f21712e7dc72
| 2,547
|
py
|
Python
|
aloe/aloe/common/synthetic/dataset.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
aloe/aloe/common/synthetic/dataset.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
aloe/aloe/common/synthetic/dataset.py
|
deepneuralmachine/google-research
|
d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
| 35.873239
| 91
| 0.619552
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
import numpy as np
from aloe.common.synthetic.toy_data_gen import inf_train_gen
class ToyDataset(object):
def __init__(self, dim, data_file=None, static_data=None):
if data_file is not None:
self.static_data = np.load(data_file)
elif static_data is not None:
self.static_data = static_data
else:
self.static_data = None
self.dim = dim
def gen_batch(self, batch_size):
raise NotImplementedError
def data_gen(self, batch_size, auto_reset):
if self.static_data is not None:
num_obs = self.static_data.shape[0]
while True:
for pos in range(0, num_obs, batch_size):
if pos + batch_size > num_obs: # the last mini-batch has fewer samples
if auto_reset: # no need to use this last mini-batch
break
else:
num_samples = num_obs - pos
else:
num_samples = batch_size
yield self.static_data[pos : pos + num_samples, :]
if not auto_reset:
break
np.random.shuffle(self.static_data)
else:
while True:
yield self.gen_batch(batch_size)
class OnlineToyDataset(ToyDataset):
def __init__(self, data_name):
super(OnlineToyDataset, self).__init__(2)
self.data_name = data_name
self.rng = np.random.RandomState()
rng = np.random.RandomState(1)
samples = inf_train_gen(self.data_name, rng, 5000)
self.f_scale = np.max(np.abs(samples)) + 1
self.int_scale = 2 ** 15 / (self.f_scale + 1)
print('f_scale,', self.f_scale, 'int_scale,', self.int_scale)
def gen_batch(self, batch_size):
return inf_train_gen(self.data_name, self.rng, batch_size)
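A brief usage sketch; '2spirals' is an assumed dataset name (the accepted names live in toy_data_gen, which is not shown here):
if __name__ == '__main__':
    dataset = OnlineToyDataset('2spirals')
    gen = dataset.data_gen(batch_size=128, auto_reset=True)
    batch = next(gen)  # expected: a (128, 2) numpy array of 2-D samples
    print(batch.shape)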
| 0
| 0
| 0
| 1,790
| 0
| 0
| 0
| 36
| 91
|
8f619ef687d1f9623aae26c5a9fd22fb12fed77a
| 4,422
|
py
|
Python
|
ngram.py
|
PdePinguino/n-grams
|
73a199fce2eeb3fa75c0df863ca430e5a5f9be0f
|
[
"MIT"
] | null | null | null |
ngram.py
|
PdePinguino/n-grams
|
73a199fce2eeb3fa75c0df863ca430e5a5f9be0f
|
[
"MIT"
] | null | null | null |
ngram.py
|
PdePinguino/n-grams
|
73a199fce2eeb3fa75c0df863ca430e5a5f9be0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
This file contains the NGram class.
"""
import pickle
from os.path import join
if __name__ == '__main__':
with open(join('corpus', 'poems.pkl'), 'rb') as handle:
poems = pickle.load(handle)
unigram = NGram(poems, n=1)
#bigram = NGram(poems, n=2)
#trigram = NGram(poems, n=3)
#fourthgram = NGram(poems, n=4)
# save to pkl file
with open('unigram.pkl', 'wb') as handle:
pickle.dump(unigram, handle)
| 30.287671
| 92
| 0.568521
|
#!/usr/bin/env python3
"""
This file contains the NGram class.
"""
import pickle
from os.path import join
import numpy as np
import re
from tqdm import tqdm
class NGram():
def __init__(self, poems, n, reduced=False):
self.n = n
self.poems = poems
self.tagged_lines = self.append_tag(self.get_lines())
if reduced:
print('using reduced corpus')
self.tagged_lines = self.tagged_lines[:10]
self.vocab = self.get_vocab() # stores unique words without <s> and </s>
self.index = 0
self.n2i = {} # ngram to index
self.i2n = {} # index to ngram
self.ngram_counts = {} # counts of ngrams occurrences
self.count_ngram_in_poems() # it populates n2i, i2n, ngram_counts dictionaries
# key is an ngram-1, value is the log of next-seen-word occurrence probability
# example for bigram case:
# self.ngram_probs['en'] = {'el': -0.40546510810816444, 'mi': -1.0986122886681098}
self.ngram_probs = {}
self.compute_probs()
def compute_probs(self):
if self.n == 1:
self.compute_unigram_probs()
else:
self.compute_ngram_probs()
return
def compute_ngram_probs(self):
print('computing ngram probs. this might take a while...')
for ngram in tqdm(self.ngram_counts):
words_from = ngram.rpartition('-')[0]
pattern = re.compile(f'{words_from}-.*')
seen_ngrams = [ngram for ngram in self.ngram_counts if re.match(pattern, ngram)]
total_counts = sum([self.ngram_counts[ngram] for ngram in seen_ngrams])
self.ngram_probs[words_from] = {}
for seen_ngram in seen_ngrams:
word_to = seen_ngram.rpartition('-')[2]
ngram_counts = self.ngram_counts[seen_ngram]
self.ngram_probs[words_from][word_to] = np.log(ngram_counts / total_counts)
return
def compute_unigram_probs(self):
total_counts = sum([self.ngram_counts[word] for word in self.ngram_counts])
for word in tqdm(self.ngram_counts):
word_counts = self.ngram_counts[word]
self.ngram_probs[word] = np.log(word_counts / total_counts)
return
def get_vocab(self):
words = []
for line in self.tagged_lines:
words.extend(line.split()[1:-1])
return list(set(words))
def get_lines(self):
lines = [line for book in self.poems
for file in self.poems[book]
for line in self.poems[book][file][1]]
return lines
def count_ngram_in_poems(self):
for line in self.tagged_lines:
self.count_ngram_in_line(line)
return
def count_ngram_in_line(self, line):
if self.n == 1:
self.count_unigram(line)
else:
self.count_ngram(line)
return
def count_unigram(self, line):
for unigram in line.split()[1:]: # skipping first token <s>
try:
self.ngram_counts[unigram] += 1
except KeyError:
self.ngram_counts[unigram] = 1
self.n2i[unigram] = self.index
self.i2n[self.index] = unigram
self.index += 1
return
def count_ngram(self, line):
words = line.split()
for index in range(len(words) - (self.n - 1)):
ngram = '-'.join(words[index: index + self.n])
try:
self.ngram_counts[ngram] += 1
except KeyError:
self.ngram_counts[ngram] = 1
self.n2i[ngram] = self.index
self.i2n[self.index] = ngram
self.index += 1
return
def append_tag(self, lines):
lines_tag = []
for line in lines:
line = line.split()
line.insert(0, '<s>')
line.append('</s>')
line = ' '.join(line)
lines_tag.append(line)
return lines_tag
if __name__ == '__main__':
with open(join('corpus', 'poems.pkl'), 'rb') as handle:
poems = pickle.load(handle)
unigram = NGram(poems, n=1)
#bigram = NGram(poems, n=2)
#trigram = NGram(poems, n=3)
#fourthgram = NGram(poems, n=4)
# save to pkl file
with open('unigram.pkl', 'wb') as handle:
pickle.dump(unigram, handle)
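A short sketch of querying a fitted model, assuming a bigram built from the same corpus; keys follow the 'word1-word2' join used in count_ngram:
    bigram = NGram(poems, n=2)  # may take a while on the full corpus
    next_words = bigram.ngram_probs.get('<s>', {})  # log-probs of line-initial words
    best = max(next_words, key=next_words.get) if next_words else None
    print(best, next_words.get(best))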
| 0
| 0
| 0
| 3,877
| 0
| 0
| 0
| -15
| 89
|
fbeaacdbc188f5a72bb2f95b5542a2c449342e0c
| 19,674
|
py
|
Python
|
scripts/generate_scripts.py
|
brunocampos01/automated-business-intelligence-at-azure
|
82b8ab1ea0311166d12216effc15ac5e68c47b61
|
[
"MIT"
] | 6
|
2020-02-08T03:19:55.000Z
|
2021-09-06T00:33:16.000Z
|
scripts/generate_scripts.py
|
brunocampos01/automated-business-intelligence-at-azure
|
82b8ab1ea0311166d12216effc15ac5e68c47b61
|
[
"MIT"
] | null | null | null |
scripts/generate_scripts.py
|
brunocampos01/automated-business-intelligence-at-azure
|
82b8ab1ea0311166d12216effc15ac5e68c47b61
|
[
"MIT"
] | 2
|
2020-11-13T15:26:26.000Z
|
2021-09-15T12:43:19.000Z
|
"""
Generate each script with custom variables per client
"""
import os
import glob
import argparse
from pathlib import Path
# paths
path_cloud = str(Path(__file__).absolute().parent.parent)
# path: scripts powershell
path_ori_runbooks = '/scripts/runbooks/'
path_dest_runbooks = '/azure_automatiom_account/runbooks/'
path_automation_account = ''.join(path_cloud + path_dest_runbooks)
path_create_db_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-create_db-as-runbook.ps1')
path_apply_roles_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-apply-roles-as-runbook.ps1')
path_restore_bkp_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-restore-bkp-as-runbook.ps1')
path_send_email = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-send-email-runbook.ps1')
path_start_stop_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-start-stop-as-runbook.ps1')
path_update_bkp_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-update-bkp-as-runbook.ps1')
path_process_large_volume_tables_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-process-large-volume-tables-as-runbook.ps1')
path_process_partitions_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-daily-process-partitions-as-runbook.ps1')
path_process_partitions_monthly_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-monthly-process-partitions-as-runbook.ps1')
path_update_modules = ''.join(path_cloud + path_ori_runbooks + 'update-modules-runbook.ps1')
path_update_certicate = ''.join(path_cloud + path_ori_runbooks + 'update-certificate-runbook.ps1')
# path: scripts tmsl
path_ori_tmsl = '/scripts/tmsl_mp/'
path_dest_tmsl = '/azure_analysis_services/tmsl/'
path_analysis_services = ''.join(path_cloud + path_dest_tmsl)
path_ori_role_admins = ''.join(path_cloud + path_ori_tmsl + 'role_admins.json')
path_ori_role_readers = ''.join(path_cloud + path_ori_tmsl + 'role_readers.json')
path_dest_role_admins = ''.join(path_cloud + path_dest_tmsl + 'role_admins.json')
path_dest_role_readers = ''.join(path_cloud + path_dest_tmsl + 'role_readers.json')
# path: partitions
path_partitions = ''.join(path_cloud + '/azure_analysis_services/partitions/')
def get_partitions_name(path_partitions: str, partition_to_exclude: str) -> str:
""" Read all files in path_partitions and create a list
    :return: string in tuple format, because PowerShell needs these ()
    special characters.
"""
partitions_files = []
for file in glob.glob(path_partitions + '*.sql'):
partition = os.path.splitext(os.path.basename(file))[0]
partitions_files.append(partition)
    partitions_files.remove(partition_to_exclude)  # Remove the large-volume table
partition_tuple = tuple(i for i in partitions_files)
return str(partition_tuple)
def prepare_tmsl(tmsl: str, local: str) -> str:
"""Prepare the path from local
:return:
Path of file tmsl
"""
if local == 'origin':
return ''.join(path_cloud + path_ori_tmsl + 'create_db_'+tmsl+'.json')
else:
return ''.join(path_cloud + path_dest_tmsl + 'create_db_'+tmsl+'.json')
def write_script(path_dest: str, file_name: str, script_content: str):
"""Create script powershell to use in runbooks
:return:
file *.ps1
"""
try:
return open(''.join(path_dest + file_name), mode='w+', encoding="utf-8") \
.write(script_content)
except IOError:
raise Exception('Request Error in ', file_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Generate each script with custom variables per client')
parser.add_argument('--subscritpion_id',
type=str,
required=False,
                        help='Subscription id number with 4 fields')
parser.add_argument('--data_source',
type=str,
default='oracle',
required=False,
help='oracle or postgresql')
parser.add_argument('--product_name',
type=str,
default='PRODUCT_NAME',
required=False,
help='Name in lower case')
parser.add_argument('--client_name',
type=str,
default='mp',
required=False,
help='Name in Upper case')
parser.add_argument('--location',
type=str,
default='brazilsouth',
required=False,
help='Localization to create resources in Azure')
parser.add_argument('--list_admins',
type=str,
default='[[email protected]]',
required=False,
help='Users to role admins in analysis services')
parser.add_argument('--list_readers',
type=str,
default='[[email protected]]',
required=False,
help='Users to role readers in analysis services')
parser.add_argument('--large_volume_table',
type=str,
default='fInfoProcessoMensal',
required=False,
                        help='Name of the table that needs split partitions')
parser.add_argument('--column_to_split',
type=str,
default='idanomesreferencia',
required=False,
                        help='Name of the column used to split the partitions')
parser.add_argument('--total_month',
type=str,
default='12',
required=False,
                        help='Range of months to store in Analysis Services')
parser.add_argument('--email_from',
type=str,
default='[email protected]',
required=False,
                        help='Sender email used when a runbook fails')
parser.add_argument('--email_to',
type=str,
default='[email protected]',
required=False,
help='Receiver email when runbooks fail.')
parser.add_argument('--smtp_server',
type=str,
default='[email protected]',
required=False,
                        help='SMTP server used to send failure emails.')
parser.add_argument('--smtp_port',
type=str,
default='[email protected]',
required=False,
                        help='SMTP port used to send failure emails.')
args = parser.parse_args() # <class 'argparse.ArgumentParser'>
subscritpion_id = args.subscritpion_id
data_source = args.data_source
product_name = args.product_name
client_name = args.client_name
location = args.location
list_admins = args.list_admins
list_readers = args.list_readers
large_volume_table = args.large_volume_table
column_to_split = args.column_to_split
total_month = args.total_month
email_from = args.email_from
email_to = args.email_to
smtp_server = args.smtp_server
smtp_port = args.smtp_port
main(subscritpion_id,
data_source,
product_name,
client_name,
location,
list_admins,
list_readers,
large_volume_table,
column_to_split,
total_month,
email_from,
email_to,
smtp_server,
smtp_port)
| 51.101299
| 153
| 0.563434
|
"""
Generate each script with custom variables per client
"""
import os
import glob
import json
import argparse
from pathlib import Path
# paths
path_cloud = str(Path(__file__).absolute().parent.parent)
# path: scripts powershell
path_ori_runbooks = '/scripts/runbooks/'
path_dest_runbooks = '/azure_automatiom_account/runbooks/'
path_automation_account = ''.join(path_cloud + path_dest_runbooks)
path_create_db_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-create_db-as-runbook.ps1')
path_apply_roles_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-apply-roles-as-runbook.ps1')
path_restore_bkp_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-restore-bkp-as-runbook.ps1')
path_send_email = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-send-email-runbook.ps1')
path_start_stop_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-start-stop-as-runbook.ps1')
path_update_bkp_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-update-bkp-as-runbook.ps1')
path_process_large_volume_tables_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-process-large-volume-tables-as-runbook.ps1')
path_process_partitions_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-daily-process-partitions-as-runbook.ps1')
path_process_partitions_monthly_as = ''.join(path_cloud + path_ori_runbooks + 'PRODUCT_NAME-CLIENT_NAME-monthly-process-partitions-as-runbook.ps1')
path_update_modules = ''.join(path_cloud + path_ori_runbooks + 'update-modules-runbook.ps1')
path_update_certicate = ''.join(path_cloud + path_ori_runbooks + 'update-certificate-runbook.ps1')
# path: scripts tmsl
path_ori_tmsl = '/scripts/tmsl_mp/'
path_dest_tmsl = '/azure_analysis_services/tmsl/'
path_analysis_services = ''.join(path_cloud + path_dest_tmsl)
path_ori_role_admins = ''.join(path_cloud + path_ori_tmsl + 'role_admins.json')
path_ori_role_readers = ''.join(path_cloud + path_ori_tmsl + 'role_readers.json')
path_dest_role_admins = ''.join(path_cloud + path_dest_tmsl + 'role_admins.json')
path_dest_role_readers = ''.join(path_cloud + path_dest_tmsl + 'role_readers.json')
# path: partitions
path_partitions = ''.join(path_cloud + '/azure_analysis_services/partitions/')
def get_partitions_name(path_partitions: str, partition_to_exclude: str) -> str:
""" Read all files in path_partitions and create a list
    :return: string in tuple format, because PowerShell needs these ()
    special characters.
"""
partitions_files = []
for file in glob.glob(path_partitions + '*.sql'):
partition = os.path.splitext(os.path.basename(file))[0]
partitions_files.append(partition)
    partitions_files.remove(partition_to_exclude)  # Remove the large-volume table
partition_tuple = tuple(i for i in partitions_files)
return str(partition_tuple)
def prepare_users_names_tmsl(list_users: str) -> str:
list_users = list(list_users.split(','))
list_name = []
for user in list_users:
d = {"memberName": user,
"identityProvider": "AzureAD"}
list_name.append(d)
return str(json.dumps(list_name))
def prepare_tmsl(tmsl: str, local: str) -> str:
"""Prepare the path from local
:return:
Path of file tmsl
"""
if local == 'origin':
return ''.join(path_cloud + path_ori_tmsl + 'create_db_'+tmsl+'.json')
else:
return ''.join(path_cloud + path_dest_tmsl + 'create_db_'+tmsl+'.json')
def replace_tags(path: str = None,
subscritpion_id: str = '0000',
product_name: str = 'PRODUCT_NAME',
client_name: str = 'mp',
location: str = 'brazilsouth',
tmsl_create_db_as: str = path_cloud,
tmsl_role_admins_as: str = path_cloud,
tmsl_role_readers_as: str = path_cloud,
list_readers_users: str = '@company.com.br',
list_admins_users: str = '@company.com.br',
email_from: str = '@company.com.br',
email_to: str = '@company.com.br',
smtp_server: str = '',
smtp_port: str = '587',
large_volume_table: str = 'historic',
column_to_split: str = 'idname',
total_month: str = '12',
list_partitions: str = '(part_1, part_2)') -> 'stream':
return open(file=path, mode='r', encoding="utf-8")\
.read()\
.replace('<SUBSCRIPTION_ID>', subscritpion_id)\
.replace('<PRODUCT_NAME>', product_name)\
.replace('<CLIENT_NAME>', client_name)\
.replace('<CLIENT_NAME_LOWER>', client_name.lower())\
.replace('<LOCATION>', location)\
.replace('<SCRIPT_CREATE_DB>', tmsl_create_db_as)\
.replace('<SCRIPT_ROLE_ADMINS>', tmsl_role_admins_as)\
.replace('<SCRIPT_ROLE_READERS>', tmsl_role_readers_as) \
.replace('<LIST_READERS_USERS>', list_readers_users)\
.replace('<LIST_ADMINS_USERS>', list_admins_users)\
.replace('<EMAIL_FROM>', email_from)\
.replace('<EMAIL_TO>', email_to)\
.replace('<SMTP_SERVER>', smtp_server)\
.replace('<SMTP_PORT>', smtp_port)\
.replace('<LIST_PARTITIONS>', list_partitions)\
.replace('<LARGE_VOLUME_TABLE>', large_volume_table)\
.replace('<COLUMN_TO_SPLIT>', column_to_split)\
.replace('<TOTAL_MONTH>', total_month)
def write_script(path_dest: str, file_name: str, script_content: str):
"""Create script powershell to use in runbooks
:return:
file *.ps1
"""
try:
return open(''.join(path_dest + file_name), mode='w+', encoding="utf-8") \
.write(script_content)
except IOError:
raise Exception('Request Error in ', file_name)
def main(subscritpion_id: str, data_source: str, product_name: str,
client_name: str, location: str, list_admins: str, list_readers: str,
large_volume_table: str, column_to_split: str, total_month: str,
email_from: str, email_to: str, smtp_server: str, smtp_port: str):
path_ori_create_db = prepare_tmsl(tmsl=data_source, local='origin')
path_dest_create_db = prepare_tmsl(tmsl=data_source, local='destination')
list_partitions = get_partitions_name(path_partitions=path_partitions,
partition_to_exclude=large_volume_table)
list_admins = prepare_users_names_tmsl(list_users=list_admins)
list_readers = prepare_users_names_tmsl(list_users=list_readers)
# tmsl
stream_ori_create_db_as = replace_tags(path=path_ori_create_db,
product_name=product_name,
client_name=client_name,
list_readers_users=list_readers,
list_admins_users=list_admins)
stream_ori_role_admins_as = replace_tags(path=path_ori_role_admins,
product_name=product_name,
client_name=client_name,
list_admins_users=list_admins)
stream_ori_role_readers = replace_tags(path=path_ori_role_readers,
product_name=product_name,
client_name=client_name,
list_readers_users=list_readers)
write_script(path_dest=path_analysis_services,
file_name=f'create_db_{data_source}.json',
script_content=stream_ori_create_db_as)
write_script(path_dest=path_analysis_services,
file_name='role_admins.json',
script_content=stream_ori_role_admins_as)
write_script(path_dest=path_analysis_services,
file_name='role_readers.json',
script_content=stream_ori_role_readers)
# stream: tmsl
stream_dest_create_db_as = open(file=path_dest_create_db,
mode='r',
encoding="utf-8").read()
stream_dest_role_admins_as = open(file=path_dest_role_admins,
mode='r',
encoding="utf-8").read()
stream_dest_role_readers_as = open(file=path_dest_role_readers,
mode='r',
encoding="utf-8").read()
# runbooks
stream_runbook_create_db_as = replace_tags(path=path_create_db_as,
product_name=product_name,
client_name=client_name,
location=location,
tmsl_create_db_as=stream_dest_create_db_as)
stream_runbook_apply_roles_as = replace_tags(path=path_apply_roles_as,
product_name=product_name,
client_name=client_name,
location=location,
tmsl_role_admins_as=stream_dest_role_admins_as,
tmsl_role_readers_as=stream_dest_role_readers_as)
stream_runbook_restore_bkp_as = replace_tags(path=path_restore_bkp_as,
product_name=product_name,
client_name=client_name,
location=location)
stream_runbook_send_email = replace_tags(path=path_send_email,
product_name=product_name,
client_name=client_name,
location=location,
email_from=email_from,
email_to=email_to,
smtp_server=smtp_server,
smtp_port=smtp_port)
stream_runbook_start_stop_as = replace_tags(path=path_start_stop_as,
product_name=product_name,
client_name=client_name)
stream_runbook_update_bkp_as = replace_tags(path=path_update_bkp_as,
product_name=product_name,
client_name=client_name,
location=location)
steam_runbook_process_large_volume_tables_as = replace_tags(
path=path_process_large_volume_tables_as,
product_name=product_name,
client_name=client_name,
location=location,
large_volume_table=large_volume_table,
column_to_split=column_to_split,
total_month=total_month)
steam_runbook_process_partitions_as = replace_tags(
path=path_process_partitions_as,
product_name=product_name,
client_name=client_name,
location=location,
list_partitions=list_partitions)
steam_runbook_process_partitions_monthly_as = replace_tags(
path=path_process_partitions_monthly_as,
product_name=product_name,
client_name=client_name,
location=location,
large_volume_table=large_volume_table,
column_to_split=column_to_split)
steam_runbook_update_modules = replace_tags(path=path_update_modules,
product_name=product_name,
client_name=client_name)
steam_runbook_update_certificate = replace_tags(path=path_update_certicate,
product_name=product_name,
client_name=client_name,
subscritpion_id=subscritpion_id)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-create-db-as-runbook.ps1',
script_content=stream_runbook_create_db_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-apply-roles-as-runbook.ps1',
script_content=stream_runbook_apply_roles_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-restore-bkp-as-runbook.ps1',
script_content=stream_runbook_restore_bkp_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-send-email-runbook.ps1',
script_content=stream_runbook_send_email)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-start-stop-as-runbook.ps1',
script_content=stream_runbook_start_stop_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-update-bkp-as-runbook.ps1',
script_content=stream_runbook_update_bkp_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-process-large-volume-tables-as-runbook.ps1',
script_content=steam_runbook_process_large_volume_tables_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-daily-process-partitions-as-runbook.ps1',
script_content=steam_runbook_process_partitions_as)
write_script(path_dest=path_automation_account,
file_name=f'{product_name}-{client_name}-monthly-process-partitions-as-runbook.ps1',
script_content=steam_runbook_process_partitions_monthly_as)
write_script(path_dest=path_automation_account,
file_name=f'update-modules-runbook.ps1',
script_content=steam_runbook_update_modules)
write_script(path_dest=path_automation_account,
file_name=f'update-certificate-runbook.ps1',
script_content=steam_runbook_update_certificate)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Generate each script with custom variables per client')
parser.add_argument('--subscritpion_id',
type=str,
required=False,
                        help='Subscription id number with 4 fields')
parser.add_argument('--data_source',
type=str,
default='oracle',
required=False,
help='oracle or postgresql')
parser.add_argument('--product_name',
type=str,
default='PRODUCT_NAME',
required=False,
help='Name in lower case')
parser.add_argument('--client_name',
type=str,
default='mp',
required=False,
help='Name in Upper case')
parser.add_argument('--location',
type=str,
default='brazilsouth',
required=False,
help='Localization to create resources in Azure')
parser.add_argument('--list_admins',
type=str,
default='[[email protected]]',
required=False,
help='Users to role admins in analysis services')
parser.add_argument('--list_readers',
type=str,
default='[[email protected]]',
required=False,
help='Users to role readers in analysis services')
parser.add_argument('--large_volume_table',
type=str,
default='fInfoProcessoMensal',
required=False,
                        help='Name of the table that needs split partitions')
parser.add_argument('--column_to_split',
type=str,
default='idanomesreferencia',
required=False,
                        help='Name of the column used to split the partitions')
parser.add_argument('--total_month',
type=str,
default='12',
required=False,
                        help='Range of months to store in Analysis Services')
parser.add_argument('--email_from',
type=str,
default='[email protected]',
required=False,
                        help='Sender email used when a runbook fails')
parser.add_argument('--email_to',
type=str,
default='[email protected]',
required=False,
help='Receiver email when runbooks fail.')
parser.add_argument('--smtp_server',
type=str,
default='[email protected]',
required=False,
                        help='SMTP server used to send failure emails.')
parser.add_argument('--smtp_port',
type=str,
default='[email protected]',
required=False,
                        help='SMTP port used to send failure emails.')
args = parser.parse_args() # <class 'argparse.ArgumentParser'>
subscritpion_id = args.subscritpion_id
data_source = args.data_source
product_name = args.product_name
client_name = args.client_name
location = args.location
list_admins = args.list_admins
list_readers = args.list_readers
large_volume_table = args.large_volume_table
column_to_split = args.column_to_split
total_month = args.total_month
email_from = args.email_from
email_to = args.email_to
smtp_server = args.smtp_server
smtp_port = args.smtp_port
main(subscritpion_id,
data_source,
product_name,
client_name,
location,
list_admins,
list_readers,
large_volume_table,
column_to_split,
total_month,
email_from,
email_to,
smtp_server,
smtp_port)
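A small sketch of the role-membership helper defined above; the addresses are placeholders, not real users:
    members = prepare_users_names_tmsl('[email protected],[email protected]')
    print(members)
    # expected: [{"memberName": "[email protected]", "identityProvider": "AzureAD"}, ...]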
| 0
| 0
| 0
| 0
| 0
| 11,540
| 0
| -10
| 91
|
fb87826767923a06891871f7e6d5e30765f0233b
| 1,852
|
py
|
Python
|
ftarc/task/fastqc.py
|
dceoy/ftarc
|
d9ce11902a7ce3ad3e47d717f863dcf03ea73ed3
|
[
"MIT"
] | null | null | null |
ftarc/task/fastqc.py
|
dceoy/ftarc
|
d9ce11902a7ce3ad3e47d717f863dcf03ea73ed3
|
[
"MIT"
] | null | null | null |
ftarc/task/fastqc.py
|
dceoy/ftarc
|
d9ce11902a7ce3ad3e47d717f863dcf03ea73ed3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import luigi
if __name__ == '__main__':
luigi.run()
| 30.866667
| 78
| 0.563715
|
#!/usr/bin/env python
import re
from pathlib import Path
import luigi
from .core import FtarcTask
class CollectFqMetricsWithFastqc(FtarcTask):
fq_paths = luigi.ListParameter()
dest_dir_path = luigi.Parameter(default='.')
fastqc = luigi.Parameter(default='fastqc')
n_cpu = luigi.IntParameter(default=1)
memory_mb = luigi.FloatParameter(default=4096)
sh_config = luigi.DictParameter(default=dict())
priority = 10
def output(self):
return [
luigi.LocalTarget(o)
for o in self._generate_output_files(*self.fq_paths)
]
def run(self):
dest_dir = Path(self.dest_dir_path).resolve()
for p in self.fq_paths:
fq = Path(p).resolve()
run_id = fq.stem
self.print_log(f'Collect FASTQ metrics using FastQC:\t{run_id}')
self.setup_shell(
run_id=run_id, commands=self.fastqc, cwd=dest_dir,
**self.sh_config,
env={
'JAVA_TOOL_OPTIONS': '-Xmx{}m'.format(int(self.memory_mb))
}
)
self.run_shell(
args=(
f'set -e && {self.fastqc} --nogroup'
+ f' --threads {self.n_cpu} --outdir {dest_dir} {p}'
),
input_files_or_dirs=p,
output_files_or_dirs=list(self._generate_output_files(p))
)
tmp_dir = dest_dir.joinpath('?')
self.remove_files_and_dirs(tmp_dir)
def _generate_output_files(self, *paths):
dest_dir = Path(self.dest_dir_path).resolve()
for p in paths:
stem = re.sub(r'\.(fq|fastq)$', '', Path(str(p)).stem)
for e in ['html', 'zip']:
yield dest_dir.joinpath(f'{stem}_fastqc.{e}')
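A hedged sketch of driving the task from Python instead of the luigi CLI; the FASTQ path is a placeholder and the fastqc binary is assumed to be on PATH:
def _example_build():
    # Build the task directly and run it on luigi's local scheduler.
    task = CollectFqMetricsWithFastqc(
        fq_paths=['sample_R1.fastq.gz'], dest_dir_path='qc'
    )
    luigi.build([task], local_scheduler=True)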
if __name__ == '__main__':
luigi.run()
| 0
| 0
| 0
| 1,682
| 0
| 0
| 0
| -3
| 91
|
f067f89023af4ada7a41c3edf295ca19e2dffe30
| 2,135
|
py
|
Python
|
flask/ajax-setinterval-get-thread-result/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 140
|
2017-02-21T22:49:04.000Z
|
2022-03-22T17:51:58.000Z
|
flask/ajax-setinterval-get-thread-result/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 5
|
2017-12-02T19:55:00.000Z
|
2021-09-22T23:18:39.000Z
|
flask/ajax-setinterval-get-thread-result/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 79
|
2017-01-25T10:53:33.000Z
|
2022-03-11T16:13:57.000Z
|
#!/usr/bin/env python3
# date: 2020.01.17
# https://stackoverflow.com/questions/59780007/ajax-with-flask-for-real-time-esque-updates-of-sensor-data-on-webpage/
from flask import Flask
app = Flask(__name__)
running = False # to control loop in thread
value = 0
app.run(debug=True)
| 25.416667
| 117
| 0.513817
|
#!/usr/bin/env python3
# date: 2020.01.17
# https://stackoverflow.com/questions/59780007/ajax-with-flask-for-real-time-esque-updates-of-sensor-data-on-webpage/
from flask import Flask, request, render_template_string, jsonify
import datetime
import time
import threading
app = Flask(__name__)
running = False # to control loop in thread
value = 0
def rpi_function():
global value
print('start of thread')
while running: # global variable to stop loop
value += 1
time.sleep(1)
print('stop of thread')
@app.route('/')
@app.route('/<device>/<action>')
def index(device=None, action=None):
global running
global value
if device:
if action == 'on':
if not running:
print('start')
running = True
threading.Thread(target=rpi_function).start()
else:
print('already running')
elif action == 'off':
if running:
print('stop')
running = False
else:
print('not running')
return render_template_string('''<!DOCTYPE html>
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>
</head>
<body>
<a href="/bioR/on">TURN ON</a>
<a href="/bioR/off">TURN OFF</a>
<h1 id="num"></h1>
<h1 id="time"></h1>
<script>
setInterval(function(){$.ajax({
url: '/update',
type: 'POST',
success: function(response) {
console.log(response);
$("#num").html(response["value"]);
$("#time").html(response["time"]);
},
error: function(error) {
console.log(error);
}
})}, 1000);
</script>
</body>
</html>
''')
@app.route('/update', methods=['POST'])
def update():
return jsonify({
'value': value,
'time': datetime.datetime.now().strftime("%H:%M:%S"),
})
app.run(debug=True)
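A minimal client-side check, meant to be run from a separate process while the app above is serving on Flask's default port 5000; it assumes the requests package is available:
import requests
# Poll the /update endpoint that the page's setInterval hits every second.
resp = requests.post('http://localhost:5000/update')
print(resp.json())  # e.g. {'value': 3, 'time': '12:34:56'}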
| 0
| 1,506
| 0
| 0
| 0
| 170
| 0
| 21
| 139
|
173e22545eafe25afdd8caf76400a2e631dc90af
| 884
|
py
|
Python
|
opportune/tests/test_account_model.py
|
Mildly-Sketchy/Mildly-Sketchy
|
d215d5c478bfbdac0056ed0144edfe8c9efa557f
|
[
"MIT"
] | null | null | null |
opportune/tests/test_account_model.py
|
Mildly-Sketchy/Mildly-Sketchy
|
d215d5c478bfbdac0056ed0144edfe8c9efa557f
|
[
"MIT"
] | 2
|
2019-12-26T16:41:26.000Z
|
2020-01-06T18:52:13.000Z
|
opportune/tests/test_account_model.py
|
Mildly-Sketchy/Mildly-Sketchy
|
d215d5c478bfbdac0056ed0144edfe8c9efa557f
|
[
"MIT"
] | 1
|
2018-06-13T18:16:45.000Z
|
2018-06-13T18:16:45.000Z
|
def test_constructed_account_added_to_database(db_session):
"""Test adding a complete stock entry."""
from ..models import Account
assert len(db_session.query(Account).all()) == 0
account = Account(
username='TEST',
password='1234',
email='[email protected]',
)
db_session.add(account)
assert len(db_session.query(Account).all()) == 1
def test_account_with_no_email_throws_error(db_session):
"""Test adding stock with required field empty."""
from ..models import Account
import pytest
from sqlalchemy.exc import IntegrityError
assert len(db_session.query(Account).all()) == 0
account = Account(
username='Test2',
password='1234',
email=None
)
with pytest.raises(IntegrityError):
db_session.add(account)
assert db_session.query(Account).one_or_none() is None
| 29.466667
| 62
| 0.668552
|
def test_constructed_account_added_to_database(db_session):
"""Test adding a complete stock entry."""
from ..models import Account
assert len(db_session.query(Account).all()) == 0
account = Account(
username='TEST',
password='1234',
email='[email protected]',
)
db_session.add(account)
assert len(db_session.query(Account).all()) == 1
def test_account_with_no_email_throws_error(db_session):
"""Test adding stock with required field empty."""
from ..models import Account
import pytest
from sqlalchemy.exc import IntegrityError
assert len(db_session.query(Account).all()) == 0
account = Account(
username='Test2',
password='1234',
email=None
)
with pytest.raises(IntegrityError):
db_session.add(account)
assert db_session.query(Account).one_or_none() is None
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7be240fbdc21ca63847b43f7fdcf1774a5755f88
| 282
|
py
|
Python
|
zillowdb/packages/pathlib_mate/__init__.py
|
MacHu-GWU/zillowdb-project
|
020266257311fa667a3b5fcca15450eb00584aaf
|
[
"MIT"
] | null | null | null |
zillowdb/packages/pathlib_mate/__init__.py
|
MacHu-GWU/zillowdb-project
|
020266257311fa667a3b5fcca15450eb00584aaf
|
[
"MIT"
] | null | null | null |
zillowdb/packages/pathlib_mate/__init__.py
|
MacHu-GWU/zillowdb-project
|
020266257311fa667a3b5fcca15450eb00584aaf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pathlib_mate provides extensive methods and attributes for pathlib.
"""
__version__ = "0.0.6"
__short_description__ = "An extended and more powerful pathlib."
__license__ = "MIT"
__author__ = "Sanhe Hu"
| 20.142857
| 64
| 0.705674
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pathlib_mate provides extensive methods and attributes for pathlib.
"""
__version__ = "0.0.6"
__short_description__ = "An extended and more powerful pathlib."
__license__ = "MIT"
__author__ = "Sanhe Hu"
from .pathlib import Path
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 27
|
33de6ce4592643dd5c627b7e893f88550ab7fb4f
| 3,476
|
py
|
Python
|
baseseven.py
|
FykAikawa/Anchor-based-SR-pytorch
|
60ec9b49efd23cf59a385268f4d10a98e5a73965
|
[
"MIT"
] | 3
|
2021-07-18T17:28:44.000Z
|
2022-02-28T03:08:13.000Z
|
baseseven.py
|
FykAikawa/Anchor-based-SR-pytorch
|
60ec9b49efd23cf59a385268f4d10a98e5a73965
|
[
"MIT"
] | null | null | null |
baseseven.py
|
FykAikawa/Anchor-based-SR-pytorch
|
60ec9b49efd23cf59a385268f4d10a98e5a73965
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
upsampler = nn.Upsample(scale_factor=4, mode='bilinear')
| 38.622222
| 172
| 0.59954
|
import numpy as np
import torch.nn as nn
import torch
def rgb_to_ycbcr(image):
r: torch.Tensor = image[..., 0, :, :]
g: torch.Tensor = image[..., 1, :, :]
b: torch.Tensor = image[..., 2, :, :]
delta = 0.5
y = 0.299 * r + 0.587 * g + 0.114 * b
cb = (b - y) * 0.564 + delta
cr = (r - y) * 0.713 + delta
return torch.stack([y, cb, cr], -3)
def ycbcr_to_rgb(image):
    y = image[..., 0, :, :]
cb = image[..., 1, :, :]
cr = image[..., 2, :, :]
delta = 0.5
cb_shifted = cb - delta
cr_shifted = cr - delta
r = y + 1.403 * cr_shifted
g = y - 0.714 * cr_shifted - 0.344 * cb_shifted
b = y + 1.773 * cb_shifted
return torch.stack([r, g, b], -3)
upsampler = nn.Upsample(scale_factor=4, mode='bilinear')
class Base7(nn.Module):
def __init__(self):
super(Base7, self).__init__()
self.in_channels = 3
self.out_channels = 3
self.m = 4
self.num_fea = 28
self.scale = 4
self.conv1 = nn.Conv2d(self.in_channels, self.num_fea, kernel_size=3, stride=1, padding=1)
self.convs = nn.Sequential(*[nn.Sequential(nn.Conv2d(self.num_fea, self.num_fea, kernel_size=3, stride=1, padding=1),nn.ReLU(inplace=True)) for _ in range(self.m)])
self.conv2 = nn.Conv2d(self.num_fea,self.out_channels*(self.scale**2),kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(self.out_channels*(self.scale**2),self.out_channels*(self.scale**2),kernel_size=3, stride=1, padding=1)
self.ps = nn.PixelShuffle(self.scale)
def forward(self, inputs):
out = inputs
upsampled_inp = torch.cat([out for _ in range(self.scale**2)],dim=1)
out = self.conv1(out)
out = torch.nn.functional.relu_(out)
out = self.convs(out)
out = self.conv2(out)
out = torch.nn.functional.relu_(out)
out = self.conv3(out)
out = out + upsampled_inp
out = self.ps(out)
out = torch.clamp(out,min=0,max=1)
return out
class Base7yuv(nn.Module):  # my original model; only super-resolves the Y channel
    def __init__(self):
        super(Base7yuv, self).__init__()
self.in_channels = 1
self.out_channels = 1
self.m = 4
self.num_fea = 28
self.scale = 4
self.conv1 = nn.Conv2d(self.in_channels, self.num_fea, kernel_size=3, stride=1, padding=1)
self.convs = nn.Sequential(*[nn.Sequential(nn.Conv2d(self.num_fea, self.num_fea, kernel_size=3, stride=1, padding=1),nn.ReLU(inplace=True)) for _ in range(self.m)])
self.conv2 = nn.Conv2d(self.num_fea,self.out_channels*(self.scale**2),kernel_size=3, stride=1, padding=1)
self.conv3 = nn.Conv2d(self.out_channels*(self.scale**2),self.out_channels*(self.scale**2),kernel_size=3, stride=1, padding=1)
self.ps = nn.PixelShuffle(self.scale)
def forward(self, inputs):
ycbcrimage=rgb_to_ycbcr(inputs)
out = torch.unsqueeze(ycbcrimage[...,0,:,:],1)
upsampled_inp = torch.cat([out for _ in range(self.scale**2)],dim=1)
out = self.conv1(out)
out = torch.nn.functional.relu_(out)
out = self.convs(out)
out = self.conv2(out)
out = torch.nn.functional.relu_(out)
out = self.conv3(out)
out = out + upsampled_inp
out = self.ps(out)
out = torch.cat((out,upsampler(ycbcrimage[...,[1,2],:,:])),1)
out = torch.clamp(ycbcr_to_rgb(out),min=0,max=1)
return out
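A quick shape check of the RGB model above on random data; CPU only, no pretrained weights:
if __name__ == '__main__':
    model = Base7()
    x = torch.rand(1, 3, 32, 32)
    y = model(x)
    print(y.shape)  # expected: torch.Size([1, 3, 128, 128]) for the fixed x4 scale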
| 0
| 0
| 0
| 2,663
| 0
| 608
| 0
| -12
| 136
|
e81ad75e45ab4660c1a2fd705fc8ce5e9c88c56b
| 199
|
py
|
Python
|
oo/pessoa.py
|
cef01/pythonbirds
|
5af5278a784cb76162de756cec213b14cce1e1dd
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
cef01/pythonbirds
|
5af5278a784cb76162de756cec213b14cce1e1dd
|
[
"MIT"
] | null | null | null |
oo/pessoa.py
|
cef01/pythonbirds
|
5af5278a784cb76162de756cec213b14cce1e1dd
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
p = Pessoa()
print(Pessoa.comprimentar(p))
print(id(p))
print(p.comprimentar())
| 19.9
| 33
| 0.60804
|
class Pessoa:
def comprimentar(self):
return f'olá {id(self)}'
if __name__ == '__main__':
p = Pessoa()
print(Pessoa.comprimentar(p))
print(id(p))
print(p.comprimentar())
| 2
| 0
| 0
| 52
| 0
| 0
| 0
| 0
| 22
|
9fc44a3261d2a0f0f9c0aeed1ed8fd908fe8b78e
| 2,717
|
py
|
Python
|
python/ns/maya/Progress.py
|
redpawfx/massiveImporter
|
2772d1ce530041007d00d8ba4274dccdda7b8900
|
[
"MIT"
] | 2
|
2018-01-30T07:50:48.000Z
|
2020-03-10T02:10:38.000Z
|
python/ns/maya/Progress.py
|
redpawfx/massiveImporter
|
2772d1ce530041007d00d8ba4274dccdda7b8900
|
[
"MIT"
] | null | null | null |
python/ns/maya/Progress.py
|
redpawfx/massiveImporter
|
2772d1ce530041007d00d8ba4274dccdda7b8900
|
[
"MIT"
] | 3
|
2016-10-25T14:29:34.000Z
|
2021-08-09T13:37:33.000Z
|
# The MIT License
#
# Copyright (c) 2008 James Piechota
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Later this can be made into a class to support progress windows
# on different platforms (e.g. Max vs. Maya, or Win vs. Linux)
#
_progressing = False
_uiProgress = False
| 29.215054
| 80
| 0.741627
|
# The MIT License
#
# Copyright (c) 2008 James Piechota
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Later this can be made into a class to support progress windows
# on different platforms (e.g. Max vs. Maya, or Win vs. Linux)
#
import sys
from maya.OpenMayaUI import MProgressWindow
from maya.OpenMaya import MGlobal
import ns.py as npy
import ns.py.Errors
_progressing = False
_uiProgress = False
def reset( maxRange ):
global _uiProgress
global _progressing
_progressing = True
_uiProgress = (MGlobal.mayaState() == MGlobal.kInteractive)
if _uiProgress:
MProgressWindow.reserve()
MProgressWindow.setProgressRange( 0, maxRange )
MProgressWindow.setProgress( 0 )
MProgressWindow.startProgress()
MProgressWindow.setInterruptable( True )
def stop():
global _progressing
if _uiProgress:
MProgressWindow.endProgress()
_progressing = False
def checkForCancel():
if not _progressing:
return
if _uiProgress:
if MProgressWindow.isCancelled():
raise npy.Errors.AbortError("Operation cancelled by user")
def setTitle( title ):
if not _progressing:
return
if _uiProgress:
MProgressWindow.setTitle( title )
def setProgressStatus( status ):
if not _progressing:
return
if _uiProgress:
MProgressWindow.setProgressStatus( status )
else:
print >> sys.stderr, "### %s" % status
def setProgress( progress ):
if not _progressing:
return
if _uiProgress:
MProgressWindow.setProgress( progress )
checkForCancel()
def advanceProgress( progress ):
if not _progressing:
return
if _uiProgress:
MProgressWindow.advanceProgress( progress )
checkForCancel()
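A hedged usage sketch; it only makes sense inside an interactive Maya session, and the loop body stands in for real work:
def _example_progress():
    # Typical progress-window lifecycle around a batch operation.
    reset(100)
    setTitle("Importing agents")
    for i in range(100):
        setProgressStatus("agent %d" % i)
        advanceProgress(1)  # also raises AbortError if the user cancels
    stop()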
| 0
| 0
| 0
| 0
| 0
| 1,078
| 0
| 19
| 299
|
a028741d9b11b9c49111dc54ccac4cee2d3d2068
| 1,274
|
py
|
Python
|
microWsgi/uwsgiTestApp.py
|
chrisbrake/PythonSandbox
|
8cd2ea847676d6a300b55c560f49cd980f760b00
|
[
"BSD-3-Clause"
] | 1
|
2018-10-19T17:35:01.000Z
|
2018-10-19T17:35:01.000Z
|
microWsgi/uwsgiTestApp.py
|
chrisbrake/PythonSandbox
|
8cd2ea847676d6a300b55c560f49cd980f760b00
|
[
"BSD-3-Clause"
] | null | null | null |
microWsgi/uwsgiTestApp.py
|
chrisbrake/PythonSandbox
|
8cd2ea847676d6a300b55c560f49cd980f760b00
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import logging
import socket
"""
Server:
uwsgi --module "microWsgi.uwsgiTestApp:assemble()" --http :5050 --stats stats.socket
Client:
curl -s 'http://localhost:5050?test=potatoes' | python -m 'json.tool'
Stats:
nc -U stats.socket | python -m 'json.tool'
"""
logging.basicConfig(level=logging.DEBUG)
def ready(req, resp):
""" Proof of life """
return "yes"
def params(req, resp):
""" Params from the URL string """
return req.params
def stats(req, resp):
""" uwsgi's stats """
stats_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
stats_socket.connect('stats.socket')
diag = b''
part = b''
while part or not diag:
diag += part
part = stats_socket.recv(16)
return json.loads(diag)
| 20.222222
| 85
| 0.603611
|
import json
import logging
import socket
import falcon
from . import db
"""
Server:
uwsgi --module "microWsgi.uwsgiTestApp:assemble()" --http :5050 --stats stats.socket
Client:
curl -s 'http://localhost:5050?test=potatoes' | python -m 'json.tool'
Stats:
nc -U stats.socket | python -m 'json.tool'
"""
logging.basicConfig(level=logging.DEBUG)
def ready(req, resp):
""" Proof of life """
return "yes"
def params(req, resp):
""" Params from the URL string """
return req.params
def stats(req, resp):
""" uwsgi's stats """
stats_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
stats_socket.connect('stats.socket')
diag = b''
part = b''
while part or not diag:
diag += part
part = stats_socket.recv(16)
return json.loads(diag)
class Tester(object):
""" Sandbox """
tests = {
'db': db.diag,
'stats': stats,
'params': params,
'ready': ready,
}
def on_get(self, req, resp, resource):
if resource:
resp.media = self.tests[resource](req, resp)
else:
resp.media = {k: v(req, resp) for k, v in self.tests.items()}
def assemble():
app = falcon.API()
app.add_route('/{resource}', Tester())
return app
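# --- Hedged usage sketch (editor addition) ---
# A minimal in-process check of the assembled app using falcon's test client.
# It assumes a falcon version providing falcon.API (as used above) and that the
# sibling `db` module is importable; only the 'params' resource is exercised,
# so neither a database nor the uwsgi stats socket needs to be live.
from falcon import testing
def _smoke_test():
    client = testing.TestClient(assemble())
    result = client.simulate_get('/params', params={'test': 'potatoes'})
    print(result.json)  # expected: {'test': 'potatoes'}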
| 0
| 0
| 0
| 348
| 0
| 75
| 0
| -13
| 90
|
aa26db1011e01edbcbb699f248c229f20b132387
| 365,395
|
py
|
Python
|
modality/pyuml2/uml_mixins.py
|
bmjjr/modality
|
700c608b4e8150f1413ba022938ab6a8f1f148c1
|
[
"MIT"
] | 1
|
2019-01-09T07:09:36.000Z
|
2019-01-09T07:09:36.000Z
|
modality/pyuml2/uml_mixins.py
|
bmjjr/modality
|
700c608b4e8150f1413ba022938ab6a8f1f148c1
|
[
"MIT"
] | null | null | null |
modality/pyuml2/uml_mixins.py
|
bmjjr/modality
|
700c608b4e8150f1413ba022938ab6a8f1f148c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Mixins to be implemented by user."""
import pyecore.ecore as ecore
| 44.598438
| 652
| 0.709646
|
# -*- coding: utf-8 -*-
"""Mixins to be implemented by user."""
import pyecore.ecore as ecore
from pyecore.ecore import EDerivedCollection
from pyecore.valuecontainer import EcoreUtils, BadValueError
from pyecore.innerutils import ignored
def check(value, etype):
if not EcoreUtils.isinstance(value, etype):
raise BadValueError(value, etype)
class ActivityContentMixin(object):
"""User defined mixin class for ActivityContent."""
def __init__(self, **kwargs):
super(ActivityContentMixin, self).__init__()
def containing_activity(self):
raise NotImplementedError(
"operation containing_activity(...) not yet implemented"
)
class DerivedOwnedelement(EDerivedCollection):
def __len__(self):
return len(self.owner.eContents)
def __contains__(self, x):
return x in self.owner.eContents
def __iter__(self):
return iter(self.owner.eContents)
def __repr__(self):
return repr(self.owner.eContents)
class ElementMixin(object):
"""User defined mixin class for Element."""
@property
def owner(self):
return self.eContainer()
def __init__(self, ownedComment=None, ownedElement=None, owner=None, **kwargs):
super(ElementMixin, self).__init__(**kwargs)
def has_owner(self, diagnostics=None, context=None):
"""Elements that must be owned must have an owner.
mustBeOwned() implies owner->notEmpty()"""
return self.owner is not None
def not_own_self(self, diagnostics=None, context=None):
"""An element may not directly or indirectly own itself.
not allOwnedElements()->includes(self)"""
return self not in self.all_owned_elements()
def add_keyword(self, keyword=None):
"""Adds the specified keyword to this element."""
raise NotImplementedError("operation add_keyword(...) not yet implemented")
def apply_stereotype(self, stereotype):
"""Applies the specified stereotype to this element."""
from .profile_utils import get_definition_reference_matching
gdrm = get_definition_reference_matching
definition, base_reference = gdrm(stereotype, self)
if definition is None:
return
application = definition()
setattr(application, base_reference.name, self)
self.eResource.append(application)
def create_EAnnotation(self, source=None):
"""Creates an annotation with the specified source and this element
as its model element."""
annotation = ecore.EAnnotation(source=source)
self.eAnnotations.append(annotation)
return annotation
def destroy(self):
"""Destroys this element by removing all cross references to/from it and
removing it from its containing resource or object."""
self.delete()
def get_keywords(self):
"""Retrieves the keywords for this element."""
raise NotImplementedError("operation get_keywords(...) not yet implemented")
def get_applicable_stereotype(self, qualifiedName=None):
"""Retrieves the stereotype with the specified qualified name that is
applicable to this element, or null if no such stereotype is applicable."""
raise NotImplementedError(
"operation get_applicable_stereotype(...) not yet implemented"
)
def get_applicable_stereotypes(self):
"""Retrieves the stereotypes that are applicable to this element,
including those that are required and/or may already be applied."""
raise NotImplementedError(
"operation get_applicable_stereotypes(...) not yet implemented"
)
def get_applied_stereotype(self, qualifiedName):
"""Retrieves the stereotype with the specified qualified name that is
applied to this element, or null if no such stereotype is applied."""
from .profile_utils import get_stereotype_from_application
for o, r in self._inverse_rels:
if r.name.startswith("base_"):
stereotype = get_stereotype_from_application(o)
with ignored(Exception):
if stereotype.qualifiedName == qualifiedName:
return stereotype
return None
def get_applied_stereotypes(self):
"""Retrieves the stereotypes that are applied to this element."""
from .profile_utils import get_stereotype_from_application
result = set()
for o, r in self._inverse_rels:
if r.name.startswith("base_"):
stereotype = get_stereotype_from_application(o)
if stereotype:
result.add(stereotype)
return tuple(result)
def get_applied_substereotype(self, stereotype=None, qualifiedName=None):
"""Retrieves the substereotype of the specified stereotype with the
specified qualified name that is applied to this element, or null if
no such stereotype is applied."""
raise NotImplementedError(
"operation get_applied_substereotype(...) not yet implemented"
)
def get_applied_substereotypes(self, stereotype=None):
"""Retrieves the substereotypes of the specified stereotype that are
applied to this element."""
raise NotImplementedError(
"operation get_applied_substereotypes(...) not yet implemented"
)
def get_model(self):
"""Retrieves the model that owns (either directly or indirectly) this
element."""
from .uml import Model
parent = self.eContainer()
while parent is not None and not isinstance(parent, Model):
parent = parent.eContainer()
return parent
def get_nearest_package(self):
"""Retrieves the nearest package that owns (either directly or indirectly)
this element, or the element itself (if it is a package)."""
from .uml import Package
parent = self.eContainer()
while parent is not None and not isinstance(parent, Package):
parent = parent.eContainer()
return parent
    def get_relationships(self, eClass=None):
        """Retrieves the relationships in which this element is involved,
        optionally restricted to the specified type."""
        raise NotImplementedError(
            "operation get_relationships(...) not yet implemented"
        )
def get_required_stereotype(self, qualifiedName=None):
"""Retrieves the stereotype with the specified qualified name that is
required for this element, or null if no such stereotype is required."""
raise NotImplementedError(
"operation get_required_stereotype(...) not yet implemented"
)
def get_required_stereotypes(self):
"""Retrieves the stereotypes that are required for this element."""
raise NotImplementedError(
"operation get_required_stereotypes(...) not yet implemented"
)
    def get_source_directed_relationships(self, eClass=None):
        """Retrieves the directed relationships for which this element is a
        source, optionally restricted to the specified type."""
        raise NotImplementedError(
            "operation get_source_directed_relationships(...) not yet implemented"
        )
def get_stereotype_application(self, stereotype=None):
"""Retrieves the application of the specified stereotype for this element,
or null if no such stereotype application exists."""
raise NotImplementedError(
"operation get_stereotype_application(...) not yet implemented"
)
def get_stereotype_applications(self):
"""Retrieves the stereotype applications for this element."""
from .profile_utils import get_stereotype_from_application
return tuple(
o
for o, r in self._inverse_rels
if r.name.startswith("base_") and get_stereotype_from_application(o)
)
    def get_target_directed_relationships(self, eClass=None):
        """Retrieves the directed relationships for which this element is a
        target, optionally restricted to the specified type."""
        raise NotImplementedError(
            "operation get_target_directed_relationships(...) not yet implemented"
        )
def get_value(self, stereotype=None, propertyName=None):
"""Retrieves the value of the property with the specified name in the
specified stereotype for this element."""
raise NotImplementedError("operation get_value(...) not yet implemented")
def has_keyword(self, keyword=None):
"""Determines whether this element has the specified keyword."""
raise NotImplementedError("operation has_keyword(...) not yet implemented")
def has_value(self, stereotype=None, propertyName=None):
"""Determines whether this element has a (non-default) value for the
property with the specified name in the specified stereotype."""
raise NotImplementedError("operation has_value(...) not yet implemented")
def is_stereotype_applicable(self, stereotype=None):
"""Determines whether the specified stereotype is applicable to this element."""
raise NotImplementedError(
"operation is_stereotype_applicable(...) not yet implemented"
)
def is_stereotype_applied(self, stereotype=None):
"""Determines whether the specified stereotype is applied to this element."""
stereotype = self.get_applied_stereotype(stereotype.qualifiedName)
return stereotype is not None
def is_stereotype_required(self, stereotype=None):
"""Determines whether the specified stereotype is required for this element."""
raise NotImplementedError(
"operation is_stereotype_required(...) not yet implemented"
)
def remove_keyword(self, keyword=None):
"""Removes the specified keyword from this element."""
raise NotImplementedError("operation remove_keyword(...) not yet implemented")
def set_value(self, stereotype=None, propertyName=None, newValue=None):
"""Sets the value of the property with the specified name in the specified
stereotype for this element."""
raise NotImplementedError("operation set_value(...) not yet implemented")
def unapply_stereotype(self, stereotype=None):
"""Unapplies the specified stereotype from this element."""
raise NotImplementedError(
"operation unapply_stereotype(...) not yet implemented"
)
def all_owned_elements(self):
"""The query allOwnedElements() gives all of the direct and indirect
ownedElements of an Element.
result = (ownedElement->union(ownedElement->collect(
e | e.allOwnedElements()))->asSet())
<p>From package UML::CommonStructure.</p>"""
return self.eAllContents()
def must_be_owned(self):
"""
The query mustBeOwned() indicates whether Elements of this type must have
an owner. Subclasses of Element that do not require an owner must override
this operation.
result = (true)
<p>From package UML::CommonStructure.</p>
"""
return True
class DerivedClientdependency(EDerivedCollection):
pass
class NamedElementMixin(object):
"""User defined mixin class for NamedElement."""
@property
def namespace(self):
raise NotImplementedError("Missing implementation for namespace")
@property
def qualifiedName(self):
qualified_name = self.name
element = self
separator = self.separator()
while element.eContainer():
element = element.eContainer()
qualified_name = element.name + separator + qualified_name
return qualified_name
def __init__(
self,
clientDependency=None,
name=None,
nameExpression=None,
namespace=None,
qualifiedName=None,
visibility=None,
**kwargs,
):
super(NamedElementMixin, self).__init__(**kwargs)
def visibility_needs_ownership(self, diagnostics=None, context=None):
"""
If a NamedElement is owned by something other than a Namespace,
it does not have a visibility. One that is not owned by anything
(and hence must be a Package, as this is the only kind of
NamedElement that overrides mustBeOwned()) may have a visibility.
(namespace = null and owner <> null) implies visibility = null
"""
raise NotImplementedError(
"operation visibility_needs_ownership(...) not yet implemented"
)
def has_qualified_name(self, diagnostics=None, context=None):
"""When there is a name, and all of the containing Namespaces have a name,
the qualifiedName is constructed from the name of the NamedElement and the
names of the containing Namespaces.
(name <> null and allNamespaces()->select(ns | ns.name = null)->isEmpty()) implies
qualifiedName = allNamespaces()->iterate( ns : Namespace; agg: String =
name | ns.name.concat(self.separator()).concat(agg))"""
raise NotImplementedError(
"operation has_qualified_name(...) not yet implemented"
)
def has_no_qualified_name(self, diagnostics=None, context=None):
"""If there is no name, or one of the containing Namespaces has no name,
there is no qualifiedName.
name=null or allNamespaces()->select( ns | ns.name=null )->notEmpty() implies
qualifiedName = null"""
raise NotImplementedError(
"operation has_no_qualified_name(...) not yet implemented"
)
def create_dependency(self, supplier=None):
"""Creates a dependency between this named element and the specified
supplier, owned by this named element's nearest package."""
raise NotImplementedError(
"operation create_dependency(...) not yet implemented"
)
def create_usage(self, supplier=None):
"""Creates a usage between this named element and the specified supplier,
owned by this named element's nearest package."""
raise NotImplementedError("operation create_usage(...) not yet implemented")
    def get_label(self, localize=None):
        """Retrieves a label for this named element, localized if indicated."""
        raise NotImplementedError("operation get_label(...) not yet implemented")
def get_namespace(self):
raise NotImplementedError("operation get_namespace(...) not yet implemented")
def all_namespaces(self):
"""The query allNamespaces() gives the sequence of Namespaces in which the
NamedElement is nested, working outwards.
result = (
if owner = null
then OrderedSet{}
else
let enclosingNamespace : Namespace =
if owner.oclIsKindOf(TemplateParameter) and owner.oclAsType(
TemplateParameter).signature.template.oclIsKindOf(Namespace)
then owner.oclAsType(TemplateParameter).signature.template.oclAsType(Namespace)
else
namespace
endif
in enclosingNamespace.allNamespaces()->prepend(enclosingNamespace)
endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation all_namespaces(...) not yet implemented")
def all_owning_packages(self):
"""The query allOwningPackages() returns the set of all the enclosing
Namespaces of this NamedElement, working outwards, that are Packages,
up to but not including the first such Namespace that is not a Package.
result = (if namespace.oclIsKindOf(Package)
then
let owningPackage : Package = namespace.oclAsType(Package) in
owningPackage->union(owningPackage.allOwningPackages())
else
null
endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation all_owning_packages(...) not yet implemented"
)
def is_distinguishable_from(self, n=None, ns=None):
"""The query isDistinguishableFrom() determines whether two NamedElements
may logically co-exist within a Namespace. By default, two named elements
are distinguishable if (a) they have types neither of which is a kind of
the other or (b) they have different names.
result = ((self.oclIsKindOf(n.oclType()) or n.oclIsKindOf(self.oclType())) implies
ns.getNamesOfMember(self)->intersection(ns.getNamesOfMember(n))->isEmpty()
)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation is_distinguishable_from(...) not yet implemented"
)
def get_qualified_name(self):
"""When a NamedElement has a name, and all of its containing Namespaces
have a name, the qualifiedName is constructed from the name of the
NamedElement and the names of the containing Namespaces.
result = (if self.name <> null and self.allNamespaces()->select(
ns | ns.name=null )->isEmpty()
then
self.allNamespaces()->iterate( ns : Namespace; agg: String =
self.name | ns.name.concat(self.separator()).concat(agg))
else
null
endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation get_qualified_name(...) not yet implemented"
)
def separator(self):
"""The query separator() gives the string that is used to separate names
when constructing a qualifiedName.
result = ('::')
<p>From package UML::CommonStructure.</p>"""
return "::"
def get_client_dependencies(self):
"""result = (Dependency.allInstances()->select(d | d.client->includes(self)))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation get_client_dependencies(...) not yet implemented"
)
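# --- Hedged illustration (editor addition, not pyuml2 API) ---
# Restates the qualifiedName derivation implemented above with plain objects:
# the names of the enclosing namespaces are prepended, outermost first, using
# the '::' separator. The _Named class is a stand-in for NamedElement.
class _Named(object):
    def __init__(self, name, container=None):
        self.name = name
        self._container = container
    def eContainer(self):
        return self._container
    def qualified_name(self):
        qualified = self.name
        element = self
        while element.eContainer():
            element = element.eContainer()
            qualified = element.name + "::" + qualified
        return qualified
# _model = _Named("Model"); _pkg = _Named("Pkg", _model)
# _Named("Clazz", _pkg).qualified_name()  ->  'Model::Pkg::Clazz'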
class CommentMixin(object):
"""User defined mixin class for Comment."""
def __init__(self, annotatedElement=None, body=None, **kwargs):
super(CommentMixin, self).__init__(**kwargs)
class ImageMixin(object):
"""User defined mixin class for Image."""
def __init__(self, content=None, format=None, location=None, **kwargs):
super(ImageMixin, self).__init__(**kwargs)
class ParameterableElementMixin(object):
"""User defined mixin class for ParameterableElement."""
def __init__(self, owningTemplateParameter=None, templateParameter=None, **kwargs):
super(ParameterableElementMixin, self).__init__(**kwargs)
def is_compatible_with(self, p=None):
"""The query isCompatibleWith() determines if this ParameterableElement
is compatible with the specified ParameterableElement. By default,
this ParameterableElement is compatible with another ParameterableElement
p if the kind of this ParameterableElement is the same as or a subtype
of the kind of p. Subclasses of ParameterableElement should override
this operation to specify different compatibility constraints.
result = (self.oclIsKindOf(p.oclType()))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation is_compatible_with(...) not yet implemented"
)
def is_template_parameter(self):
"""The query isTemplateParameter() determines if this ParameterableElement
is exposed as a formal TemplateParameter.
result = (templateParameter->notEmpty())
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation is_template_parameter(...) not yet implemented"
)
class TemplateParameterMixin(object):
"""User defined mixin class for TemplateParameter."""
def __init__(
self,
default=None,
ownedDefault=None,
parameteredElement=None,
signature=None,
ownedParameteredElement=None,
**kwargs,
):
super(TemplateParameterMixin, self).__init__(**kwargs)
def must_be_compatible(self, diagnostics=None, context=None):
"""The default must be compatible with the formal TemplateParameter.
default <> null implies default.isCompatibleWith(parameteredElement)"""
raise NotImplementedError(
"operation must_be_compatible(...) not yet implemented"
)
class TemplateSignatureMixin(object):
"""User defined mixin class for TemplateSignature."""
def __init__(self, parameter=None, template=None, ownedParameter=None, **kwargs):
super(TemplateSignatureMixin, self).__init__(**kwargs)
def own_elements(self, diagnostics=None, context=None):
"""Parameters must own the ParameterableElements they parameter or
those ParameterableElements must be owned by the TemplateableElement
being templated.
template.ownedElement->includesAll(parameter.parameteredElement->asSet() -
parameter.ownedParameteredElement->asSet())"""
raise NotImplementedError("operation own_elements(...) not yet implemented")
def unique_parameters(self, diagnostics=None, context=None):
"""The names of the parameters of a TemplateSignature are unique.
parameter->forAll( p1, p2 | (p1 <> p2 and p1.parameteredElement.oclIsKindOf(
NamedElement) and p2.parameteredElement.oclIsKindOf(NamedElement) ) implies
p1.parameteredElement.oclAsType(NamedElement).name <>
p2.parameteredElement.oclAsType(NamedElement).name)"""
raise NotImplementedError(
"operation unique_parameters(...) not yet implemented"
)
class TemplateableElementMixin(object):
"""User defined mixin class for TemplateableElement."""
def __init__(self, templateBinding=None, ownedTemplateSignature=None, **kwargs):
super(TemplateableElementMixin, self).__init__(**kwargs)
def is_template(self):
"""The query isTemplate() returns whether this TemplateableElement is actually
a template.
result = (ownedTemplateSignature <> null)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation is_template(...) not yet implemented")
def parameterable_elements(self):
"""The query parameterableElements() returns the set of ParameterableElements
that may be used as the parameteredElements for a TemplateParameter of this
TemplateableElement. By default, this set includes all the ownedElements.
Subclasses may override this operation if they choose to restrict the set
of ParameterableElements.
result = (self.allOwnedElements()->select(oclIsKindOf(
ParameterableElement)).oclAsType(ParameterableElement)->asSet())
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation parameterable_elements(...) not yet implemented"
)
class DerivedRelatedelement(EDerivedCollection):
pass
class RelationshipMixin(object):
"""User defined mixin class for Relationship."""
def __init__(self, relatedElement=None, **kwargs):
super(RelationshipMixin, self).__init__(**kwargs)
class TemplateParameterSubstitutionMixin(object):
"""User defined mixin class for TemplateParameterSubstitution."""
def __init__(
self, actual=None, formal=None, ownedActual=None, templateBinding=None, **kwargs
):
super(TemplateParameterSubstitutionMixin, self).__init__(**kwargs)
def must_be_compatible(self, diagnostics=None, context=None):
"""The actual ParameterableElement must be compatible with the formal
TemplateParameter, e.g., the actual ParameterableElement for a Class
TemplateParameter must be a Class.
actual->forAll(a | a.isCompatibleWith(formal.parameteredElement))"""
raise NotImplementedError(
"operation must_be_compatible(...) not yet implemented"
)
class MultiplicityElementMixin(object):
"""User defined mixin class for MultiplicityElement."""
@property
def lower(self):
return self._lower
@lower.setter
def lower(self, value):
self._lower = value
@property
def upper(self):
return self._upper
@upper.setter
def upper(self, value):
self._upper = value
def __init__(
self,
isOrdered=None,
isUnique=None,
lower=None,
lowerValue=None,
upper=None,
upperValue=None,
**kwargs,
):
super(MultiplicityElementMixin, self).__init__(**kwargs)
def upper_ge_lower(self, diagnostics=None, context=None):
"""The upper bound must be greater than or equal to the lower bound.
upperBound() >= lowerBound()"""
raise NotImplementedError("operation upper_ge_lower(...) not yet implemented")
def lower_ge_0(self, diagnostics=None, context=None):
"""The lower bound must be a non-negative integer literal.
lowerBound() >= 0"""
raise NotImplementedError("operation lower_ge_0(...) not yet implemented")
def value_specification_no_side_effects(self, diagnostics=None, context=None):
"""If a non-literal ValueSpecification is used for lowerValue or upperValue,
then evaluating that specification must not have side effects."""
raise NotImplementedError(
"operation value_specification_no_side_effects(...) not yet implemented"
)
def value_specification_constant(self, diagnostics=None, context=None):
"""If a non-literal ValueSpecification is used for lowerValue or upperValue,
then that specification must be a constant expression."""
raise NotImplementedError(
"operation value_specification_constant(...) not yet implemented"
)
def lower_is_integer(self, diagnostics=None, context=None):
"""If it is not empty, then lowerValue must have an Integer value.
lowerValue <> null implies lowerValue.integerValue() <> null"""
raise NotImplementedError("operation lower_is_integer(...) not yet implemented")
def upper_is_unlimited_natural(self, diagnostics=None, context=None):
"""If it is not empty, then upperValue must have an UnlimitedNatural value.
upperValue <> null implies upperValue.unlimitedValue() <> null"""
raise NotImplementedError(
"operation upper_is_unlimited_natural(...) not yet implemented"
)
def set_lower(self, newLower=None):
raise NotImplementedError("operation set_lower(...) not yet implemented")
def set_upper(self, newUpper=None):
raise NotImplementedError("operation set_upper(...) not yet implemented")
def compatible_with(self, other=None):
"""The operation compatibleWith takes another multiplicity as input. It
returns true if the other multiplicity is wider than, or the same as, self.
result = ((other.lowerBound() <= self.lowerBound()) and ((other.upperBound() = *)
or (self.upperBound() <= other.upperBound())))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation compatible_with(...) not yet implemented")
def includes_multiplicity(self, M=None):
"""The query includesMultiplicity() checks whether this multiplicity includes
all the cardinalities allowed by the specified multiplicity.
self.upperBound()->notEmpty() and self.lowerBound()->notEmpty() and
M.upperBound()->notEmpty() and M.lowerBound()->notEmpty()
result = ((self.lowerBound() <= M.lowerBound()) and (self.upperBound() >=
M.upperBound()))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation includes_multiplicity(...) not yet implemented"
)
def is_(self, lowerbound=None, upperbound=None):
"""The operation is determines if the upper and lower bound of the
ranges are the ones given.
result = (lowerbound = self.lowerBound() and upperbound = self.upperBound())
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation is_(...) not yet implemented")
def is_multivalued(self):
"""The query isMultivalued() checks whether this multiplicity has an
upper bound greater than one.
upperBound()->notEmpty()
result = (upperBound() > 1)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation is_multivalued(...) not yet implemented")
def get_lower(self):
"""The derived lower attribute must equal the lowerBound.
result = (lowerBound())
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation get_lower(...) not yet implemented")
def lower_bound(self):
"""The query lowerBound() returns the lower bound of the multiplicity as
an integer, which is the integerValue of lowerValue, if this is given,
and 1 otherwise.
result = (if (lowerValue=null or lowerValue.integerValue()=null) then 1 else
lowerValue.integerValue() endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation lower_bound(...) not yet implemented")
def get_upper(self):
"""The derived upper attribute must equal the upperBound.
result = (upperBound())
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation get_upper(...) not yet implemented")
def upper_bound(self):
"""The query upperBound() returns the upper bound of the multiplicity for
a bounded multiplicity as an unlimited natural, which is the
unlimitedNaturalValue of upperValue, if given, and 1, otherwise.
result = (if (upperValue=null or upperValue.unlimitedValue()=null) then 1 else
upperValue.unlimitedValue() endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation upper_bound(...) not yet implemented")
class SlotMixin(object):
"""User defined mixin class for Slot."""
def __init__(self, definingFeature=None, value=None, owningInstance=None, **kwargs):
super(SlotMixin, self).__init__(**kwargs)
class ExceptionHandlerMixin(object):
"""User defined mixin class for ExceptionHandler."""
def __init__(
self,
exceptionInput=None,
exceptionType=None,
handlerBody=None,
protectedNode=None,
**kwargs,
):
super(ExceptionHandlerMixin, self).__init__(**kwargs)
def handler_body_edges(self, diagnostics=None, context=None):
"""The handlerBody has no incoming or outgoing ActivityEdges and the
exceptionInput has no incoming ActivityEdges.
handlerBody.incoming->isEmpty() and handlerBody.outgoing->isEmpty() and
exceptionInput.incoming->isEmpty()"""
raise NotImplementedError(
"operation handler_body_edges(...) not yet implemented"
)
def output_pins(self, diagnostics=None, context=None):
"""If the protectedNode is an Action with OutputPins, then the
handlerBody must also be an Action with the same number of OutputPins,
which are compatible in type, ordering, and multiplicity to those of
the protectedNode.
(protectedNode.oclIsKindOf(Action) and protectedNode.oclAsType(
Action).output->notEmpty()) implies
(handlerBody.oclIsKindOf(Action) and let protectedNodeOutput :
OrderedSet(OutputPin) = protectedNode.oclAsType(
Action).output,handlerBodyOutput : OrderedSet(OutputPin) =
handlerBody.oclAsType(Action).output in
protectedNodeOutput->size() = handlerBodyOutput->size() and
Sequence{1..protectedNodeOutput->size()}->forAll(i |
handlerBodyOutput->at(i).type.conformsTo(protectedNodeOutput->at(i).type) and
handlerBodyOutput->at(i).isOrdered=protectedNodeOutput->at(i).isOrdered and
handlerBodyOutput->at(i).compatibleWith(protectedNodeOutput->at(i)))
)"""
raise NotImplementedError("operation output_pins(...) not yet implemented")
def one_input(self, diagnostics=None, context=None):
"""The handlerBody is an Action with one InputPin, and that InputPin is the
same as the exceptionInput.
handlerBody.oclIsKindOf(Action) and
let inputs: OrderedSet(InputPin) = handlerBody.oclAsType(Action).input in
inputs->size()=1 and inputs->first()=exceptionInput"""
raise NotImplementedError("operation one_input(...) not yet implemented")
def edge_source_target(self, diagnostics=None, context=None):
"""An ActivityEdge that has a source within the handlerBody of an
ExceptionHandler must have its target in the handlerBody also, and vice versa.
let nodes:Set(ActivityNode) = handlerBody.oclAsType(Action).allOwnedNodes() in
nodes.outgoing->forAll(nodes->includes(target)) and
nodes.incoming->forAll(nodes->includes(source))"""
raise NotImplementedError(
"operation edge_source_target(...) not yet implemented"
)
def handler_body_owner(self, diagnostics=None, context=None):
"""The handlerBody must have the same owner as the protectedNode.
handlerBody.owner=protectedNode.owner"""
raise NotImplementedError(
"operation handler_body_owner(...) not yet implemented"
)
def exception_input_type(self, diagnostics=None, context=None):
"""The exceptionInput must either have no type or every exceptionType must
conform to the exceptionInput type.
exceptionInput.type=null or
exceptionType->forAll(conformsTo(exceptionInput.type.oclAsType(Classifier)))
"""
raise NotImplementedError(
"operation exception_input_type(...) not yet implemented"
)
class LinkEndDataMixin(object):
"""User defined mixin class for LinkEndData."""
def __init__(self, end=None, qualifier=None, value=None, **kwargs):
super(LinkEndDataMixin, self).__init__(**kwargs)
def same_type(self, diagnostics=None, context=None):
"""The type of the value InputPin conforms to the type of the Association end.
value<>null implies value.type.conformsTo(end.type)"""
raise NotImplementedError("operation same_type(...) not yet implemented")
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the value InputPin must be 1..1.
value<>null implies value.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def end_object_input_pin(self, diagnostics=None, context=None):
"""The value InputPin is not also the qualifier value InputPin.
value->excludesAll(qualifier.value)"""
raise NotImplementedError(
"operation end_object_input_pin(...) not yet implemented"
)
def property_is_association_end(self, diagnostics=None, context=None):
"""The Property must be an Association memberEnd.
end.association <> null"""
raise NotImplementedError(
"operation property_is_association_end(...) not yet implemented"
)
def qualifiers(self, diagnostics=None, context=None):
"""The qualifiers must be qualifiers of the Association end.
end.qualifier->includesAll(qualifier.qualifier)"""
raise NotImplementedError("operation qualifiers(...) not yet implemented")
def all_pins(self):
"""Returns all the InputPins referenced by this LinkEndData. By default
this includes the value and qualifier InputPins, but subclasses may
override the operation to add other InputPins.
result = (value->asBag()->union(qualifier.value))
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation all_pins(...) not yet implemented")
class QualifierValueMixin(object):
"""User defined mixin class for QualifierValue."""
def __init__(self, qualifier=None, value=None, **kwargs):
super(QualifierValueMixin, self).__init__(**kwargs)
def multiplicity_of_qualifier(self, diagnostics=None, context=None):
"""The multiplicity of the value InputPin is 1..1.
value.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_qualifier(...) not yet implemented"
)
def type_of_qualifier(self, diagnostics=None, context=None):
"""The type of the value InputPin conforms to the type of the qualifier
Property.value.type.conformsTo(qualifier.type)"""
raise NotImplementedError(
"operation type_of_qualifier(...) not yet implemented"
)
def qualifier_attribute(self, diagnostics=None, context=None):
"""The qualifier must be a qualifier of the Association end of the
linkEndData that owns this QualifierValue.
linkEndData.end.qualifier->includes(qualifier)"""
raise NotImplementedError(
"operation qualifier_attribute(...) not yet implemented"
)
class ClauseMixin(object):
"""User defined mixin class for Clause."""
def __init__(
self,
body=None,
bodyOutput=None,
decider=None,
predecessorClause=None,
successorClause=None,
test=None,
**kwargs,
):
super(ClauseMixin, self).__init__(**kwargs)
def body_output_pins(self, diagnostics=None, context=None):
"""The bodyOutput Pins are OutputPins on Actions in the body of the Clause.
_'body'.oclAsType(Action).allActions().output->includesAll(bodyOutput)"""
raise NotImplementedError("operation body_output_pins(...) not yet implemented")
def decider_output(self, diagnostics=None, context=None):
"""The decider Pin must be on an Action in the test section of the Clause and
must be of type Boolean with multiplicity 1..1.
test.oclAsType(Action).allActions().output->includes(decider) and
decider.type = Boolean and
decider.is(1,1)"""
raise NotImplementedError("operation decider_output(...) not yet implemented")
def test_and_body(self, diagnostics=None, context=None):
"""The test and body parts of a ConditionalNode must be disjoint with each
other.
test->intersection(_'body')->isEmpty()"""
raise NotImplementedError("operation test_and_body(...) not yet implemented")
class DerivedOwnedmember(EDerivedCollection):
pass
class DerivedImportedmember(EDerivedCollection):
pass
class DerivedMember(EDerivedCollection):
pass
class NamespaceMixin(object):
"""User defined mixin class for Namespace."""
def __init__(
self,
ownedRule=None,
elementImport=None,
packageImport=None,
ownedMember=None,
importedMember=None,
member=None,
**kwargs,
):
super(NamespaceMixin, self).__init__(**kwargs)
def members_distinguishable(self, diagnostics=None, context=None):
"""All the members of a Namespace are distinguishable within it.
membersAreDistinguishable()"""
raise NotImplementedError(
"operation members_distinguishable(...) not yet implemented"
)
def cannot_import_self(self, diagnostics=None, context=None):
"""A Namespace cannot have a PackageImport to itself.
packageImport.importedPackage.oclAsType(Namespace)->excludes(self)"""
raise NotImplementedError(
"operation cannot_import_self(...) not yet implemented"
)
def cannot_import_owned_members(self, diagnostics=None, context=None):
"""A Namespace cannot have an ElementImport to one of its ownedMembers.
elementImport.importedElement.oclAsType(Element)->excludesAll(ownedMember)"""
raise NotImplementedError(
"operation cannot_import_owned_members(...) not yet implemented"
)
def create_element_import(self, element=None, visibility=None):
"""Creates an import of the specified element into this namespace with the
specified visibility."""
raise NotImplementedError(
"operation create_element_import(...) not yet implemented"
)
def create_package_import(self, package_=None, visibility=None):
"""Creates an import of the specified package into this namespace with the
specified visibility."""
raise NotImplementedError(
"operation create_package_import(...) not yet implemented"
)
def get_imported_elements(self):
"""Retrieves the elements imported by this namespace."""
raise NotImplementedError(
"operation get_imported_elements(...) not yet implemented"
)
def get_imported_packages(self):
"""Retrieves the packages imported by this namespace."""
raise NotImplementedError(
"operation get_imported_packages(...) not yet implemented"
)
def get_owned_members(self):
raise NotImplementedError(
"operation get_owned_members(...) not yet implemented"
)
def exclude_collisions(self, imps=None):
"""The query excludeCollisions() excludes from a set of PackageableElements
any that would not be distinguishable from each other in this Namespace.
result = (imps->reject(imp1 | imps->exists(imp2 | not imp1.isDistinguishableFrom(
        imp2, self))))
        <p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation exclude_collisions(...) not yet implemented"
)
def get_names_of_member(self, element=None):
"""The query getNamesOfMember() gives a set of all of the names that a
member would have in a Namespace, taking importing into account. In general
a member can have multiple names in a Namespace if it is imported more than
once with different aliases.
result = (if self.ownedMember ->includes(element)
then Set{element.name}
else let elementImports : Set(ElementImport) = self.elementImport->select(
ei | ei.importedElement = element) in
if elementImports->notEmpty()
then
elementImports->collect(el | el.getName())->asSet()
else
self.packageImport->select(
pi | pi.importedPackage.visibleMembers().oclAsType(NamedElement)->
includes(element))->
collect(pi | pi.importedPackage.getNamesOfMember(element))->asSet()
endif
endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation get_names_of_member(...) not yet implemented"
)
def import_members(self, imps=None):
"""The query importMembers() defines which of a set of PackageableElements
are actually imported into the Namespace. This excludes hidden ones,
i.e., those which have names that conflict with names of ownedMembers,
and it also excludes PackageableElements that would have the
indistinguishable names when imported.
result = (self.excludeCollisions(imps)->select(
imp | self.ownedMember->forAll(mem | imp.isDistinguishableFrom(mem, self))))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation import_members(...) not yet implemented")
def get_imported_members(self):
"""The importedMember property is derived as the PackageableElements
that are members of this Namespace as a result of either PackageImports
or ElementImports.
result = (self.importMembers(elementImport.importedElement->asSet()->union(
packageImport.importedPackage->collect(p | p.visibleMembers()))->asSet()))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation get_imported_members(...) not yet implemented"
)
def members_are_distinguishable(self):
"""The Boolean query membersAreDistinguishable() determines whether all
of the Namespace's members are distinguishable within it.
result = (member->forAll( memb |
member->excluding(memb)->forAll(other |
memb.isDistinguishableFrom(other, self))))
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError(
"operation members_are_distinguishable(...) not yet implemented"
)
class DerivedSource(EDerivedCollection):
pass
class DerivedTarget(EDerivedCollection):
pass
class DirectedRelationshipMixin(object):
"""User defined mixin class for DirectedRelationship."""
def __init__(self, source=None, target=None, **kwargs):
super(DirectedRelationshipMixin, self).__init__(**kwargs)
class TypedElementMixin(object):
"""User defined mixin class for TypedElement."""
def __init__(self, type=None, **kwargs):
super(TypedElementMixin, self).__init__(**kwargs)
class ConnectorEndMixin(object):
"""User defined mixin class for ConnectorEnd."""
@property
def definingEnd(self):
raise NotImplementedError("Missing implementation for definingEnd")
def __init__(self, definingEnd=None, partWithPort=None, role=None, **kwargs):
super(ConnectorEndMixin, self).__init__(**kwargs)
def role_and_part_with_port(self, diagnostics=None, context=None):
"""If a ConnectorEnd references a partWithPort, then the role must be a
Port that is defined or inherited by the type of the partWithPort.
partWithPort->notEmpty() implies
(role.oclIsKindOf(Port) and partWithPort.type.oclAsType(
Namespace).member->includes(role))"""
raise NotImplementedError(
"operation role_and_part_with_port(...) not yet implemented"
)
def part_with_port_empty(self, diagnostics=None, context=None):
"""If a ConnectorEnd is attached to a Port of the containing Classifier,
        partWithPort will be empty.
        (role.oclIsKindOf(Port) and role.owner = connector.owner) implies
        partWithPort->isEmpty()"""
raise NotImplementedError(
"operation part_with_port_empty(...) not yet implemented"
)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the ConnectorEnd may not be more general than
the multiplicity of the corresponding end of the Association typing
        the owning Connector, if any.
        self.compatibleWith(definingEnd)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def self_part_with_port(self, diagnostics=None, context=None):
"""The Property held in self.partWithPort must not be a Port.
partWithPort->notEmpty() implies not partWithPort.oclIsKindOf(Port)"""
raise NotImplementedError(
"operation self_part_with_port(...) not yet implemented"
)
def get_defining_end(self):
"""Derivation for ConnectorEnd::/definingEnd : Property
result = (if connector.type = null
then
null
else
let index : Integer = connector.end->indexOf(self) in
connector.type.memberEnd->at(index)
endif)
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_defining_end(...) not yet implemented")
class ConnectableElementTemplateParameterMixin(object):
"""User defined mixin class for ConnectableElementTemplateParameter."""
def __init__(self, **kwargs):
super(ConnectableElementTemplateParameterMixin, self).__init__(**kwargs)
class DerivedDeployedelement(EDerivedCollection):
pass
class DeploymentTargetMixin(object):
"""User defined mixin class for DeploymentTarget."""
def __init__(self, deployedElement=None, deployment=None, **kwargs):
super(DeploymentTargetMixin, self).__init__(**kwargs)
def get_deployed_elements(self):
"""Derivation for DeploymentTarget::/deployedElement
result = (deployment.deployedArtifact->select(oclIsKindOf(Artifact))->collect(
oclAsType(Artifact).manifestation)->collect(utilizedElement)->asSet())
<p>From package UML::Deployments.</p>"""
raise NotImplementedError(
"operation get_deployed_elements(...) not yet implemented"
)
class DeployedArtifactMixin(object):
"""User defined mixin class for DeployedArtifact."""
def __init__(self, **kwargs):
super(DeployedArtifactMixin, self).__init__(**kwargs)
class DerivedRedefinedelement(EDerivedCollection):
pass
class DerivedRedefinitioncontext(EDerivedCollection):
pass
class RedefinableElementMixin(object):
"""User defined mixin class for RedefinableElement."""
def __init__(
self, isLeaf=None, redefinedElement=None, redefinitionContext=None, **kwargs
):
super(RedefinableElementMixin, self).__init__(**kwargs)
def redefinition_consistent(self, diagnostics=None, context=None):
"""A redefining element must be consistent with each redefined element.
redefinedElement->forAll(re | re.isConsistentWith(self))"""
raise NotImplementedError(
"operation redefinition_consistent(...) not yet implemented"
)
def non_leaf_redefinition(self, diagnostics=None, context=None):
"""A RedefinableElement can only redefine non-leaf RedefinableElements.
redefinedElement->forAll(re | not re.isLeaf)"""
raise NotImplementedError(
"operation non_leaf_redefinition(...) not yet implemented"
)
def redefinition_context_valid(self, diagnostics=None, context=None):
"""At least one of the redefinition contexts of the redefining element
must be a specialization of at least one of the redefinition contexts for
each redefined element.
redefinedElement->forAll(re | self.isRedefinitionContextValid(re))"""
raise NotImplementedError(
"operation redefinition_context_valid(...) not yet implemented"
)
def is_consistent_with(self, redefiningElement=None):
"""The query isConsistentWith() specifies, for any two RedefinableElements
in a context in which redefinition is possible, whether redefinition
would be logically consistent. By default, this is false; this operation
must be overridden for subclasses of RedefinableElement to define the
consistency conditions.
redefiningElement.isRedefinitionContextValid(self)
result = (false)
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation is_consistent_with(...) not yet implemented"
)
def is_redefinition_context_valid(self, redefinedElement=None):
"""The query isRedefinitionContextValid() specifies whether the redefinition
contexts of this RedefinableElement are properly related to the redefinition
contexts of the specified RedefinableElement to allow this element to redefine
the other. By default at least one of the redefinition contexts of this
element must be a specialization of at least one of the redefinition contexts
of the specified element.
result = (redefinitionContext->exists(c | c.allParents()->includesAll(
redefinedElement.redefinitionContext)))
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation is_redefinition_context_valid(...) not yet implemented"
)
class ParameterSetMixin(object):
"""User defined mixin class for ParameterSet."""
def __init__(self, condition=None, parameter=None, **kwargs):
super(ParameterSetMixin, self).__init__(**kwargs)
def same_parameterized_entity(self, diagnostics=None, context=None):
"""The Parameters in a ParameterSet must all be inputs or all be outputs
of the same parameterized entity, and the ParameterSet is owned by that entity.
parameter->forAll(p1, p2 | self.owner = p1.owner and self.owner = p2.owner and
p1.direction = p2.direction)"""
raise NotImplementedError(
"operation same_parameterized_entity(...) not yet implemented"
)
def input(self, diagnostics=None, context=None):
"""If a parameterized entity has input Parameters that are in a ParameterSet,
then any inputs that are not in a ParameterSet must be streaming. Same for
output Parameters.
((parameter->exists(direction = ParameterDirectionKind::_'in')) implies
behavioralFeature.ownedParameter->select(p | p.direction =
ParameterDirectionKind::_'in' and p.parameterSet->isEmpty())->forAll(isStream))
and
((parameter->exists(direction = ParameterDirectionKind::out)) implies
behavioralFeature.ownedParameter->select(p | p.direction =
ParameterDirectionKind::out and p.parameterSet->isEmpty())->forAll(isStream))
"""
raise NotImplementedError("operation input(...) not yet implemented")
def two_parameter_sets(self, diagnostics=None, context=None):
"""Two ParameterSets cannot have exactly the same set of Parameters.
parameter->forAll(parameterSet->forAll(s1, s2 | s1->size() = s2->size() implies
s1.parameter->exists(p | not s2.parameter->includes(p))))"""
raise NotImplementedError(
"operation two_parameter_sets(...) not yet implemented"
)
class DerivedIncoming(EDerivedCollection):
pass
class DerivedOutgoing(EDerivedCollection):
pass
class VertexMixin(object):
"""User defined mixin class for Vertex."""
def __init__(self, container=None, incoming=None, outgoing=None, **kwargs):
super(VertexMixin, self).__init__(**kwargs)
def containing_state_machine(self):
"""The operation containingStateMachine() returns the StateMachine in which
this Vertex is defined.
result = (if container <> null
then
-- the container is a region
container.containingStateMachine()
else
if (self.oclIsKindOf(Pseudostate)) and ((self.oclAsType(Pseudostate).kind =
PseudostateKind::entryPoint) or (self.oclAsType(Pseudostate).kind =
PseudostateKind::exitPoint)) then
self.oclAsType(Pseudostate).stateMachine
else
if (self.oclIsKindOf(ConnectionPointReference)) then
self.oclAsType(
ConnectionPointReference).state.containingStateMachine() --
no other valid cases possible
else
null
endif
endif
endif
)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation containing_state_machine(...) not yet implemented"
)
def get_incomings(self):
"""Derivation for Vertex::/incoming.
result = (Transition.allInstances()->select(target=self))
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation get_incomings(...) not yet implemented")
def get_outgoings(self):
"""Derivation for Vertex::/outgoing
result = (Transition.allInstances()->select(source=self))
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation get_outgoings(...) not yet implemented")
def is_contained_in_state(self, s=None):
"""This utility operation returns true if the Vertex is contained in the
State s (input argument).
result = (if not s.isComposite() or container->isEmpty() then
false
else
if container.state = s then
true
else
container.state.isContainedInState(s)
endif
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation is_contained_in_state(...) not yet implemented"
)
def is_contained_in_region(self, r=None):
"""This utility query returns true if the Vertex is contained in the Region r (input argument).
result = (if (container = r) then
true
else
if (r.state->isEmpty()) then
false
else
container.state.isContainedInRegion(r)
endif
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation is_contained_in_region(...) not yet implemented"
)
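# --- Hedged illustration (editor addition, not pyuml2 API) ---
# The isContainedInState recursion documented above in plain Python: climb from
# the Vertex's owning Region to that Region's owning State and repeat until the
# target composite State is reached or the hierarchy is exhausted. Names follow
# the UML metamodel (Vertex.container, Region.state, State.isComposite()), not
# necessarily the pyuml2 spellings, and the arguments are duck-typed stand-ins.
def _is_contained_in_state(vertex, s):
    region = getattr(vertex, "container", None)
    if region is None or not s.isComposite():
        return False
    if region.state is s:
        return True
    if region.state is None:  # region owned directly by a StateMachine
        return False
    return _is_contained_in_state(region.state, s)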
class TriggerMixin(object):
"""User defined mixin class for Trigger."""
def __init__(self, event=None, port=None, **kwargs):
super(TriggerMixin, self).__init__(**kwargs)
def trigger_with_ports(self, diagnostics=None, context=None):
"""If a Trigger specifies one or more ports, the event of the Trigger must be
a MessageEvent.
port->notEmpty() implies event.oclIsKindOf(MessageEvent)"""
raise NotImplementedError(
"operation trigger_with_ports(...) not yet implemented"
)
class OperationTemplateParameterMixin(object):
"""User defined mixin class for OperationTemplateParameter."""
def __init__(self, **kwargs):
super(OperationTemplateParameterMixin, self).__init__(**kwargs)
def match_default_signature(self, diagnostics=None, context=None):
"""default->notEmpty() implies (default.oclIsKindOf(Operation) and (
let defaultOp : Operation = default.oclAsType(Operation) in
defaultOp.ownedParameter->size() = parameteredElement.ownedParameter->size() and
Sequence{1.. defaultOp.ownedParameter->size()}->forAll( ix |
let p1: Parameter = defaultOp.ownedParameter->at(ix), p2 : Parameter =
parameteredElement.ownedParameter->at(ix) in
p1.type = p2.type and p1.upper = p2.upper and p1.lower = p2.lower and
p1.direction = p2.direction and p1.isOrdered = p2.isOrdered and p1.isUnique =
p2.isUnique)))"""
raise NotImplementedError(
"operation match_default_signature(...) not yet implemented"
)
class CollaborationUseMixin(object):
"""User defined mixin class for CollaborationUse."""
def __init__(self, roleBinding=None, type=None, **kwargs):
super(CollaborationUseMixin, self).__init__(**kwargs)
def client_elements(self, diagnostics=None, context=None):
"""All the client elements of a roleBinding are in one Classifier and all
supplier elements of a roleBinding are in one Collaboration.
roleBinding->collect(client)->forAll(ne1, ne2 |
ne1.oclIsKindOf(ConnectableElement) and ne2.oclIsKindOf(ConnectableElement) and
let ce1 : ConnectableElement = ne1.oclAsType(ConnectableElement), ce2 :
ConnectableElement = ne2.oclAsType(ConnectableElement) in
ce1.structuredClassifier = ce2.structuredClassifier)
and
roleBinding->collect(supplier)->forAll(ne1, ne2 |
ne1.oclIsKindOf(ConnectableElement) and ne2.oclIsKindOf(ConnectableElement) and
let ce1 : ConnectableElement = ne1.oclAsType(ConnectableElement), ce2 :
ConnectableElement = ne2.oclAsType(ConnectableElement) in
ce1.collaboration = ce2.collaboration)"""
raise NotImplementedError("operation client_elements(...) not yet implemented")
def every_role(self, diagnostics=None, context=None):
"""Every collaborationRole in the Collaboration is bound within the CollaborationUse.
type.collaborationRole->forAll(role | roleBinding->exists(rb | rb.supplier->includes(role)))"""
raise NotImplementedError("operation every_role(...) not yet implemented")
def connectors(self, diagnostics=None, context=None):
"""Connectors in a Collaboration typing a CollaborationUse must have
corresponding Connectors between elements bound in the context Classifier,
and these corresponding Connectors must have the same or more general type
than the Collaboration Connectors.
type.ownedConnector->forAll(connector |
let rolesConnectedInCollab : Set(ConnectableElement) = connector.end.role->asSet(),
relevantBindings : Set(Dependency) = roleBinding->select(rb | rb.supplier->intersection(rolesConnectedInCollab)->notEmpty()),
boundRoles : Set(ConnectableElement) = relevantBindings->collect(client.oclAsType(ConnectableElement))->asSet(),
contextClassifier : StructuredClassifier = boundRoles->any(true).structuredClassifier->any(true) in
contextClassifier.ownedConnector->exists( correspondingConnector |
correspondingConnector.end.role->forAll( role | boundRoles->includes(role) )
and (connector.type->notEmpty() and correspondingConnector.type->notEmpty())
implies connector.type->forAll(conformsTo(correspondingConnector.type)) )
)"""
raise NotImplementedError("operation connectors(...) not yet implemented")
class ClassifierTemplateParameterMixin(object):
"""User defined mixin class for ClassifierTemplateParameter."""
def __init__(self, allowSubstitutable=None, constrainingClassifier=None, **kwargs):
super(ClassifierTemplateParameterMixin, self).__init__(**kwargs)
def has_constraining_classifier(self, diagnostics=None, context=None):
"""If allowSubstitutable is true, then there must be a constrainingClassifier.
allowSubstitutable implies constrainingClassifier->notEmpty()"""
raise NotImplementedError(
"operation has_constraining_classifier(...) not yet implemented"
)
def parametered_element_no_features(self, diagnostics=None, context=None):
"""The parameteredElement has no direct features, and if constrainedElement is
empty it has no generalizations.
parameteredElement.feature->isEmpty() and (constrainingClassifier->isEmpty() implies
parameteredElement.allParents()->isEmpty())"""
raise NotImplementedError(
"operation parametered_element_no_features(...) not yet implemented"
)
def matching_abstract(self, diagnostics=None, context=None):
"""If the parameteredElement is not abstract, then the Classifier used as an
argument shall not be abstract.
(not parameteredElement.isAbstract) implies
templateParameterSubstitution.actual->forAll(a | not a.oclAsType(Classifier).isAbstract)"""
raise NotImplementedError(
"operation matching_abstract(...) not yet implemented"
)
def actual_is_classifier(self, diagnostics=None, context=None):
"""The argument to a ClassifierTemplateParameter is a Classifier.
templateParameterSubstitution.actual->forAll(a | a.oclIsKindOf(Classifier))"""
raise NotImplementedError(
"operation actual_is_classifier(...) not yet implemented"
)
def constraining_classifiers_constrain_args(self, diagnostics=None, context=None):
"""If there are any constrainingClassifiers, then every argument must be the
same as or a specialization of them, or if allowSubstitutable is true,
then it can also be substitutable.
templateParameterSubstitution.actual->forAll( a |
let arg : Classifier = a.oclAsType(Classifier) in
constrainingClassifier->forAll(
cc |
arg = cc or arg.conformsTo(cc) or (allowSubstitutable and arg.isSubstitutableFor(cc))
)
)"""
raise NotImplementedError(
"operation constraining_classifiers_constrain_args(...) not yet implemented"
)
def constraining_classifiers_constrain_parametered_element(
self, diagnostics=None, context=None
):
"""If there are any constrainingClassifiers, then the parameteredElement
must be the same as or a specialization of them, or if allowSubstitutable is true, then it can also be substitutable.
constrainingClassifier->forAll(
cc | parameteredElement = cc or parameteredElement.conformsTo(cc) or
(allowSubstitutable and parameteredElement.isSubstitutableFor(cc))
)"""
raise NotImplementedError(
"operation constraining_classifiers_constrain_parametered_element(...) not yet implemented"
)
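# Illustrative, non-normative sketch: the has_constraining_classifier rule above
# is a plain implication (allowSubstitutable implies a non-empty
# constrainingClassifier collection). The helper shows that check on a duck-typed
# parameter object; the attribute names mirror the UML properties and are
# assumptions of this sketch.
def _sketch_has_constraining_classifier(template_parameter):
    if not template_parameter.allowSubstitutable:
        return True  # implication holds vacuously when substitution is not allowed
    return bool(template_parameter.constrainingClassifier)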
class LinkEndCreationDataMixin(object):
"""User defined mixin class for LinkEndCreationData."""
def __init__(self, insertAt=None, isReplaceAll=None, **kwargs):
super(LinkEndCreationDataMixin, self).__init__(**kwargs)
def insert_at_pin(self, diagnostics=None, context=None):
"""LinkEndCreationData for ordered Association ends must have a single
insertAt InputPin for the insertion point with type UnlimitedNatural and multiplicity of 1..1, if isReplaceAll=false, and must have no InputPin for the insertion point when the association ends are unordered.
if not end.isOrdered
then insertAt = null
else
not isReplaceAll=false implies
insertAt <> null and insertAt->forAll(type=UnlimitedNatural and is(1,1))
endif"""
raise NotImplementedError("operation insert_at_pin(...) not yet implemented")
class LinkEndDestructionDataMixin(object):
"""User defined mixin class for LinkEndDestructionData."""
def __init__(self, destroyAt=None, isDestroyDuplicates=None, **kwargs):
super(LinkEndDestructionDataMixin, self).__init__(**kwargs)
def destroy_at_pin(self, diagnostics=None, context=None):
"""LinkEndDestructionData for ordered, nonunique Association ends must have a single destroyAt InputPin if isDestroyDuplicates is false, which must be of type UnlimitedNatural and have a multiplicity of 1..1. Otherwise, the action has no destroyAt input pin.
if not end.isOrdered or end.isUnique or isDestroyDuplicates
then destroyAt = null
else
destroyAt <> null and
destroyAt->forAll(type=UnlimitedNatural and is(1,1))
endif"""
raise NotImplementedError("operation destroy_at_pin(...) not yet implemented")
class MessageMixin(object):
"""User defined mixin class for Message."""
@property
def messageKind(self):
raise NotImplementedError("Missing implementation for messageKind")
def __init__(
self,
argument=None,
connector=None,
interaction=None,
messageKind=None,
messageSort=None,
receiveEvent=None,
sendEvent=None,
signature=None,
**kwargs,
):
super(MessageMixin, self).__init__(**kwargs)
def sending_receiving_message_event(self, diagnostics=None, context=None):
"""If the sendEvent and the receiveEvent of the same Message are on the same Lifeline, the sendEvent must be ordered before the receiveEvent.
receiveEvent.oclIsKindOf(MessageOccurrenceSpecification)
implies
let f : Lifeline = sendEvent->select(oclIsKindOf(MessageOccurrenceSpecification)).oclAsType(MessageOccurrenceSpecification)->asOrderedSet()->first().covered in
f = receiveEvent->select(oclIsKindOf(MessageOccurrenceSpecification)).oclAsType(MessageOccurrenceSpecification)->asOrderedSet()->first().covered implies
f.events->indexOf(sendEvent.oclAsType(MessageOccurrenceSpecification)->asOrderedSet()->first() ) <
f.events->indexOf(receiveEvent.oclAsType(MessageOccurrenceSpecification)->asOrderedSet()->first() )"""
raise NotImplementedError(
"operation sending_receiving_message_event(...) not yet implemented"
)
def arguments(self, diagnostics=None, context=None):
"""Arguments of a Message must only be: i) attributes of the sending lifeline, ii) constants, iii) symbolic values (which are wildcard values representing any legal value), iv) explicit parameters of the enclosing Interaction, v) attributes of the class owning the Interaction."""
raise NotImplementedError("operation arguments(...) not yet implemented")
def cannot_cross_boundaries(self, diagnostics=None, context=None):
"""Messages cannot cross boundaries of CombinedFragments or their operands. This is true if and only if both MessageEnds are enclosed within the same InteractionFragment (i.e., an InteractionOperand or an Interaction).
sendEvent->notEmpty() and receiveEvent->notEmpty() implies
let sendEnclosingFrag : Set(InteractionFragment) =
sendEvent->asOrderedSet()->first().enclosingFragment()
in
let receiveEnclosingFrag : Set(InteractionFragment) =
receiveEvent->asOrderedSet()->first().enclosingFragment()
in sendEnclosingFrag = receiveEnclosingFrag"""
raise NotImplementedError(
"operation cannot_cross_boundaries(...) not yet implemented"
)
def signature_is_signal(self, diagnostics=None, context=None):
"""In the case when the Message signature is a Signal, the arguments of the Message must correspond to the attributes of the Signal. A Message Argument corresponds to a Signal Attribute if the Argument is of the same Class or a specialization of that of the Attribute.
(messageSort = MessageSort::asynchSignal ) and signature.oclIsKindOf(Signal) implies
let signalAttributes : OrderedSet(Property) = signature.oclAsType(Signal).inheritedMember()->
select(n:NamedElement | n.oclIsTypeOf(Property))->collect(oclAsType(Property))->asOrderedSet()
in signalAttributes->size() = self.argument->size()
and self.argument->forAll( o: ValueSpecification |
not (o.oclIsKindOf(Expression)
and o.oclAsType(Expression).symbol->size()=0
and o.oclAsType(Expression).operand->isEmpty() ) implies
let p : Property = signalAttributes->at(self.argument->indexOf(o))
in o.type.oclAsType(Classifier).conformsTo(p.type.oclAsType(Classifier)))"""
raise NotImplementedError(
"operation signature_is_signal(...) not yet implemented"
)
def occurrence_specifications(self, diagnostics=None, context=None):
"""If the MessageEnds are both OccurrenceSpecifications, then the connector must go between the Parts represented by the Lifelines of the two MessageEnds."""
raise NotImplementedError(
"operation occurrence_specifications(...) not yet implemented"
)
def signature_refer_to(self, diagnostics=None, context=None):
"""The signature must either refer an Operation (in which case messageSort is either synchCall or asynchCall or reply) or a Signal (in which case messageSort is asynchSignal). The name of the NamedElement referenced by signature must be the same as that of the Message.
signature->notEmpty() implies
((signature.oclIsKindOf(Operation) and
(messageSort = MessageSort::asynchCall or messageSort = MessageSort::synchCall or messageSort = MessageSort::reply)
) or (signature.oclIsKindOf(Signal) and messageSort = MessageSort::asynchSignal )
) and name = signature.name"""
raise NotImplementedError(
"operation signature_refer_to(...) not yet implemented"
)
def signature_is_operation_request(self, diagnostics=None, context=None):
"""In the case when a Message with messageSort synchCall or asynchCall has a non empty Operation signature, the arguments of the Message must correspond to the in and inout parameters of the Operation. A Parameter corresponds to an Argument if the Argument is of the same Class or a specialization of that of the Parameter.
(messageSort = MessageSort::asynchCall or messageSort = MessageSort::synchCall) and signature.oclIsKindOf(Operation) implies
let requestParms : OrderedSet(Parameter) = signature.oclAsType(Operation).ownedParameter->
select(direction = ParameterDirectionKind::inout or direction = ParameterDirectionKind::_'in' )
in requestParms->size() = self.argument->size() and
self.argument->forAll( o: ValueSpecification |
not (o.oclIsKindOf(Expression) and o.oclAsType(Expression).symbol->size()=0 and o.oclAsType(Expression).operand->isEmpty() ) implies
let p : Parameter = requestParms->at(self.argument->indexOf(o)) in
o.type.oclAsType(Classifier).conformsTo(p.type.oclAsType(Classifier))
)"""
raise NotImplementedError(
"operation signature_is_operation_request(...) not yet implemented"
)
def signature_is_operation_reply(self, diagnostics=None, context=None):
"""In the case when a Message with messageSort reply has a non empty Operation signature, the arguments of the Message must correspond to the out, inout, and return parameters of the Operation. A Parameter corresponds to an Argument if the Argument is of the same Class or a specialization of that of the Parameter.
(messageSort = MessageSort::reply) and signature.oclIsKindOf(Operation) implies
let replyParms : OrderedSet(Parameter) = signature.oclAsType(Operation).ownedParameter->
select(direction = ParameterDirectionKind::inout or direction = ParameterDirectionKind::out or direction = ParameterDirectionKind::return)
in replyParms->size() = self.argument->size() and
self.argument->forAll( o: ValueSpecification | o.oclIsKindOf(Expression) and let e : Expression = o.oclAsType(Expression) in
e.operand->notEmpty() implies
let p : Parameter = replyParms->at(self.argument->indexOf(o)) in
e.operand->asSequence()->first().type.oclAsType(Classifier).conformsTo(p.type.oclAsType(Classifier))
)"""
raise NotImplementedError(
"operation signature_is_operation_reply(...) not yet implemented"
)
def get_message_kind(self):
"""This query returns the MessageKind value for this Message.
result = (messageKind)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation get_message_kind(...) not yet implemented")
class InteractionFragmentMixin(object):
"""User defined mixin class for InteractionFragment."""
def __init__(
self,
covered=None,
enclosingOperand=None,
enclosingInteraction=None,
generalOrdering=None,
**kwargs,
):
super(InteractionFragmentMixin, self).__init__(**kwargs)
class LifelineMixin(object):
"""User defined mixin class for Lifeline."""
def __init__(
self,
decomposedAs=None,
interaction=None,
represents=None,
selector=None,
coveredBy=None,
**kwargs,
):
super(LifelineMixin, self).__init__(**kwargs)
def selector_specified(self, diagnostics=None, context=None):
"""The selector for a Lifeline must only be specified if the referenced Part is multivalued.
self.selector->notEmpty() = (self.represents.oclIsKindOf(MultiplicityElement) and self.represents.oclAsType(MultiplicityElement).isMultivalued())"""
raise NotImplementedError(
"operation selector_specified(...) not yet implemented"
)
def interaction_uses_share_lifeline(self, diagnostics=None, context=None):
"""If a lifeline is in an Interaction referred to by an InteractionUse in an enclosing Interaction, and that lifeline is common with another lifeline in an Interaction referred to by another InteractonUse within that same enclosing Interaction, it must be common to a lifeline within that enclosing Interaction. By common Lifelines we mean Lifelines with the same selector and represents associations.
let intUses : Set(InteractionUse) = interaction.interactionUse in
intUses->forAll
( iuse : InteractionUse |
let usingInteraction : Set(Interaction) = iuse.enclosingInteraction->asSet()
->union(
iuse.enclosingOperand.combinedFragment->asSet()->closure(enclosingOperand.combinedFragment).enclosingInteraction->asSet()
)
in
let peerUses : Set(InteractionUse) = usingInteraction.fragment->select(oclIsKindOf(InteractionUse)).oclAsType(InteractionUse)->asSet()
->union(
usingInteraction.fragment->select(oclIsKindOf(CombinedFragment)).oclAsType(CombinedFragment)->asSet()
->closure(operand.fragment->select(oclIsKindOf(CombinedFragment)).oclAsType(CombinedFragment)).operand.fragment->
select(oclIsKindOf(InteractionUse)).oclAsType(InteractionUse)->asSet()
)->excluding(iuse)
in
peerUses->forAll( peerUse : InteractionUse |
peerUse.refersTo.lifeline->forAll( l : Lifeline | (l.represents = self.represents and
( self.selector.oclIsKindOf(LiteralString) implies
l.selector.oclIsKindOf(LiteralString) and
self.selector.oclAsType(LiteralString).value = l.selector.oclAsType(LiteralString).value )
and
( self.selector.oclIsKindOf(LiteralInteger) implies
l.selector.oclIsKindOf(LiteralInteger) and
self.selector.oclAsType(LiteralInteger).value = l.selector.oclAsType(LiteralInteger).value )
)
implies
usingInteraction.lifeline->exists(represents = self.represents and
( self.selector.oclIsKindOf(LiteralString) implies
l.selector.oclIsKindOf(LiteralString) and
self.selector.oclAsType(LiteralString).value = l.selector.oclAsType(LiteralString).value )
and
( self.selector.oclIsKindOf(LiteralInteger) implies
l.selector.oclIsKindOf(LiteralInteger) and
self.selector.oclAsType(LiteralInteger).value = l.selector.oclAsType(LiteralInteger).value )
)
)
)
)"""
raise NotImplementedError(
"operation interaction_uses_share_lifeline(...) not yet implemented"
)
def same_classifier(self, diagnostics=None, context=None):
"""The classifier containing the referenced ConnectableElement must be the same classifier, or an ancestor, of the classifier that contains the interaction enclosing this lifeline.
represents.namespace->closure(namespace)->includes(interaction._'context')"""
raise NotImplementedError("operation same_classifier(...) not yet implemented")
def selector_int_or_string(self, diagnostics=None, context=None):
"""The selector value, if present, must be a LiteralString or a LiteralInteger
self.selector->notEmpty() implies
self.selector.oclIsKindOf(LiteralInteger) or
self.selector.oclIsKindOf(LiteralString)"""
raise NotImplementedError(
"operation selector_int_or_string(...) not yet implemented"
)
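# Illustrative, non-normative sketch of the selector_int_or_string rule: when a
# Lifeline carries a selector, that selector must be a LiteralString or a
# LiteralInteger. The two classes are taken as parameters because this module
# only defines mixins; in real use they would come from the UML metamodel.
def _sketch_lifeline_selector_int_or_string(lifeline, LiteralString, LiteralInteger):
    if lifeline.selector is None:
        return True
    return isinstance(lifeline.selector, (LiteralString, LiteralInteger))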
class MessageEndMixin(object):
"""User defined mixin class for MessageEnd."""
def __init__(self, message=None, **kwargs):
super(MessageEndMixin, self).__init__(**kwargs)
def opposite_end(self):
"""This query returns a set including the MessageEnd (if exists) at the opposite end of the Message for this MessageEnd.
message->notEmpty()
result = (message->asSet().messageEnd->asSet()->excluding(self))
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation opposite_end(...) not yet implemented")
def is_send(self):
"""This query returns value true if this MessageEnd is a sendEvent.
message->notEmpty()
result = (message.sendEvent->asSet()->includes(self))
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation is_send(...) not yet implemented")
def is_receive(self):
"""This query returns value true if this MessageEnd is a receiveEvent.
message->notEmpty()
result = (message.receiveEvent->asSet()->includes(self))
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation is_receive(...) not yet implemented")
def enclosing_fragment(self):
"""This query returns a set including the enclosing InteractionFragment this MessageEnd is enclosed within.
result = (if self->select(oclIsKindOf(Gate))->notEmpty()
then -- it is a Gate
let endGate : Gate =
self->select(oclIsKindOf(Gate)).oclAsType(Gate)->asOrderedSet()->first()
in
if endGate.isOutsideCF()
then endGate.combinedFragment.enclosingInteraction.oclAsType(InteractionFragment)->asSet()->
union(endGate.combinedFragment.enclosingOperand.oclAsType(InteractionFragment)->asSet())
else if endGate.isInsideCF()
then endGate.combinedFragment.oclAsType(InteractionFragment)->asSet()
else if endGate.isFormal()
then endGate.interaction.oclAsType(InteractionFragment)->asSet()
else if endGate.isActual()
then endGate.interactionUse.enclosingInteraction.oclAsType(InteractionFragment)->asSet()->
union(endGate.interactionUse.enclosingOperand.oclAsType(InteractionFragment)->asSet())
else null
endif
endif
endif
endif
else -- it is a MessageOccurrenceSpecification
let endMOS : MessageOccurrenceSpecification =
self->select(oclIsKindOf(MessageOccurrenceSpecification)).oclAsType(MessageOccurrenceSpecification)->asOrderedSet()->first()
in
if endMOS.enclosingInteraction->notEmpty()
then endMOS.enclosingInteraction.oclAsType(InteractionFragment)->asSet()
else endMOS.enclosingOperand.oclAsType(InteractionFragment)->asSet()
endif
endif)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError(
"operation enclosing_fragment(...) not yet implemented"
)
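# Illustrative, non-normative sketch of opposite_end() and is_send(): the
# opposite end is simply the other MessageEnd of the owning Message, and a
# MessageEnd is a send end when it is that Message's sendEvent. The sendEvent
# and receiveEvent attribute names mirror the UML properties and are assumptions
# about the duck-typed objects handed to these helpers.
def _sketch_message_end_opposite(end):
    message = end.message
    if message is None:
        return set()
    return {e for e in (message.sendEvent, message.receiveEvent)
            if e is not None and e is not end}


def _sketch_message_end_is_send(end):
    return end.message is not None and end.message.sendEvent is end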
class GeneralOrderingMixin(object):
"""User defined mixin class for GeneralOrdering."""
def __init__(self, after=None, before=None, **kwargs):
super(GeneralOrderingMixin, self).__init__(**kwargs)
def irreflexive_transitive_closure(self, diagnostics=None, context=None):
"""An occurrence specification must not be ordered relative to itself through a series of general orderings. (In other words, the transitive closure of the general orderings is irreflexive.)
after->closure(toAfter.after)->excludes(before)"""
raise NotImplementedError(
"operation irreflexive_transitive_closure(...) not yet implemented"
)
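# Illustrative, non-normative sketch of the irreflexive_transitive_closure rule:
# starting from the ordering's `after` occurrence, follow toAfter/after links
# transitively and verify the `before` occurrence is never reached. Each
# OccurrenceSpecification is assumed to expose an iterable `toAfter` of
# GeneralOrderings, each with an `after` occurrence; the names mirror the UML
# properties but are assumptions of this sketch.
def _sketch_general_ordering_is_acyclic(ordering):
    seen = set()
    stack = [ordering.after]
    while stack:
        occurrence = stack.pop()
        if occurrence is ordering.before:
            return False  # `before` is reachable from `after`: ordering cycle
        if id(occurrence) in seen:
            continue
        seen.add(id(occurrence))
        stack.extend(successor.after for successor in occurrence.toAfter)
    return True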
class PackageableElementMixin(object):
"""User defined mixin class for PackageableElement."""
def __init__(self, **kwargs):
super(PackageableElementMixin, self).__init__(**kwargs)
def namespace_needs_visibility(self, diagnostics=None, context=None):
"""A PackageableElement owned by a Namespace must have a visibility.
visibility = null implies namespace = null"""
raise NotImplementedError(
"operation namespace_needs_visibility(...) not yet implemented"
)
class TemplateBindingMixin(object):
"""User defined mixin class for TemplateBinding."""
def __init__(
self, parameterSubstitution=None, signature=None, boundElement=None, **kwargs
):
super(TemplateBindingMixin, self).__init__(**kwargs)
def parameter_substitution_formal(self, diagnostics=None, context=None):
"""Each parameterSubstitution must refer to a formal TemplateParameter of the target TemplateSignature.
parameterSubstitution->forAll(b | signature.parameter->includes(b.formal))"""
raise NotImplementedError(
"operation parameter_substitution_formal(...) not yet implemented"
)
def one_parameter_substitution(self, diagnostics=None, context=None):
"""A TemplateBiinding contains at most one TemplateParameterSubstitution for each formal TemplateParameter of the target TemplateSignature.
signature.parameter->forAll(p | parameterSubstitution->select(b | b.formal = p)->size() <= 1)"""
raise NotImplementedError(
"operation one_parameter_substitution(...) not yet implemented"
)
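# Illustrative, non-normative sketch of the two TemplateBinding rules above:
# every substitution must target a formal parameter of the bound signature, and
# no formal parameter may be substituted more than once. The attribute names
# (parameterSubstitution, formal, signature.parameter) mirror the UML properties
# and are assumptions about the duck-typed binding object.
def _sketch_template_binding_is_well_formed(binding):
    from collections import Counter

    formals = list(binding.signature.parameter)
    substitutions = list(binding.parameterSubstitution)
    targets_are_formal = all(sub.formal in formals for sub in substitutions)
    per_formal = Counter(id(sub.formal) for sub in substitutions)
    at_most_once = all(count <= 1 for count in per_formal.values())
    return targets_are_formal and at_most_once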
class DerivedFeaturingclassifier(EDerivedCollection):
pass
class FeatureMixin(object):
"""User defined mixin class for Feature."""
def __init__(self, featuringClassifier=None, isStatic=None, **kwargs):
super(FeatureMixin, self).__init__(**kwargs)
class PseudostateMixin(object):
"""User defined mixin class for Pseudostate."""
def __init__(self, state=None, kind=None, stateMachine=None, **kwargs):
super(PseudostateMixin, self).__init__(**kwargs)
def transitions_outgoing(self, diagnostics=None, context=None):
"""All transitions outgoing a fork vertex must target states in different regions of an orthogonal state.
(kind = PseudostateKind::fork) implies
-- for any pair of outgoing transitions there exists an orthogonal state which contains the targets of these transitions
-- such that these targets belong to different regions of that orthogonal state
outgoing->forAll(t1:Transition, t2:Transition | let contState:State = containingStateMachine().LCAState(t1.target, t2.target) in
((contState <> null) and (contState.region
->exists(r1:Region, r2: Region | (r1 <> r2) and t1.target.isContainedInRegion(r1) and t2.target.isContainedInRegion(r2)))))"""
raise NotImplementedError(
"operation transitions_outgoing(...) not yet implemented"
)
def choice_vertex(self, diagnostics=None, context=None):
"""In a complete statemachine, a choice Vertex must have at least one incoming and one outgoing Transition.
(kind = PseudostateKind::choice) implies (incoming->size() >= 1 and outgoing->size() >= 1)"""
raise NotImplementedError("operation choice_vertex(...) not yet implemented")
def outgoing_from_initial(self, diagnostics=None, context=None):
"""The outgoing Transition from an initial vertex may have a behavior, but not a trigger or a guard.
(kind = PseudostateKind::initial) implies (outgoing.guard = null and outgoing.trigger->isEmpty())"""
raise NotImplementedError(
"operation outgoing_from_initial(...) not yet implemented"
)
def join_vertex(self, diagnostics=None, context=None):
"""In a complete StateMachine, a join Vertex must have at least two incoming Transitions and exactly one outgoing Transition.
(kind = PseudostateKind::join) implies (outgoing->size() = 1 and incoming->size() >= 2)"""
raise NotImplementedError("operation join_vertex(...) not yet implemented")
def junction_vertex(self, diagnostics=None, context=None):
"""In a complete StateMachine, a junction Vertex must have at least one incoming and one outgoing Transition.
(kind = PseudostateKind::junction) implies (incoming->size() >= 1 and outgoing->size() >= 1)"""
raise NotImplementedError("operation junction_vertex(...) not yet implemented")
def history_vertices(self, diagnostics=None, context=None):
"""History Vertices can have at most one outgoing Transition.
((kind = PseudostateKind::deepHistory) or (kind = PseudostateKind::shallowHistory)) implies (outgoing->size() <= 1)"""
raise NotImplementedError("operation history_vertices(...) not yet implemented")
def initial_vertex(self, diagnostics=None, context=None):
"""An initial Vertex can have at most one outgoing Transition.
(kind = PseudostateKind::initial) implies (outgoing->size() <= 1)"""
raise NotImplementedError("operation initial_vertex(...) not yet implemented")
def fork_vertex(self, diagnostics=None, context=None):
"""In a complete StateMachine, a fork Vertex must have at least two outgoing Transitions and exactly one incoming Transition.
(kind = PseudostateKind::fork) implies (incoming->size() = 1 and outgoing->size() >= 2)"""
raise NotImplementedError("operation fork_vertex(...) not yet implemented")
def transitions_incoming(self, diagnostics=None, context=None):
"""All Transitions incoming a join Vertex must originate in different Regions of an orthogonal State.
(kind = PseudostateKind::join) implies
-- for any pair of incoming transitions there exists an orthogonal state which contains the source vertices of these transitions
-- such that these source vertices belong to different regions of that orthogonal state
incoming->forAll(t1:Transition, t2:Transition | let contState:State = containingStateMachine().LCAState(t1.source, t2.source) in
((contState <> null) and (contState.region
->exists(r1:Region, r2: Region | (r1 <> r2) and t1.source.isContainedInRegion(r1) and t2.source.isContainedInRegion(r2)))))"""
raise NotImplementedError(
"operation transitions_incoming(...) not yet implemented"
)
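# Illustrative, non-normative sketch of the simple cardinality rules above for
# initial, history, join, fork, junction and choice pseudostates. The vertex is
# assumed to expose `kind`, `incoming` and `outgoing` (matching the UML property
# names), and the kind is compared as a string purely for illustration; the real
# code would use the PseudostateKind enumeration.
def _sketch_pseudostate_cardinalities_ok(pseudostate):
    kind = str(pseudostate.kind)
    n_in, n_out = len(pseudostate.incoming), len(pseudostate.outgoing)
    if kind in ("initial", "deepHistory", "shallowHistory"):
        return n_out <= 1
    if kind == "join":
        return n_out == 1 and n_in >= 2
    if kind == "fork":
        return n_in == 1 and n_out >= 2
    if kind in ("junction", "choice"):
        return n_in >= 1 and n_out >= 1
    return True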
class ConnectionPointReferenceMixin(object):
"""User defined mixin class for ConnectionPointReference."""
def __init__(self, entry=None, exit=None, state=None, **kwargs):
super(ConnectionPointReferenceMixin, self).__init__(**kwargs)
def exit_pseudostates(self, diagnostics=None, context=None):
"""The exit Pseudostates must be Pseudostates with kind exitPoint.
exit->forAll(kind = PseudostateKind::exitPoint)"""
raise NotImplementedError(
"operation exit_pseudostates(...) not yet implemented"
)
def entry_pseudostates(self, diagnostics=None, context=None):
"""The entry Pseudostates must be Pseudostates with kind entryPoint.
entry->forAll(kind = PseudostateKind::entryPoint)"""
raise NotImplementedError(
"operation entry_pseudostates(...) not yet implemented"
)
class ProtocolConformanceMixin(object):
"""User defined mixin class for ProtocolConformance."""
def __init__(self, generalMachine=None, specificMachine=None, **kwargs):
super(ProtocolConformanceMixin, self).__init__(**kwargs)
class PackageMergeMixin(object):
"""User defined mixin class for PackageMerge."""
def __init__(self, mergedPackage=None, receivingPackage=None, **kwargs):
super(PackageMergeMixin, self).__init__(**kwargs)
class ProfileApplicationMixin(object):
"""User defined mixin class for ProfileApplication."""
def __init__(
self, appliedProfile=None, isStrict=None, applyingPackage=None, **kwargs
):
super(ProfileApplicationMixin, self).__init__(**kwargs)
def get_applied_definition(self, namedElement=None):
"""Retrieves the definition (Ecore representation) of the profile associated with this profile application or, when a named element is given, the definition of that named element in the profile."""
raise NotImplementedError(
"operation get_applied_definition(...) not yet implemented"
)
class ElementImportMixin(object):
"""User defined mixin class for ElementImport."""
def __init__(
self,
alias=None,
importedElement=None,
importingNamespace=None,
visibility=None,
**kwargs,
):
super(ElementImportMixin, self).__init__(**kwargs)
def imported_element_is_public(self, diagnostics=None, context=None):
"""An importedElement has either public visibility or no visibility at all.
importedElement.visibility <> null implies importedElement.visibility = VisibilityKind::public"""
raise NotImplementedError(
"operation imported_element_is_public(...) not yet implemented"
)
def visibility_public_or_private(self, diagnostics=None, context=None):
"""The visibility of an ElementImport is either public or private.
visibility = VisibilityKind::public or visibility = VisibilityKind::private"""
raise NotImplementedError(
"operation visibility_public_or_private(...) not yet implemented"
)
def get_name(self):
"""The query getName() returns the name under which the imported PackageableElement will be known in the importing namespace.
result = (if alias->notEmpty() then
alias
else
importedElement.name
endif)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation get_name(...) not yet implemented")
class PackageImportMixin(object):
"""User defined mixin class for PackageImport."""
def __init__(
self, importedPackage=None, importingNamespace=None, visibility=None, **kwargs
):
super(PackageImportMixin, self).__init__(**kwargs)
def public_or_private(self, diagnostics=None, context=None):
"""The visibility of a PackageImport is either public or private.
visibility = VisibilityKind::public or visibility = VisibilityKind::private"""
raise NotImplementedError(
"operation public_or_private(...) not yet implemented"
)
class GeneralizationMixin(object):
"""User defined mixin class for Generalization."""
def __init__(
self,
general=None,
generalizationSet=None,
isSubstitutable=None,
specific=None,
**kwargs,
):
super(GeneralizationMixin, self).__init__(**kwargs)
class ExtensionPointMixin(object):
"""User defined mixin class for ExtensionPoint."""
def __init__(self, useCase=None, **kwargs):
super(ExtensionPointMixin, self).__init__(**kwargs)
def must_have_name(self, diagnostics=None, context=None):
"""An ExtensionPoint must have a name.
name->notEmpty ()"""
raise NotImplementedError("operation must_have_name(...) not yet implemented")
class DerivedContainededge(EDerivedCollection):
pass
class DerivedContainednode(EDerivedCollection):
pass
class DerivedSubgroup(EDerivedCollection):
pass
class ActivityGroupMixin(object):
"""User defined mixin class for ActivityGroup."""
@property
def inActivity(self):
raise NotImplementedError("Missing implementation for inActivity")
@inActivity.setter
def inActivity(self, value):
raise NotImplementedError("Missing implementation for inActivity")
@property
def superGroup(self):
raise NotImplementedError("Missing implementation for superGroup")
def __init__(
self,
containedEdge=None,
containedNode=None,
inActivity=None,
subgroup=None,
superGroup=None,
**kwargs,
):
super(ActivityGroupMixin, self).__init__(**kwargs)
def nodes_and_edges(self, diagnostics=None, context=None):
"""All containedNodes and containeEdges of an ActivityGroup must be in the same Activity as the group.
containedNode->forAll(activity = self.containingActivity()) and
containedEdge->forAll(activity = self.containingActivity())"""
raise NotImplementedError("operation nodes_and_edges(...) not yet implemented")
def not_contained(self, diagnostics=None, context=None):
"""No containedNode or containedEdge of an ActivityGroup may be contained by its subgroups or its superGroups, transitively.
subgroup->closure(subgroup).containedNode->excludesAll(containedNode) and
superGroup->closure(superGroup).containedNode->excludesAll(containedNode) and
subgroup->closure(subgroup).containedEdge->excludesAll(containedEdge) and
superGroup->closure(superGroup).containedEdge->excludesAll(containedEdge)"""
raise NotImplementedError("operation not_contained(...) not yet implemented")
class DerivedIngroup(EDerivedCollection):
pass
class ActivityEdgeMixin(object):
"""User defined mixin class for ActivityEdge."""
def __init__(
self,
activity=None,
guard=None,
inPartition=None,
interrupts=None,
inStructuredNode=None,
target=None,
source=None,
redefinedEdge=None,
weight=None,
inGroup=None,
**kwargs,
):
super(ActivityEdgeMixin, self).__init__(**kwargs)
def source_and_target(self, diagnostics=None, context=None):
"""If an ActivityEdge is directly owned by an Activity, then its source and target must be directly or indirectly contained in the same Activity.
activity<>null implies source.containingActivity() = activity and target.containingActivity() = activity"""
raise NotImplementedError(
"operation source_and_target(...) not yet implemented"
)
class InteractionUseMixin(object):
"""User defined mixin class for InteractionUse."""
def __init__(
self,
actualGate=None,
argument=None,
refersTo=None,
returnValue=None,
returnValueRecipient=None,
**kwargs,
):
super(InteractionUseMixin, self).__init__(**kwargs)
def gates_match(self, diagnostics=None, context=None):
"""Actual Gates of the InteractionUse must match Formal Gates of the referred Interaction. Gates match when their names are equal and their messages correspond.
actualGate->notEmpty() implies
refersTo.formalGate->forAll( fg : Gate | self.actualGate->select(matches(fg))->size()=1) and
self.actualGate->forAll(ag : Gate | refersTo.formalGate->select(matches(ag))->size()=1)"""
raise NotImplementedError("operation gates_match(...) not yet implemented")
def arguments_are_constants(self, diagnostics=None, context=None):
"""The arguments must only be constants, parameters of the enclosing Interaction or attributes of the classifier owning the enclosing Interaction."""
raise NotImplementedError(
"operation arguments_are_constants(...) not yet implemented"
)
def return_value_recipient_coverage(self, diagnostics=None, context=None):
"""The returnValueRecipient must be a Property of a ConnectableElement that is represented by a Lifeline covered by this InteractionUse.
returnValueRecipient->asSet()->notEmpty() implies
let covCE : Set(ConnectableElement) = covered.represents->asSet() in
covCE->notEmpty() and let classes:Set(Classifier) = covCE.type.oclIsKindOf(Classifier).oclAsType(Classifier)->asSet() in
let allProps : Set(Property) = classes.attribute->union(classes.allParents().attribute)->asSet() in
allProps->includes(returnValueRecipient)"""
raise NotImplementedError(
"operation return_value_recipient_coverage(...) not yet implemented"
)
def arguments_correspond_to_parameters(self, diagnostics=None, context=None):
"""The arguments of the InteractionUse must correspond to parameters of the referred Interaction."""
raise NotImplementedError(
"operation arguments_correspond_to_parameters(...) not yet implemented"
)
def return_value_type_recipient_correspondence(
self, diagnostics=None, context=None
):
"""The type of the returnValue must correspond to the type of the returnValueRecipient.
returnValue.type->asSequence()->notEmpty() implies returnValue.type->asSequence()->first() = returnValueRecipient.type->asSequence()->first()"""
raise NotImplementedError(
"operation return_value_type_recipient_correspondence(...) not yet implemented"
)
def all_lifelines(self, diagnostics=None, context=None):
"""The InteractionUse must cover all Lifelines of the enclosing Interaction that are common with the lifelines covered by the referred Interaction. Lifelines are common if they have the same selector and represents associationEnd values.
let parentInteraction : Set(Interaction) = enclosingInteraction->asSet()->
union(enclosingOperand.combinedFragment->closure(enclosingOperand.combinedFragment)->
collect(enclosingInteraction).oclAsType(Interaction)->asSet()) in
parentInteraction->size()=1 and let refInteraction : Interaction = refersTo in
parentInteraction.covered-> forAll(intLifeline : Lifeline | refInteraction.covered->
forAll( refLifeline : Lifeline | refLifeline.represents = intLifeline.represents and
(
( refLifeline.selector.oclIsKindOf(LiteralString) implies
intLifeline.selector.oclIsKindOf(LiteralString) and
refLifeline.selector.oclAsType(LiteralString).value = intLifeline.selector.oclAsType(LiteralString).value ) and
( refLifeline.selector.oclIsKindOf(LiteralInteger) implies
intLifeline.selector.oclIsKindOf(LiteralInteger) and
refLifeline.selector.oclAsType(LiteralInteger).value = intLifeline.selector.oclAsType(LiteralInteger).value )
)
implies self.covered->asSet()->includes(intLifeline)))"""
raise NotImplementedError("operation all_lifelines(...) not yet implemented")
class GateMixin(object):
"""User defined mixin class for Gate."""
def __init__(self, **kwargs):
super(GateMixin, self).__init__(**kwargs)
def actual_gate_matched(self, diagnostics=None, context=None):
"""If this Gate is an actualGate, it must have exactly one matching formalGate within the referred Interaction.
interactionUse->notEmpty() implies interactionUse.refersTo.formalGate->select(matches(self))->size()=1"""
raise NotImplementedError(
"operation actual_gate_matched(...) not yet implemented"
)
def inside_cf_matched(self, diagnostics=None, context=None):
"""If this Gate is inside a CombinedFragment, it must have exactly one matching Gate which is outside of that CombinedFragment.
isInsideCF() implies combinedFragment.cfragmentGate->select(isOutsideCF() and matches(self))->size()=1"""
raise NotImplementedError(
"operation inside_cf_matched(...) not yet implemented"
)
def outside_cf_matched(self, diagnostics=None, context=None):
"""If this Gate is outside an 'alt' CombinedFragment, for every InteractionOperator inside that CombinedFragment there must be exactly one matching Gate inside the CombindedFragment with its opposing end enclosed by that InteractionOperator. If this Gate is outside CombinedFragment with operator other than 'alt', there must be exactly one matching Gate inside that CombinedFragment.
isOutsideCF() implies
if self.combinedFragment.interactionOperator->asOrderedSet()->first() = InteractionOperatorKind::alt
then self.combinedFragment.operand->forAll(op : InteractionOperand |
self.combinedFragment.cfragmentGate->select(isInsideCF() and
oppositeEnd().enclosingFragment()->includes(self.combinedFragment) and matches(self))->size()=1)
else self.combinedFragment.cfragmentGate->select(isInsideCF() and matches(self))->size()=1
endif"""
raise NotImplementedError(
"operation outside_cf_matched(...) not yet implemented"
)
def formal_gate_distinguishable(self, diagnostics=None, context=None):
"""isFormal() implies that no other formalGate of the parent Interaction returns the same getName() as returned for self
isFormal() implies interaction.formalGate->select(getName() = self.getName())->size()=1"""
raise NotImplementedError(
"operation formal_gate_distinguishable(...) not yet implemented"
)
def actual_gate_distinguishable(self, diagnostics=None, context=None):
"""isActual() implies that no other actualGate of the parent InteractionUse returns the same getName() as returned for self
isActual() implies interactionUse.actualGate->select(getName() = self.getName())->size()=1"""
raise NotImplementedError(
"operation actual_gate_distinguishable(...) not yet implemented"
)
def outside_cf_gate_distinguishable(self, diagnostics=None, context=None):
"""isOutsideCF() implies that no other outside cfragmentGate of the parent CombinedFragment returns the same getName() as returned for self
isOutsideCF() implies combinedFragment.cfragmentGate->select(getName() = self.getName())->size()=1"""
raise NotImplementedError(
"operation outside_cf_gate_distinguishable(...) not yet implemented"
)
def inside_cf_gate_distinguishable(self, diagnostics=None, context=None):
"""isInsideCF() implies that no other inside cfragmentGate attached to a message with its other end in the same InteractionOperator as self, returns the same getName() as returned for self
isInsideCF() implies
let selfOperand : InteractionOperand = self.getOperand() in
combinedFragment.cfragmentGate->select(isInsideCF() and getName() = self.getName())->select(getOperand() = selfOperand)->size()=1"""
raise NotImplementedError(
"operation inside_cf_gate_distinguishable(...) not yet implemented"
)
def is_outside_cf(self):
"""This query returns true if this Gate is attached to the boundary of a CombinedFragment, and its other end (if present) is outside of the same CombinedFragment.
result = (self.oppositeEnd()-> notEmpty() and combinedFragment->notEmpty() implies
let oppEnd : MessageEnd = self.oppositeEnd()->asOrderedSet()->first() in
if oppEnd.oclIsKindOf(MessageOccurrenceSpecification)
then let oppMOS : MessageOccurrenceSpecification = oppEnd.oclAsType(MessageOccurrenceSpecification)
in self.combinedFragment.enclosingInteraction.oclAsType(InteractionFragment)->asSet()->
union(self.combinedFragment.enclosingOperand.oclAsType(InteractionFragment)->asSet()) =
oppMOS.enclosingInteraction.oclAsType(InteractionFragment)->asSet()->
union(oppMOS.enclosingOperand.oclAsType(InteractionFragment)->asSet())
else let oppGate : Gate = oppEnd.oclAsType(Gate)
in self.combinedFragment.enclosingInteraction.oclAsType(InteractionFragment)->asSet()->
union(self.combinedFragment.enclosingOperand.oclAsType(InteractionFragment)->asSet()) =
oppGate.combinedFragment.enclosingInteraction.oclAsType(InteractionFragment)->asSet()->
union(oppGate.combinedFragment.enclosingOperand.oclAsType(InteractionFragment)->asSet())
endif)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation is_outside_cf(...) not yet implemented")
def is_inside_cf(self):
"""This query returns true if this Gate is attached to the boundary of a CombinedFragment, and its other end (if present) is inside of an InteractionOperator of the same CombinedFragment.
result = (self.oppositeEnd()-> notEmpty() and combinedFragment->notEmpty() implies
let oppEnd : MessageEnd = self.oppositeEnd()->asOrderedSet()->first() in
if oppEnd.oclIsKindOf(MessageOccurrenceSpecification)
then let oppMOS : MessageOccurrenceSpecification
= oppEnd.oclAsType(MessageOccurrenceSpecification)
in combinedFragment = oppMOS.enclosingOperand.combinedFragment
else let oppGate : Gate = oppEnd.oclAsType(Gate)
in combinedFragment = oppGate.combinedFragment.enclosingOperand.combinedFragment
endif)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation is_inside_cf(...) not yet implemented")
def is_actual(self):
"""This query returns true value if this Gate is an actualGate of an InteractionUse.
result = (interactionUse->notEmpty())
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation is_actual(...) not yet implemented")
def is_formal(self):
"""This query returns true if this Gate is a formalGate of an Interaction.
result = (interaction->notEmpty())
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation is_formal(...) not yet implemented")
def get_name(self):
"""This query returns the name of the gate, either the explicit name (.name) or the constructed name ('out_" or 'in_' concatenated in front of .message.name) if the explicit name is not present.
result = (if name->notEmpty() then name->asOrderedSet()->first()
else if isActual() or isOutsideCF()
then if isSend()
then 'out_'.concat(self.message.name->asOrderedSet()->first())
else 'in_'.concat(self.message.name->asOrderedSet()->first())
endif
else if isSend()
then 'in_'.concat(self.message.name->asOrderedSet()->first())
else 'out_'.concat(self.message.name->asOrderedSet()->first())
endif
endif
endif)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation get_name(...) not yet implemented")
def matches(self, gateToMatch=None):
"""This query returns true if the name of this Gate matches the name of the in parameter Gate, and the messages for the two Gates correspond. The Message for one Gate (say A) corresponds to the Message for another Gate (say B) if (A and B have the same name value) and (if A is a sendEvent then B is a receiveEvent) and (if A is a receiveEvent then B is a sendEvent) and (A and B have the same messageSort value) and (A and B have the same signature value).
result = (self.getName() = gateToMatch.getName() and
self.message.messageSort = gateToMatch.message.messageSort and
self.message.name = gateToMatch.message.name and
self.message.sendEvent->includes(self) implies gateToMatch.message.receiveEvent->includes(gateToMatch) and
self.message.receiveEvent->includes(self) implies gateToMatch.message.sendEvent->includes(gateToMatch) and
self.message.signature = gateToMatch.message.signature)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation matches(...) not yet implemented")
def get_operand(self):
"""If the Gate is an inside Combined Fragment Gate, this operation returns the InteractionOperand that the opposite end of this Gate is included within.
result = (if isInsideCF() then
let oppEnd : MessageEnd = self.oppositeEnd()->asOrderedSet()->first() in
if oppEnd.oclIsKindOf(MessageOccurrenceSpecification)
then let oppMOS : MessageOccurrenceSpecification = oppEnd.oclAsType(MessageOccurrenceSpecification)
in oppMOS.enclosingOperand->asOrderedSet()->first()
else let oppGate : Gate = oppEnd.oclAsType(Gate)
in oppGate.combinedFragment.enclosingOperand->asOrderedSet()->first()
endif
else null
endif)
<p>From package UML::Interactions.</p>"""
raise NotImplementedError("operation get_operand(...) not yet implemented")
class OccurrenceSpecificationMixin(object):
"""User defined mixin class for OccurrenceSpecification."""
def __init__(self, toAfter=None, toBefore=None, **kwargs):
super(OccurrenceSpecificationMixin, self).__init__(**kwargs)
def get_covered(self):
"""Returns the Lifeline on which the OccurrenceSpecification appears."""
raise NotImplementedError("operation get_covered(...) not yet implemented")
def set_covered(self, value=None):
"""Sets the Lifeline on which the OccurrenceSpecification appears."""
raise NotImplementedError("operation set_covered(...) not yet implemented")
class ExecutionSpecificationMixin(object):
"""User defined mixin class for ExecutionSpecification."""
def __init__(self, finish=None, start=None, **kwargs):
super(ExecutionSpecificationMixin, self).__init__(**kwargs)
def same_lifeline(self, diagnostics=None, context=None):
"""The startEvent and the finishEvent must be on the same Lifeline.
start.covered = finish.covered"""
raise NotImplementedError("operation same_lifeline(...) not yet implemented")
class CombinedFragmentMixin(object):
"""User defined mixin class for CombinedFragment."""
def __init__(
self, cfragmentGate=None, interactionOperator=None, operand=None, **kwargs
):
super(CombinedFragmentMixin, self).__init__(**kwargs)
def break_(self, diagnostics=None, context=None):
"""If the interactionOperator is break, the corresponding InteractionOperand must cover all Lifelines covered by the enclosing InteractionFragment.
interactionOperator=InteractionOperatorKind::break implies
enclosingInteraction.oclAsType(InteractionFragment)->asSet()->union(
enclosingOperand.oclAsType(InteractionFragment)->asSet()).covered->asSet() = self.covered->asSet()"""
raise NotImplementedError("operation break_(...) not yet implemented")
def consider_and_ignore(self, diagnostics=None, context=None):
"""The interaction operators 'consider' and 'ignore' can only be used for the ConsiderIgnoreFragment subtype of CombinedFragment
((interactionOperator = InteractionOperatorKind::consider) or (interactionOperator = InteractionOperatorKind::ignore)) implies oclIsKindOf(ConsiderIgnoreFragment)"""
raise NotImplementedError(
"operation consider_and_ignore(...) not yet implemented"
)
def opt_loop_break_neg(self, diagnostics=None, context=None):
"""If the interactionOperator is opt, loop, break, assert or neg, there must be exactly one operand.
(interactionOperator = InteractionOperatorKind::opt or interactionOperator = InteractionOperatorKind::loop or
interactionOperator = InteractionOperatorKind::break or interactionOperator = InteractionOperatorKind::assert or
interactionOperator = InteractionOperatorKind::neg)
implies operand->size()=1"""
raise NotImplementedError(
"operation opt_loop_break_neg(...) not yet implemented"
)
class ContinuationMixin(object):
"""User defined mixin class for Continuation."""
def __init__(self, setting=None, **kwargs):
super(ContinuationMixin, self).__init__(**kwargs)
def first_or_last_interaction_fragment(self, diagnostics=None, context=None):
"""Continuations always occur as the very first InteractionFragment or the very last InteractionFragment of the enclosing InteractionOperand.
enclosingOperand->notEmpty() and
let peerFragments : OrderedSet(InteractionFragment) = enclosingOperand.fragment in
( peerFragments->notEmpty() and
((peerFragments->first() = self) or (peerFragments->last() = self)))"""
raise NotImplementedError(
"operation first_or_last_interaction_fragment(...) not yet implemented"
)
def same_name(self, diagnostics=None, context=None):
"""Across all Interaction instances having the same context value, every Lifeline instance covered by a Continuation (self) must be common with one covered Lifeline instance of all other Continuation instances with the same name as self, and every Lifeline instance covered by a Continuation instance with the same name as self must be common with one covered Lifeline instance of self. Lifeline instances are common if they have the same selector and represents associationEnd values.
enclosingOperand.combinedFragment->notEmpty() and
let parentInteraction : Set(Interaction) =
enclosingOperand.combinedFragment->closure(enclosingOperand.combinedFragment)->
collect(enclosingInteraction).oclAsType(Interaction)->asSet()
in
(parentInteraction->size() = 1)
and let peerInteractions : Set(Interaction) =
(parentInteraction->union(parentInteraction->collect(_'context')->collect(behavior)->
select(oclIsKindOf(Interaction)).oclAsType(Interaction)->asSet())->asSet()) in
(peerInteractions->notEmpty()) and
let combinedFragments1 : Set(CombinedFragment) = peerInteractions.fragment->
select(oclIsKindOf(CombinedFragment)).oclAsType(CombinedFragment)->asSet() in
combinedFragments1->notEmpty() and combinedFragments1->closure(operand.fragment->
select(oclIsKindOf(CombinedFragment)).oclAsType(CombinedFragment))->asSet().operand.fragment->
select(oclIsKindOf(Continuation)).oclAsType(Continuation)->asSet()->
forAll(c : Continuation | (c.name = self.name) implies
(c.covered->asSet()->forAll(cl : Lifeline | -- cl must be common to one lifeline covered by self
self.covered->asSet()->
select(represents = cl.represents and selector = cl.selector)->asSet()->size()=1))
and
(self.covered->asSet()->forAll(cl : Lifeline | -- cl must be common to one lifeline covered by c
c.covered->asSet()->
select(represents = cl.represents and selector = cl.selector)->asSet()->size()=1))
)"""
raise NotImplementedError("operation same_name(...) not yet implemented")
def global_(self, diagnostics=None, context=None):
"""Continuations are always global in the enclosing InteractionFragment e.g., it always covers all Lifelines covered by the enclosing InteractionOperator.
enclosingOperand->notEmpty() and
let operandLifelines : Set(Lifeline) = enclosingOperand.covered in
(operandLifelines->notEmpty() and
operandLifelines->forAll(ol :Lifeline |self.covered->includes(ol)))"""
raise NotImplementedError("operation global_(...) not yet implemented")
class StateInvariantMixin(object):
"""User defined mixin class for StateInvariant."""
def __init__(self, invariant=None, **kwargs):
super(StateInvariantMixin, self).__init__(**kwargs)
class TypeMixin(object):
"""User defined mixin class for Type."""
@property
def package(self):
raise NotImplementedError("Missing implementation for package")
@package.setter
def package(self, value):
raise NotImplementedError("Missing implementation for package")
def __init__(self, package=None, **kwargs):
super(TypeMixin, self).__init__(**kwargs)
def create_association(
self,
end1IsNavigable=None,
end1Aggregation=None,
end1Name=None,
end1Lower=None,
end1Upper=None,
end1Type=None,
end2IsNavigable=None,
end2Aggregation=None,
end2Name=None,
end2Lower=None,
end2Upper=None,
):
"""Creates a(n) (binary) association between this type and the specified other type, with the specified navigabilities, aggregations, names, lower bounds, and upper bounds, and owned by this type's nearest package."""
raise NotImplementedError(
"operation create_association(...) not yet implemented"
)
def get_associations(self):
"""Retrieves the associations in which this type is involved."""
raise NotImplementedError("operation get_associations(...) not yet implemented")
def conforms_to(self, other=None):
"""The query conformsTo() gives true for a Type that conforms to another. By default, two Types do not conform to each other. This query is intended to be redefined for specific conformance situations.
result = (false)
<p>From package UML::CommonStructure.</p>"""
raise NotImplementedError("operation conforms_to(...) not yet implemented")
class DerivedEnd(EDerivedCollection):
pass
class ConnectableElementMixin(object):
"""User defined mixin class for ConnectableElement."""
def __init__(self, end=None, **kwargs):
super(ConnectableElementMixin, self).__init__(**kwargs)
def get_ends(self):
"""Derivation for ConnectableElement::/end : ConnectorEnd
result = (ConnectorEnd.allInstances()->select(role = self))
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_ends(...) not yet implemented")
class ConstraintMixin(object):
"""User defined mixin class for Constraint."""
def __init__(
self, constrainedElement=None, context=None, specification=None, **kwargs
):
super(ConstraintMixin, self).__init__(**kwargs)
def boolean_value(self, diagnostics=None, context=None):
"""The ValueSpecification for a Constraint must evaluate to a Boolean value."""
raise NotImplementedError("operation boolean_value(...) not yet implemented")
def no_side_effects(self, diagnostics=None, context=None):
"""Evaluating the ValueSpecification for a Constraint must not have side effects."""
raise NotImplementedError("operation no_side_effects(...) not yet implemented")
def not_apply_to_self(self, diagnostics=None, context=None):
"""A Constraint cannot be applied to itself.
not constrainedElement->includes(self)"""
raise NotImplementedError(
"operation not_apply_to_self(...) not yet implemented"
)
class RegionMixin(object):
"""User defined mixin class for Region."""
def __init__(
self,
extendedRegion=None,
state=None,
stateMachine=None,
transition=None,
subvertex=None,
**kwargs,
):
super(RegionMixin, self).__init__(**kwargs)
def deep_history_vertex(self, diagnostics=None, context=None):
"""A Region can have at most one deep history Vertex.
self.subvertex->select (oclIsKindOf(Pseudostate))->collect(oclAsType(Pseudostate))->
select(kind = PseudostateKind::deepHistory)->size() <= 1"""
raise NotImplementedError(
"operation deep_history_vertex(...) not yet implemented"
)
def shallow_history_vertex(self, diagnostics=None, context=None):
"""A Region can have at most one shallow history Vertex.
subvertex->select(oclIsKindOf(Pseudostate))->collect(oclAsType(Pseudostate))->
select(kind = PseudostateKind::shallowHistory)->size() <= 1"""
raise NotImplementedError(
"operation shallow_history_vertex(...) not yet implemented"
)
def owned(self, diagnostics=None, context=None):
"""If a Region is owned by a StateMachine, then it cannot also be owned by a State and vice versa.
(stateMachine <> null implies state = null) and (state <> null implies stateMachine = null)"""
raise NotImplementedError("operation owned(...) not yet implemented")
def initial_vertex(self, diagnostics=None, context=None):
"""A Region can have at most one initial Vertex.
self.subvertex->select (oclIsKindOf(Pseudostate))->collect(oclAsType(Pseudostate))->
select(kind = PseudostateKind::initial)->size() <= 1"""
raise NotImplementedError("operation initial_vertex(...) not yet implemented")
def belongs_to_psm(self):
"""The operation belongsToPSM () checks if the Region belongs to a ProtocolStateMachine.
result = (if stateMachine <> null
then
stateMachine.oclIsKindOf(ProtocolStateMachine)
else
state <> null implies state.container.belongsToPSM()
endif )
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation belongs_to_psm(...) not yet implemented")
def containing_state_machine(self):
"""The operation containingStateMachine() returns the StateMachine in which this Region is defined.
result = (if stateMachine = null
then
state.containingStateMachine()
else
stateMachine
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation containing_state_machine(...) not yet implemented"
)
def redefinition_context(self):
"""The redefinition context of a Region is the nearest containing StateMachine.
result = (let sm : StateMachine = containingStateMachine() in
if sm._'context' = null or sm.general->notEmpty() then
sm
else
sm._'context'
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation redefinition_context(...) not yet implemented"
)
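# Illustrative, non-normative sketch of Region.containing_state_machine(): a
# Region is either owned directly by a StateMachine or by a State, in which case
# the lookup climbs to the owning State's container Region and recurses. The
# attribute names (stateMachine, state, container) mirror the UML properties and
# are assumptions about the objects involved.
def _sketch_region_containing_state_machine(region):
    if region.stateMachine is not None:
        return region.stateMachine
    # owned by a State: recurse through that State's container Region
    return _sketch_region_containing_state_machine(region.state.container)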
class EventMixin(object):
"""User defined mixin class for Event."""
def __init__(self, **kwargs):
super(EventMixin, self).__init__(**kwargs)
class TransitionMixin(object):
"""User defined mixin class for Transition."""
def __init__(
self,
effect=None,
guard=None,
kind=None,
redefinedTransition=None,
source=None,
target=None,
trigger=None,
container=None,
**kwargs,
):
super(TransitionMixin, self).__init__(**kwargs)
def state_is_external(self, diagnostics=None, context=None):
"""A Transition with kind external can source any Vertex except entry points.
(kind = TransitionKind::external) implies
not (source.oclIsKindOf(Pseudostate) and source.oclAsType(Pseudostate).kind = PseudostateKind::entryPoint)"""
raise NotImplementedError(
"operation state_is_external(...) not yet implemented"
)
def join_segment_guards(self, diagnostics=None, context=None):
"""A join segment must not have Guards or Triggers.
(target.oclIsKindOf(Pseudostate) and target.oclAsType(Pseudostate).kind = PseudostateKind::join) implies (guard = null and trigger->isEmpty())"""
raise NotImplementedError(
"operation join_segment_guards(...) not yet implemented"
)
def state_is_internal(self, diagnostics=None, context=None):
"""A Transition with kind internal must have a State as its source, and its source and target must be equal.
(kind = TransitionKind::internal) implies
(source.oclIsKindOf (State) and source = target)"""
raise NotImplementedError(
"operation state_is_internal(...) not yet implemented"
)
def outgoing_pseudostates(self, diagnostics=None, context=None):
"""Transitions outgoing Pseudostates may not have a Trigger.
source.oclIsKindOf(Pseudostate) and (source.oclAsType(Pseudostate).kind <> PseudostateKind::initial) implies trigger->isEmpty()"""
raise NotImplementedError(
"operation outgoing_pseudostates(...) not yet implemented"
)
def join_segment_state(self, diagnostics=None, context=None):
"""A join segment must always originate from a State.
(target.oclIsKindOf(Pseudostate) and target.oclAsType(Pseudostate).kind = PseudostateKind::join) implies (source.oclIsKindOf(State))"""
raise NotImplementedError(
"operation join_segment_state(...) not yet implemented"
)
def fork_segment_state(self, diagnostics=None, context=None):
"""A fork segment must always target a State.
(source.oclIsKindOf(Pseudostate) and source.oclAsType(Pseudostate).kind = PseudostateKind::fork) implies (target.oclIsKindOf(State))"""
raise NotImplementedError(
"operation fork_segment_state(...) not yet implemented"
)
def state_is_local(self, diagnostics=None, context=None):
"""A Transition with kind local must have a composite State or an entry point as its source.
(kind = TransitionKind::local) implies
((source.oclIsKindOf (State) and source.oclAsType(State).isComposite) or
(source.oclIsKindOf (Pseudostate) and source.oclAsType(Pseudostate).kind = PseudostateKind::entryPoint))"""
raise NotImplementedError("operation state_is_local(...) not yet implemented")
def initial_transition(self, diagnostics=None, context=None):
"""An initial Transition at the topmost level Region of a StateMachine that has no Trigger.
(source.oclIsKindOf(Pseudostate) and container.stateMachine->notEmpty()) implies
trigger->isEmpty()"""
raise NotImplementedError(
"operation initial_transition(...) not yet implemented"
)
def fork_segment_guards(self, diagnostics=None, context=None):
"""A fork segment must not have Guards or Triggers.
(source.oclIsKindOf(Pseudostate) and source.oclAsType(Pseudostate).kind = PseudostateKind::fork) implies (guard = null and trigger->isEmpty())"""
raise NotImplementedError(
"operation fork_segment_guards(...) not yet implemented"
)
def containing_state_machine(self):
"""The query containingStateMachine() returns the StateMachine that contains the Transition either directly or transitively.
result = (container.containingStateMachine())
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation containing_state_machine(...) not yet implemented"
)
def redefinition_context(self):
"""The redefinition context of a Transition is the nearest containing StateMachine.
result = (let sm : StateMachine = containingStateMachine() in
if sm._'context' = null or sm.general->notEmpty() then
sm
else
sm._'context'
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation redefinition_context(...) not yet implemented"
)
class ConnectorMixin(object):
"""User defined mixin class for Connector."""
@property
def kind(self):
raise NotImplementedError("Missing implementation for kind")
def __init__(
self,
contract=None,
end=None,
kind=None,
redefinedConnector=None,
type=None,
**kwargs,
):
super(ConnectorMixin, self).__init__(**kwargs)
def types(self, diagnostics=None, context=None):
"""The types of the ConnectableElements that the ends of a Connector are attached to must conform to the types of the ends of the Association that types the Connector, if any.
type<>null implies
let noOfEnds : Integer = end->size() in
(type.memberEnd->size() = noOfEnds) and Sequence{1..noOfEnds}->forAll(i | end->at(i).role.type.conformsTo(type.memberEnd->at(i).type))"""
raise NotImplementedError("operation types(...) not yet implemented")
def roles(self, diagnostics=None, context=None):
"""The ConnectableElements attached as roles to each ConnectorEnd owned by a Connector must be owned or inherited roles of the Classifier that owned the Connector, or they must be Ports of such roles.
structuredClassifier <> null
and
end->forAll( e | structuredClassifier.allRoles()->includes(e.role)
or
e.role.oclIsKindOf(Port) and structuredClassifier.allRoles()->includes(e.partWithPort))"""
raise NotImplementedError("operation roles(...) not yet implemented")
def get_kind(self):
"""Derivation for Connector::/kind : ConnectorKind
result = (if end->exists(
role.oclIsKindOf(Port)
and partWithPort->isEmpty()
and not role.oclAsType(Port).isBehavior)
then ConnectorKind::delegation
else ConnectorKind::assembly
endif)
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_kind(...) not yet implemented")
class GeneralizationSetMixin(object):
"""User defined mixin class for GeneralizationSet."""
def __init__(
self,
isCovering=None,
isDisjoint=None,
powertype=None,
generalization=None,
**kwargs,
):
super(GeneralizationSetMixin, self).__init__(**kwargs)
def generalization_same_classifier(self, diagnostics=None, context=None):
"""Every Generalization associated with a particular GeneralizationSet must have the same general Classifier.
generalization->collect(general)->asSet()->size() <= 1"""
raise NotImplementedError(
"operation generalization_same_classifier(...) not yet implemented"
)
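    # A minimal validation sketch for the constraint above, assuming ``general``
    # is set on every linked Generalization. It only checks that at most one
    # general Classifier is referenced; diagnostics reporting is omitted.
    def _generalization_same_classifier_sketch(self):
        return len({gen.general for gen in self.generalization}) <= 1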
def maps_to_generalization_set(self, diagnostics=None, context=None):
"""The Classifier that maps to a GeneralizationSet may neither be a specific nor a general Classifier in any of the Generalization relationships defined for that GeneralizationSet. In other words, a power type may not be an instance of itself nor may its instances be its subclasses.
powertype <> null implies generalization->forAll( gen |
not (gen.general = powertype) and not gen.general.allParents()->includes(powertype) and not (gen.specific = powertype) and not powertype.allParents()->includes(gen.specific)
)"""
raise NotImplementedError(
"operation maps_to_generalization_set(...) not yet implemented"
)
class DerivedInheritedparameter(EDerivedCollection):
pass
class RedefinableTemplateSignatureMixin(object):
"""User defined mixin class for RedefinableTemplateSignature."""
def __init__(
self, extendedSignature=None, inheritedParameter=None, classifier=None, **kwargs
):
super(RedefinableTemplateSignatureMixin, self).__init__(**kwargs)
def redefines_parents(self, diagnostics=None, context=None):
"""If any of the parent Classifiers are a template, then the extendedSignature must include the signature of that Classifier.
classifier.allParents()->forAll(c | c.ownedTemplateSignature->notEmpty() implies self->closure(extendedSignature)->includes(c.ownedTemplateSignature))"""
raise NotImplementedError(
"operation redefines_parents(...) not yet implemented"
)
def get_inherited_parameters(self):
"""Derivation for RedefinableTemplateSignature::/inheritedParameter
result = (if extendedSignature->isEmpty() then Set{} else extendedSignature.parameter->asSet() endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation get_inherited_parameters(...) not yet implemented"
)
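    # A minimal sketch of the /inheritedParameter derivation quoted above,
    # assuming each extended signature exposes its ``parameter`` collection;
    # the stub itself is left unimplemented.
    def _get_inherited_parameters_sketch(self):
        if not self.extendedSignature:
            return set()
        return {p for sig in self.extendedSignature for p in sig.parameter}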
class ExtendMixin(object):
"""User defined mixin class for Extend."""
def __init__(
self,
condition=None,
extendedCase=None,
extensionLocation=None,
extension=None,
**kwargs,
):
super(ExtendMixin, self).__init__(**kwargs)
def extension_points(self, diagnostics=None, context=None):
"""The ExtensionPoints referenced by the Extend relationship must belong to the UseCase that is being extended.
extensionLocation->forAll (xp | extendedCase.extensionPoint->includes(xp))"""
raise NotImplementedError("operation extension_points(...) not yet implemented")
class IncludeMixin(object):
"""User defined mixin class for Include."""
def __init__(self, addition=None, includingCase=None, **kwargs):
super(IncludeMixin, self).__init__(**kwargs)
class ActivityPartitionMixin(object):
"""User defined mixin class for ActivityPartition."""
def __init__(
self,
isDimension=None,
isExternal=None,
node=None,
represents=None,
subpartition=None,
superPartition=None,
edge=None,
**kwargs,
):
super(ActivityPartitionMixin, self).__init__(**kwargs)
def represents_classifier(self, diagnostics=None, context=None):
"""If a non-external ActivityPartition represents a Classifier and has a superPartition, then the superPartition must represent a Classifier, and the Classifier of the subpartition must be nested (nestedClassifier or ownedBehavior) in the Classifier represented by the superPartition, or be at the contained end of a composition Association with the Classifier represented by the superPartition.
(not isExternal and represents.oclIsKindOf(Classifier) and superPartition->notEmpty()) implies
(
let representedClassifier : Classifier = represents.oclAsType(Classifier) in
superPartition.represents.oclIsKindOf(Classifier) and
let representedSuperClassifier : Classifier = superPartition.represents.oclAsType(Classifier) in
(representedSuperClassifier.oclIsKindOf(BehavioredClassifier) and representedClassifier.oclIsKindOf(Behavior) and
representedSuperClassifier.oclAsType(BehavioredClassifier).ownedBehavior->includes(representedClassifier.oclAsType(Behavior)))
or
(representedSuperClassifier.oclIsKindOf(Class) and representedSuperClassifier.oclAsType(Class).nestedClassifier->includes(representedClassifier))
or
(Association.allInstances()->exists(a | a.memberEnd->exists(end1 | end1.isComposite and end1.type = representedClassifier and
a.memberEnd->exists(end2 | end1<>end2 and end2.type = representedSuperClassifier))))
)"""
raise NotImplementedError(
"operation represents_classifier(...) not yet implemented"
)
def represents_property_and_is_contained(self, diagnostics=None, context=None):
"""If an ActivityPartition represents a Property and has a superPartition, then the Property must be of a Classifier represented by the superPartition, or of a Classifier that is the type of a Property represented by the superPartition.
(represents.oclIsKindOf(Property) and superPartition->notEmpty()) implies
(
(superPartition.represents.oclIsKindOf(Classifier) and represents.owner = superPartition.represents) or
(superPartition.represents.oclIsKindOf(Property) and represents.owner = superPartition.represents.oclAsType(Property).type)
)"""
raise NotImplementedError(
"operation represents_property_and_is_contained(...) not yet implemented"
)
def represents_property(self, diagnostics=None, context=None):
"""If an ActivityPartition represents a Property and has a superPartition representing a Classifier, then all the other non-external subpartitions of the superPartition must represent Properties directly owned by the same Classifier.
(represents.oclIsKindOf(Property) and superPartition->notEmpty() and superPartition.represents.oclIsKindOf(Classifier)) implies
(
let representedClassifier : Classifier = superPartition.represents.oclAsType(Classifier)
in
superPartition.subpartition->reject(isExternal)->forAll(p |
p.represents.oclIsKindOf(Property) and p.owner=representedClassifier)
)"""
raise NotImplementedError(
"operation represents_property(...) not yet implemented"
)
def dimension_not_contained(self, diagnostics=None, context=None):
"""An ActvivityPartition with isDimension = true may not be contained by another ActivityPartition.
isDimension implies superPartition->isEmpty()"""
raise NotImplementedError(
"operation dimension_not_contained(...) not yet implemented"
)
class DerivedIngroup(EDerivedCollection):
pass
class ActivityNodeMixin(object):
"""User defined mixin class for ActivityNode."""
@property
def activity(self):
raise NotImplementedError("Missing implementation for activity")
@activity.setter
def activity(self, value):
raise NotImplementedError("Missing implementation for activity")
def __init__(
self,
activity=None,
inGroup=None,
inInterruptibleRegion=None,
inStructuredNode=None,
incoming=None,
outgoing=None,
redefinedNode=None,
inPartition=None,
**kwargs,
):
super(ActivityNodeMixin, self).__init__(**kwargs)
class InterruptibleActivityRegionMixin(object):
"""User defined mixin class for InterruptibleActivityRegion."""
def __init__(self, interruptingEdge=None, node=None, **kwargs):
super(InterruptibleActivityRegionMixin, self).__init__(**kwargs)
def interrupting_edges(self, diagnostics=None, context=None):
"""The interruptingEdges of an InterruptibleActivityRegion must have their source in the region and their target outside the region, but within the same Activity containing the region.
interruptingEdge->forAll(edge |
node->includes(edge.source) and node->excludes(edge.target) and edge.target.containingActivity() = inActivity)"""
raise NotImplementedError(
"operation interrupting_edges(...) not yet implemented"
)
class ControlFlowMixin(object):
"""User defined mixin class for ControlFlow."""
def __init__(self, **kwargs):
super(ControlFlowMixin, self).__init__(**kwargs)
def object_nodes(self, diagnostics=None, context=None):
"""ControlFlows may not have ObjectNodes at either end, except for ObjectNodes with control type.
(source.oclIsKindOf(ObjectNode) implies source.oclAsType(ObjectNode).isControlType) and
(target.oclIsKindOf(ObjectNode) implies target.oclAsType(ObjectNode).isControlType)"""
raise NotImplementedError("operation object_nodes(...) not yet implemented")
class ObjectFlowMixin(object):
"""User defined mixin class for ObjectFlow."""
def __init__(
self,
isMulticast=None,
isMultireceive=None,
selection=None,
transformation=None,
**kwargs,
):
super(ObjectFlowMixin, self).__init__(**kwargs)
def input_and_output_parameter(self, diagnostics=None, context=None):
"""A selection Behavior has one input Parameter and one output Parameter. The input Parameter must have the same as or a supertype of the type of the source ObjectNode, be non-unique and have multiplicity 0..*. The output Parameter must be the same or a subtype of the type of source ObjectNode. The Behavior cannot have side effects.
selection<>null implies
selection.inputParameters()->size()=1 and
selection.inputParameters()->forAll(not isUnique and is(0,*)) and
selection.outputParameters()->size()=1"""
raise NotImplementedError(
"operation input_and_output_parameter(...) not yet implemented"
)
def no_executable_nodes(self, diagnostics=None, context=None):
"""ObjectFlows may not have ExecutableNodes at either end.
not (source.oclIsKindOf(ExecutableNode) or target.oclIsKindOf(ExecutableNode))"""
raise NotImplementedError(
"operation no_executable_nodes(...) not yet implemented"
)
def transformation_behavior(self, diagnostics=None, context=None):
"""A transformation Behavior has one input Parameter and one output Parameter. The input Parameter must be the same as or a supertype of the type of object token coming from the source end. The output Parameter must be the same or a subtype of the type of object token expected downstream. The Behavior cannot have side effects.
transformation<>null implies
transformation.inputParameters()->size()=1 and
transformation.outputParameters()->size()=1"""
raise NotImplementedError(
"operation transformation_behavior(...) not yet implemented"
)
def selection_behavior(self, diagnostics=None, context=None):
"""An ObjectFlow may have a selection Behavior only if it has an ObjectNode as its source.
selection<>null implies source.oclIsKindOf(ObjectNode)"""
raise NotImplementedError(
"operation selection_behavior(...) not yet implemented"
)
def compatible_types(self, diagnostics=None, context=None):
"""ObjectNodes connected by an ObjectFlow, with optionally intervening ControlNodes, must have compatible types. In particular, the downstream ObjectNode type must be the same or a supertype of the upstream ObjectNode type."""
raise NotImplementedError("operation compatible_types(...) not yet implemented")
def same_upper_bounds(self, diagnostics=None, context=None):
"""ObjectNodes connected by an ObjectFlow, with optionally intervening ControlNodes, must have the same upperBounds."""
raise NotImplementedError(
"operation same_upper_bounds(...) not yet implemented"
)
def target(self, diagnostics=None, context=None):
"""An ObjectFlow with a constant weight may not target an ObjectNode, with optionally intervening ControlNodes, that has an upper bound less than the weight."""
raise NotImplementedError("operation target(...) not yet implemented")
def is_multicast_or_is_multireceive(self, diagnostics=None, context=None):
"""isMulticast and isMultireceive cannot both be true.
not (isMulticast and isMultireceive)"""
raise NotImplementedError(
"operation is_multicast_or_is_multireceive(...) not yet implemented"
)
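    # A minimal validation sketch of the constraint above: isMulticast and
    # isMultireceive may not both be true. Diagnostics reporting is omitted.
    def _is_multicast_or_is_multireceive_sketch(self):
        return not (self.isMulticast and self.isMultireceive)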
class ObservationMixin(object):
"""User defined mixin class for Observation."""
def __init__(self, **kwargs):
super(ObservationMixin, self).__init__(**kwargs)
class PartDecompositionMixin(object):
"""User defined mixin class for PartDecomposition."""
def __init__(self, **kwargs):
super(PartDecompositionMixin, self).__init__(**kwargs)
def commutativity_of_decomposition(self, diagnostics=None, context=None):
"""Assume that within Interaction X, Lifeline L is of class C and decomposed to D. Assume also that there is within X an InteractionUse (say) U that covers L. According to the constraint above U will have a counterpart CU within D. Within the Interaction referenced by U, L should also be decomposed, and the decomposition should reference CU. (This rule is called commutativity of decomposition.)"""
raise NotImplementedError(
"operation commutativity_of_decomposition(...) not yet implemented"
)
def assume(self, diagnostics=None, context=None):
"""Assume that within Interaction X, Lifeline L is of class C and decomposed to D. Within X there is a sequence of constructs along L (such constructs are CombinedFragments, InteractionUse and (plain) OccurrenceSpecifications). Then a corresponding sequence of constructs must appear within D, matched one-to-one in the same order. i) CombinedFragment covering L are matched with an extra-global CombinedFragment in D. ii) An InteractionUse covering L is matched with a global (i.e., covering all Lifelines) InteractionUse in D. iii) A plain OccurrenceSpecification on L is considered an actualGate that must be matched by a formalGate of D."""
raise NotImplementedError("operation assume(...) not yet implemented")
def parts_of_internal_structures(self, diagnostics=None, context=None):
"""PartDecompositions apply only to Parts that are Parts of Internal Structures not to Parts of Collaborations."""
raise NotImplementedError(
"operation parts_of_internal_structures(...) not yet implemented"
)
class InteractionOperandMixin(object):
"""User defined mixin class for InteractionOperand."""
def __init__(self, fragment=None, guard=None, **kwargs):
super(InteractionOperandMixin, self).__init__(**kwargs)
def guard_contain_references(self, diagnostics=None, context=None):
"""The guard must contain only references to values local to the Lifeline on which it resides, or values global to the whole Interaction."""
raise NotImplementedError(
"operation guard_contain_references(...) not yet implemented"
)
def guard_directly_prior(self, diagnostics=None, context=None):
"""The guard must be placed directly prior to (above) the OccurrenceSpecification that will become the first OccurrenceSpecification within this InteractionOperand."""
raise NotImplementedError(
"operation guard_directly_prior(...) not yet implemented"
)
class ActionExecutionSpecificationMixin(object):
"""User defined mixin class for ActionExecutionSpecification."""
def __init__(self, action=None, **kwargs):
super(ActionExecutionSpecificationMixin, self).__init__(**kwargs)
def action_referenced(self, diagnostics=None, context=None):
"""The Action referenced by the ActionExecutionSpecification must be owned by the Interaction owning that ActionExecutionSpecification.
(enclosingInteraction->notEmpty() or enclosingOperand.combinedFragment->notEmpty()) and
let parentInteraction : Set(Interaction) = enclosingInteraction.oclAsType(Interaction)->asSet()->union(
enclosingOperand.combinedFragment->closure(enclosingOperand.combinedFragment)->
collect(enclosingInteraction).oclAsType(Interaction)->asSet()) in
(parentInteraction->size() = 1) and self.action.interaction->asSet() = parentInteraction"""
raise NotImplementedError(
"operation action_referenced(...) not yet implemented"
)
class BehaviorExecutionSpecificationMixin(object):
"""User defined mixin class for BehaviorExecutionSpecification."""
def __init__(self, behavior=None, **kwargs):
super(BehaviorExecutionSpecificationMixin, self).__init__(**kwargs)
class ConsiderIgnoreFragmentMixin(object):
"""User defined mixin class for ConsiderIgnoreFragment."""
def __init__(self, message=None, **kwargs):
super(ConsiderIgnoreFragmentMixin, self).__init__(**kwargs)
def consider_or_ignore(self, diagnostics=None, context=None):
"""The interaction operator of a ConsiderIgnoreFragment must be either 'consider' or 'ignore'.
(interactionOperator = InteractionOperatorKind::consider) or (interactionOperator = InteractionOperatorKind::ignore)"""
raise NotImplementedError(
"operation consider_or_ignore(...) not yet implemented"
)
def type(self, diagnostics=None, context=None):
"""The NamedElements must be of a type of element that can be a signature for a message (i.e.., an Operation, or a Signal).
message->forAll(m | m.oclIsKindOf(Operation) or m.oclIsKindOf(Signal))"""
raise NotImplementedError("operation type(...) not yet implemented")
class ExecutionOccurrenceSpecificationMixin(object):
"""User defined mixin class for ExecutionOccurrenceSpecification."""
def __init__(self, execution=None, **kwargs):
super(ExecutionOccurrenceSpecificationMixin, self).__init__(**kwargs)
class ValueSpecificationMixin(object):
"""User defined mixin class for ValueSpecification."""
def __init__(self, **kwargs):
super(ValueSpecificationMixin, self).__init__(**kwargs)
def boolean_value(self):
"""The query booleanValue() gives a single Boolean value when one can be computed.
result = (null)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation boolean_value(...) not yet implemented")
def integer_value(self):
"""The query integerValue() gives a single Integer value when one can be computed.
result = (null)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation integer_value(...) not yet implemented")
def is_computable(self):
"""The query isComputable() determines whether a value specification can be computed in a model. This operation cannot be fully defined in OCL. A conforming implementation is expected to deliver true for this operation for all ValueSpecifications that it can compute, and to compute all of those for which the operation is true. A conforming implementation is expected to be able to compute at least the value of all LiteralSpecifications.
result = (false)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation is_computable(...) not yet implemented")
def is_null(self):
"""The query isNull() returns true when it can be computed that the value is null.
result = (false)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation is_null(...) not yet implemented")
def real_value(self):
"""The query realValue() gives a single Real value when one can be computed.
result = (null)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation real_value(...) not yet implemented")
def string_value(self):
"""The query stringValue() gives a single String value when one can be computed.
result = (null)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation string_value(...) not yet implemented")
def unlimited_value(self):
"""The query unlimitedValue() gives a single UnlimitedNatural value when one can be computed.
result = (null)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation unlimited_value(...) not yet implemented")
class BehavioralFeatureMixin(object):
"""User defined mixin class for BehavioralFeature."""
def __init__(
self,
concurrency=None,
isAbstract=None,
method=None,
ownedParameter=None,
ownedParameterSet=None,
raisedException=None,
**kwargs,
):
super(BehavioralFeatureMixin, self).__init__(**kwargs)
def abstract_no_method(self, diagnostics=None, context=None):
"""When isAbstract is true there are no methods.
isAbstract implies method->isEmpty()"""
raise NotImplementedError(
"operation abstract_no_method(...) not yet implemented"
)
def create_return_result(self, name=None, type=None):
"""Creates a return result parameter with the specified name and type."""
raise NotImplementedError(
"operation create_return_result(...) not yet implemented"
)
def input_parameters(self):
"""The ownedParameters with direction in and inout.
result = (ownedParameter->select(direction=ParameterDirectionKind::_'in' or direction=ParameterDirectionKind::inout))
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation input_parameters(...) not yet implemented")
def output_parameters(self):
"""The ownedParameters with direction out, inout, or return.
result = (ownedParameter->select(direction=ParameterDirectionKind::out or direction=ParameterDirectionKind::inout or direction=ParameterDirectionKind::return))
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation output_parameters(...) not yet implemented"
)
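    # Minimal sketches of the inputParameters()/outputParameters() queries above,
    # assuming ``direction`` holds a pyecore EEnumLiteral whose ``name`` matches the
    # UML literal ('in', 'inout', 'out', 'return'); the stubs stay unimplemented.
    def _input_parameters_sketch(self):
        return [p for p in self.ownedParameter
                if p.direction is not None
                and p.direction.name in ('in', 'inout')]

    def _output_parameters_sketch(self):
        return [p for p in self.ownedParameter
                if p.direction is not None
                and p.direction.name in ('out', 'inout', 'return')]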
class StateMixin(object):
"""User defined mixin class for State."""
@property
def isComposite(self):
raise NotImplementedError("Missing implementation for isComposite")
@property
def isOrthogonal(self):
raise NotImplementedError("Missing implementation for isOrthogonal")
@property
def isSimple(self):
raise NotImplementedError("Missing implementation for isSimple")
@property
def isSubmachineState(self):
raise NotImplementedError("Missing implementation for isSubmachineState")
def __init__(
self,
connection=None,
connectionPoint=None,
deferrableTrigger=None,
doActivity=None,
entry=None,
exit=None,
isComposite=None,
isOrthogonal=None,
isSimple=None,
isSubmachineState=None,
redefinedState=None,
stateInvariant=None,
submachine=None,
region=None,
**kwargs,
):
super(StateMixin, self).__init__(**kwargs)
def entry_or_exit(self, diagnostics=None, context=None):
"""Only entry or exit Pseudostates can serve as connection points.
connectionPoint->forAll(kind = PseudostateKind::entryPoint or kind = PseudostateKind::exitPoint)"""
raise NotImplementedError("operation entry_or_exit(...) not yet implemented")
def submachine_states(self, diagnostics=None, context=None):
"""Only submachine States can have connection point references.
isSubmachineState implies connection->notEmpty( )"""
raise NotImplementedError(
"operation submachine_states(...) not yet implemented"
)
def composite_states(self, diagnostics=None, context=None):
"""Only composite States can have entry or exit Pseudostates defined.
connectionPoint->notEmpty() implies isComposite"""
raise NotImplementedError("operation composite_states(...) not yet implemented")
def destinations_or_sources_of_transitions(self, diagnostics=None, context=None):
"""The connection point references used as destinations/sources of Transitions associated with a submachine State must be defined as entry/exit points in the submachine StateMachine.
self.isSubmachineState implies (self.connection->forAll (cp |
cp.entry->forAll (ps | ps.stateMachine = self.submachine) and
cp.exit->forAll (ps | ps.stateMachine = self.submachine)))"""
raise NotImplementedError(
"operation destinations_or_sources_of_transitions(...) not yet implemented"
)
def submachine_or_regions(self, diagnostics=None, context=None):
"""A State is not allowed to have both a submachine and Regions.
isComposite implies not isSubmachineState"""
raise NotImplementedError(
"operation submachine_or_regions(...) not yet implemented"
)
def is_composite(self):
"""A composite State is a State with at least one Region.
result = (region->notEmpty())
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation is_composite(...) not yet implemented")
def is_orthogonal(self):
"""An orthogonal State is a composite state with at least 2 regions.
result = (region->size () > 1)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation is_orthogonal(...) not yet implemented")
def is_simple(self):
"""A simple State is a State without any regions.
result = ((region->isEmpty()) and not isSubmachineState())
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation is_simple(...) not yet implemented")
def is_submachine_state(self):
"""Only submachine State references another StateMachine.
result = (submachine <> null)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation is_submachine_state(...) not yet implemented"
)
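    # Minimal sketches of the four state-kind queries above (isComposite,
    # isOrthogonal, isSimple, isSubmachineState), assuming the generated
    # ``region`` and ``submachine`` features are populated; the library stubs
    # are kept as-is.
    def _is_composite_sketch(self):
        return len(self.region) > 0

    def _is_orthogonal_sketch(self):
        return len(self.region) > 1

    def _is_submachine_state_sketch(self):
        return self.submachine is not None

    def _is_simple_sketch(self):
        return len(self.region) == 0 and not self._is_submachine_state_sketch()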
def redefinition_context(self):
"""The redefinition context of a State is the nearest containing StateMachine.
result = (let sm : StateMachine = containingStateMachine() in
if sm._'context' = null or sm.general->notEmpty() then
sm
else
sm._'context'
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError(
"operation redefinition_context(...) not yet implemented"
)
class ExecutableNodeMixin(object):
"""User defined mixin class for ExecutableNode."""
def __init__(self, handler=None, **kwargs):
super(ExecutableNodeMixin, self).__init__(**kwargs)
class ControlNodeMixin(object):
"""User defined mixin class for ControlNode."""
def __init__(self, **kwargs):
super(ControlNodeMixin, self).__init__(**kwargs)
class MessageEventMixin(object):
"""User defined mixin class for MessageEvent."""
def __init__(self, **kwargs):
super(MessageEventMixin, self).__init__(**kwargs)
class ChangeEventMixin(object):
"""User defined mixin class for ChangeEvent."""
def __init__(self, changeExpression=None, **kwargs):
super(ChangeEventMixin, self).__init__(**kwargs)
class TimeEventMixin(object):
"""User defined mixin class for TimeEvent."""
def __init__(self, isRelative=None, when=None, **kwargs):
super(TimeEventMixin, self).__init__(**kwargs)
def when_non_negative(self, diagnostics=None, context=None):
"""The ValueSpecification when must return a non-negative Integer.
when.integerValue() >= 0"""
raise NotImplementedError(
"operation when_non_negative(...) not yet implemented"
)
class InteractionConstraintMixin(object):
"""User defined mixin class for InteractionConstraint."""
def __init__(self, maxint=None, minint=None, **kwargs):
super(InteractionConstraintMixin, self).__init__(**kwargs)
def minint_maxint(self, diagnostics=None, context=None):
"""Minint/maxint can only be present if the InteractionConstraint is associated with the operand of a loop CombinedFragment.
maxint->notEmpty() or minint->notEmpty() implies
interactionOperand.combinedFragment.interactionOperator =
InteractionOperatorKind::loop"""
raise NotImplementedError("operation minint_maxint(...) not yet implemented")
def minint_non_negative(self, diagnostics=None, context=None):
"""If minint is specified, then the expression must evaluate to a non-negative integer.
minint->notEmpty() implies
minint->asSequence()->first().integerValue() >= 0"""
raise NotImplementedError(
"operation minint_non_negative(...) not yet implemented"
)
def maxint_positive(self, diagnostics=None, context=None):
"""If maxint is specified, then the expression must evaluate to a positive integer.
maxint->notEmpty() implies
maxint->asSequence()->first().integerValue() > 0"""
raise NotImplementedError("operation maxint_positive(...) not yet implemented")
def dynamic_variables(self, diagnostics=None, context=None):
"""The dynamic variables that take part in the constraint must be owned by the ConnectableElement corresponding to the covered Lifeline."""
raise NotImplementedError(
"operation dynamic_variables(...) not yet implemented"
)
def global_data(self, diagnostics=None, context=None):
"""The constraint may contain references to global data or write-once data."""
raise NotImplementedError("operation global_data(...) not yet implemented")
def maxint_greater_equal_minint(self, diagnostics=None, context=None):
"""If maxint is specified, then minint must be specified and the evaluation of maxint must be >= the evaluation of minint.
maxint->notEmpty() implies (minint->notEmpty() and
maxint->asSequence()->first().integerValue() >=
minint->asSequence()->first().integerValue() )"""
raise NotImplementedError(
"operation maxint_greater_equal_minint(...) not yet implemented"
)
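    # A minimal sketch combining the three numeric checks above, assuming minint
    # and maxint (when present) are ValueSpecifications whose integer_value() has
    # been implemented; the individual constraint stubs stay unimplemented.
    def _check_bounds_sketch(self):
        lo = self.minint.integer_value() if self.minint is not None else None
        hi = self.maxint.integer_value() if self.maxint is not None else None
        if lo is not None and lo < 0:
            return False
        if hi is not None and (hi <= 0 or lo is None or hi < lo):
            return False
        return True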
class MessageOccurrenceSpecificationMixin(object):
"""User defined mixin class for MessageOccurrenceSpecification."""
def __init__(self, **kwargs):
super(MessageOccurrenceSpecificationMixin, self).__init__(**kwargs)
class DerivedReferred(EDerivedCollection):
pass
class ProtocolTransitionMixin(object):
"""User defined mixin class for ProtocolTransition."""
def __init__(self, postCondition=None, preCondition=None, referred=None, **kwargs):
super(ProtocolTransitionMixin, self).__init__(**kwargs)
def refers_to_operation(self, diagnostics=None, context=None):
"""If a ProtocolTransition refers to an Operation (i.e., has a CallEvent trigger corresponding to an Operation), then that Operation should apply to the context Classifier of the StateMachine of the ProtocolTransition.
if (referred()->notEmpty() and containingStateMachine()._'context'->notEmpty()) then
containingStateMachine()._'context'.oclAsType(BehavioredClassifier).allFeatures()->includesAll(referred())
else true endif"""
raise NotImplementedError(
"operation refers_to_operation(...) not yet implemented"
)
def associated_actions(self, diagnostics=None, context=None):
"""A ProtocolTransition never has associated Behaviors.
effect = null"""
raise NotImplementedError(
"operation associated_actions(...) not yet implemented"
)
def belongs_to_psm(self, diagnostics=None, context=None):
"""A ProtocolTransition always belongs to a ProtocolStateMachine.
container.belongsToPSM()"""
raise NotImplementedError("operation belongs_to_psm(...) not yet implemented")
def get_referreds(self):
"""Derivation for ProtocolTransition::/referred
result = (trigger->collect(event)->select(oclIsKindOf(CallEvent))->collect(oclAsType(CallEvent).operation)->asSet())
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation get_referreds(...) not yet implemented")
class IntervalConstraintMixin(object):
"""User defined mixin class for IntervalConstraint."""
def __init__(self, **kwargs):
super(IntervalConstraintMixin, self).__init__(**kwargs)
class DurationObservationMixin(object):
"""User defined mixin class for DurationObservation."""
def __init__(self, event=None, firstEvent=None, **kwargs):
super(DurationObservationMixin, self).__init__(**kwargs)
def first_event_multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of firstEvent must be 2 if the multiplicity of event is 2. Otherwise the multiplicity of firstEvent is 0.
if (event->size() = 2)
then (firstEvent->size() = 2) else (firstEvent->size() = 0)
endif"""
raise NotImplementedError(
"operation first_event_multiplicity(...) not yet implemented"
)
class TimeObservationMixin(object):
"""User defined mixin class for TimeObservation."""
def __init__(self, event=None, firstEvent=None, **kwargs):
super(TimeObservationMixin, self).__init__(**kwargs)
class DerivedNestedpackage(EDerivedCollection):
def _get_collection(self):
from .uml import Package
return [e for e in self.owner.packagedElement if isinstance(e, Package)]
def __len__(self):
return len(self._get_collection())
def __getitem__(self, index):
return self._get_collection()[index]
def __repr__(self):
return "DerivedCollection({})".format(self._get_collection())
class DerivedOwnedstereotype(EDerivedCollection):
def _get_collection(self):
from .uml import Stereotype
return [e for e in self.owner.packagedElement if isinstance(e, Stereotype)]
def __contains__(self, x):
return x in self._get_collection()
def __len__(self):
return len(self._get_collection())
def __getitem__(self, index):
return self._get_collection()[index]
def __repr__(self):
return "DerivedCollection({})".format(self._get_collection())
class DerivedOwnedtype(EDerivedCollection):
pass
class PackageMixin(object):
"""User defined mixin class for Package."""
@property
def nestingPackage(self):
from .uml import Package
if isinstance(self.owner, Package):
return self.owner
return None
@nestingPackage.setter
def nestingPackage(self, value):
from .uml import Package
check(value, Package)
        # Detach from the current nesting package (if any) before attaching to
        # the new one; a None value simply leaves this Package un-nested.
        if self.nestingPackage is not None:
            self.nestingPackage.packagedElement.remove(self)
        if value is not None:
            value.packagedElement.append(self)
def __init__(
self,
URI=None,
nestedPackage=None,
nestingPackage=None,
ownedStereotype=None,
ownedType=None,
packageMerge=None,
packagedElement=None,
profileApplication=None,
**kwargs,
):
super(PackageMixin, self).__init__(**kwargs)
def elements_public_or_private(self, diagnostics=None, context=None):
"""If an element that is owned by a package has visibility, it is public or private.
packagedElement->forAll(e | e.visibility<> null implies e.visibility = VisibilityKind::public or e.visibility = VisibilityKind::private)"""
raise NotImplementedError(
"operation elements_public_or_private(...) not yet implemented"
)
def apply_profile(self, profile=None):
"""Applies the current definition of the specified profile to this package and automatically applies required stereotypes in the profile to elements within this package's namespace hieararchy. If a different definition is already applied, automatically migrates any associated stereotype values on a "best effort" basis (matching classifiers and structural features by name)."""
raise NotImplementedError("operation apply_profile(...) not yet implemented")
def create_owned_class(self, name=None, isAbstract=None):
"""Creates a(n) (abstract) class with the specified name as an owned type of this package."""
raise NotImplementedError(
"operation create_owned_class(...) not yet implemented"
)
def create_owned_enumeration(self, name=None):
"""Creates a enumeration with the specified name as an owned type of this package."""
raise NotImplementedError(
"operation create_owned_enumeration(...) not yet implemented"
)
def create_owned_interface(self, name=None):
"""Creates an interface with the specified name as an owned type of this package."""
raise NotImplementedError(
"operation create_owned_interface(...) not yet implemented"
)
def create_owned_primitive_type(self, name=None):
"""Creates a primitive type with the specified name as an owned type of this package."""
raise NotImplementedError(
"operation create_owned_primitive_type(...) not yet implemented"
)
def create_owned_stereotype(self, name=None, isAbstract=None):
"""Creates a(n) (abstract) stereotype with the specified name as an owned stereotype of this profile."""
raise NotImplementedError(
"operation create_owned_stereotype(...) not yet implemented"
)
def get_all_applied_profiles(self):
"""Retrieves all the profiles that are applied to this package, including profiles applied to its nesting package(s)."""
raise NotImplementedError(
"operation get_all_applied_profiles(...) not yet implemented"
)
def get_all_profile_applications(self):
"""Retrieves all the profile applications for this package, including profile applications for its nesting package(s)."""
raise NotImplementedError(
"operation get_all_profile_applications(...) not yet implemented"
)
    def get_applied_profile(self, qualifiedName=None, recurse=None):
        """Retrieves the profile with the specified qualified name that is applied to this package or, if recurse is indicated, to any of its nesting packages, or null if no such profile is applied."""
        raise NotImplementedError(
            "operation get_applied_profile(...) not yet implemented"
        )
def get_applied_profiles(self):
"""Retrieves the profiles that are applied to this package."""
raise NotImplementedError(
"operation get_applied_profiles(...) not yet implemented"
)
    def get_profile_application(self, profile=None, recurse=None):
        """Retrieves the application of the specified profile to this package or, if recurse is indicated, to any of its nesting packages, or null if no such profile is applied."""
        raise NotImplementedError(
            "operation get_profile_application(...) not yet implemented"
        )
def is_model_library(self):
"""Determines whether this package is a model library."""
raise NotImplementedError("operation is_model_library(...) not yet implemented")
def is_profile_applied(self, profile=None):
"""Determines whether the specified profile is applied to this package."""
raise NotImplementedError(
"operation is_profile_applied(...) not yet implemented"
)
def unapply_profile(self, profile=None):
"""Unapplies the specified profile from this package and automatically unapplies stereotypes in the profile from elements within this package's namespace hieararchy."""
raise NotImplementedError("operation unapply_profile(...) not yet implemented")
def apply_profiles(self, profiles=None):
"""Applies the current definitions of the specified profiles to this package and automatically applies required stereotypes in the profiles to elements within this package's namespace hieararchy. If different definitions are already applied, automatically migrates any associated stereotype values on a "best effort" basis (matching classifiers and structural features by name)."""
raise NotImplementedError("operation apply_profiles(...) not yet implemented")
def all_applicable_stereotypes(self):
"""The query allApplicableStereotypes() returns all the directly or indirectly owned stereotypes, including stereotypes contained in sub-profiles.
result = (let ownedPackages : Bag(Package) = ownedMember->select(oclIsKindOf(Package))->collect(oclAsType(Package)) in
ownedStereotype->union(ownedPackages.allApplicableStereotypes())->flatten()->asSet()
)
<p>From package UML::Packages.</p>"""
raise NotImplementedError(
"operation all_applicable_stereotypes(...) not yet implemented"
)
def containing_profile(self):
"""The query containingProfile() returns the closest profile directly or indirectly containing this package (or this package itself, if it is a profile).
result = (if self.oclIsKindOf(Profile) then
self.oclAsType(Profile)
else
self.namespace.oclAsType(Package).containingProfile()
endif)
<p>From package UML::Packages.</p>"""
raise NotImplementedError(
"operation containing_profile(...) not yet implemented"
)
def makes_visible(self, el=None):
"""The query makesVisible() defines whether a Package makes an element visible outside itself. Elements with no visibility and elements with public visibility are made visible.
member->includes(el)
result = (ownedMember->includes(el) or
(elementImport->select(ei|ei.importedElement = VisibilityKind::public)->collect(importedElement.oclAsType(NamedElement))->includes(el)) or
(packageImport->select(visibility = VisibilityKind::public)->collect(importedPackage.member->includes(el))->notEmpty()))
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation makes_visible(...) not yet implemented")
def get_nested_packages(self):
"""Derivation for Package::/nestedPackage
result = (packagedElement->select(oclIsKindOf(Package))->collect(oclAsType(Package))->asSet())
<p>From package UML::Packages.</p>"""
return self.nestedPackage
def get_owned_stereotypes(self):
"""Derivation for Package::/ownedStereotype
result = (packagedElement->select(oclIsKindOf(Stereotype))->collect(oclAsType(Stereotype))->asSet())
<p>From package UML::Packages.</p>"""
return self.ownedStereotype
def get_owned_types(self):
"""Derivation for Package::/ownedType
result = (packagedElement->select(oclIsKindOf(Type))->collect(oclAsType(Type))->asSet())
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation get_owned_types(...) not yet implemented")
def visible_members(self):
"""The query visibleMembers() defines which members of a Package can be accessed outside it.
result = (member->select( m | m.oclIsKindOf(PackageableElement) and self.makesVisible(m))->collect(oclAsType(PackageableElement))->asSet())
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation visible_members(...) not yet implemented")
class DependencyMixin(object):
"""User defined mixin class for Dependency."""
def __init__(self, client=None, supplier=None, **kwargs):
super(DependencyMixin, self).__init__(**kwargs)
class OpaqueExpressionMixin(object):
"""User defined mixin class for OpaqueExpression."""
@property
def result(self):
raise NotImplementedError("Missing implementation for result")
def __init__(self, behavior=None, body=None, language=None, result=None, **kwargs):
super(OpaqueExpressionMixin, self).__init__(**kwargs)
def language_body_size(self, diagnostics=None, context=None):
"""If the language attribute is not empty, then the size of the body and language arrays must be the same.
language->notEmpty() implies (_'body'->size() = language->size())"""
raise NotImplementedError(
"operation language_body_size(...) not yet implemented"
)
def one_return_result_parameter(self, diagnostics=None, context=None):
"""The behavior must have exactly one return result parameter.
behavior <> null implies
behavior.ownedParameter->select(direction=ParameterDirectionKind::return)->size() = 1"""
raise NotImplementedError(
"operation one_return_result_parameter(...) not yet implemented"
)
def only_return_result_parameters(self, diagnostics=None, context=None):
"""The behavior may only have return result parameters.
behavior <> null implies behavior.ownedParameter->select(direction<>ParameterDirectionKind::return)->isEmpty()"""
raise NotImplementedError(
"operation only_return_result_parameters(...) not yet implemented"
)
def is_integral(self):
"""The query isIntegral() tells whether an expression is intended to produce an Integer.
result = (false)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation is_integral(...) not yet implemented")
def is_non_negative(self):
"""The query isNonNegative() tells whether an integer expression has a non-negative value.
self.isIntegral()
result = (false)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation is_non_negative(...) not yet implemented")
def is_positive(self):
"""The query isPositive() tells whether an integer expression has a positive value.
self.isIntegral()
result = (false)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation is_positive(...) not yet implemented")
def get_result(self):
"""Derivation for OpaqueExpression::/result
result = (if behavior = null then
null
else
behavior.ownedParameter->first()
endif)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation get_result(...) not yet implemented")
def value(self):
"""The query value() gives an integer value for an expression intended to produce one.
self.isIntegral()
result = (0)
<p>From package UML::Values.</p>"""
raise NotImplementedError("operation value(...) not yet implemented")
class ParameterMixin(object):
"""User defined mixin class for Parameter."""
@property
def default(self):
raise NotImplementedError("Missing implementation for default")
@default.setter
def default(self, value):
raise NotImplementedError("Missing implementation for default")
def __init__(
self,
default=None,
defaultValue=None,
direction=None,
effect=None,
isException=None,
isStream=None,
operation=None,
parameterSet=None,
**kwargs,
):
super(ParameterMixin, self).__init__(**kwargs)
def in_and_out(self, diagnostics=None, context=None):
"""Only in and inout Parameters may have a delete effect. Only out, inout, and return Parameters may have a create effect.
(effect = ParameterEffectKind::delete implies (direction = ParameterDirectionKind::_'in' or direction = ParameterDirectionKind::inout))
and
(effect = ParameterEffectKind::create implies (direction = ParameterDirectionKind::out or direction = ParameterDirectionKind::inout or direction = ParameterDirectionKind::return))"""
raise NotImplementedError("operation in_and_out(...) not yet implemented")
def not_exception(self, diagnostics=None, context=None):
"""An input Parameter cannot be an exception.
isException implies (direction <> ParameterDirectionKind::_'in' and direction <> ParameterDirectionKind::inout)"""
raise NotImplementedError("operation not_exception(...) not yet implemented")
def connector_end(self, diagnostics=None, context=None):
"""A Parameter may only be associated with a Connector end within the context of a Collaboration.
end->notEmpty() implies collaboration->notEmpty()"""
raise NotImplementedError("operation connector_end(...) not yet implemented")
def reentrant_behaviors(self, diagnostics=None, context=None):
"""Reentrant behaviors cannot have stream Parameters.
(isStream and behavior <> null) implies not behavior.isReentrant"""
raise NotImplementedError(
"operation reentrant_behaviors(...) not yet implemented"
)
def stream_and_exception(self, diagnostics=None, context=None):
"""A Parameter cannot be a stream and exception at the same time.
not (isException and isStream)"""
raise NotImplementedError(
"operation stream_and_exception(...) not yet implemented"
)
def object_effect(self, diagnostics=None, context=None):
"""Parameters typed by DataTypes cannot have an effect.
(type.oclIsKindOf(DataType)) implies (effect = null)"""
raise NotImplementedError("operation object_effect(...) not yet implemented")
def is_set_default(self):
raise NotImplementedError("operation is_set_default(...) not yet implemented")
def set_boolean_default_value(self, value=None):
"""Sets the default value for this parameter to the specified Boolean value."""
raise NotImplementedError(
"operation set_boolean_default_value(...) not yet implemented"
)
def set_default(self, newDefault=None):
raise NotImplementedError("operation set_default(...) not yet implemented")
def set_integer_default_value(self, value=None):
"""Sets the default value for this parameter to the specified integer value."""
raise NotImplementedError(
"operation set_integer_default_value(...) not yet implemented"
)
def set_null_default_value(self):
"""Sets the default value for this parameter to the null value."""
raise NotImplementedError(
"operation set_null_default_value(...) not yet implemented"
)
def set_real_default_value(self, value=None):
"""Sets the default value for this parameter to the specified real value."""
raise NotImplementedError(
"operation set_real_default_value(...) not yet implemented"
)
def set_string_default_value(self, value=None):
"""Sets the default value for this parameter to the specified string value."""
raise NotImplementedError(
"operation set_string_default_value(...) not yet implemented"
)
def set_unlimited_natural_default_value(self, value=None):
"""Sets the default value for this parameter to the specified unlimited natural value."""
raise NotImplementedError(
"operation set_unlimited_natural_default_value(...) not yet implemented"
)
def unset_default(self):
raise NotImplementedError("operation unset_default(...) not yet implemented")
def get_default(self):
"""Derivation for Parameter::/default
result = (if self.type = String then defaultValue.stringValue() else null endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation get_default(...) not yet implemented")
class ReceptionMixin(object):
"""User defined mixin class for Reception."""
def __init__(self, signal=None, **kwargs):
super(ReceptionMixin, self).__init__(**kwargs)
def same_name_as_signal(self, diagnostics=None, context=None):
"""A Reception has the same name as its signal
name = signal.name"""
raise NotImplementedError(
"operation same_name_as_signal(...) not yet implemented"
)
def same_structure_as_signal(self, diagnostics=None, context=None):
"""A Reception's parameters match the ownedAttributes of its signal by name, type, and multiplicity
signal.ownedAttribute->size() = ownedParameter->size() and
Sequence{1..signal.ownedAttribute->size()}->forAll( i |
ownedParameter->at(i).direction = ParameterDirectionKind::_'in' and
ownedParameter->at(i).name = signal.ownedAttribute->at(i).name and
ownedParameter->at(i).type = signal.ownedAttribute->at(i).type and
ownedParameter->at(i).lowerBound() = signal.ownedAttribute->at(i).lowerBound() and
ownedParameter->at(i).upperBound() = signal.ownedAttribute->at(i).upperBound()
)"""
raise NotImplementedError(
"operation same_structure_as_signal(...) not yet implemented"
)
class StructuralFeatureMixin(object):
"""User defined mixin class for StructuralFeature."""
def __init__(self, isReadOnly=None, **kwargs):
super(StructuralFeatureMixin, self).__init__(**kwargs)
class InstanceSpecificationMixin(object):
"""User defined mixin class for InstanceSpecification."""
def __init__(self, classifier=None, slot=None, specification=None, **kwargs):
super(InstanceSpecificationMixin, self).__init__(**kwargs)
def deployment_artifact(self, diagnostics=None, context=None):
"""An InstanceSpecification can act as a DeployedArtifact if it represents an instance of an Artifact.
deploymentForArtifact->notEmpty() implies classifier->exists(oclIsKindOf(Artifact))"""
raise NotImplementedError(
"operation deployment_artifact(...) not yet implemented"
)
def structural_feature(self, diagnostics=None, context=None):
"""No more than one slot in an InstanceSpecification may have the same definingFeature.
classifier->forAll(c | (c.allSlottableFeatures()->forAll(f | slot->select(s | s.definingFeature = f)->size() <= 1)))"""
raise NotImplementedError(
"operation structural_feature(...) not yet implemented"
)
def defining_feature(self, diagnostics=None, context=None):
"""The definingFeature of each slot is a StructuralFeature related to a classifier of the InstanceSpecification, including direct attributes, inherited attributes, private attributes in generalizations, and memberEnds of Associations, but excluding redefined StructuralFeatures.
slot->forAll(s | classifier->exists (c | c.allSlottableFeatures()->includes (s.definingFeature)))"""
raise NotImplementedError("operation defining_feature(...) not yet implemented")
def deployment_target(self, diagnostics=None, context=None):
"""An InstanceSpecification can act as a DeploymentTarget if it represents an instance of a Node and functions as a part in the internal structure of an encompassing Node.
deployment->notEmpty() implies classifier->exists(node | node.oclIsKindOf(Node) and Node.allInstances()->exists(n | n.part->exists(p | p.type = node)))"""
raise NotImplementedError(
"operation deployment_target(...) not yet implemented"
)
class ExpressionMixin(object):
"""User defined mixin class for Expression."""
def __init__(self, operand=None, symbol=None, **kwargs):
super(ExpressionMixin, self).__init__(**kwargs)
class DerivedInput(EDerivedCollection):
pass
class DerivedOutput(EDerivedCollection):
pass
class ActionMixin(object):
"""User defined mixin class for Action."""
@property
def context(self):
raise NotImplementedError("Missing implementation for context")
def __init__(
self,
context=None,
input=None,
isLocallyReentrant=None,
localPostcondition=None,
localPrecondition=None,
output=None,
**kwargs,
):
super(ActionMixin, self).__init__(**kwargs)
def get_context(self):
"""The derivation for the context property.
result = (let behavior: Behavior = self.containingBehavior() in
if behavior=null then null
else if behavior._'context' = null then behavior
else behavior._'context'
endif
endif)
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation get_context(...) not yet implemented")
def all_actions(self):
"""Return this Action and all Actions contained directly or indirectly in it. By default only the Action itself is returned, but the operation is overridden for StructuredActivityNodes.
result = (self->asSet())
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation all_actions(...) not yet implemented")
def all_owned_nodes(self):
"""Returns all the ActivityNodes directly or indirectly owned by this Action. This includes at least all the Pins of the Action.
result = (input.oclAsType(Pin)->asSet()->union(output->asSet()))
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation all_owned_nodes(...) not yet implemented")
def containing_behavior(self):
"""result = (if inStructuredNode<>null then inStructuredNode.containingBehavior()
else if activity<>null then activity
else interaction
endif
endif
)
<p>From package UML::Actions.</p>"""
raise NotImplementedError(
"operation containing_behavior(...) not yet implemented"
)
class ObjectNodeMixin(object):
"""User defined mixin class for ObjectNode."""
def __init__(
self,
inState=None,
isControlType=None,
ordering=None,
selection=None,
upperBound=None,
**kwargs,
):
super(ObjectNodeMixin, self).__init__(**kwargs)
def input_output_parameter(self, diagnostics=None, context=None):
"""A selection Behavior has one input Parameter and one output Parameter. The input Parameter must have the same type as or a supertype of the type of ObjectNode, be non-unique, and have multiplicity 0..*. The output Parameter must be the same or a subtype of the type of ObjectNode. The Behavior cannot have side effects.
selection<>null implies
selection.inputParameters()->size()=1 and
selection.inputParameters()->forAll(p | not p.isUnique and p.is(0,*) and self.type.conformsTo(p.type)) and
selection.outputParameters()->size()=1 and
selection.inputParameters()->forAll(p | self.type.conformsTo(p.type))"""
raise NotImplementedError(
"operation input_output_parameter(...) not yet implemented"
)
def selection_behavior(self, diagnostics=None, context=None):
"""If an ObjectNode has a selection Behavior, then the ordering of the object node is ordered, and vice versa.
(selection<>null) = (ordering=ObjectNodeOrderingKind::ordered)"""
raise NotImplementedError(
"operation selection_behavior(...) not yet implemented"
)
def object_flow_edges(self, diagnostics=None, context=None):
"""If isControlType=false, the ActivityEdges incoming to or outgoing from an ObjectNode must all be ObjectFlows.
(not isControlType) implies incoming->union(outgoing)->forAll(oclIsKindOf(ObjectFlow))"""
raise NotImplementedError(
"operation object_flow_edges(...) not yet implemented"
)
class VariableMixin(object):
"""User defined mixin class for Variable."""
def __init__(self, activityScope=None, scope=None, **kwargs):
super(VariableMixin, self).__init__(**kwargs)
def is_accessible_by(self, a=None):
"""A Variable is accessible by Actions within its scope (the Activity or StructuredActivityNode that owns it).
result = (if scope<>null then scope.allOwnedNodes()->includes(a)
else a.containingActivity()=activityScope
endif)
<p>From package UML::Activities.</p>"""
raise NotImplementedError("operation is_accessible_by(...) not yet implemented")
class FinalNodeMixin(object):
"""User defined mixin class for FinalNode."""
def __init__(self, **kwargs):
super(FinalNodeMixin, self).__init__(**kwargs)
def no_outgoing_edges(self, diagnostics=None, context=None):
"""A FinalNode has no outgoing ActivityEdges.
outgoing->isEmpty()"""
raise NotImplementedError(
"operation no_outgoing_edges(...) not yet implemented"
)
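
# Illustrative sketch only -- not part of the generated API. The
# `no_outgoing_edges` constraint above translates almost directly to Python,
# assuming a FinalNode-like object whose `outgoing` attribute is a collection
# of ActivityEdges.
def _sketch_final_node_has_no_outgoing_edges(final_node):
    """Return True when the FinalNode owns no outgoing ActivityEdges."""
    return len(final_node.outgoing) == 0
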
class DecisionNodeMixin(object):
"""User defined mixin class for DecisionNode."""
def __init__(self, decisionInput=None, decisionInputFlow=None, **kwargs):
super(DecisionNodeMixin, self).__init__(**kwargs)
def zero_input_parameters(self, diagnostics=None, context=None):
"""If the DecisionNode has no decisionInputFlow and an incoming ControlFlow, then any decisionInput Behavior has no in parameters.
(decisionInput<>null and decisionInputFlow=null and incoming->exists(oclIsKindOf(ControlFlow))) implies
decisionInput.inputParameters()->isEmpty()"""
raise NotImplementedError(
"operation zero_input_parameters(...) not yet implemented"
)
def edges(self, diagnostics=None, context=None):
"""The ActivityEdges incoming to and outgoing from a DecisionNode, other than the decisionInputFlow (if any), must be either all ObjectFlows or all ControlFlows.
let allEdges: Set(ActivityEdge) = incoming->union(outgoing) in
let allRelevantEdges: Set(ActivityEdge) = if decisionInputFlow->notEmpty() then allEdges->excluding(decisionInputFlow) else allEdges endif in
allRelevantEdges->forAll(oclIsKindOf(ControlFlow)) or allRelevantEdges->forAll(oclIsKindOf(ObjectFlow))
"""
raise NotImplementedError("operation edges(...) not yet implemented")
def decision_input_flow_incoming(self, diagnostics=None, context=None):
"""The decisionInputFlow of a DecisionNode must be an incoming ActivityEdge of the DecisionNode.
incoming->includes(decisionInputFlow)"""
raise NotImplementedError(
"operation decision_input_flow_incoming(...) not yet implemented"
)
def two_input_parameters(self, diagnostics=None, context=None):
"""If the DecisionNode has a decisionInputFlow and an second incoming ObjectFlow, then any decisionInput has two in Parameters, the first of which has a type that is the same as or a supertype of the type of object tokens offered on the non-decisionInputFlow and the second of which has a type that is the same as or a supertype of the type of object tokens offered on the decisionInputFlow.
(decisionInput<>null and decisionInputFlow<>null and incoming->forAll(oclIsKindOf(ObjectFlow))) implies
decisionInput.inputParameters()->size()=2"""
raise NotImplementedError(
"operation two_input_parameters(...) not yet implemented"
)
def incoming_outgoing_edges(self, diagnostics=None, context=None):
"""A DecisionNode has one or two incoming ActivityEdges and at least one outgoing ActivityEdge.
(incoming->size() = 1 or incoming->size() = 2) and outgoing->size() > 0"""
raise NotImplementedError(
"operation incoming_outgoing_edges(...) not yet implemented"
)
def incoming_control_one_input_parameter(self, diagnostics=None, context=None):
"""If the DecisionNode has a decisionInputFlow and an incoming ControlFlow, then any decisionInput Behavior has one in Parameter whose type is the same as or a supertype of the type of object tokens offered on the decisionInputFlow.
(decisionInput<>null and decisionInputFlow<>null and incoming->exists(oclIsKindOf(ControlFlow))) implies
decisionInput.inputParameters()->size()=1"""
raise NotImplementedError(
"operation incoming_control_one_input_parameter(...) not yet implemented"
)
def parameters(self, diagnostics=None, context=None):
"""A decisionInput Behavior has no out parameters, no inout parameters, and one return parameter.
decisionInput<>null implies
(decisionInput.ownedParameter->forAll(par |
par.direction <> ParameterDirectionKind::out and
par.direction <> ParameterDirectionKind::inout ) and
decisionInput.ownedParameter->one(par |
par.direction <> ParameterDirectionKind::return))"""
raise NotImplementedError("operation parameters(...) not yet implemented")
def incoming_object_one_input_parameter(self, diagnostics=None, context=None):
"""If the DecisionNode has no decisionInputFlow and an incoming ObjectFlow, then any decisionInput Behavior has one in Parameter whose type is the same as or a supertype of the type of object tokens offered on the incoming ObjectFlow.
(decisionInput<>null and decisionInputFlow=null and incoming->forAll(oclIsKindOf(ObjectFlow))) implies
decisionInput.inputParameters()->size()=1"""
raise NotImplementedError(
"operation incoming_object_one_input_parameter(...) not yet implemented"
)
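
# Illustrative sketch only -- not part of the generated API. A possible
# reading of the `edges` constraint above: excluding the decisionInputFlow,
# the incoming and outgoing edges of a DecisionNode must be homogeneous
# (all ControlFlows or all ObjectFlows). `control_flow_type` and
# `object_flow_type` are assumptions standing in for the concrete
# ControlFlow/ObjectFlow classes of this package.
def _sketch_decision_node_edges_are_homogeneous(
    decision_node, control_flow_type, object_flow_type
):
    """Return True when all relevant edges share one flow kind."""
    all_edges = list(decision_node.incoming) + list(decision_node.outgoing)
    relevant = [e for e in all_edges if e is not decision_node.decisionInputFlow]
    return all(isinstance(e, control_flow_type) for e in relevant) or all(
        isinstance(e, object_flow_type) for e in relevant
    )
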
class ForkNodeMixin(object):
"""User defined mixin class for ForkNode."""
def __init__(self, **kwargs):
super(ForkNodeMixin, self).__init__(**kwargs)
def edges(self, diagnostics=None, context=None):
"""The ActivityEdges incoming to and outgoing from a ForkNode must be either all ObjectFlows or all ControlFlows.
let allEdges : Set(ActivityEdge) = incoming->union(outgoing) in
allEdges->forAll(oclIsKindOf(ControlFlow)) or allEdges->forAll(oclIsKindOf(ObjectFlow))"""
raise NotImplementedError("operation edges(...) not yet implemented")
def one_incoming_edge(self, diagnostics=None, context=None):
"""A ForkNode has one incoming ActivityEdge.
incoming->size()=1"""
raise NotImplementedError(
"operation one_incoming_edge(...) not yet implemented"
)
class InitialNodeMixin(object):
"""User defined mixin class for InitialNode."""
def __init__(self, **kwargs):
super(InitialNodeMixin, self).__init__(**kwargs)
def no_incoming_edges(self, diagnostics=None, context=None):
"""An InitialNode has no incoming ActivityEdges.
incoming->isEmpty()"""
raise NotImplementedError(
"operation no_incoming_edges(...) not yet implemented"
)
def control_edges(self, diagnostics=None, context=None):
"""All the outgoing ActivityEdges from an InitialNode must be ControlFlows.
outgoing->forAll(oclIsKindOf(ControlFlow))"""
raise NotImplementedError("operation control_edges(...) not yet implemented")
class JoinNodeMixin(object):
"""User defined mixin class for JoinNode."""
def __init__(self, isCombineDuplicate=None, joinSpec=None, **kwargs):
super(JoinNodeMixin, self).__init__(**kwargs)
def one_outgoing_edge(self, diagnostics=None, context=None):
"""A JoinNode has one outgoing ActivityEdge.
outgoing->size() = 1"""
raise NotImplementedError(
"operation one_outgoing_edge(...) not yet implemented"
)
def incoming_object_flow(self, diagnostics=None, context=None):
"""If one of the incoming ActivityEdges of a JoinNode is an ObjectFlow, then its outgoing ActivityEdge must be an ObjectFlow. Otherwise its outgoing ActivityEdge must be a ControlFlow.
if incoming->exists(oclIsKindOf(ObjectFlow)) then outgoing->forAll(oclIsKindOf(ObjectFlow))
else outgoing->forAll(oclIsKindOf(ControlFlow))
endif"""
raise NotImplementedError(
"operation incoming_object_flow(...) not yet implemented"
)
class MergeNodeMixin(object):
"""User defined mixin class for MergeNode."""
def __init__(self, **kwargs):
super(MergeNodeMixin, self).__init__(**kwargs)
def one_outgoing_edge(self, diagnostics=None, context=None):
"""A MergeNode has one outgoing ActivityEdge.
outgoing->size()=1"""
raise NotImplementedError(
"operation one_outgoing_edge(...) not yet implemented"
)
def edges(self, diagnostics=None, context=None):
"""The ActivityEdges incoming to and outgoing from a MergeNode must be either all ObjectFlows or all ControlFlows.
let allEdges : Set(ActivityEdge) = incoming->union(outgoing) in
allEdges->forAll(oclIsKindOf(ControlFlow)) or allEdges->forAll(oclIsKindOf(ObjectFlow))"""
raise NotImplementedError("operation edges(...) not yet implemented")
class InstanceValueMixin(object):
"""User defined mixin class for InstanceValue."""
def __init__(self, instance=None, **kwargs):
super(InstanceValueMixin, self).__init__(**kwargs)
class AnyReceiveEventMixin(object):
"""User defined mixin class for AnyReceiveEvent."""
def __init__(self, **kwargs):
super(AnyReceiveEventMixin, self).__init__(**kwargs)
class CallEventMixin(object):
"""User defined mixin class for CallEvent."""
def __init__(self, operation=None, **kwargs):
super(CallEventMixin, self).__init__(**kwargs)
class SignalEventMixin(object):
"""User defined mixin class for SignalEvent."""
def __init__(self, signal=None, **kwargs):
super(SignalEventMixin, self).__init__(**kwargs)
class TimeExpressionMixin(object):
"""User defined mixin class for TimeExpression."""
def __init__(self, expr=None, observation=None, **kwargs):
super(TimeExpressionMixin, self).__init__(**kwargs)
def no_expr_requires_observation(self, diagnostics=None, context=None):
"""If a TimeExpression has no expr, then it must have a single observation that is a TimeObservation.
expr = null implies (observation->size() = 1 and observation->forAll(oclIsKindOf(TimeObservation)))"""
raise NotImplementedError(
"operation no_expr_requires_observation(...) not yet implemented"
)
class InformationFlowMixin(object):
"""User defined mixin class for InformationFlow."""
def __init__(
self,
conveyed=None,
informationSource=None,
informationTarget=None,
realization=None,
realizingActivityEdge=None,
realizingConnector=None,
realizingMessage=None,
**kwargs,
):
super(InformationFlowMixin, self).__init__(**kwargs)
def must_conform(self, diagnostics=None, context=None):
"""The sources and targets of the information flow must conform to the sources and targets or conversely the targets and sources of the realization relationships."""
raise NotImplementedError("operation must_conform(...) not yet implemented")
def sources_and_targets_kind(self, diagnostics=None, context=None):
"""The sources and targets of the information flow can only be one of the following kind: Actor, Node, UseCase, Artifact, Class, Component, Port, Property, Interface, Package, ActivityNode, ActivityPartition,
Behavior and InstanceSpecification except when its classifier is a relationship (i.e. it represents a link).
(self.informationSource->forAll( sis |
oclIsKindOf(Actor) or oclIsKindOf(Node) or oclIsKindOf(UseCase) or oclIsKindOf(Artifact) or
oclIsKindOf(Class) or oclIsKindOf(Component) or oclIsKindOf(Port) or oclIsKindOf(Property) or
oclIsKindOf(Interface) or oclIsKindOf(Package) or oclIsKindOf(ActivityNode) or oclIsKindOf(ActivityPartition) or
(oclIsKindOf(InstanceSpecification) and not sis.oclAsType(InstanceSpecification).classifier->exists(oclIsKindOf(Relationship)))))
and
(self.informationTarget->forAll( sit |
oclIsKindOf(Actor) or oclIsKindOf(Node) or oclIsKindOf(UseCase) or oclIsKindOf(Artifact) or
oclIsKindOf(Class) or oclIsKindOf(Component) or oclIsKindOf(Port) or oclIsKindOf(Property) or
oclIsKindOf(Interface) or oclIsKindOf(Package) or oclIsKindOf(ActivityNode) or oclIsKindOf(ActivityPartition) or
(oclIsKindOf(InstanceSpecification) and not sit.oclAsType(InstanceSpecification).classifier->exists(oclIsKindOf(Relationship)))))"""
raise NotImplementedError(
"operation sources_and_targets_kind(...) not yet implemented"
)
def convey_classifiers(self, diagnostics=None, context=None):
"""An information flow can only convey classifiers that are allowed to represent an information item.
self.conveyed->forAll(oclIsKindOf(Class) or oclIsKindOf(Interface)
or oclIsKindOf(InformationItem) or oclIsKindOf(Signal) or oclIsKindOf(Component))"""
raise NotImplementedError(
"operation convey_classifiers(...) not yet implemented"
)
class DestructionOccurrenceSpecificationMixin(object):
"""User defined mixin class for DestructionOccurrenceSpecification."""
def __init__(self, **kwargs):
super(DestructionOccurrenceSpecificationMixin, self).__init__(**kwargs)
def no_occurrence_specifications_below(self, diagnostics=None, context=None):
"""No other OccurrenceSpecifications on a given Lifeline in an InteractionOperand may appear below a DestructionOccurrenceSpecification.
let o : InteractionOperand = enclosingOperand in o->notEmpty() and
let peerEvents : OrderedSet(OccurrenceSpecification) = covered.events->select(enclosingOperand = o)
in peerEvents->last() = self"""
raise NotImplementedError(
"operation no_occurrence_specifications_below(...) not yet implemented"
)
class FinalStateMixin(object):
"""User defined mixin class for FinalState."""
def __init__(self, **kwargs):
super(FinalStateMixin, self).__init__(**kwargs)
def no_exit_behavior(self, diagnostics=None, context=None):
"""A FinalState has no exit Behavior.
exit->isEmpty()"""
raise NotImplementedError("operation no_exit_behavior(...) not yet implemented")
def no_outgoing_transitions(self, diagnostics=None, context=None):
"""A FinalState cannot have any outgoing Transitions.
outgoing->size() = 0"""
raise NotImplementedError(
"operation no_outgoing_transitions(...) not yet implemented"
)
def no_regions(self, diagnostics=None, context=None):
"""A FinalState cannot have Regions.
region->size() = 0"""
raise NotImplementedError("operation no_regions(...) not yet implemented")
def cannot_reference_submachine(self, diagnostics=None, context=None):
"""A FinalState cannot reference a submachine.
submachine->isEmpty()"""
raise NotImplementedError(
"operation cannot_reference_submachine(...) not yet implemented"
)
def no_entry_behavior(self, diagnostics=None, context=None):
"""A FinalState has no entry Behavior.
entry->isEmpty()"""
raise NotImplementedError(
"operation no_entry_behavior(...) not yet implemented"
)
def no_state_behavior(self, diagnostics=None, context=None):
"""A FinalState has no state (doActivity) Behavior.
doActivity->isEmpty()"""
raise NotImplementedError(
"operation no_state_behavior(...) not yet implemented"
)
class DurationMixin(object):
"""User defined mixin class for Duration."""
def __init__(self, expr=None, observation=None, **kwargs):
super(DurationMixin, self).__init__(**kwargs)
def no_expr_requires_observation(self, diagnostics=None, context=None):
"""If a Duration has no expr, then it must have a single observation that is a DurationObservation.
expr = null implies (observation->size() = 1 and observation->forAll(oclIsKindOf(DurationObservation)))"""
raise NotImplementedError(
"operation no_expr_requires_observation(...) not yet implemented"
)
class DurationConstraintMixin(object):
"""User defined mixin class for DurationConstraint."""
def __init__(self, firstEvent=None, **kwargs):
super(DurationConstraintMixin, self).__init__(**kwargs)
def first_event_multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of firstEvent must be 2 if the multiplicity of constrainedElement is 2. Otherwise the multiplicity of firstEvent is 0.
if (constrainedElement->size() = 2)
then (firstEvent->size() = 2) else (firstEvent->size() = 0)
endif"""
raise NotImplementedError(
"operation first_event_multiplicity(...) not yet implemented"
)
def has_one_or_two_constrained_elements(self, diagnostics=None, context=None):
"""A DurationConstraint has either one or two constrainedElements.
constrainedElement->size() = 1 or constrainedElement->size()=2"""
raise NotImplementedError(
"operation has_one_or_two_constrained_elements(...) not yet implemented"
)
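
# Illustrative sketch only -- not part of the generated API. The two
# DurationConstraint constraints above can be checked together on a
# constraint-like object exposing `constrainedElement` and `firstEvent`
# collections (names taken from the OCL in the docstrings).
def _sketch_duration_constraint_is_well_formed(constraint):
    """Return True when the constrainedElement/firstEvent sizes agree."""
    n_elements = len(constraint.constrainedElement)
    n_first_events = len(constraint.firstEvent)
    if n_elements not in (1, 2):
        return False
    expected_first_events = 2 if n_elements == 2 else 0
    return n_first_events == expected_first_events
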
class IntervalMixin(object):
"""User defined mixin class for Interval."""
def __init__(self, max=None, min=None, **kwargs):
super(IntervalMixin, self).__init__(**kwargs)
class LiteralSpecificationMixin(object):
"""User defined mixin class for LiteralSpecification."""
def __init__(self, **kwargs):
super(LiteralSpecificationMixin, self).__init__(**kwargs)
class TimeConstraintMixin(object):
"""User defined mixin class for TimeConstraint."""
def __init__(self, firstEvent=None, **kwargs):
super(TimeConstraintMixin, self).__init__(**kwargs)
def has_one_constrained_element(self, diagnostics=None, context=None):
"""A TimeConstraint has one constrainedElement.
constrainedElement->size() = 1"""
raise NotImplementedError(
"operation has_one_constrained_element(...) not yet implemented"
)
class ProfileMixin(object):
"""User defined mixin class for Profile."""
def __init__(self, metaclassReference=None, metamodelReference=None, **kwargs):
super(ProfileMixin, self).__init__(**kwargs)
def metaclass_reference_not_specialized(self, diagnostics=None, context=None):
"""An element imported as a metaclassReference is not specialized or generalized in a Profile.
metaclassReference.importedElement->
select(c | c.oclIsKindOf(Classifier) and
(c.oclAsType(Classifier).allParents()->collect(namespace)->includes(self)))->isEmpty()
and
packagedElement->
select(oclIsKindOf(Classifier))->collect(oclAsType(Classifier).allParents())->
intersection(metaclassReference.importedElement->select(oclIsKindOf(Classifier))->collect(oclAsType(Classifier)))->isEmpty()"""
raise NotImplementedError(
"operation metaclass_reference_not_specialized(...) not yet implemented"
)
def references_same_metamodel(self, diagnostics=None, context=None):
"""All elements imported either as metaclassReferences or through metamodelReferences are members of the same base reference metamodel.
metamodelReference.importedPackage.elementImport.importedElement.allOwningPackages()->
union(metaclassReference.importedElement.allOwningPackages() )->notEmpty()"""
raise NotImplementedError(
"operation references_same_metamodel(...) not yet implemented"
)
def create(self, classifier=None):
"""Creates and returns an instance of (the Ecore representation of) the specified classifier defined in this profile."""
raise NotImplementedError("operation create(...) not yet implemented")
def define(self, options=None, diagnostics=None, context=None):
"""Defines this profile by (re)creating Ecore representations of its current contents, using the specified options, diagnostics, and context."""
        # Imported lazily to avoid a circular import at module load time.
        from .profile_utils import UML_20_URI, define_profile

        # Reuse the UML 2.x EAnnotation if the profile was defined before,
        # otherwise create a fresh one.
        eannotation = self.getEAnnotation(UML_20_URI)
        if not eannotation:
            eannotation = ecore.EAnnotation(source=UML_20_URI)
        # Store the (re)created Ecore representation of the profile contents.
        eannotation.contents.append(define_profile(self))
        self.eAnnotations.append(eannotation)
    def get_definition(self, namedElement=None):
        """Retrieves the current definition (Ecore representation) of this profile, or of the specified named element in this profile when namedElement is given."""
        raise NotImplementedError("operation get_definition(...) not yet implemented")
def get_owned_extensions(self, requiredOnly=None):
"""Retrieves the extensions owned by this profile, excluding non-required extensions if indicated."""
raise NotImplementedError(
"operation get_owned_extensions(...) not yet implemented"
)
def get_referenced_metaclasses(self):
"""Retrieves the metaclasses referenced by this profile."""
raise NotImplementedError(
"operation get_referenced_metaclasses(...) not yet implemented"
)
def get_referenced_metamodels(self):
"""Retrieves the metamodels referenced by this profile."""
raise NotImplementedError(
"operation get_referenced_metamodels(...) not yet implemented"
)
def is_defined(self):
"""Determines whether this profile is defined."""
raise NotImplementedError("operation is_defined(...) not yet implemented")
class DeploymentMixin(object):
"""User defined mixin class for Deployment."""
def __init__(
self, configuration=None, deployedArtifact=None, location=None, **kwargs
):
super(DeploymentMixin, self).__init__(**kwargs)
class AbstractionMixin(object):
"""User defined mixin class for Abstraction."""
def __init__(self, mapping=None, **kwargs):
super(AbstractionMixin, self).__init__(**kwargs)
class EnumerationLiteralMixin(object):
"""User defined mixin class for EnumerationLiteral."""
def __init__(self, enumeration=None, **kwargs):
super(EnumerationLiteralMixin, self).__init__(**kwargs)
def get_classifiers(self):
raise NotImplementedError("operation get_classifiers(...) not yet implemented")
def get_classifier(self):
"""Derivation of Enumeration::/classifier
result = (enumeration)
<p>From package UML::SimpleClassifiers.</p>"""
raise NotImplementedError("operation get_classifier(...) not yet implemented")
class ModelMixin(object):
"""User defined mixin class for Model."""
def __init__(self, viewpoint=None, **kwargs):
super(ModelMixin, self).__init__(**kwargs)
def is_metamodel(self):
"""Determines whether this model is a metamodel."""
raise NotImplementedError("operation is_metamodel(...) not yet implemented")
class UsageMixin(object):
"""User defined mixin class for Usage."""
def __init__(self, **kwargs):
super(UsageMixin, self).__init__(**kwargs)
class ValueSpecificationActionMixin(object):
"""User defined mixin class for ValueSpecificationAction."""
def __init__(self, result=None, value=None, **kwargs):
super(ValueSpecificationActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 1..1
result.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def compatible_type(self, diagnostics=None, context=None):
"""The type of the value ValueSpecification must conform to the type of the result OutputPin.
value.type.conformsTo(result.type)"""
raise NotImplementedError("operation compatible_type(...) not yet implemented")
class VariableActionMixin(object):
"""User defined mixin class for VariableAction."""
def __init__(self, variable=None, **kwargs):
super(VariableActionMixin, self).__init__(**kwargs)
def scope_of_variable(self, diagnostics=None, context=None):
"""The VariableAction must be in the scope of the variable.
variable.isAccessibleBy(self)"""
raise NotImplementedError(
"operation scope_of_variable(...) not yet implemented"
)
class LinkActionMixin(object):
"""User defined mixin class for LinkAction."""
def __init__(self, endData=None, inputValue=None, **kwargs):
super(LinkActionMixin, self).__init__(**kwargs)
def same_pins(self, diagnostics=None, context=None):
"""The inputValue InputPins is the same as the union of all the InputPins referenced by the endData.
inputValue->asBag()=endData.allPins()"""
raise NotImplementedError("operation same_pins(...) not yet implemented")
def same_association(self, diagnostics=None, context=None):
"""The ends of the endData must all be from the same Association and include all and only the memberEnds of that association.
endData.end = self.association().memberEnd->asBag()"""
raise NotImplementedError("operation same_association(...) not yet implemented")
def not_static(self, diagnostics=None, context=None):
"""The ends of the endData must not be static.
endData->forAll(not end.isStatic)"""
raise NotImplementedError("operation not_static(...) not yet implemented")
def association(self):
"""Returns the Association acted on by this LinkAction.
result = (endData->asSequence()->first().end.association)
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation association(...) not yet implemented")
class StructuralFeatureActionMixin(object):
"""User defined mixin class for StructuralFeatureAction."""
def __init__(self, object=None, structuralFeature=None, **kwargs):
super(StructuralFeatureActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin must be 1..1.
object.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def object_type(self, diagnostics=None, context=None):
"""The structuralFeature must either be an owned or inherited feature of the type of the object InputPin, or it must be an owned end of a binary Association whose opposite end had as a type to which the type of the object InputPin conforms.
object.type.oclAsType(Classifier).allFeatures()->includes(structuralFeature) or
object.type.conformsTo(structuralFeature.oclAsType(Property).opposite.type)"""
raise NotImplementedError("operation object_type(...) not yet implemented")
def visibility(self, diagnostics=None, context=None):
"""The visibility of the structuralFeature must allow access from the object performing the ReadStructuralFeatureAction.
structuralFeature.visibility = VisibilityKind::public or
_'context'.allFeatures()->includes(structuralFeature) or
structuralFeature.visibility=VisibilityKind::protected and
_'context'.conformsTo(structuralFeature.oclAsType(Property).opposite.type.oclAsType(Classifier))"""
raise NotImplementedError("operation visibility(...) not yet implemented")
def not_static(self, diagnostics=None, context=None):
"""The structuralFeature must not be static.
not structuralFeature.isStatic"""
raise NotImplementedError("operation not_static(...) not yet implemented")
def one_featuring_classifier(self, diagnostics=None, context=None):
"""The structuralFeature must have exactly one featuringClassifier.
structuralFeature.featuringClassifier->size() = 1"""
raise NotImplementedError(
"operation one_featuring_classifier(...) not yet implemented"
)
class AcceptEventActionMixin(object):
"""User defined mixin class for AcceptEventAction."""
def __init__(self, isUnmarshall=None, result=None, trigger=None, **kwargs):
super(AcceptEventActionMixin, self).__init__(**kwargs)
def one_output_pin(self, diagnostics=None, context=None):
"""If isUnmarshall=false and any of the triggers are for SignalEvents or TimeEvents, there must be exactly one result OutputPin with multiplicity 1..1.
not isUnmarshall and trigger->exists(event.oclIsKindOf(SignalEvent) or event.oclIsKindOf(TimeEvent)) implies
output->size() = 1 and output->first().is(1,1)"""
raise NotImplementedError("operation one_output_pin(...) not yet implemented")
def no_input_pins(self, diagnostics=None, context=None):
"""AcceptEventActions may have no input pins.
input->size() = 0"""
raise NotImplementedError("operation no_input_pins(...) not yet implemented")
def no_output_pins(self, diagnostics=None, context=None):
"""There are no OutputPins if the trigger events are only ChangeEvents and/or CallEvents when this action is an instance of AcceptEventAction and not an instance of a descendant of AcceptEventAction (such as AcceptCallAction).
(self.oclIsTypeOf(AcceptEventAction) and
(trigger->forAll(event.oclIsKindOf(ChangeEvent) or
event.oclIsKindOf(CallEvent))))
implies output->size() = 0"""
raise NotImplementedError("operation no_output_pins(...) not yet implemented")
def unmarshall_signal_events(self, diagnostics=None, context=None):
"""If isUnmarshall is true (and this is not an AcceptCallAction), there must be exactly one trigger, which is for a SignalEvent. The number of result output pins must be the same as the number of attributes of the signal. The type and ordering of each result output pin must be the same as the corresponding attribute of the signal. The multiplicity of each result output pin must be compatible with the multiplicity of the corresponding attribute.
isUnmarshall and self.oclIsTypeOf(AcceptEventAction) implies
trigger->size()=1 and
trigger->asSequence()->first().event.oclIsKindOf(SignalEvent) and
let attribute: OrderedSet(Property) = trigger->asSequence()->first().event.oclAsType(SignalEvent).signal.allAttributes() in
attribute->size()>0 and result->size() = attribute->size() and
Sequence{1..result->size()}->forAll(i |
result->at(i).type = attribute->at(i).type and
result->at(i).isOrdered = attribute->at(i).isOrdered and
result->at(i).includesMultiplicity(attribute->at(i)))"""
raise NotImplementedError(
"operation unmarshall_signal_events(...) not yet implemented"
)
def conforming_type(self, diagnostics=None, context=None):
"""If isUnmarshall=false and all the triggers are for SignalEvents, then the type of the single result OutputPin must either be null or all the signals must conform to it.
not isUnmarshall implies
result->isEmpty() or
let type: Type = result->first().type in
type=null or
(trigger->forAll(event.oclIsKindOf(SignalEvent)) and
trigger.event.oclAsType(SignalEvent).signal->forAll(s | s.conformsTo(type)))"""
raise NotImplementedError("operation conforming_type(...) not yet implemented")
class InvocationActionMixin(object):
"""User defined mixin class for InvocationAction."""
def __init__(self, argument=None, onPort=None, **kwargs):
super(InvocationActionMixin, self).__init__(**kwargs)
class ClearAssociationActionMixin(object):
"""User defined mixin class for ClearAssociationAction."""
def __init__(self, association=None, object=None, **kwargs):
super(ClearAssociationActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1.
object.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def same_type(self, diagnostics=None, context=None):
"""The type of the InputPin must conform to the type of at least one of the memberEnds of the association.
association.memberEnd->exists(self.object.type.conformsTo(type))"""
raise NotImplementedError("operation same_type(...) not yet implemented")
class CreateObjectActionMixin(object):
"""User defined mixin class for CreateObjectAction."""
def __init__(self, classifier=None, result=None, **kwargs):
super(CreateObjectActionMixin, self).__init__(**kwargs)
def classifier_not_abstract(self, diagnostics=None, context=None):
"""The classifier cannot be abstract.
not classifier.isAbstract"""
raise NotImplementedError(
"operation classifier_not_abstract(...) not yet implemented"
)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 1..1.
result.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def classifier_not_association_class(self, diagnostics=None, context=None):
"""The classifier cannot be an AssociationClass.
not classifier.oclIsKindOf(AssociationClass)"""
raise NotImplementedError(
"operation classifier_not_association_class(...) not yet implemented"
)
def same_type(self, diagnostics=None, context=None):
"""The type of the result OutputPin must be the same as the classifier of the CreateObjectAction.
result.type = classifier"""
raise NotImplementedError("operation same_type(...) not yet implemented")
class DestroyObjectActionMixin(object):
"""User defined mixin class for DestroyObjectAction."""
def __init__(
self, isDestroyLinks=None, isDestroyOwnedObjects=None, target=None, **kwargs
):
super(DestroyObjectActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the targe IinputPin is 1..1.
target.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def no_type(self, diagnostics=None, context=None):
"""The target InputPin has no type.
target.type= null"""
raise NotImplementedError("operation no_type(...) not yet implemented")
class ExpansionNodeMixin(object):
"""User defined mixin class for ExpansionNode."""
def __init__(self, regionAsInput=None, regionAsOutput=None, **kwargs):
super(ExpansionNodeMixin, self).__init__(**kwargs)
def region_as_input_or_output(self, diagnostics=None, context=None):
"""One of regionAsInput or regionAsOutput must be non-empty, but not both.
regionAsInput->notEmpty() xor regionAsOutput->notEmpty()"""
raise NotImplementedError(
"operation region_as_input_or_output(...) not yet implemented"
)
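
# Illustrative sketch only -- not part of the generated API. The
# `region_as_input_or_output` constraint above is a plain exclusive-or,
# assuming regionAsInput/regionAsOutput are optional single references as
# suggested by the constructor defaults.
def _sketch_expansion_node_region_is_exclusive(node):
    """Return True when exactly one of regionAsInput/regionAsOutput is set."""
    has_input_region = node.regionAsInput is not None
    has_output_region = node.regionAsOutput is not None
    return has_input_region != has_output_region
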
class OpaqueActionMixin(object):
"""User defined mixin class for OpaqueAction."""
def __init__(
self, body=None, inputValue=None, language=None, outputValue=None, **kwargs
):
super(OpaqueActionMixin, self).__init__(**kwargs)
def language_body_size(self, diagnostics=None, context=None):
"""If the language attribute is not empty, then the size of the body and language lists must be the same.
language->notEmpty() implies (_'body'->size() = language->size())"""
raise NotImplementedError(
"operation language_body_size(...) not yet implemented"
)
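
# Illustrative sketch only -- not part of the generated API. The
# `language_body_size` constraint above: when languages are listed, the body
# and language lists must pair up one-to-one.
def _sketch_opaque_action_bodies_match_languages(action):
    """Return True when the body/language lists are consistent in length."""
    if len(action.language) == 0:
        return True
    return len(action.body) == len(action.language)
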
class RaiseExceptionActionMixin(object):
"""User defined mixin class for RaiseExceptionAction."""
def __init__(self, exception=None, **kwargs):
super(RaiseExceptionActionMixin, self).__init__(**kwargs)
class ReadExtentActionMixin(object):
"""User defined mixin class for ReadExtentAction."""
def __init__(self, classifier=None, result=None, **kwargs):
super(ReadExtentActionMixin, self).__init__(**kwargs)
def type_is_classifier(self, diagnostics=None, context=None):
"""The type of the result OutputPin is the classifier.
result.type = classifier"""
raise NotImplementedError(
"operation type_is_classifier(...) not yet implemented"
)
def multiplicity_of_result(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 0..*.
result.is(0,*)"""
raise NotImplementedError(
"operation multiplicity_of_result(...) not yet implemented"
)
class ReadIsClassifiedObjectActionMixin(object):
"""User defined mixin class for ReadIsClassifiedObjectAction."""
def __init__(
self, classifier=None, isDirect=None, object=None, result=None, **kwargs
):
super(ReadIsClassifiedObjectActionMixin, self).__init__(**kwargs)
def no_type(self, diagnostics=None, context=None):
"""The object InputPin has no type.
object.type = null"""
raise NotImplementedError("operation no_type(...) not yet implemented")
def multiplicity_of_output(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 1..1.
result.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_output(...) not yet implemented"
)
def boolean_result(self, diagnostics=None, context=None):
"""The type of the result OutputPin is Boolean.
result.type = Boolean"""
raise NotImplementedError("operation boolean_result(...) not yet implemented")
def multiplicity_of_input(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1.
object.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_input(...) not yet implemented"
)
class ReadLinkObjectEndActionMixin(object):
"""User defined mixin class for ReadLinkObjectEndAction."""
def __init__(self, end=None, object=None, result=None, **kwargs):
super(ReadLinkObjectEndActionMixin, self).__init__(**kwargs)
def property(self, diagnostics=None, context=None):
"""The end Property must be an Association memberEnd.
end.association <> null"""
raise NotImplementedError("operation property(...) not yet implemented")
def multiplicity_of_object(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1.
object.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_object(...) not yet implemented"
)
def ends_of_association(self, diagnostics=None, context=None):
"""The ends of the association must not be static.
end.association.memberEnd->forAll(e | not e.isStatic)"""
raise NotImplementedError(
"operation ends_of_association(...) not yet implemented"
)
def type_of_result(self, diagnostics=None, context=None):
"""The type of the result OutputPin is the same as the type of the end Property.
result.type = end.type"""
raise NotImplementedError("operation type_of_result(...) not yet implemented")
def multiplicity_of_result(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 1..1.
result.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_result(...) not yet implemented"
)
def type_of_object(self, diagnostics=None, context=None):
"""The type of the object InputPin is the AssociationClass that owns the end Property.
object.type = end.association"""
raise NotImplementedError("operation type_of_object(...) not yet implemented")
def association_of_association(self, diagnostics=None, context=None):
"""The association of the end must be an AssociationClass.
end.association.oclIsKindOf(AssociationClass)"""
raise NotImplementedError(
"operation association_of_association(...) not yet implemented"
)
class ReadLinkObjectEndQualifierActionMixin(object):
"""User defined mixin class for ReadLinkObjectEndQualifierAction."""
def __init__(self, object=None, qualifier=None, result=None, **kwargs):
super(ReadLinkObjectEndQualifierActionMixin, self).__init__(**kwargs)
def multiplicity_of_object(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1.
object.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_object(...) not yet implemented"
)
def type_of_object(self, diagnostics=None, context=None):
"""The type of the object InputPin is the AssociationClass that owns the Association end that has the given qualifier Property.
object.type = qualifier.associationEnd.association"""
raise NotImplementedError("operation type_of_object(...) not yet implemented")
def multiplicity_of_qualifier(self, diagnostics=None, context=None):
"""The multiplicity of the qualifier Property is 1..1.
qualifier.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_qualifier(...) not yet implemented"
)
def ends_of_association(self, diagnostics=None, context=None):
"""The ends of the Association must not be static.
qualifier.associationEnd.association.memberEnd->forAll(e | not e.isStatic)"""
raise NotImplementedError(
"operation ends_of_association(...) not yet implemented"
)
def multiplicity_of_result(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 1..1.
result.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_result(...) not yet implemented"
)
def same_type(self, diagnostics=None, context=None):
"""The type of the result OutputPin is the same as the type of the qualifier Property.
result.type = qualifier.type"""
raise NotImplementedError("operation same_type(...) not yet implemented")
def association_of_association(self, diagnostics=None, context=None):
"""The association of the Association end of the qualifier Property must be an AssociationClass.
qualifier.associationEnd.association.oclIsKindOf(AssociationClass)"""
raise NotImplementedError(
"operation association_of_association(...) not yet implemented"
)
def qualifier_attribute(self, diagnostics=None, context=None):
"""The qualifier Property must be a qualifier of an Association end.
qualifier.associationEnd <> null"""
raise NotImplementedError(
"operation qualifier_attribute(...) not yet implemented"
)
class ReadSelfActionMixin(object):
"""User defined mixin class for ReadSelfAction."""
def __init__(self, result=None, **kwargs):
super(ReadSelfActionMixin, self).__init__(**kwargs)
def contained(self, diagnostics=None, context=None):
"""A ReadSelfAction must have a context Classifier.
_'context' <> null"""
raise NotImplementedError("operation contained(...) not yet implemented")
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin is 1..1.
result.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def not_static(self, diagnostics=None, context=None):
"""If the ReadSelfAction is contained in an Behavior that is acting as a method, then the Operation of the method must not be static.
let behavior: Behavior = self.containingBehavior() in
behavior.specification<>null implies not behavior.specification.isStatic"""
raise NotImplementedError("operation not_static(...) not yet implemented")
def type(self, diagnostics=None, context=None):
"""The type of the result OutputPin is the context Classifier.
result.type = _'context'"""
raise NotImplementedError("operation type(...) not yet implemented")
class ReclassifyObjectActionMixin(object):
"""User defined mixin class for ReclassifyObjectAction."""
def __init__(
self,
isReplaceAll=None,
newClassifier=None,
object=None,
oldClassifier=None,
**kwargs,
):
super(ReclassifyObjectActionMixin, self).__init__(**kwargs)
def input_pin(self, diagnostics=None, context=None):
"""The object InputPin has no type.
object.type = null"""
raise NotImplementedError("operation input_pin(...) not yet implemented")
def classifier_not_abstract(self, diagnostics=None, context=None):
"""None of the newClassifiers may be abstract.
not newClassifier->exists(isAbstract)"""
raise NotImplementedError(
"operation classifier_not_abstract(...) not yet implemented"
)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1.
object.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
class ReduceActionMixin(object):
"""User defined mixin class for ReduceAction."""
def __init__(
self, collection=None, isOrdered=None, reducer=None, result=None, **kwargs
):
super(ReduceActionMixin, self).__init__(**kwargs)
def reducer_inputs_output(self, diagnostics=None, context=None):
"""The reducer Behavior must have two input ownedParameters and one output ownedParameter, where the type of the output Parameter and the type of elements of the input collection conform to the types of the input Parameters.
let inputs: OrderedSet(Parameter) = reducer.inputParameters() in
let outputs: OrderedSet(Parameter) = reducer.outputParameters() in
inputs->size()=2 and outputs->size()=1 and
inputs.type->forAll(t |
outputs.type->forAll(conformsTo(t)) and
-- Note that the following only checks the case when the collection is via multiple tokens.
collection.upperBound()>1 implies collection.type.conformsTo(t))"""
raise NotImplementedError(
"operation reducer_inputs_output(...) not yet implemented"
)
def input_type_is_collection(self, diagnostics=None, context=None):
"""The type of the collection InputPin must be a collection."""
raise NotImplementedError(
"operation input_type_is_collection(...) not yet implemented"
)
def output_types_are_compatible(self, diagnostics=None, context=None):
"""The type of the output of the reducer Behavior must conform to the type of the result OutputPin.
reducer.outputParameters().type->forAll(conformsTo(result.type))"""
raise NotImplementedError(
"operation output_types_are_compatible(...) not yet implemented"
)
class ReplyActionMixin(object):
"""User defined mixin class for ReplyAction."""
def __init__(
self, replyToCall=None, replyValue=None, returnInformation=None, **kwargs
):
super(ReplyActionMixin, self).__init__(**kwargs)
def pins_match_parameter(self, diagnostics=None, context=None):
"""The replyValue InputPins must match the output (return, out, and inout) parameters of the operation of the event of the replyToCall Trigger in number, type, ordering, and multiplicity.
let parameter:OrderedSet(Parameter) = replyToCall.event.oclAsType(CallEvent).operation.outputParameters() in
replyValue->size()=parameter->size() and
Sequence{1..replyValue->size()}->forAll(i |
replyValue->at(i).type.conformsTo(parameter->at(i).type) and
replyValue->at(i).isOrdered=parameter->at(i).isOrdered and
replyValue->at(i).compatibleWith(parameter->at(i)))"""
raise NotImplementedError(
"operation pins_match_parameter(...) not yet implemented"
)
def event_on_reply_to_call_trigger(self, diagnostics=None, context=None):
"""The event of the replyToCall Trigger must be a CallEvent.
replyToCall.event.oclIsKindOf(CallEvent)"""
raise NotImplementedError(
"operation event_on_reply_to_call_trigger(...) not yet implemented"
)
class StartClassifierBehaviorActionMixin(object):
"""User defined mixin class for StartClassifierBehaviorAction."""
def __init__(self, object=None, **kwargs):
super(StartClassifierBehaviorActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1
object.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def type_has_classifier(self, diagnostics=None, context=None):
"""If the InputPin has a type, then the type or one of its ancestors must have a classifierBehavior.
object.type->notEmpty() implies
(object.type.oclIsKindOf(BehavioredClassifier) and object.type.oclAsType(BehavioredClassifier).classifierBehavior<>null)"""
raise NotImplementedError(
"operation type_has_classifier(...) not yet implemented"
)
class TestIdentityActionMixin(object):
"""User defined mixin class for TestIdentityAction."""
def __init__(self, first=None, result=None, second=None, **kwargs):
super(TestIdentityActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the InputPins is 1..1.
first.is(1,1) and second.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def no_type(self, diagnostics=None, context=None):
"""The InputPins have no type.
first.type= null and second.type = null"""
raise NotImplementedError("operation no_type(...) not yet implemented")
def result_is_boolean(self, diagnostics=None, context=None):
"""The type of the result OutputPin is Boolean.
result.type=Boolean"""
raise NotImplementedError(
"operation result_is_boolean(...) not yet implemented"
)
class UnmarshallActionMixin(object):
"""User defined mixin class for UnmarshallAction."""
def __init__(self, object=None, result=None, unmarshallType=None, **kwargs):
super(UnmarshallActionMixin, self).__init__(**kwargs)
def structural_feature(self, diagnostics=None, context=None):
"""The unmarshallType must have at least one StructuralFeature.
unmarshallType.allAttributes()->size() >= 1"""
raise NotImplementedError(
"operation structural_feature(...) not yet implemented"
)
def number_of_result(self, diagnostics=None, context=None):
"""The number of result outputPins must be the same as the number of attributes of the unmarshallType.
unmarshallType.allAttributes()->size() = result->size()"""
raise NotImplementedError("operation number_of_result(...) not yet implemented")
def type_ordering_and_multiplicity(self, diagnostics=None, context=None):
"""The type, ordering and multiplicity of each attribute of the unmarshallType must be compatible with the type, ordering and multiplicity of the corresponding result OutputPin.
let attribute:OrderedSet(Property) = unmarshallType.allAttributes() in
Sequence{1..result->size()}->forAll(i |
attribute->at(i).type.conformsTo(result->at(i).type) and
attribute->at(i).isOrdered=result->at(i).isOrdered and
attribute->at(i).compatibleWith(result->at(i)))"""
raise NotImplementedError(
"operation type_ordering_and_multiplicity(...) not yet implemented"
)
def multiplicity_of_object(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin is 1..1
object.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_object(...) not yet implemented"
)
def object_type(self, diagnostics=None, context=None):
"""The type of the object InputPin conform to the unmarshallType.
object.type.conformsTo(unmarshallType)"""
raise NotImplementedError("operation object_type(...) not yet implemented")
class ActivityFinalNodeMixin(object):
"""User defined mixin class for ActivityFinalNode."""
def __init__(self, **kwargs):
super(ActivityFinalNodeMixin, self).__init__(**kwargs)
class ActivityParameterNodeMixin(object):
"""User defined mixin class for ActivityParameterNode."""
def __init__(self, parameter=None, **kwargs):
super(ActivityParameterNodeMixin, self).__init__(**kwargs)
def no_outgoing_edges(self, diagnostics=None, context=None):
"""An ActivityParameterNode with no outgoing ActivityEdges and one or more incoming ActivityEdges must have a parameter with direction out, inout, or return.
(incoming->notEmpty() and outgoing->isEmpty()) implies
(parameter.direction = ParameterDirectionKind::out or
parameter.direction = ParameterDirectionKind::inout or
parameter.direction = ParameterDirectionKind::return)"""
raise NotImplementedError(
"operation no_outgoing_edges(...) not yet implemented"
)
def has_parameters(self, diagnostics=None, context=None):
"""The parameter of an ActivityParameterNode must be from the containing Activity.
activity.ownedParameter->includes(parameter)"""
raise NotImplementedError("operation has_parameters(...) not yet implemented")
def same_type(self, diagnostics=None, context=None):
"""The type of an ActivityParameterNode is the same as the type of its parameter.
type = parameter.type"""
raise NotImplementedError("operation same_type(...) not yet implemented")
def no_incoming_edges(self, diagnostics=None, context=None):
"""An ActivityParameterNode with no incoming ActivityEdges and one or more outgoing ActivityEdges must have a parameter with direction in or inout.
(outgoing->notEmpty() and incoming->isEmpty()) implies
(parameter.direction = ParameterDirectionKind::_'in' or
parameter.direction = ParameterDirectionKind::inout)"""
raise NotImplementedError(
"operation no_incoming_edges(...) not yet implemented"
)
def no_edges(self, diagnostics=None, context=None):
"""An ActivityParameterNode may have all incoming ActivityEdges or all outgoing ActivityEdges, but it must not have both incoming and outgoing ActivityEdges.
incoming->isEmpty() or outgoing->isEmpty()"""
raise NotImplementedError("operation no_edges(...) not yet implemented")
class CentralBufferNodeMixin(object):
"""User defined mixin class for CentralBufferNode."""
def __init__(self, **kwargs):
super(CentralBufferNodeMixin, self).__init__(**kwargs)
class FlowFinalNodeMixin(object):
"""User defined mixin class for FlowFinalNode."""
def __init__(self, **kwargs):
super(FlowFinalNodeMixin, self).__init__(**kwargs)
class DurationIntervalMixin(object):
"""User defined mixin class for DurationInterval."""
def __init__(self, **kwargs):
super(DurationIntervalMixin, self).__init__(**kwargs)
class LiteralBooleanMixin(object):
"""User defined mixin class for LiteralBoolean."""
def __init__(self, value=None, **kwargs):
super(LiteralBooleanMixin, self).__init__(**kwargs)
class LiteralIntegerMixin(object):
"""User defined mixin class for LiteralInteger."""
def __init__(self, value=None, **kwargs):
super(LiteralIntegerMixin, self).__init__(**kwargs)
class LiteralNullMixin(object):
"""User defined mixin class for LiteralNull."""
def __init__(self, **kwargs):
super(LiteralNullMixin, self).__init__(**kwargs)
class LiteralRealMixin(object):
"""User defined mixin class for LiteralReal."""
def __init__(self, value=None, **kwargs):
super(LiteralRealMixin, self).__init__(**kwargs)
class LiteralStringMixin(object):
"""User defined mixin class for LiteralString."""
def __init__(self, value=None, **kwargs):
super(LiteralStringMixin, self).__init__(**kwargs)
class LiteralUnlimitedNaturalMixin(object):
"""User defined mixin class for LiteralUnlimitedNatural."""
def __init__(self, value=None, **kwargs):
super(LiteralUnlimitedNaturalMixin, self).__init__(**kwargs)
class TimeIntervalMixin(object):
"""User defined mixin class for TimeInterval."""
def __init__(self, **kwargs):
super(TimeIntervalMixin, self).__init__(**kwargs)
class DerivedFeature(EDerivedCollection):
pass
class DerivedAttribute(EDerivedCollection):
pass
class DerivedGeneral(EDerivedCollection):
pass
class DerivedInheritedmember(EDerivedCollection):
pass
class ClassifierMixin(object):
"""User defined mixin class for Classifier."""
def __init__(
self,
feature=None,
attribute=None,
collaborationUse=None,
general=None,
generalization=None,
powertypeExtent=None,
inheritedMember=None,
isAbstract=None,
isFinalSpecialization=None,
ownedUseCase=None,
useCase=None,
redefinedClassifier=None,
representation=None,
substitution=None,
**kwargs,
):
super(ClassifierMixin, self).__init__(**kwargs)
def specialize_type(self, diagnostics=None, context=None):
"""A Classifier may only specialize Classifiers of a valid type.
parents()->forAll(c | self.maySpecializeType(c))"""
raise NotImplementedError("operation specialize_type(...) not yet implemented")
def maps_to_generalization_set(self, diagnostics=None, context=None):
"""The Classifier that maps to a GeneralizationSet may neither be a specific nor a general Classifier in any of the Generalization relationships defined for that GeneralizationSet. In other words, a power type may not be an instance of itself nor may its instances also be its subclasses.
powertypeExtent->forAll( gs |
gs.generalization->forAll( gen |
not (gen.general = self) and not gen.general.allParents()->includes(self) and not (gen.specific = self) and not self.allParents()->includes(gen.specific)
))"""
raise NotImplementedError(
"operation maps_to_generalization_set(...) not yet implemented"
)
def non_final_parents(self, diagnostics=None, context=None):
"""The parents of a Classifier must be non-final.
parents()->forAll(not isFinalSpecialization)"""
raise NotImplementedError(
"operation non_final_parents(...) not yet implemented"
)
def no_cycles_in_generalization(self, diagnostics=None, context=None):
"""Generalization hierarchies must be directed and acyclical. A Classifier can not be both a transitively general and transitively specific Classifier of the same Classifier.
not allParents()->includes(self)"""
raise NotImplementedError(
"operation no_cycles_in_generalization(...) not yet implemented"
)
def get_all_attributes(self):
"""Retrieves all the attributes of this classifier, including those inherited from its parents."""
raise NotImplementedError(
"operation get_all_attributes(...) not yet implemented"
)
def get_all_operations(self):
"""Retrieves all the operations of this classifier, including those inherited from its parents."""
raise NotImplementedError(
"operation get_all_operations(...) not yet implemented"
)
def get_all_used_interfaces(self):
"""Retrieves all the interfaces on which this classifier or any of its parents has a usage dependency."""
raise NotImplementedError(
"operation get_all_used_interfaces(...) not yet implemented"
)
    def get_operation(
        self, name=None, parameterNames=None, parameterTypes=None, ignoreCase=None
    ):
        """Retrieves the first operation with the specified name, parameter names, and parameter types from this classifier, ignoring case if indicated."""
        raise NotImplementedError("operation get_operation(...) not yet implemented")
def get_operations(self):
"""Retrieves the operations of this classifier."""
raise NotImplementedError("operation get_operations(...) not yet implemented")
def get_used_interfaces(self):
"""Retrieves the interfaces on which this classifier has a usage dependency."""
raise NotImplementedError(
"operation get_used_interfaces(...) not yet implemented"
)
def all_features(self):
"""The query allFeatures() gives all of the Features in the namespace of the Classifier. In general, through mechanisms such as inheritance, this will be a larger set than feature.
result = (member->select(oclIsKindOf(Feature))->collect(oclAsType(Feature))->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation all_features(...) not yet implemented")
def all_parents(self):
"""The query allParents() gives all of the direct and indirect ancestors of a generalized Classifier.
result = (parents()->union(parents()->collect(allParents())->asSet()))
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation all_parents(...) not yet implemented")
def get_generals(self):
"""The general Classifiers are the ones referenced by the Generalization relationships.
result = (parents())
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation get_generals(...) not yet implemented")
def has_visibility_of(self, n=None):
"""The query hasVisibilityOf() determines whether a NamedElement is visible in the classifier. Non-private members are visible. It is only called when the argument is something owned by a parent.
allParents()->including(self)->collect(member)->includes(n)
result = (n.visibility <> VisibilityKind::private)
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation has_visibility_of(...) not yet implemented"
)
def inherit(self, inhs=None):
"""The query inherit() defines how to inherit a set of elements passed as its argument. It excludes redefined elements from the result.
result = (inhs->reject(inh |
inh.oclIsKindOf(RedefinableElement) and
ownedMember->select(oclIsKindOf(RedefinableElement))->
select(redefinedElement->includes(inh.oclAsType(RedefinableElement)))
->notEmpty()))
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation inherit(...) not yet implemented")
def inheritable_members(self, c=None):
"""The query inheritableMembers() gives all of the members of a Classifier that may be inherited in one of its descendants, subject to whatever visibility restrictions apply.
c.allParents()->includes(self)
result = (member->select(m | c.hasVisibilityOf(m)))
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation inheritable_members(...) not yet implemented"
)
def get_inherited_members(self):
"""The inheritedMember association is derived by inheriting the inheritable members of the parents.
result = (inherit(parents()->collect(inheritableMembers(self))->asSet()))
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation get_inherited_members(...) not yet implemented"
)
def may_specialize_type(self, c=None):
"""The query maySpecializeType() determines whether this classifier may have a generalization relationship to classifiers of the specified type. By default a classifier may specialize classifiers of the same or a more general type. It is intended to be redefined by classifiers that have different specialization constraints.
result = (self.oclIsKindOf(c.oclType()))
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation may_specialize_type(...) not yet implemented"
)
def parents(self):
"""The query parents() gives all of the immediate ancestors of a generalized Classifier.
result = (generalization.general->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation parents(...) not yet implemented")
def directly_realized_interfaces(self):
"""The Interfaces directly realized by this Classifier
result = ((clientDependency->
select(oclIsKindOf(Realization) and supplier->forAll(oclIsKindOf(Interface))))->
collect(supplier.oclAsType(Interface))->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation directly_realized_interfaces(...) not yet implemented"
)
def directly_used_interfaces(self):
"""The Interfaces directly used by this Classifier
result = ((supplierDependency->
select(oclIsKindOf(Usage) and client->forAll(oclIsKindOf(Interface))))->
collect(client.oclAsType(Interface))->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation directly_used_interfaces(...) not yet implemented"
)
def all_realized_interfaces(self):
"""The Interfaces realized by this Classifier and all of its generalizations
result = (directlyRealizedInterfaces()->union(self.allParents()->collect(directlyRealizedInterfaces()))->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation all_realized_interfaces(...) not yet implemented"
)
def all_used_interfaces(self):
"""The Interfaces used by this Classifier and all of its generalizations
result = (directlyUsedInterfaces()->union(self.allParents()->collect(directlyUsedInterfaces()))->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation all_used_interfaces(...) not yet implemented"
)
def is_substitutable_for(self, contract=None):
"""result = (substitution.contract->includes(contract))
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation is_substitutable_for(...) not yet implemented"
)
def all_attributes(self):
"""The query allAttributes gives an ordered set of all owned and inherited attributes of the Classifier. All owned attributes appear before any inherited attributes, and the attributes inherited from any more specific parent Classifier appear before those of any more general parent Classifier. However, if the Classifier has multiple immediate parents, then the relative ordering of the sets of attributes from those parents is not defined.
result = (attribute->asSequence()->union(parents()->asSequence().allAttributes())->select(p | member->includes(p))->asOrderedSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation all_attributes(...) not yet implemented")
def all_slottable_features(self):
"""All StructuralFeatures related to the Classifier that may have Slots, including direct attributes, inherited attributes, private attributes in generalizations, and memberEnds of Associations, but excluding redefined StructuralFeatures.
result = (member->select(oclIsKindOf(StructuralFeature))->
collect(oclAsType(StructuralFeature))->
union(self.inherit(self.allParents()->collect(p | p.attribute)->asSet())->
collect(oclAsType(StructuralFeature)))->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation all_slottable_features(...) not yet implemented"
)
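# --- Illustrative sketch (hypothetical, not part of the generated API) -----
# Classifier::allParents() above is specified as
#   result = (parents()->union(parents()->collect(allParents())->asSet()))
# i.e. the transitive closure of the direct generals.  Assuming only that a
# classifier object exposes a ``parents()`` method returning its direct
# generals, one possible Python rendering of that closure is:
def _sketch_all_parents(classifier, _seen=None):
    """Collect the direct and indirect generals of ``classifier``."""
    seen = _seen if _seen is not None else set()
    for parent in classifier.parents():
        if parent not in seen:
            seen.add(parent)
            _sketch_all_parents(parent, seen)
    return seen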
class ManifestationMixin(object):
"""User defined mixin class for Manifestation."""
def __init__(self, utilizedElement=None, **kwargs):
super(ManifestationMixin, self).__init__(**kwargs)
class OperationMixin(object):
"""User defined mixin class for Operation."""
@property
def isOrdered(self):
raise NotImplementedError("Missing implementation for isOrdered")
@property
def isUnique(self):
raise NotImplementedError("Missing implementation for isUnique")
@property
def lower(self):
raise NotImplementedError("Missing implementation for lower")
@property
def type(self):
raise NotImplementedError("Missing implementation for type")
@property
def upper(self):
raise NotImplementedError("Missing implementation for upper")
def __init__(
self,
bodyCondition=None,
class_=None,
datatype=None,
interface=None,
isOrdered=None,
isQuery=None,
isUnique=None,
lower=None,
postcondition=None,
precondition=None,
redefinedOperation=None,
type=None,
upper=None,
**kwargs,
):
super(OperationMixin, self).__init__(**kwargs)
def at_most_one_return(self, diagnostics=None, context=None):
"""An Operation can have at most one return parameter; i.e., an owned parameter with the direction set to 'return.'
self.ownedParameter->select(direction = ParameterDirectionKind::return)->size() <= 1"""
raise NotImplementedError(
"operation at_most_one_return(...) not yet implemented"
)
def only_body_for_query(self, diagnostics=None, context=None):
"""A bodyCondition can only be specified for a query Operation.
bodyCondition <> null implies isQuery"""
raise NotImplementedError(
"operation only_body_for_query(...) not yet implemented"
)
def get_return_result(self):
"""Retrieves the (only) return result parameter for this operation."""
raise NotImplementedError(
"operation get_return_result(...) not yet implemented"
)
def set_is_ordered(self, newIsOrdered=None):
raise NotImplementedError("operation set_is_ordered(...) not yet implemented")
def set_is_unique(self, newIsUnique=None):
raise NotImplementedError("operation set_is_unique(...) not yet implemented")
def set_lower(self, newLower=None):
raise NotImplementedError("operation set_lower(...) not yet implemented")
def set_type(self, newType=None):
raise NotImplementedError("operation set_type(...) not yet implemented")
def set_upper(self, newUpper=None):
raise NotImplementedError("operation set_upper(...) not yet implemented")
def is_ordered(self):
"""If this operation has a return parameter, isOrdered equals the value of isOrdered for that parameter. Otherwise isOrdered is false.
result = (if returnResult()->notEmpty() then returnResult()-> exists(isOrdered) else false endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation is_ordered(...) not yet implemented")
def is_unique(self):
"""If this operation has a return parameter, isUnique equals the value of isUnique for that parameter. Otherwise isUnique is true.
result = (if returnResult()->notEmpty() then returnResult()->exists(isUnique) else true endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation is_unique(...) not yet implemented")
def get_lower(self):
"""If this operation has a return parameter, lower equals the value of lower for that parameter. Otherwise lower has no value.
result = (if returnResult()->notEmpty() then returnResult()->any(true).lower else null endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation get_lower(...) not yet implemented")
def return_result(self):
"""The query returnResult() returns the set containing the return parameter of the Operation if one exists, otherwise, it returns an empty set
result = (ownedParameter->select (direction = ParameterDirectionKind::return)->asSet())
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation return_result(...) not yet implemented")
def get_type(self):
"""If this operation has a return parameter, type equals the value of type for that parameter. Otherwise type has no value.
result = (if returnResult()->notEmpty() then returnResult()->any(true).type else null endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation get_type(...) not yet implemented")
def get_upper(self):
"""If this operation has a return parameter, upper equals the value of upper for that parameter. Otherwise upper has no value.
result = (if returnResult()->notEmpty() then returnResult()->any(true).upper else null endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation get_upper(...) not yet implemented")
class StringExpressionMixin(object):
"""User defined mixin class for StringExpression."""
def __init__(self, owningExpression=None, subExpression=None, **kwargs):
super(StringExpressionMixin, self).__init__(**kwargs)
def operands(self, diagnostics=None, context=None):
"""All the operands of a StringExpression must be LiteralStrings
operand->forAll (oclIsKindOf (LiteralString))"""
raise NotImplementedError("operation operands(...) not yet implemented")
def subexpressions(self, diagnostics=None, context=None):
"""If a StringExpression has sub-expressions, it cannot have operands and vice versa (this avoids the problem of having to define a collating sequence between operands and subexpressions).
if subExpression->notEmpty() then operand->isEmpty() else operand->notEmpty() endif"""
raise NotImplementedError("operation subexpressions(...) not yet implemented")
class RealizationMixin(object):
"""User defined mixin class for Realization."""
def __init__(self, **kwargs):
super(RealizationMixin, self).__init__(**kwargs)
class PinMixin(object):
"""User defined mixin class for Pin."""
def __init__(self, isControl=None, **kwargs):
super(PinMixin, self).__init__(**kwargs)
def control_pins(self, diagnostics=None, context=None):
"""A control Pin has a control type.
isControl implies isControlType"""
raise NotImplementedError("operation control_pins(...) not yet implemented")
def not_unique(self, diagnostics=None, context=None):
"""Pin multiplicity is not unique.
not isUnique"""
raise NotImplementedError("operation not_unique(...) not yet implemented")
class WriteLinkActionMixin(object):
"""User defined mixin class for WriteLinkAction."""
def __init__(self, **kwargs):
super(WriteLinkActionMixin, self).__init__(**kwargs)
def allow_access(self, diagnostics=None, context=None):
"""The visibility of at least one end must allow access from the context Classifier of the WriteLinkAction.
endData.end->exists(end |
end.type=_'context' or
end.visibility=VisibilityKind::public or
end.visibility=VisibilityKind::protected and
endData.end->exists(other |
other<>end and _'context'.conformsTo(other.type.oclAsType(Classifier))))"""
raise NotImplementedError("operation allow_access(...) not yet implemented")
class WriteStructuralFeatureActionMixin(object):
"""User defined mixin class for WriteStructuralFeatureAction."""
def __init__(self, result=None, value=None, **kwargs):
super(WriteStructuralFeatureActionMixin, self).__init__(**kwargs)
def multiplicity_of_result(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin must be 1..1.
result <> null implies result.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_result(...) not yet implemented"
)
def type_of_value(self, diagnostics=None, context=None):
"""The type of the value InputPin must conform to the type of the structuralFeature.
value <> null implies value.type.conformsTo(structuralFeature.type)"""
raise NotImplementedError("operation type_of_value(...) not yet implemented")
def multiplicity_of_value(self, diagnostics=None, context=None):
"""The multiplicity of the value InputPin is 1..1.
value<>null implies value.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_value(...) not yet implemented"
)
def type_of_result(self, diagnostics=None, context=None):
"""The type of the result OutputPin is the same as the type of the inherited object InputPin.
result <> null implies result.type = object.type"""
raise NotImplementedError("operation type_of_result(...) not yet implemented")
class WriteVariableActionMixin(object):
"""User defined mixin class for WriteVariableAction."""
def __init__(self, value=None, **kwargs):
super(WriteVariableActionMixin, self).__init__(**kwargs)
def value_type(self, diagnostics=None, context=None):
"""The type of the value InputPin must conform to the type of the variable.
value <> null implies value.type.conformsTo(variable.type)"""
raise NotImplementedError("operation value_type(...) not yet implemented")
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the value InputPin is 1..1.
value<>null implies value.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
class AcceptCallActionMixin(object):
"""User defined mixin class for AcceptCallAction."""
def __init__(self, returnInformation=None, **kwargs):
super(AcceptCallActionMixin, self).__init__(**kwargs)
def result_pins(self, diagnostics=None, context=None):
"""The number of result OutputPins must be the same as the number of input (in and inout) ownedParameters of the Operation specified by the trigger Event. The type, ordering and multiplicity of each result OutputPin must be consistent with the corresponding input Parameter.
let parameter: OrderedSet(Parameter) = trigger.event->asSequence()->first().oclAsType(CallEvent).operation.inputParameters() in
result->size() = parameter->size() and
Sequence{1..result->size()}->forAll(i |
parameter->at(i).type.conformsTo(result->at(i).type) and
parameter->at(i).isOrdered = result->at(i).isOrdered and
parameter->at(i).compatibleWith(result->at(i)))"""
raise NotImplementedError("operation result_pins(...) not yet implemented")
def trigger_call_event(self, diagnostics=None, context=None):
"""The action must have exactly one trigger, which must be for a CallEvent.
trigger->size()=1 and
trigger->asSequence()->first().event.oclIsKindOf(CallEvent)"""
raise NotImplementedError(
"operation trigger_call_event(...) not yet implemented"
)
def unmarshall(self, diagnostics=None, context=None):
"""isUnmrashall must be true for an AcceptCallAction.
isUnmarshall = true"""
raise NotImplementedError("operation unmarshall(...) not yet implemented")
class BroadcastSignalActionMixin(object):
"""User defined mixin class for BroadcastSignalAction."""
def __init__(self, signal=None, **kwargs):
super(BroadcastSignalActionMixin, self).__init__(**kwargs)
def number_of_arguments(self, diagnostics=None, context=None):
"""The number of argument InputPins must be the same as the number of attributes in the signal.
argument->size() = signal.allAttributes()->size()"""
raise NotImplementedError(
"operation number_of_arguments(...) not yet implemented"
)
def type_ordering_multiplicity(self, diagnostics=None, context=None):
"""The type, ordering, and multiplicity of an argument InputPin must be the same as the corresponding attribute of the signal.
let attribute: OrderedSet(Property) = signal.allAttributes() in
Sequence{1..argument->size()}->forAll(i |
argument->at(i).type.conformsTo(attribute->at(i).type) and
argument->at(i).isOrdered = attribute->at(i).isOrdered and
argument->at(i).compatibleWith(attribute->at(i)))"""
raise NotImplementedError(
"operation type_ordering_multiplicity(...) not yet implemented"
)
def no_onport(self, diagnostics=None, context=None):
"""A BroadcaseSignalAction may not specify onPort.
onPort=null"""
raise NotImplementedError("operation no_onport(...) not yet implemented")
class CallActionMixin(object):
"""User defined mixin class for CallAction."""
def __init__(self, isSynchronous=None, result=None, **kwargs):
super(CallActionMixin, self).__init__(**kwargs)
def argument_pins(self, diagnostics=None, context=None):
"""The number of argument InputPins must be the same as the number of input (in and inout) ownedParameters of the called Behavior or Operation. The type, ordering and multiplicity of each argument InputPin must be consistent with the corresponding input Parameter.
let parameter: OrderedSet(Parameter) = self.inputParameters() in
argument->size() = parameter->size() and
Sequence{1..argument->size()}->forAll(i |
argument->at(i).type.conformsTo(parameter->at(i).type) and
argument->at(i).isOrdered = parameter->at(i).isOrdered and
argument->at(i).compatibleWith(parameter->at(i)))"""
raise NotImplementedError("operation argument_pins(...) not yet implemented")
def result_pins(self, diagnostics=None, context=None):
"""The number of result OutputPins must be the same as the number of output (inout, out and return) ownedParameters of the called Behavior or Operation. The type, ordering and multiplicity of each result OutputPin must be consistent with the corresponding input Parameter.
let parameter: OrderedSet(Parameter) = self.outputParameters() in
result->size() = parameter->size() and
Sequence{1..result->size()}->forAll(i |
parameter->at(i).type.conformsTo(result->at(i).type) and
parameter->at(i).isOrdered = result->at(i).isOrdered and
parameter->at(i).compatibleWith(result->at(i)))"""
raise NotImplementedError("operation result_pins(...) not yet implemented")
def synchronous_call(self, diagnostics=None, context=None):
"""Only synchronous CallActions can have result OutputPins.
result->notEmpty() implies isSynchronous"""
raise NotImplementedError("operation synchronous_call(...) not yet implemented")
def input_parameters(self):
"""Return the in and inout ownedParameters of the Behavior or Operation being called. (This operation is abstract and should be overridden by subclasses of CallAction.)
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation input_parameters(...) not yet implemented")
def output_parameters(self):
"""Return the inout, out and return ownedParameters of the Behavior or Operation being called. (This operation is abstract and should be overridden by subclasses of CallAction.)
<p>From package UML::Actions.</p>"""
raise NotImplementedError(
"operation output_parameters(...) not yet implemented"
)
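# --- Illustrative sketch (hypothetical, not part of the generated API) -----
# CallAction::result_pins above requires the result OutputPins and the output
# Parameters of the called Behavior/Operation to match pairwise in number,
# type conformance and ordering (the argument-pin rule is analogous, with the
# direction of conformsTo flipped).  ``conforms_to`` below is an assumed
# stand-in for OCL conformsTo on the type objects:
def _sketch_result_pins_match(result_pins, output_parameters):
    """Pairwise CallAction result-pin/parameter compatibility check."""
    if len(result_pins) != len(output_parameters):
        return False
    return all(
        param.type.conforms_to(pin.type) and param.isOrdered == pin.isOrdered
        for pin, param in zip(result_pins, output_parameters)
    )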
class ClearStructuralFeatureActionMixin(object):
"""User defined mixin class for ClearStructuralFeatureAction."""
def __init__(self, result=None, **kwargs):
super(ClearStructuralFeatureActionMixin, self).__init__(**kwargs)
def type_of_result(self, diagnostics=None, context=None):
"""The type of the result OutputPin is the same as the type of the inherited object InputPin.
result<>null implies result.type = object.type"""
raise NotImplementedError("operation type_of_result(...) not yet implemented")
def multiplicity_of_result(self, diagnostics=None, context=None):
"""The multiplicity of the result OutputPin must be 1..1.
result<>null implies result.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_result(...) not yet implemented"
)
class ClearVariableActionMixin(object):
"""User defined mixin class for ClearVariableAction."""
def __init__(self, **kwargs):
super(ClearVariableActionMixin, self).__init__(**kwargs)
class ReadLinkActionMixin(object):
"""User defined mixin class for ReadLinkAction."""
def __init__(self, result=None, **kwargs):
super(ReadLinkActionMixin, self).__init__(**kwargs)
def type_and_ordering(self, diagnostics=None, context=None):
"""The type and ordering of the result OutputPin are same as the type and ordering of the open Association end.
self.openEnd()->forAll(type=result.type and isOrdered=result.isOrdered)"""
raise NotImplementedError(
"operation type_and_ordering(...) not yet implemented"
)
def compatible_multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the open Association end must be compatible with the multiplicity of the result OutputPin.
self.openEnd()->first().compatibleWith(result)"""
raise NotImplementedError(
"operation compatible_multiplicity(...) not yet implemented"
)
def visibility(self, diagnostics=None, context=None):
"""Visibility of the open end must allow access from the object performing the action.
let openEnd : Property = self.openEnd()->first() in
openEnd.visibility = VisibilityKind::public or
endData->exists(oed |
oed.end<>openEnd and
(_'context' = oed.end.type or
(openEnd.visibility = VisibilityKind::protected and
_'context'.conformsTo(oed.end.type.oclAsType(Classifier)))))"""
raise NotImplementedError("operation visibility(...) not yet implemented")
def one_open_end(self, diagnostics=None, context=None):
"""Exactly one linkEndData specification (corresponding to the "open" end) must not have an value InputPin.
self.openEnd()->size() = 1"""
raise NotImplementedError("operation one_open_end(...) not yet implemented")
def navigable_open_end(self, diagnostics=None, context=None):
"""The open end must be navigable.
self.openEnd()->first().isNavigable()"""
raise NotImplementedError(
"operation navigable_open_end(...) not yet implemented"
)
def open_end(self):
"""Returns the ends corresponding to endData with no value InputPin. (A well-formed ReadLinkAction is constrained to have only one of these.)
result = (endData->select(value=null).end->asOrderedSet())
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation open_end(...) not yet implemented")
class ReadStructuralFeatureActionMixin(object):
"""User defined mixin class for ReadStructuralFeatureAction."""
def __init__(self, result=None, **kwargs):
super(ReadStructuralFeatureActionMixin, self).__init__(**kwargs)
def type_and_ordering(self, diagnostics=None, context=None):
"""The type and ordering of the result OutputPin are the same as the type and ordering of the StructuralFeature.
result.type =structuralFeature.type and
result.isOrdered = structuralFeature.isOrdered"""
raise NotImplementedError(
"operation type_and_ordering(...) not yet implemented"
)
class ReadVariableActionMixin(object):
"""User defined mixin class for ReadVariableAction."""
def __init__(self, result=None, **kwargs):
super(ReadVariableActionMixin, self).__init__(**kwargs)
def type_and_ordering(self, diagnostics=None, context=None):
"""The type and ordering of the result OutputPin are the same as the type and ordering of the variable.
result.type =variable.type and
result.isOrdered = variable.isOrdered"""
raise NotImplementedError(
"operation type_and_ordering(...) not yet implemented"
)
def compatible_multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the variable must be compatible with the multiplicity of the output pin.
variable.compatibleWith(result)"""
raise NotImplementedError(
"operation compatible_multiplicity(...) not yet implemented"
)
class SendObjectActionMixin(object):
"""User defined mixin class for SendObjectAction."""
def __init__(self, request=None, target=None, **kwargs):
super(SendObjectActionMixin, self).__init__(**kwargs)
def type_target_pin(self, diagnostics=None, context=None):
"""If onPort is not empty, the Port given by onPort must be an owned or inherited feature of the type of the target InputPin.
onPort<>null implies target.type.oclAsType(Classifier).allFeatures()->includes(onPort)"""
raise NotImplementedError("operation type_target_pin(...) not yet implemented")
class SendSignalActionMixin(object):
"""User defined mixin class for SendSignalAction."""
def __init__(self, signal=None, target=None, **kwargs):
super(SendSignalActionMixin, self).__init__(**kwargs)
def type_ordering_multiplicity(self, diagnostics=None, context=None):
"""The type, ordering, and multiplicity of an argument InputPin must be the same as the corresponding attribute of the signal.
let attribute: OrderedSet(Property) = signal.allAttributes() in
Sequence{1..argument->size()}->forAll(i |
argument->at(i).type.conformsTo(attribute->at(i).type) and
argument->at(i).isOrdered = attribute->at(i).isOrdered and
argument->at(i).compatibleWith(attribute->at(i)))"""
raise NotImplementedError(
"operation type_ordering_multiplicity(...) not yet implemented"
)
def number_order(self, diagnostics=None, context=None):
"""The number and order of argument InputPins must be the same as the number and order of attributes of the signal.
argument->size()=signal.allAttributes()->size()"""
raise NotImplementedError("operation number_order(...) not yet implemented")
def type_target_pin(self, diagnostics=None, context=None):
"""If onPort is not empty, the Port given by onPort must be an owned or inherited feature of the type of the target InputPin.
not onPort->isEmpty() implies target.type.oclAsType(Classifier).allFeatures()->includes(onPort)"""
raise NotImplementedError("operation type_target_pin(...) not yet implemented")
class DataStoreNodeMixin(object):
"""User defined mixin class for DataStoreNode."""
def __init__(self, **kwargs):
super(DataStoreNodeMixin, self).__init__(**kwargs)
class BehavioredClassifierMixin(object):
"""User defined mixin class for BehavioredClassifier."""
def __init__(
self,
classifierBehavior=None,
interfaceRealization=None,
ownedBehavior=None,
**kwargs,
):
super(BehavioredClassifierMixin, self).__init__(**kwargs)
def class_behavior(self, diagnostics=None, context=None):
"""If a behavior is classifier behavior, it does not have a specification.
classifierBehavior->notEmpty() implies classifierBehavior.specification->isEmpty()"""
raise NotImplementedError("operation class_behavior(...) not yet implemented")
def get_all_implemented_interfaces(self):
"""Retrieves all the interfaces on which this behaviored classifier or any of its parents has an interface realization dependency."""
raise NotImplementedError(
"operation get_all_implemented_interfaces(...) not yet implemented"
)
def get_implemented_interfaces(self):
"""Retrieves the interfaces on which this behaviored classifier has an interface realization dependency."""
raise NotImplementedError(
"operation get_implemented_interfaces(...) not yet implemented"
)
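# --- Illustrative sketch (hypothetical, not part of the generated API) -----
# get_all_implemented_interfaces() above unions the contracts of this
# classifier's InterfaceRealizations with those of all of its parents.  A
# duck-typed sketch, assuming ``interfaceRealization`` entries expose a
# ``contract`` and reusing the allParents() closure sketched earlier:
def _sketch_all_implemented_interfaces(classifier, all_parents):
    """Contracts realized by ``classifier`` or by any classifier in ``all_parents``."""
    contracts = {ir.contract for ir in classifier.interfaceRealization}
    for parent in all_parents:
        contracts.update(
            ir.contract for ir in getattr(parent, "interfaceRealization", [])
        )
    return contracts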
class DataTypeMixin(object):
"""User defined mixin class for DataType."""
def __init__(self, ownedAttribute=None, ownedOperation=None, **kwargs):
super(DataTypeMixin, self).__init__(**kwargs)
def create_owned_attribute(self, name=None, type=None, lower=None, upper=None):
"""Creates a property with the specified name, type, lower bound, and upper bound as an owned attribute of this data type."""
raise NotImplementedError(
"operation create_owned_attribute(...) not yet implemented"
)
def create_owned_operation(
self, name=None, parameterNames=None, parameterTypes=None, returnType=None
):
"""Creates an operation with the specified name, parameter names, parameter types, and return type (or null) as an owned operation of this data type."""
raise NotImplementedError(
"operation create_owned_operation(...) not yet implemented"
)
class InterfaceMixin(object):
"""User defined mixin class for Interface."""
def __init__(
self,
nestedClassifier=None,
ownedAttribute=None,
ownedReception=None,
protocol=None,
redefinedInterface=None,
ownedOperation=None,
**kwargs,
):
super(InterfaceMixin, self).__init__(**kwargs)
def visibility(self, diagnostics=None, context=None):
"""The visibility of all Features owned by an Interface must be public.
feature->forAll(visibility = VisibilityKind::public)"""
raise NotImplementedError("operation visibility(...) not yet implemented")
def create_owned_attribute(self, name=None, type=None, lower=None, upper=None):
"""Creates a property with the specified name, type, lower bound, and upper bound as an owned attribute of this interface."""
raise NotImplementedError(
"operation create_owned_attribute(...) not yet implemented"
)
def create_owned_operation(
self, name=None, parameterNames=None, parameterTypes=None, returnType=None
):
"""Creates an operation with the specified name, parameter names, parameter types, and return type (or null) as an owned operation of this interface."""
raise NotImplementedError(
"operation create_owned_operation(...) not yet implemented"
)
class SignalMixin(object):
"""User defined mixin class for Signal."""
def __init__(self, ownedAttribute=None, **kwargs):
super(SignalMixin, self).__init__(**kwargs)
def create_owned_attribute(self, name=None, type=None, lower=None, upper=None):
"""Creates a property with the specified name, type, lower bound, and upper bound as an owned attribute of this signal."""
raise NotImplementedError(
"operation create_owned_attribute(...) not yet implemented"
)
class DerivedPart(EDerivedCollection):
pass
class DerivedRole(EDerivedCollection):
pass
class StructuredClassifierMixin(object):
"""User defined mixin class for StructuredClassifier."""
def __init__(
self, ownedAttribute=None, ownedConnector=None, part=None, role=None, **kwargs
):
super(StructuredClassifierMixin, self).__init__(**kwargs)
def create_owned_attribute(self, name=None, type=None, lower=None, upper=None):
"""Creates a property with the specified name, type, lower bound, and upper bound as an owned attribute of this structured classifier."""
raise NotImplementedError(
"operation create_owned_attribute(...) not yet implemented"
)
def get_parts(self):
"""Derivation for StructuredClassifier::/part
result = (ownedAttribute->select(isComposite)->asSet())
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_parts(...) not yet implemented")
def all_roles(self):
"""All features of type ConnectableElement, equivalent to all direct and inherited roles.
result = (allFeatures()->select(oclIsKindOf(ConnectableElement))->collect(oclAsType(ConnectableElement))->asSet())
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation all_roles(...) not yet implemented")
class SubstitutionMixin(object):
"""User defined mixin class for Substitution."""
def __init__(self, contract=None, substitutingClassifier=None, **kwargs):
super(SubstitutionMixin, self).__init__(**kwargs)
class InterfaceRealizationMixin(object):
"""User defined mixin class for InterfaceRealization."""
def __init__(self, contract=None, implementingClassifier=None, **kwargs):
super(InterfaceRealizationMixin, self).__init__(**kwargs)
class StructuredActivityNodeMixin(object):
"""User defined mixin class for StructuredActivityNode."""
def __init__(
self,
edge=None,
mustIsolate=None,
structuredNodeInput=None,
structuredNodeOutput=None,
variable=None,
node=None,
**kwargs,
):
super(StructuredActivityNodeMixin, self).__init__(**kwargs)
def output_pin_edges(self, diagnostics=None, context=None):
"""The outgoing ActivityEdges of the OutputPins of a StructuredActivityNode must have targets that are not within the StructuredActivityNode.
output.outgoing.target->excludesAll(allOwnedNodes()-input)"""
raise NotImplementedError("operation output_pin_edges(...) not yet implemented")
def edges(self, diagnostics=None, context=None):
"""The edges of a StructuredActivityNode are all the ActivityEdges with source and target ActivityNodes contained directly or indirectly within the StructuredActivityNode and at least one of the source or target not contained in any more deeply nested StructuredActivityNode.
edge=self.sourceNodes().outgoing->intersection(self.allOwnedNodes().incoming)->
union(self.targetNodes().incoming->intersection(self.allOwnedNodes().outgoing))->asSet()"""
raise NotImplementedError("operation edges(...) not yet implemented")
def input_pin_edges(self, diagnostics=None, context=None):
"""The incoming ActivityEdges of an InputPin of a StructuredActivityNode must have sources that are not within the StructuredActivityNode.
input.incoming.source->excludesAll(allOwnedNodes()-output)"""
raise NotImplementedError("operation input_pin_edges(...) not yet implemented")
def source_nodes(self):
"""Return those ActivityNodes contained immediately within the StructuredActivityNode that may act as sources of edges owned by the StructuredActivityNode.
result = (node->union(input.oclAsType(ActivityNode)->asSet())->
union(node->select(oclIsKindOf(Action)).oclAsType(Action).output)->asSet())
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation source_nodes(...) not yet implemented")
def target_nodes(self):
"""Return those ActivityNodes contained immediately within the StructuredActivityNode that may act as targets of edges owned by the StructuredActivityNode.
result = (node->union(output.oclAsType(ActivityNode)->asSet())->
union(node->select(oclIsKindOf(Action)).oclAsType(Action).input)->asSet())
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation target_nodes(...) not yet implemented")
class InputPinMixin(object):
"""User defined mixin class for InputPin."""
def __init__(self, **kwargs):
super(InputPinMixin, self).__init__(**kwargs)
def outgoing_edges_structured_only(self, diagnostics=None, context=None):
"""An InputPin may have outgoing ActivityEdges only when it is owned by a StructuredActivityNode, and these edges must target a node contained (directly or indirectly) in the owning StructuredActivityNode.
outgoing->notEmpty() implies
action<>null and
action.oclIsKindOf(StructuredActivityNode) and
action.oclAsType(StructuredActivityNode).allOwnedNodes()->includesAll(outgoing.target)"""
raise NotImplementedError(
"operation outgoing_edges_structured_only(...) not yet implemented"
)
class OutputPinMixin(object):
"""User defined mixin class for OutputPin."""
def __init__(self, **kwargs):
super(OutputPinMixin, self).__init__(**kwargs)
def incoming_edges_structured_only(self, diagnostics=None, context=None):
"""An OutputPin may have incoming ActivityEdges only when it is owned by a StructuredActivityNode, and these edges must have sources contained (directly or indirectly) in the owning StructuredActivityNode.
incoming->notEmpty() implies
action<>null and
action.oclIsKindOf(StructuredActivityNode) and
action.oclAsType(StructuredActivityNode).allOwnedNodes()->includesAll(incoming.source)"""
raise NotImplementedError(
"operation incoming_edges_structured_only(...) not yet implemented"
)
class AddStructuralFeatureValueActionMixin(object):
"""User defined mixin class for AddStructuralFeatureValueAction."""
def __init__(self, insertAt=None, isReplaceAll=None, **kwargs):
super(AddStructuralFeatureValueActionMixin, self).__init__(**kwargs)
def required_value(self, diagnostics=None, context=None):
"""A value InputPin is required.
value<>null"""
raise NotImplementedError("operation required_value(...) not yet implemented")
def insert_at_pin(self, diagnostics=None, context=None):
"""AddStructuralFeatureActions adding a value to ordered StructuralFeatures must have a single InputPin for the insertion point with type UnlimitedNatural and multiplicity of 1..1 if isReplaceAll=false, and must have no Input Pin for the insertion point when the StructuralFeature is unordered.
if not structuralFeature.isOrdered then insertAt = null
else
not isReplaceAll implies
insertAt<>null and
insertAt->forAll(type=UnlimitedNatural and is(1,1.oclAsType(UnlimitedNatural)))
endif"""
raise NotImplementedError("operation insert_at_pin(...) not yet implemented")
class AddVariableValueActionMixin(object):
"""User defined mixin class for AddVariableValueAction."""
def __init__(self, insertAt=None, isReplaceAll=None, **kwargs):
super(AddVariableValueActionMixin, self).__init__(**kwargs)
def required_value(self, diagnostics=None, context=None):
"""A value InputPin is required.
value <> null"""
raise NotImplementedError("operation required_value(...) not yet implemented")
def insert_at_pin(self, diagnostics=None, context=None):
"""AddVariableValueActions for ordered Variables must have a single InputPin for the insertion point with type UnlimtedNatural and multiplicity of 1..1 if isReplaceAll=false, otherwise the Action has no InputPin for the insertion point.
if not variable.isOrdered then insertAt = null
else
not isReplaceAll implies
insertAt<>null and
insertAt->forAll(type=UnlimitedNatural and is(1,1.oclAsType(UnlimitedNatural)))
endif"""
raise NotImplementedError("operation insert_at_pin(...) not yet implemented")
class CallBehaviorActionMixin(object):
"""User defined mixin class for CallBehaviorAction."""
def __init__(self, behavior=None, **kwargs):
super(CallBehaviorActionMixin, self).__init__(**kwargs)
def no_onport(self, diagnostics=None, context=None):
"""A CallBehaviorAction may not specify onPort.
onPort=null"""
raise NotImplementedError("operation no_onport(...) not yet implemented")
class CallOperationActionMixin(object):
"""User defined mixin class for CallOperationAction."""
def __init__(self, operation=None, target=None, **kwargs):
super(CallOperationActionMixin, self).__init__(**kwargs)
def type_target_pin(self, diagnostics=None, context=None):
"""If onPort has no value, the operation must be an owned or inherited feature of the type of the target InputPin, otherwise the Port given by onPort must be an owned or inherited feature of the type of the target InputPin, and the Port must have a required or provided Interface with the operation as an owned or inherited feature.
if onPort=null then target.type.oclAsType(Classifier).allFeatures()->includes(operation)
else target.type.oclAsType(Classifier).allFeatures()->includes(onPort) and onPort.provided->union(onPort.required).allFeatures()->includes(operation)
endif"""
raise NotImplementedError("operation type_target_pin(...) not yet implemented")
class CreateLinkActionMixin(object):
"""User defined mixin class for CreateLinkAction."""
def __init__(self, **kwargs):
super(CreateLinkActionMixin, self).__init__(**kwargs)
def association_not_abstract(self, diagnostics=None, context=None):
"""The Association cannot be an abstract Classifier.
not self.association().isAbstract"""
raise NotImplementedError(
"operation association_not_abstract(...) not yet implemented"
)
class DestroyLinkActionMixin(object):
"""User defined mixin class for DestroyLinkAction."""
def __init__(self, **kwargs):
super(DestroyLinkActionMixin, self).__init__(**kwargs)
class RemoveStructuralFeatureValueActionMixin(object):
"""User defined mixin class for RemoveStructuralFeatureValueAction."""
def __init__(self, isRemoveDuplicates=None, removeAt=None, **kwargs):
super(RemoveStructuralFeatureValueActionMixin, self).__init__(**kwargs)
def remove_at_and_value(self, diagnostics=None, context=None):
"""RemoveStructuralFeatureValueActions removing a value from ordered, non-unique StructuralFeatures must have a single removeAt InputPin and no value InputPin, if isRemoveDuplicates is false. The removeAt InputPin must be of type Unlimited Natural with multiplicity 1..1. Otherwise, the Action has a value InputPin and no removeAt InputPin.
if structuralFeature.isOrdered and not structuralFeature.isUnique and not isRemoveDuplicates then
value = null and
removeAt <> null and
removeAt.type = UnlimitedNatural and
removeAt.is(1,1)
else
removeAt = null and value <> null
endif"""
raise NotImplementedError(
"operation remove_at_and_value(...) not yet implemented"
)
class RemoveVariableValueActionMixin(object):
"""User defined mixin class for RemoveVariableValueAction."""
def __init__(self, isRemoveDuplicates=None, removeAt=None, **kwargs):
super(RemoveVariableValueActionMixin, self).__init__(**kwargs)
def remove_at_and_value(self, diagnostics=None, context=None):
"""ReadVariableActions removing a value from ordered, non-unique Variables must have a single removeAt InputPin and no value InputPin, if isRemoveDuplicates is false. The removeAt InputPin must be of type Unlimited Natural with multiplicity 1..1. Otherwise, the Action has a value InputPin and no removeAt InputPin.
if variable.isOrdered and not variable.isUnique and not isRemoveDuplicates then
value = null and
removeAt <> null and
removeAt.type = UnlimitedNatural and
removeAt.is(1,1)
else
removeAt = null and value <> null
endif"""
raise NotImplementedError(
"operation remove_at_and_value(...) not yet implemented"
)
class StartObjectBehaviorActionMixin(object):
"""User defined mixin class for StartObjectBehaviorAction."""
def __init__(self, object=None, **kwargs):
super(StartObjectBehaviorActionMixin, self).__init__(**kwargs)
def multiplicity_of_object(self, diagnostics=None, context=None):
"""The multiplicity of the object InputPin must be 1..1.
object.is(1,1)"""
raise NotImplementedError(
"operation multiplicity_of_object(...) not yet implemented"
)
def type_of_object(self, diagnostics=None, context=None):
"""The type of the object InputPin must be either a Behavior or a BehavioredClassifier with a classifierBehavior.
self.behavior()<>null"""
raise NotImplementedError("operation type_of_object(...) not yet implemented")
def no_onport(self, diagnostics=None, context=None):
"""A StartObjectBehaviorAction may not specify onPort.
onPort->isEmpty()"""
raise NotImplementedError("operation no_onport(...) not yet implemented")
def behavior(self):
"""If the type of the object InputPin is a Behavior, then that Behavior. Otherwise, if the type of the object InputPin is a BehavioredClassifier, then the classifierBehavior of that BehavioredClassifier.
result = (if object.type.oclIsKindOf(Behavior) then
object.type.oclAsType(Behavior)
else if object.type.oclIsKindOf(BehavioredClassifier) then
object.type.oclAsType(BehavioredClassifier).classifierBehavior
else
null
endif
endif)
<p>From package UML::Actions.</p>"""
raise NotImplementedError("operation behavior(...) not yet implemented")
class InformationItemMixin(object):
"""User defined mixin class for InformationItem."""
def __init__(self, represented=None, **kwargs):
super(InformationItemMixin, self).__init__(**kwargs)
def sources_and_targets(self, diagnostics=None, context=None):
"""The sources and targets of an information item (its related information flows) must designate subsets of the sources and targets of the representation information item, if any. The Classifiers that can realize an information item can only be of the following kind: Class, Interface, InformationItem, Signal, Component.
(self.represented->select(oclIsKindOf(InformationItem))->forAll(p |
p.conveyingFlow.source->forAll(q | self.conveyingFlow.source->includes(q)) and
p.conveyingFlow.target->forAll(q | self.conveyingFlow.target->includes(q)))) and
(self.represented->forAll(oclIsKindOf(Class) or oclIsKindOf(Interface) or
oclIsKindOf(InformationItem) or oclIsKindOf(Signal) or oclIsKindOf(Component)))"""
raise NotImplementedError(
"operation sources_and_targets(...) not yet implemented"
)
def has_no(self, diagnostics=None, context=None):
"""An informationItem has no feature, no generalization, and no associations.
self.generalization->isEmpty() and self.feature->isEmpty()"""
raise NotImplementedError("operation has_no(...) not yet implemented")
def not_instantiable(self, diagnostics=None, context=None):
"""It is not instantiable.
isAbstract"""
raise NotImplementedError("operation not_instantiable(...) not yet implemented")
class ComponentRealizationMixin(object):
"""User defined mixin class for ComponentRealization."""
def __init__(self, realizingClassifier=None, abstraction=None, **kwargs):
super(ComponentRealizationMixin, self).__init__(**kwargs)
class DerivedEndtype(EDerivedCollection):
pass
class AssociationMixin(object):
"""User defined mixin class for Association."""
def __init__(
self,
endType=None,
isDerived=None,
memberEnd=None,
ownedEnd=None,
navigableOwnedEnd=None,
**kwargs,
):
super(AssociationMixin, self).__init__(**kwargs)
def specialized_end_number(self, diagnostics=None, context=None):
"""An Association specializing another Association has the same number of ends as the other Association.
parents()->select(oclIsKindOf(Association)).oclAsType(Association)->forAll(p | p.memberEnd->size() = self.memberEnd->size())"""
raise NotImplementedError(
"operation specialized_end_number(...) not yet implemented"
)
def specialized_end_types(self, diagnostics=None, context=None):
"""When an Association specializes another Association, every end of the specific Association corresponds to an end of the general Association, and the specific end reaches the same type or a subtype of the corresponding general end.
Sequence{1..memberEnd->size()}->
forAll(i | general->select(oclIsKindOf(Association)).oclAsType(Association)->
forAll(ga | self.memberEnd->at(i).type.conformsTo(ga.memberEnd->at(i).type)))"""
raise NotImplementedError(
"operation specialized_end_types(...) not yet implemented"
)
def binary_associations(self, diagnostics=None, context=None):
"""Only binary Associations can be aggregations.
memberEnd->exists(aggregation <> AggregationKind::none) implies (memberEnd->size() = 2 and memberEnd->exists(aggregation = AggregationKind::none))"""
raise NotImplementedError(
"operation binary_associations(...) not yet implemented"
)
def association_ends(self, diagnostics=None, context=None):
"""Ends of Associations with more than two ends must be owned by the Association itself.
memberEnd->size() > 2 implies ownedEnd->includesAll(memberEnd)"""
raise NotImplementedError("operation association_ends(...) not yet implemented")
def ends_must_be_typed(self, diagnostics=None, context=None):
"""memberEnd->forAll(type->notEmpty())"""
raise NotImplementedError(
"operation ends_must_be_typed(...) not yet implemented"
)
def is_binary(self):
"""Determines whether this association is a binary association, i.e. whether it has exactly two member ends."""
raise NotImplementedError("operation is_binary(...) not yet implemented")
def get_end_types(self):
"""endType is derived from the types of the member ends.
result = (memberEnd->collect(type)->asSet())
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_end_types(...) not yet implemented")
class PropertyMixin(object):
"""User defined mixin class for Property."""
@property
def default(self):
raise NotImplementedError("Missing implementation for default")
@default.setter
def default(self, value):
raise NotImplementedError("Missing implementation for default")
@property
def isComposite(self):
from .uml import AggregationKind
return self.aggregation == AggregationKind.composite
@isComposite.setter
def isComposite(self, value):
from .uml import AggregationKind
if value:
self.aggregation = AggregationKind.composite
else:
self.aggregation = AggregationKind.none
@property
def opposite(self):
raise NotImplementedError("Missing implementation for opposite")
@opposite.setter
def opposite(self, value):
raise NotImplementedError("Missing implementation for opposite")
def __init__(
self,
datatype=None,
interface=None,
default=None,
aggregation=None,
associationEnd=None,
qualifier=None,
class_=None,
defaultValue=None,
isComposite=None,
isDerived=None,
isDerivedUnion=None,
isID=None,
opposite=None,
owningAssociation=None,
redefinedProperty=None,
subsettedProperty=None,
association=None,
**kwargs,
):
super(PropertyMixin, self).__init__(**kwargs)
def subsetting_context_conforms(self, diagnostics=None, context=None):
"""Subsetting may only occur when the context of the subsetting property conforms to the context of the subsetted property.
subsettedProperty->notEmpty() implies
(subsettingContext()->notEmpty() and subsettingContext()->forAll (sc |
subsettedProperty->forAll(sp |
sp.subsettingContext()->exists(c | sc.conformsTo(c)))))"""
raise NotImplementedError(
"operation subsetting_context_conforms(...) not yet implemented"
)
def derived_union_is_read_only(self, diagnostics=None, context=None):
"""A derived union is read only.
isDerivedUnion implies isReadOnly"""
raise NotImplementedError(
"operation derived_union_is_read_only(...) not yet implemented"
)
def multiplicity_of_composite(self, diagnostics=None, context=None):
"""A multiplicity on the composing end of a composite aggregation must not have an upper bound greater than 1.
isComposite and association <> null implies opposite.upperBound() <= 1"""
raise NotImplementedError(
"operation multiplicity_of_composite(...) not yet implemented"
)
def redefined_property_inherited(self, diagnostics=None, context=None):
"""A redefined Property must be inherited from a more general Classifier.
(redefinedProperty->notEmpty()) implies
(redefinitionContext->notEmpty() and
redefinedProperty->forAll(rp|
((redefinitionContext->collect(fc|
fc.allParents()))->asSet())->collect(c| c.allFeatures())->asSet()->includes(rp)))"""
raise NotImplementedError(
"operation redefined_property_inherited(...) not yet implemented"
)
def subsetting_rules(self, diagnostics=None, context=None):
"""A subsetting Property may strengthen the type of the subsetted Property, and its upper bound may be less.
subsettedProperty->forAll(sp |
self.type.conformsTo(sp.type) and
((self.upperBound()->notEmpty() and sp.upperBound()->notEmpty()) implies
self.upperBound() <= sp.upperBound() ))"""
raise NotImplementedError("operation subsetting_rules(...) not yet implemented")
def binding_to_attribute(self, diagnostics=None, context=None):
"""A binding of a PropertyTemplateParameter representing an attribute must be to an attribute.
(self.isAttribute()
and (templateParameterSubstitution->notEmpty())
implies (templateParameterSubstitution->forAll(ts |
ts.formal.oclIsKindOf(Property)
and ts.formal.oclAsType(Property).isAttribute())))"""
raise NotImplementedError(
"operation binding_to_attribute(...) not yet implemented"
)
def derived_union_is_derived(self, diagnostics=None, context=None):
"""A derived union is derived.
isDerivedUnion implies isDerived"""
raise NotImplementedError(
"operation derived_union_is_derived(...) not yet implemented"
)
def deployment_target(self, diagnostics=None, context=None):
"""A Property can be a DeploymentTarget if it is a kind of Node and functions as a part in the internal structure of an encompassing Node.
deployment->notEmpty() implies owner.oclIsKindOf(Node) and Node.allInstances()->exists(n | n.part->exists(p | p = self))"""
raise NotImplementedError(
"operation deployment_target(...) not yet implemented"
)
def subsetted_property_names(self, diagnostics=None, context=None):
"""A Property may not subset a Property with the same name.
subsettedProperty->forAll(sp | sp.name <> name)"""
raise NotImplementedError(
"operation subsetted_property_names(...) not yet implemented"
)
def type_of_opposite_end(self, diagnostics=None, context=None):
"""If a Property is a classifier-owned end of a binary Association, its owner must be the type of the opposite end.
(opposite->notEmpty() and owningAssociation->isEmpty()) implies classifier = opposite.type"""
raise NotImplementedError(
"operation type_of_opposite_end(...) not yet implemented"
)
def qualified_is_association_end(self, diagnostics=None, context=None):
"""All qualified Properties must be Association ends
qualifier->notEmpty() implies association->notEmpty()"""
raise NotImplementedError(
"operation qualified_is_association_end(...) not yet implemented"
)
def get_default(self):
"""Retrieves a string representation of the default value for this property."""
raise NotImplementedError("operation get_default(...) not yet implemented")
def get_other_end(self):
"""Retrieves the other end of the (binary) association in which this property is a member end."""
raise NotImplementedError("operation get_other_end(...) not yet implemented")
def is_set_default(self):
raise NotImplementedError("operation is_set_default(...) not yet implemented")
def set_boolean_default_value(self, value=None):
"""Sets the default value for this property to the specified Boolean value."""
raise NotImplementedError(
"operation set_boolean_default_value(...) not yet implemented"
)
def set_default(self, newDefault=None):
"""Sets the default value for this property based on the specified string representation."""
raise NotImplementedError("operation set_default(...) not yet implemented")
def set_integer_default_value(self, value=None):
"""Sets the default value for this property to the specified integer value."""
raise NotImplementedError(
"operation set_integer_default_value(...) not yet implemented"
)
def set_is_composite(self, newIsComposite=None):
self.isComposite = newIsComposite
def set_is_navigable(self, isNavigable=None):
"""Sets the navigability of this property as indicated."""
raise NotImplementedError("operation set_is_navigable(...) not yet implemented")
def set_null_default_value(self):
"""Sets the default value for this property to the null value."""
raise NotImplementedError(
"operation set_null_default_value(...) not yet implemented"
)
def set_opposite(self, newOpposite=None):
raise NotImplementedError("operation set_opposite(...) not yet implemented")
def set_real_default_value(self, value=None):
"""Sets the default value for this property to the specified real value."""
raise NotImplementedError(
"operation set_real_default_value(...) not yet implemented"
)
def set_string_default_value(self, value=None):
"""Sets the default value for this property to the specified string value."""
raise NotImplementedError(
"operation set_string_default_value(...) not yet implemented"
)
def set_unlimited_natural_default_value(self, value=None):
"""Sets the default value for this property to the specified unlimited natural value."""
raise NotImplementedError(
"operation set_unlimited_natural_default_value(...) not yet implemented"
)
def unset_default(self):
raise NotImplementedError("operation unset_default(...) not yet implemented")
def is_attribute(self):
"""The query isAttribute() is true if the Property is defined as an attribute of some Classifier.
result = (not classifier->isEmpty())
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation is_attribute(...) not yet implemented")
def is_composite(self):
"""The value of isComposite is true only if aggregation is composite.
result = (aggregation = AggregationKind::composite)
<p>From package UML::Classification.</p>"""
return self.isComposite
def is_navigable(self):
"""The query isNavigable() indicates whether it is possible to navigate across the property.
result = (not classifier->isEmpty() or association.navigableOwnedEnd->includes(self))
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation is_navigable(...) not yet implemented")
def get_opposite(self):
"""If this property is a memberEnd of a binary association, then opposite gives the other end.
result = (if association <> null and association.memberEnd->size() = 2
then
association.memberEnd->any(e | e <> self)
else
null
endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError("operation get_opposite(...) not yet implemented")
def subsetting_context(self):
"""The query subsettingContext() gives the context for subsetting a Property. It consists, in the case of an attribute, of the corresponding Classifier, and in the case of an association end, all of the Classifiers at the other ends.
result = (if association <> null
then association.memberEnd->excluding(self)->collect(type)->asSet()
else
if classifier<>null
then classifier->asSet()
else Set{}
endif
endif)
<p>From package UML::Classification.</p>"""
raise NotImplementedError(
"operation subsetting_context(...) not yet implemented"
)
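# --- Illustrative sketch (hypothetical, not part of the generated API) -----
# Property::opposite above is derived as: if the Property is a memberEnd of a
# binary Association, the other end; otherwise null.  Assuming a property
# exposes ``association`` with a ``memberEnd`` collection:
def _sketch_opposite(prop):
    """The other end of a binary association, or None."""
    assoc = prop.association
    if assoc is not None and len(assoc.memberEnd) == 2:
        return next(end for end in assoc.memberEnd if end is not prop)
    return None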
class ArtifactMixin(object):
"""User defined mixin class for Artifact."""
def __init__(
self,
fileName=None,
manifestation=None,
nestedArtifact=None,
ownedAttribute=None,
ownedOperation=None,
**kwargs,
):
super(ArtifactMixin, self).__init__(**kwargs)
def create_owned_attribute(self, name=None, type=None, lower=None, upper=None):
"""Creates a property with the specified name, type, lower bound, and upper bound as an owned attribute of this artifact."""
raise NotImplementedError(
"operation create_owned_attribute(...) not yet implemented"
)
def create_owned_operation(
self, name=None, parameterNames=None, parameterTypes=None, returnType=None
):
"""Creates an operation with the specified name, parameter names, parameter types, and return type (or null) as an owned operation of this artifact."""
raise NotImplementedError(
"operation create_owned_operation(...) not yet implemented"
)
class EnumerationMixin(object):
"""User defined mixin class for Enumeration."""
def __init__(self, ownedLiteral=None, **kwargs):
super(EnumerationMixin, self).__init__(**kwargs)
def immutable(self, diagnostics=None, context=None):
"""ownedAttribute->forAll(isReadOnly)"""
raise NotImplementedError("operation immutable(...) not yet implemented")
class PrimitiveTypeMixin(object):
"""User defined mixin class for PrimitiveType."""
def __init__(self, **kwargs):
super(PrimitiveTypeMixin, self).__init__(**kwargs)
class UseCaseMixin(object):
"""User defined mixin class for UseCase."""
def __init__(
self, extend=None, extensionPoint=None, include=None, subject=None, **kwargs
):
super(UseCaseMixin, self).__init__(**kwargs)
def binary_associations(self, diagnostics=None, context=None):
"""UseCases can only be involved in binary Associations.
Association.allInstances()->forAll(a | a.memberEnd.type->includes(self) implies a.memberEnd->size() = 2)"""
raise NotImplementedError(
"operation binary_associations(...) not yet implemented"
)
def no_association_to_use_case(self, diagnostics=None, context=None):
"""UseCases cannot have Associations to UseCases specifying the same subject.
Association.allInstances()->forAll(a | a.memberEnd.type->includes(self) implies
(
let usecases: Set(UseCase) = a.memberEnd.type->select(oclIsKindOf(UseCase))->collect(oclAsType(UseCase))->asSet() in
usecases->size() > 1 implies usecases->collect(subject)->size() > 1
)
)"""
raise NotImplementedError(
"operation no_association_to_use_case(...) not yet implemented"
)
def cannot_include_self(self, diagnostics=None, context=None):
"""A UseCase cannot include UseCases that directly or indirectly include it.
not allIncludedUseCases()->includes(self)"""
raise NotImplementedError(
"operation cannot_include_self(...) not yet implemented"
)
def must_have_name(self, diagnostics=None, context=None):
"""A UseCase must have a name.
name -> notEmpty ()"""
raise NotImplementedError("operation must_have_name(...) not yet implemented")
def all_included_use_cases(self):
"""The query allIncludedUseCases() returns the transitive closure of all UseCases (directly or indirectly) included by this UseCase.
result = (self.include.addition->union(self.include.addition->collect(uc | uc.allIncludedUseCases()))->asSet())
<p>From package UML::UseCases.</p>"""
raise NotImplementedError(
"operation all_included_use_cases(...) not yet implemented"
)
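    def _all_included_use_cases_sketch(self, _seen=None):
        # Hypothetical helper, not generated code: an illustrative translation
        # of the allIncludedUseCases() closure above, assuming `include` holds
        # Include elements that each expose an `addition` UseCase. A visited
        # set guards against cyclic include chains.
        if _seen is None:
            _seen = set()
        result = set()
        for inc in self.include:
            uc = inc.addition
            if uc is not None and id(uc) not in _seen:
                _seen.add(id(uc))
                result.add(uc)
                result |= uc._all_included_use_cases_sketch(_seen)
        return result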
class DerivedOwnedport(EDerivedCollection):
pass
class EncapsulatedClassifierMixin(object):
"""User defined mixin class for EncapsulatedClassifier."""
def __init__(self, ownedPort=None, **kwargs):
super(EncapsulatedClassifierMixin, self).__init__(**kwargs)
def get_owned_ports(self):
"""Derivation for EncapsulatedClassifier::/ownedPort : Port
result = (ownedAttribute->select(oclIsKindOf(Port))->collect(oclAsType(Port))->asOrderedSet())
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_owned_ports(...) not yet implemented")
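    def _owned_ports_sketch(self):
        # Hypothetical helper, not generated code: a sketch of the /ownedPort
        # derivation above, filtering the owned attributes down to Ports. The
        # import location mirrors the `from .uml import ...` pattern used
        # elsewhere in this module and is an assumption.
        from .uml import Port
        return [a for a in self.ownedAttribute if isinstance(a, Port)]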
class ActionInputPinMixin(object):
"""User defined mixin class for ActionInputPin."""
def __init__(self, fromAction=None, **kwargs):
super(ActionInputPinMixin, self).__init__(**kwargs)
def input_pin(self, diagnostics=None, context=None):
"""The fromAction of an ActionInputPin must only have ActionInputPins as InputPins.
fromAction.input->forAll(oclIsKindOf(ActionInputPin))"""
raise NotImplementedError("operation input_pin(...) not yet implemented")
def one_output_pin(self, diagnostics=None, context=None):
"""The fromAction of an ActionInputPin must have exactly one OutputPin.
fromAction.output->size() = 1"""
raise NotImplementedError("operation one_output_pin(...) not yet implemented")
def no_control_or_object_flow(self, diagnostics=None, context=None):
"""The fromAction of an ActionInputPin cannot have ActivityEdges coming into or out of it or its Pins.
fromAction.incoming->union(outgoing)->isEmpty() and
fromAction.input.incoming->isEmpty() and
fromAction.output.outgoing->isEmpty()"""
raise NotImplementedError(
"operation no_control_or_object_flow(...) not yet implemented"
)
class ConditionalNodeMixin(object):
"""User defined mixin class for ConditionalNode."""
def __init__(
self, clause=None, isAssured=None, isDeterminate=None, result=None, **kwargs
):
super(ConditionalNodeMixin, self).__init__(**kwargs)
def result_no_incoming(self, diagnostics=None, context=None):
"""The result OutputPins have no incoming edges.
result.incoming->isEmpty()"""
raise NotImplementedError(
"operation result_no_incoming(...) not yet implemented"
)
def no_input_pins(self, diagnostics=None, context=None):
"""A ConditionalNode has no InputPins.
input->isEmpty()"""
raise NotImplementedError("operation no_input_pins(...) not yet implemented")
def one_clause_with_executable_node(self, diagnostics=None, context=None):
"""No ExecutableNode in the ConditionNode may appear in the test or body part of more than one clause of a ConditionalNode.
node->select(oclIsKindOf(ExecutableNode)).oclAsType(ExecutableNode)->forAll(n |
self.clause->select(test->union(_'body')->includes(n))->size()=1)"""
raise NotImplementedError(
"operation one_clause_with_executable_node(...) not yet implemented"
)
def matching_output_pins(self, diagnostics=None, context=None):
"""Each clause of a ConditionalNode must have the same number of bodyOutput pins as the ConditionalNode has result OutputPins, and each clause bodyOutput Pin must be compatible with the corresponding result OutputPin (by positional order) in type, multiplicity, ordering, and uniqueness.
clause->forAll(
bodyOutput->size()=self.result->size() and
Sequence{1..self.result->size()}->forAll(i |
bodyOutput->at(i).type.conformsTo(result->at(i).type) and
bodyOutput->at(i).isOrdered = result->at(i).isOrdered and
bodyOutput->at(i).isUnique = result->at(i).isUnique and
bodyOutput->at(i).compatibleWith(result->at(i))))"""
raise NotImplementedError(
"operation matching_output_pins(...) not yet implemented"
)
def executable_nodes(self, diagnostics=None, context=None):
"""The union of the ExecutableNodes in the test and body parts of all clauses must be the same as the subset of nodes contained in the ConditionalNode (considered as a StructuredActivityNode) that are ExecutableNodes.
clause.test->union(clause._'body') = node->select(oclIsKindOf(ExecutableNode)).oclAsType(ExecutableNode)"""
raise NotImplementedError("operation executable_nodes(...) not yet implemented")
def clause_no_predecessor(self, diagnostics=None, context=None):
"""No two clauses within a ConditionalNode may be predecessorClauses of each other, either directly or indirectly.
clause->closure(predecessorClause)->intersection(clause)->isEmpty()"""
raise NotImplementedError(
"operation clause_no_predecessor(...) not yet implemented"
)
class CreateLinkObjectActionMixin(object):
"""User defined mixin class for CreateLinkObjectAction."""
def __init__(self, result=None, **kwargs):
super(CreateLinkObjectActionMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of the OutputPin is 1..1.
result.is(1,1)"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def type_of_result(self, diagnostics=None, context=None):
"""The type of the result OutputPin must be the same as the Association of the CreateLinkObjectAction.
result.type = association()"""
raise NotImplementedError("operation type_of_result(...) not yet implemented")
def association_class(self, diagnostics=None, context=None):
"""The Association must be an AssociationClass.
self.association().oclIsKindOf(AssociationClass)"""
raise NotImplementedError(
"operation association_class(...) not yet implemented"
)
class ExpansionRegionMixin(object):
"""User defined mixin class for ExpansionRegion."""
def __init__(self, mode=None, outputElement=None, inputElement=None, **kwargs):
super(ExpansionRegionMixin, self).__init__(**kwargs)
class LoopNodeMixin(object):
"""User defined mixin class for LoopNode."""
def __init__(
self,
bodyOutput=None,
bodyPart=None,
decider=None,
isTestedFirst=None,
loopVariable=None,
loopVariableInput=None,
result=None,
setupPart=None,
test=None,
**kwargs,
):
super(LoopNodeMixin, self).__init__(**kwargs)
def result_no_incoming(self, diagnostics=None, context=None):
"""The result OutputPins have no incoming edges.
result.incoming->isEmpty()"""
raise NotImplementedError(
"operation result_no_incoming(...) not yet implemented"
)
def input_edges(self, diagnostics=None, context=None):
"""The loopVariableInputs must not have outgoing edges.
loopVariableInput.outgoing->isEmpty()"""
raise NotImplementedError("operation input_edges(...) not yet implemented")
def executable_nodes(self, diagnostics=None, context=None):
"""The union of the ExecutableNodes in the setupPart, test and bodyPart of a LoopNode must be the same as the subset of nodes contained in the LoopNode (considered as a StructuredActivityNode) that are ExecutableNodes.
setupPart->union(test)->union(bodyPart)=node->select(oclIsKindOf(ExecutableNode)).oclAsType(ExecutableNode)->asSet()"""
raise NotImplementedError("operation executable_nodes(...) not yet implemented")
def body_output_pins(self, diagnostics=None, context=None):
"""The bodyOutput pins are OutputPins on Actions in the body of the LoopNode.
bodyPart.oclAsType(Action).allActions().output->includesAll(bodyOutput)"""
raise NotImplementedError("operation body_output_pins(...) not yet implemented")
def setup_test_and_body(self, diagnostics=None, context=None):
"""The test and body parts of a ConditionalNode must be disjoint with each other.
setupPart->intersection(test)->isEmpty() and
setupPart->intersection(bodyPart)->isEmpty() and
test->intersection(bodyPart)->isEmpty()"""
raise NotImplementedError(
"operation setup_test_and_body(...) not yet implemented"
)
def matching_output_pins(self, diagnostics=None, context=None):
"""A LoopNode must have the same number of bodyOutput Pins as loopVariables, and each bodyOutput Pin must be compatible with the corresponding loopVariable (by positional order) in type, multiplicity, ordering and uniqueness.
bodyOutput->size()=loopVariable->size() and
Sequence{1..loopVariable->size()}->forAll(i |
bodyOutput->at(i).type.conformsTo(loopVariable->at(i).type) and
bodyOutput->at(i).isOrdered = loopVariable->at(i).isOrdered and
bodyOutput->at(i).isUnique = loopVariable->at(i).isUnique and
loopVariable->at(i).includesMultiplicity(bodyOutput->at(i)))"""
raise NotImplementedError(
"operation matching_output_pins(...) not yet implemented"
)
def matching_loop_variables(self, diagnostics=None, context=None):
"""A LoopNode must have the same number of loopVariableInputs and loopVariables, and they must match in type, uniqueness and multiplicity.
loopVariableInput->size()=loopVariable->size() and
loopVariableInput.type=loopVariable.type and
loopVariableInput.isUnique=loopVariable.isUnique and
loopVariableInput.lower=loopVariable.lower and
loopVariableInput.upper=loopVariable.upper"""
raise NotImplementedError(
"operation matching_loop_variables(...) not yet implemented"
)
def matching_result_pins(self, diagnostics=None, context=None):
"""A LoopNode must have the same number of result OutputPins and loopVariables, and they must match in type, uniqueness and multiplicity.
result->size()=loopVariable->size() and
result.type=loopVariable.type and
result.isUnique=loopVariable.isUnique and
result.lower=loopVariable.lower and
result.upper=loopVariable.upper"""
raise NotImplementedError(
"operation matching_result_pins(...) not yet implemented"
)
def loop_variable_outgoing(self, diagnostics=None, context=None):
"""All ActivityEdges outgoing from loopVariable OutputPins must have targets within the LoopNode.
allOwnedNodes()->includesAll(loopVariable.outgoing.target)"""
raise NotImplementedError(
"operation loop_variable_outgoing(...) not yet implemented"
)
class SequenceNodeMixin(object):
"""User defined mixin class for SequenceNode."""
def __init__(self, executableNode=None, **kwargs):
super(SequenceNodeMixin, self).__init__(**kwargs)
class ValuePinMixin(object):
"""User defined mixin class for ValuePin."""
def __init__(self, value=None, **kwargs):
super(ValuePinMixin, self).__init__(**kwargs)
def no_incoming_edges(self, diagnostics=None, context=None):
"""A ValuePin may have no incoming ActivityEdges.
incoming->isEmpty()"""
raise NotImplementedError(
"operation no_incoming_edges(...) not yet implemented"
)
def compatible_type(self, diagnostics=None, context=None):
"""The type of the value ValueSpecification must conform to the type of the ValuePin.
value.type.conformsTo(type)"""
raise NotImplementedError("operation compatible_type(...) not yet implemented")
class ActorMixin(object):
"""User defined mixin class for Actor."""
def __init__(self, **kwargs):
super(ActorMixin, self).__init__(**kwargs)
def associations(self, diagnostics=None, context=None):
"""An Actor can only have Associations to UseCases, Components, and Classes. Furthermore these Associations must be binary.
Association.allInstances()->forAll( a |
a.memberEnd->collect(type)->includes(self) implies
(
a.memberEnd->size() = 2 and
let actorEnd : Property = a.memberEnd->any(type = self) in
actorEnd.opposite.class.oclIsKindOf(UseCase) or
( actorEnd.opposite.class.oclIsKindOf(Class) and not
actorEnd.opposite.class.oclIsKindOf(Behavior))
)
)"""
raise NotImplementedError("operation associations(...) not yet implemented")
def must_have_name(self, diagnostics=None, context=None):
"""An Actor must have a name.
name->notEmpty()"""
raise NotImplementedError("operation must_have_name(...) not yet implemented")
class DeploymentSpecificationMixin(object):
"""User defined mixin class for DeploymentSpecification."""
def __init__(
self, deploymentLocation=None, executionLocation=None, deployment=None, **kwargs
):
super(DeploymentSpecificationMixin, self).__init__(**kwargs)
def deployment_target(self, diagnostics=None, context=None):
"""The DeploymentTarget of a DeploymentSpecification is a kind of ExecutionEnvironment.
deployment->forAll (location.oclIsKindOf(ExecutionEnvironment))"""
raise NotImplementedError(
"operation deployment_target(...) not yet implemented"
)
def deployed_elements(self, diagnostics=None, context=None):
"""The deployedElements of a DeploymentTarget that are involved in a Deployment that has an associated Deployment-Specification is a kind of Component (i.e., the configured components).
deployment->forAll (location.deployedElement->forAll (oclIsKindOf(Component)))"""
raise NotImplementedError(
"operation deployed_elements(...) not yet implemented"
)
class DerivedProvided(EDerivedCollection):
pass
class DerivedRequired(EDerivedCollection):
pass
class PortMixin(object):
"""User defined mixin class for Port."""
def __init__(
self,
isBehavior=None,
isConjugated=None,
isService=None,
protocol=None,
provided=None,
redefinedPort=None,
required=None,
**kwargs,
):
super(PortMixin, self).__init__(**kwargs)
def port_aggregation(self, diagnostics=None, context=None):
"""Port.aggregation must be composite.
aggregation = AggregationKind::composite"""
raise NotImplementedError("operation port_aggregation(...) not yet implemented")
def default_value(self, diagnostics=None, context=None):
"""A defaultValue for port cannot be specified when the type of the Port is an Interface.
type.oclIsKindOf(Interface) implies defaultValue->isEmpty()"""
raise NotImplementedError("operation default_value(...) not yet implemented")
def encapsulated_owner(self, diagnostics=None, context=None):
"""All Ports are owned by an EncapsulatedClassifier.
owner = encapsulatedClassifier"""
raise NotImplementedError(
"operation encapsulated_owner(...) not yet implemented"
)
def get_provideds(self):
"""Derivation for Port::/provided
result = (if isConjugated then basicRequired() else basicProvided() endif)
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_provideds(...) not yet implemented")
def get_requireds(self):
"""Derivation for Port::/required
result = (if isConjugated then basicProvided() else basicRequired() endif)
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_requireds(...) not yet implemented")
def basic_provided(self):
"""The union of the sets of Interfaces realized by the type of the Port and its supertypes, or directly the type of the Port if the Port is typed by an Interface.
result = (if type.oclIsKindOf(Interface)
then type.oclAsType(Interface)->asSet()
else type.oclAsType(Classifier).allRealizedInterfaces()
endif)
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation basic_provided(...) not yet implemented")
def basic_required(self):
"""The union of the sets of Interfaces used by the type of the Port and its supertypes.
result = ( type.oclAsType(Classifier).allUsedInterfaces() )
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation basic_required(...) not yet implemented")
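    def _basic_provided_sketch(self):
        # Hypothetical helper, not generated code: a sketch of basicProvided()
        # above. If the Port is typed by an Interface, that Interface is the
        # provided one; otherwise the realized interfaces of the typing
        # Classifier are used. The `.uml` import location and the
        # `all_realized_interfaces()` operation name are assumptions.
        from .uml import Interface
        if isinstance(self.type, Interface):
            return {self.type}
        if self.type is not None:
            return set(self.type.all_realized_interfaces())
        return set()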
class ExtensionMixin(object):
"""User defined mixin class for Extension."""
@property
def isRequired(self):
raise NotImplementedError("Missing implementation for isRequired")
@property
def metaclass(self):
raise NotImplementedError("Missing implementation for metaclass")
def __init__(self, isRequired=None, metaclass=None, **kwargs):
super(ExtensionMixin, self).__init__(**kwargs)
def non_owned_end(self, diagnostics=None, context=None):
"""The non-owned end of an Extension is typed by a Class.
metaclassEnd()->notEmpty() and metaclassEnd().type.oclIsKindOf(Class)"""
raise NotImplementedError("operation non_owned_end(...) not yet implemented")
def is_binary(self, diagnostics=None, context=None):
"""An Extension is binary, i.e., it has only two memberEnds.
memberEnd->size() = 2"""
raise NotImplementedError("operation is_binary(...) not yet implemented")
def get_stereotype(self):
"""Retrieves the stereotype that extends a metaclass through this extension."""
raise NotImplementedError("operation get_stereotype(...) not yet implemented")
def get_stereotype_end(self):
"""Retrieves the extension end that is typed by a stereotype (as opposed to a metaclass)."""
raise NotImplementedError(
"operation get_stereotype_end(...) not yet implemented"
)
def is_required(self):
"""The query isRequired() is true if the owned end has a multiplicity with the lower bound of 1.
result = (ownedEnd.lowerBound() = 1)
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation is_required(...) not yet implemented")
def get_metaclass(self):
"""The query metaclass() returns the metaclass that is being extended (as opposed to the extending stereotype).
result = (metaclassEnd().type.oclAsType(Class))
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation get_metaclass(...) not yet implemented")
def metaclass_end(self):
"""The query metaclassEnd() returns the Property that is typed by a metaclass (as opposed to a stereotype).
result = (memberEnd->reject(p | ownedEnd->includes(p.oclAsType(ExtensionEnd)))->any(true))
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation metaclass_end(...) not yet implemented")
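    def _metaclass_end_sketch(self):
        # Hypothetical helper, not generated code: a sketch of metaclassEnd()
        # above. The member end that is not an owned ExtensionEnd is the one
        # typed by the extended metaclass. Assumes `memberEnd` and `ownedEnd`
        # collections on the generated Extension class.
        for end in self.memberEnd:
            if end not in self.ownedEnd:
                return end
        return None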
class ExtensionEndMixin(object):
"""User defined mixin class for ExtensionEnd."""
def __init__(self, **kwargs):
super(ExtensionEndMixin, self).__init__(**kwargs)
def multiplicity(self, diagnostics=None, context=None):
"""The multiplicity of ExtensionEnd is 0..1 or 1.
(lowerBound() = 0 or lowerBound() = 1) and upperBound() = 1"""
raise NotImplementedError("operation multiplicity(...) not yet implemented")
def aggregation(self, diagnostics=None, context=None):
"""The aggregation of an ExtensionEnd is composite.
self.aggregation = AggregationKind::composite"""
raise NotImplementedError("operation aggregation(...) not yet implemented")
class CollaborationMixin(object):
"""User defined mixin class for Collaboration."""
def __init__(self, collaborationRole=None, **kwargs):
super(CollaborationMixin, self).__init__(**kwargs)
class CommunicationPathMixin(object):
"""User defined mixin class for CommunicationPath."""
def __init__(self, **kwargs):
super(CommunicationPathMixin, self).__init__(**kwargs)
class DerivedExtension(EDerivedCollection):
pass
class DerivedSuperclass(EDerivedCollection):
def _get_collection(self):
return [g.general for g in self.owner.generalization]
def __len__(self):
return len(self.owner.generalization)
def __getitem__(self, index):
return self._get_collection()[index]
def insert(self, index, item):
from .uml import Generalization
self.check(item)
g = Generalization(general=item)
self.owner.generalization.append(g)
def discard(self, item):
c = [g for g in self.owner.generalization if g.general is item]
for i in c:
self.owner.generalization.remove(i)
def __delitem__(self, index):
g = self.owner.generalization[index]
self.owner.generalization.remove(g)
def __repr__(self):
return "DerivedCollection({})".format(self._get_collection())
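# Illustrative note (not part of the generated module): with DerivedSuperclass,
# mutating a Class's `superClass` collection is translated into Generalization
# edits on the owner. A minimal, hypothetical usage sketch, assuming the
# package exposes Class in its `uml` module:
#
#     from .uml import Class
#     a, b = Class(name="A"), Class(name="B")
#     a.superClass.insert(0, b)   # creates Generalization(general=b) on a
#     del a.superClass[0]         # removes that Generalization again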
class ClassMixin(object):
"""User defined mixin class for Class."""
def __init__(
self,
ownedOperation=None,
extension=None,
isActive=None,
nestedClassifier=None,
ownedReception=None,
superClass=None,
**kwargs,
):
super(ClassMixin, self).__init__(**kwargs)
def passive_class(self, diagnostics=None, context=None):
"""Only an active Class may own Receptions and have a classifierBehavior.
not isActive implies (ownedReception->isEmpty() and classifierBehavior = null)"""
raise NotImplementedError("operation passive_class(...) not yet implemented")
def create_owned_operation(
self, name=None, parameterNames=None, parameterTypes=None, returnType=None
):
"""Creates an operation with the specified name, parameter names, parameter types, and return type (or null) as an owned operation of this class."""
raise NotImplementedError(
"operation create_owned_operation(...) not yet implemented"
)
def is_metaclass(self):
        """Determines whether this class is a metaclass."""
        from .standard import Metaclass
for o, r in self._inverse_rels:
if isinstance(o, Metaclass) and r.name == "base_Class":
return True
return False
def get_extensions(self):
"""Derivation for Class::/extension : Extension
result = (Extension.allInstances()->select(ext |
let endTypes : Sequence(Classifier) = ext.memberEnd->collect(type.oclAsType(Classifier)) in
endTypes->includes(self) or endTypes.allParents()->includes(self) ))
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_extensions(...) not yet implemented")
def get_super_classes(self):
"""Derivation for Class::/superClass : Class
result = (self.general()->select(oclIsKindOf(Class))->collect(oclAsType(Class))->asSet())
<p>From package UML::StructuredClassifiers.</p>"""
return self.superClass
class BehaviorMixin(object):
"""User defined mixin class for Behavior."""
@property
def context(self):
raise NotImplementedError("Missing implementation for context")
def __init__(
self,
specification=None,
context=None,
isReentrant=None,
ownedParameter=None,
ownedParameterSet=None,
postcondition=None,
precondition=None,
redefinedBehavior=None,
**kwargs,
):
super(BehaviorMixin, self).__init__(**kwargs)
def most_one_behavior(self, diagnostics=None, context=None):
"""There may be at most one Behavior for a given pairing of BehavioredClassifier (as owner of the Behavior) and BehavioralFeature (as specification of the Behavior).
specification <> null implies _'context'.ownedBehavior->select(specification=self.specification)->size() = 1"""
raise NotImplementedError(
"operation most_one_behavior(...) not yet implemented"
)
    def parameters_match(self, diagnostics=None, context=None):
        """If a Behavior has a specification BehavioralFeature, then it must have the same number of ownedParameters as its specification. The Behavior Parameters must also "match" the BehavioralFeature Parameters, but the exact requirements for this matching are not formalized.
specification <> null implies ownedParameter->size() = specification.ownedParameter->size()"""
raise NotImplementedError("operation parameters_match(...) not yet implemented")
def feature_of_context_classifier(self, diagnostics=None, context=None):
"""The specification BehavioralFeature must be a feature (possibly inherited) of the context BehavioredClassifier of the Behavior.
_'context'.feature->includes(specification)"""
raise NotImplementedError(
"operation feature_of_context_classifier(...) not yet implemented"
)
def get_context(self):
"""A Behavior that is directly owned as a nestedClassifier does not have a context. Otherwise, to determine the context of a Behavior, find the first BehavioredClassifier reached by following the chain of owner relationships from the Behavior, if any. If there is such a BehavioredClassifier, then it is the context, unless it is itself a Behavior with a non-empty context, in which case that is also the context for the original Behavior.
result = (if nestingClass <> null then
null
else
let b:BehavioredClassifier = self.behavioredClassifier(self.owner) in
if b.oclIsKindOf(Behavior) and b.oclAsType(Behavior)._'context' <> null then
b.oclAsType(Behavior)._'context'
else
b
endif
endif
)
<p>From package UML::CommonBehavior.</p>"""
raise NotImplementedError("operation get_context(...) not yet implemented")
def behaviored_classifier(self, from_=None):
"""The first BehavioredClassifier reached by following the chain of owner relationships from the Behavior, if any.
if from.oclIsKindOf(BehavioredClassifier) then
from.oclAsType(BehavioredClassifier)
else if from.owner = null then
null
else
self.behavioredClassifier(from.owner)
endif
endif
<p>From package UML::CommonBehavior.</p>"""
raise NotImplementedError(
"operation behaviored_classifier(...) not yet implemented"
)
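    def _behaviored_classifier_sketch(self, from_=None):
        # Hypothetical helper, not generated code: a sketch of the
        # behavioredClassifier() query above, walking the owner chain until a
        # BehavioredClassifier is found or the chain ends. The `.uml` import
        # location is an assumption.
        from .uml import BehavioredClassifier
        element = from_
        while element is not None:
            if isinstance(element, BehavioredClassifier):
                return element
            element = element.owner
        return None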
def input_parameters(self):
"""The in and inout ownedParameters of the Behavior.
result = (ownedParameter->select(direction=ParameterDirectionKind::_'in' or direction=ParameterDirectionKind::inout))
<p>From package UML::CommonBehavior.</p>"""
raise NotImplementedError("operation input_parameters(...) not yet implemented")
def output_parameters(self):
"""The out, inout and return ownedParameters.
result = (ownedParameter->select(direction=ParameterDirectionKind::out or direction=ParameterDirectionKind::inout or direction=ParameterDirectionKind::return))
<p>From package UML::CommonBehavior.</p>"""
raise NotImplementedError(
"operation output_parameters(...) not yet implemented"
)
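    def _input_parameters_sketch(self):
        # Hypothetical helper, not generated code: a sketch of the
        # inputParameters() derivation above, keeping the in and inout
        # ownedParameters. The `.uml` import location and the `in_` spelling
        # of the UML 'in' literal (mangled because `in` is a Python keyword)
        # are assumptions.
        from .uml import ParameterDirectionKind
        wanted = {ParameterDirectionKind.in_, ParameterDirectionKind.inout}
        return [p for p in self.ownedParameter if p.direction in wanted]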
class StereotypeMixin(object):
"""User defined mixin class for Stereotype."""
@property
def profile(self):
raise NotImplementedError("Missing implementation for profile")
def __init__(self, icon=None, profile=None, **kwargs):
super(StereotypeMixin, self).__init__(**kwargs)
def binary_associations_only(self, diagnostics=None, context=None):
"""Stereotypes may only participate in binary associations.
ownedAttribute.association->forAll(memberEnd->size()=2)"""
raise NotImplementedError(
"operation binary_associations_only(...) not yet implemented"
)
def generalize(self, diagnostics=None, context=None):
"""A Stereotype may only generalize or specialize another Stereotype.
allParents()->forAll(oclIsKindOf(Stereotype))
and Classifier.allInstances()->forAll(c | c.allParents()->exists(oclIsKindOf(Stereotype)) implies c.oclIsKindOf(Stereotype))"""
raise NotImplementedError("operation generalize(...) not yet implemented")
def name_not_clash(self, diagnostics=None, context=None):
"""Stereotype names should not clash with keyword names for the extended model element."""
raise NotImplementedError("operation name_not_clash(...) not yet implemented")
def association_end_ownership(self, diagnostics=None, context=None):
"""Where a stereotype’s property is an association end for an association other than a kind of extension, and the other end is not a stereotype, the other end must be owned by the association itself.
ownedAttribute
->select(association->notEmpty() and not association.oclIsKindOf(Extension) and not type.oclIsKindOf(Stereotype))
->forAll(opposite.owner = association)"""
raise NotImplementedError(
"operation association_end_ownership(...) not yet implemented"
)
def base_property_upper_bound(self, diagnostics=None, context=None):
"""The upper bound of base-properties is exactly 1."""
raise NotImplementedError(
"operation base_property_upper_bound(...) not yet implemented"
)
def base_property_multiplicity_single_extension(
self, diagnostics=None, context=None
):
"""If a Stereotype extends only one metaclass, the multiplicity of the corresponding base-property shall be 1..1."""
raise NotImplementedError(
"operation base_property_multiplicity_single_extension(...) not yet implemented"
)
def base_property_multiplicity_multiple_extension(
self, diagnostics=None, context=None
):
"""If a Stereotype extends more than one metaclass, the multiplicity of the corresponding base-properties shall be [0..1]. At any point in time, only one of these base-properties can contain a metaclass instance during runtime."""
raise NotImplementedError(
"operation base_property_multiplicity_multiple_extension(...) not yet implemented"
)
def create_extension(self, metaclass=None, isRequired=None):
"""Creates a(n) (required) extension of the specified metaclass with this stereotype."""
raise NotImplementedError("operation create_extension(...) not yet implemented")
    def create_icon(self, location=None, format=None, content=None):
        """Creates an icon for this stereotype, either with the specified location or with the specified format and content."""
        raise NotImplementedError("operation create_icon(...) not yet implemented")
def get_all_extended_metaclasses(self):
"""Retrieves all the metaclasses extended by this stereotype, including the metaclasses extended by its superstereotypes."""
raise NotImplementedError(
"operation get_all_extended_metaclasses(...) not yet implemented"
)
def get_definition(self):
"""Retrieves the current definition (Ecore representation) of this stereotype."""
raise NotImplementedError("operation get_definition(...) not yet implemented")
def get_extended_metaclasses(self):
"""Retrieves the metaclasses extended by this stereotype."""
raise NotImplementedError(
"operation get_extended_metaclasses(...) not yet implemented"
)
    def get_keyword(self, localize=None):
        """Retrieves the keyword for this stereotype, localized if indicated."""
        raise NotImplementedError("operation get_keyword(...) not yet implemented")
def containing_profile(self):
"""The query containingProfile returns the closest profile directly or indirectly containing this stereotype.
result = (self.namespace.oclAsType(Package).containingProfile())
<p>From package UML::Packages.</p>"""
raise NotImplementedError(
"operation containing_profile(...) not yet implemented"
)
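    def _containing_profile_sketch(self):
        # Hypothetical helper, not generated code: a sketch of the
        # containingProfile() query above, walking up the owning namespaces
        # until a Profile is reached. The `.uml` import location is an
        # assumption.
        from .uml import Profile
        namespace = self.namespace
        while namespace is not None:
            if isinstance(namespace, Profile):
                return namespace
            namespace = namespace.namespace
        return None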
def get_profile(self):
"""A stereotype must be contained, directly or indirectly, in a profile.
result = (self.containingProfile())
<p>From package UML::Packages.</p>"""
raise NotImplementedError("operation get_profile(...) not yet implemented")
class DerivedProvided(EDerivedCollection):
pass
class DerivedRequired(EDerivedCollection):
pass
class ComponentMixin(object):
"""User defined mixin class for Component."""
def __init__(
self,
isIndirectlyInstantiated=None,
packagedElement=None,
provided=None,
realization=None,
required=None,
**kwargs,
):
super(ComponentMixin, self).__init__(**kwargs)
def no_nested_classifiers(self, diagnostics=None, context=None):
"""A Component cannot nest Classifiers.
nestedClassifier->isEmpty()"""
raise NotImplementedError(
"operation no_nested_classifiers(...) not yet implemented"
)
def no_packaged_elements(self, diagnostics=None, context=None):
"""A Component nested in a Class cannot have any packaged elements.
nestingClass <> null implies packagedElement->isEmpty()"""
raise NotImplementedError(
"operation no_packaged_elements(...) not yet implemented"
)
def create_owned_class(self, name=None, isAbstract=None):
"""Creates a(n) (abstract) class with the specified name as a packaged element of this component."""
raise NotImplementedError(
"operation create_owned_class(...) not yet implemented"
)
    def create_owned_enumeration(self, name=None):
        """Creates an enumeration with the specified name as a packaged element of this component."""
raise NotImplementedError(
"operation create_owned_enumeration(...) not yet implemented"
)
def create_owned_interface(self, name=None):
"""Creates an interface with the specified name as a packaged element of this component."""
raise NotImplementedError(
"operation create_owned_interface(...) not yet implemented"
)
def create_owned_primitive_type(self, name=None):
"""Creates a primitive type with the specified name as a packaged element of this component."""
raise NotImplementedError(
"operation create_owned_primitive_type(...) not yet implemented"
)
def get_provideds(self):
"""Derivation for Component::/provided
result = (let ris : Set(Interface) = allRealizedInterfaces(),
realizingClassifiers : Set(Classifier) = self.realization.realizingClassifier->union(self.allParents()->collect(realization.realizingClassifier))->asSet(),
allRealizingClassifiers : Set(Classifier) = realizingClassifiers->union(realizingClassifiers.allParents())->asSet(),
realizingClassifierInterfaces : Set(Interface) = allRealizingClassifiers->iterate(c; rci : Set(Interface) = Set{} | rci->union(c.allRealizedInterfaces())),
ports : Set(Port) = self.ownedPort->union(allParents()->collect(ownedPort))->asSet(),
providedByPorts : Set(Interface) = ports.provided->asSet()
in ris->union(realizingClassifierInterfaces) ->union(providedByPorts)->asSet())
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_provideds(...) not yet implemented")
def get_requireds(self):
"""Derivation for Component::/required
result = (let uis : Set(Interface) = allUsedInterfaces(),
realizingClassifiers : Set(Classifier) = self.realization.realizingClassifier->union(self.allParents()->collect(realization.realizingClassifier))->asSet(),
allRealizingClassifiers : Set(Classifier) = realizingClassifiers->union(realizingClassifiers.allParents())->asSet(),
realizingClassifierInterfaces : Set(Interface) = allRealizingClassifiers->iterate(c; rci : Set(Interface) = Set{} | rci->union(c.allUsedInterfaces())),
ports : Set(Port) = self.ownedPort->union(allParents()->collect(ownedPort))->asSet(),
usedByPorts : Set(Interface) = ports.required->asSet()
in uis->union(realizingClassifierInterfaces)->union(usedByPorts)->asSet()
)
<p>From package UML::StructuredClassifiers.</p>"""
raise NotImplementedError("operation get_requireds(...) not yet implemented")
class DerivedNode(EDerivedCollection):
pass
class DerivedGroup(EDerivedCollection):
pass
class ActivityMixin(object):
"""User defined mixin class for Activity."""
def __init__(
self,
ownedGroup=None,
edge=None,
node=None,
variable=None,
group=None,
ownedNode=None,
isReadOnly=None,
isSingleExecution=None,
partition=None,
structuredNode=None,
**kwargs,
):
super(ActivityMixin, self).__init__(**kwargs)
def maximum_one_parameter_node(self, diagnostics=None, context=None):
"""A Parameter with direction other than inout must have exactly one ActivityParameterNode in an Activity.
ownedParameter->forAll(p |
p.direction <> ParameterDirectionKind::inout implies node->select(
oclIsKindOf(ActivityParameterNode) and oclAsType(ActivityParameterNode).parameter = p)->size()= 1)"""
raise NotImplementedError(
"operation maximum_one_parameter_node(...) not yet implemented"
)
def maximum_two_parameter_nodes(self, diagnostics=None, context=None):
"""A Parameter with direction inout must have exactly two ActivityParameterNodes in an Activity, at most one with incoming ActivityEdges and at most one with outgoing ActivityEdges.
ownedParameter->forAll(p |
p.direction = ParameterDirectionKind::inout implies
let associatedNodes : Set(ActivityNode) = node->select(
oclIsKindOf(ActivityParameterNode) and oclAsType(ActivityParameterNode).parameter = p) in
associatedNodes->size()=2 and
associatedNodes->select(incoming->notEmpty())->size()<=1 and
associatedNodes->select(outgoing->notEmpty())->size()<=1
)"""
raise NotImplementedError(
"operation maximum_two_parameter_nodes(...) not yet implemented"
)
class StateMachineMixin(object):
"""User defined mixin class for StateMachine."""
def __init__(
self,
connectionPoint=None,
submachineState=None,
region=None,
extendedStateMachine=None,
**kwargs,
):
super(StateMachineMixin, self).__init__(**kwargs)
def connection_points(self, diagnostics=None, context=None):
"""The connection points of a StateMachine are Pseudostates of kind entry point or exit point.
connectionPoint->forAll (kind = PseudostateKind::entryPoint or kind = PseudostateKind::exitPoint)"""
raise NotImplementedError(
"operation connection_points(...) not yet implemented"
)
def classifier_context(self, diagnostics=None, context=None):
"""The Classifier context of a StateMachine cannot be an Interface.
_'context' <> null implies not _'context'.oclIsKindOf(Interface)"""
raise NotImplementedError(
"operation classifier_context(...) not yet implemented"
)
def method(self, diagnostics=None, context=None):
"""A StateMachine as the method for a BehavioralFeature cannot have entry/exit connection points.
specification <> null implies connectionPoint->isEmpty()"""
raise NotImplementedError("operation method(...) not yet implemented")
def context_classifier(self, diagnostics=None, context=None):
"""The context Classifier of the method StateMachine of a BehavioralFeature must be the Classifier that owns the BehavioralFeature.
specification <> null implies ( _'context' <> null and specification.featuringClassifier->exists(c | c = _'context'))"""
raise NotImplementedError(
"operation context_classifier(...) not yet implemented"
)
def lca(self, s1=None, s2=None):
"""The operation LCA(s1,s2) returns the Region that is the least common
ancestor of Vertices s1 and s2, based on the StateMachine containment hierarchy.
result = (if ancestor(s1, s2) then
s2.container
else
if ancestor(s2, s1) then
s1.container
else
LCA(s1.container.state, s2.container.state)
endif
endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation lca(...) not yet implemented")
def ancestor(self, s1=None, s2=None):
"""The query ancestor(s1, s2) checks whether Vertex s2 is an ancestor of Vertex s1.
result = (if (s2 = s1) then
true
else
if s1.container.stateMachine->notEmpty() then
true
else
if s2.container.stateMachine->notEmpty() then
false
else
ancestor(s1, s2.container.state)
endif
endif
endif )
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation ancestor(...) not yet implemented")
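    def _ancestor_sketch(self, s1=None, s2=None):
        # Hypothetical helper, not generated code: a sketch of the ancestor()
        # query above. s2 is an ancestor of s1 when they coincide, when s1 is
        # contained directly in the StateMachine, or, failing that, when the
        # relation holds for the State containing s2. Assumes vertices expose
        # `container` Regions with `stateMachine` and `state` references.
        if s2 is s1:
            return True
        if s1.container.stateMachine is not None:
            return True
        if s2.container.stateMachine is not None:
            return False
        return self._ancestor_sketch(s1, s2.container.state)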
    def lca_state(self, v1=None, v2=None):
        """This utility function is like the LCA, except that it returns the nearest composite State that contains both input Vertices.
result = (if v2.oclIsTypeOf(State) and ancestor(v1, v2) then
v2.oclAsType(State)
else if v1.oclIsTypeOf(State) and ancestor(v2, v1) then
v1.oclAsType(State)
else if (v1.container.state->isEmpty() or v2.container.state->isEmpty()) then
null.oclAsType(State)
else LCAState(v1.container.state, v2.container.state)
endif endif endif)
<p>From package UML::StateMachines.</p>"""
raise NotImplementedError("operation lca_state(...) not yet implemented")
class OpaqueBehaviorMixin(object):
"""User defined mixin class for OpaqueBehavior."""
def __init__(self, body=None, language=None, **kwargs):
super(OpaqueBehaviorMixin, self).__init__(**kwargs)
class NodeMixin(object):
"""User defined mixin class for Node."""
def __init__(self, nestedNode=None, **kwargs):
super(NodeMixin, self).__init__(**kwargs)
def internal_structure(self, diagnostics=None, context=None):
"""The internal structure of a Node (if defined) consists solely of parts of type Node.
part->forAll(oclIsKindOf(Node))"""
raise NotImplementedError(
"operation internal_structure(...) not yet implemented"
)
def create_communication_path(
self,
end1IsNavigable=None,
end1Aggregation=None,
end1Name=None,
end1Lower=None,
end1Upper=None,
end1Node=None,
end2IsNavigable=None,
end2Aggregation=None,
end2Name=None,
end2Lower=None,
end2Upper=None,
):
"""Creates a (binary) communication path between this node and the specified
other node, with the specified navigabilities, aggregations, names,
lower bounds, and upper bounds, and owned by this node's nearest package."""
raise NotImplementedError(
"operation create_communication_path(...) not yet implemented"
)
def get_communication_paths(self):
"""Retrieves the communication paths in which this node is involved."""
raise NotImplementedError(
"operation get_communication_paths(...) not yet implemented"
)
class ProtocolStateMachineMixin(object):
"""User defined mixin class for ProtocolStateMachine."""
def __init__(self, conformance=None, **kwargs):
super(ProtocolStateMachineMixin, self).__init__(**kwargs)
def deep_or_shallow_history(self, diagnostics=None, context=None):
"""ProtocolStateMachines cannot have deep or shallow history Pseudostates.
region->forAll (r | r.subvertex->forAll (v | v.oclIsKindOf(Pseudostate) implies
((v.oclAsType(Pseudostate).kind <> PseudostateKind::deepHistory) and (v.oclAsType(Pseudostate).kind <> PseudostateKind::shallowHistory))))"""
raise NotImplementedError(
"operation deep_or_shallow_history(...) not yet implemented"
)
def entry_exit_do(self, diagnostics=None, context=None):
"""The states of a ProtocolStateMachine cannot have entry, exit, or do activity Behaviors.
region->forAll(r | r.subvertex->forAll(v | v.oclIsKindOf(State) implies
(v.oclAsType(State).entry->isEmpty() and v.oclAsType(State).exit->isEmpty() and v.oclAsType(State).doActivity->isEmpty())))"""
raise NotImplementedError("operation entry_exit_do(...) not yet implemented")
def protocol_transitions(self, diagnostics=None, context=None):
"""All Transitions of a ProtocolStateMachine must be ProtocolTransitions.
region->forAll(r | r.transition->forAll(t | t.oclIsTypeOf(ProtocolTransition)))"""
raise NotImplementedError(
"operation protocol_transitions(...) not yet implemented"
)
class FunctionBehaviorMixin(object):
"""User defined mixin class for FunctionBehavior."""
def __init__(self, **kwargs):
super(FunctionBehaviorMixin, self).__init__(**kwargs)
def one_output_parameter(self, diagnostics=None, context=None):
"""A FunctionBehavior has at least one output Parameter.
self.ownedParameter->
select(p | p.direction = ParameterDirectionKind::out or p.direction= ParameterDirectionKind::inout or p.direction= ParameterDirectionKind::return)->size() >= 1"""
raise NotImplementedError(
"operation one_output_parameter(...) not yet implemented"
)
def types_of_parameters(self, diagnostics=None, context=None):
"""The types of the ownedParameters are all DataTypes, which may not nest anything but other DataTypes.
ownedParameter->forAll(p | p.type <> null and
p.type.oclIsTypeOf(DataType) and hasAllDataTypeAttributes(p.type.oclAsType(DataType)))"""
raise NotImplementedError(
"operation types_of_parameters(...) not yet implemented"
)
def has_all_data_type_attributes(self, d=None):
"""The hasAllDataTypeAttributes query tests whether the types of the attributes of the given DataType are all DataTypes, and similarly for all those DataTypes.
result = (d.ownedAttribute->forAll(a |
a.type.oclIsKindOf(DataType) and
hasAllDataTypeAttributes(a.type.oclAsType(DataType))))
<p>From package UML::CommonBehavior.</p>"""
raise NotImplementedError(
"operation has_all_data_type_attributes(...) not yet implemented"
)
class DeviceMixin(object):
"""User defined mixin class for Device."""
def __init__(self, **kwargs):
super(DeviceMixin, self).__init__(**kwargs)
class ExecutionEnvironmentMixin(object):
"""User defined mixin class for ExecutionEnvironment."""
def __init__(self, **kwargs):
super(ExecutionEnvironmentMixin, self).__init__(**kwargs)
class InteractionMixin(object):
"""User defined mixin class for Interaction."""
def __init__(
self,
lifeline=None,
fragment=None,
action=None,
formalGate=None,
message=None,
**kwargs,
):
super(InteractionMixin, self).__init__(**kwargs)
def not_contained(self, diagnostics=None, context=None):
"""An Interaction instance must not be contained within another Interaction instance.
enclosingInteraction->isEmpty()"""
raise NotImplementedError("operation not_contained(...) not yet implemented")
class AssociationClassMixin(object):
"""User defined mixin class for AssociationClass."""
def __init__(self, **kwargs):
super(AssociationClassMixin, self).__init__(**kwargs)
def cannot_be_defined(self, diagnostics=None, context=None):
"""An AssociationClass cannot be defined between itself and something else.
self.endType()->excludes(self) and self.endType()->collect(et|et.oclAsType(Classifier).allParents())->flatten()->excludes(self)"""
raise NotImplementedError(
"operation cannot_be_defined(...) not yet implemented"
)
def disjoint_attributes_ends(self, diagnostics=None, context=None):
"""The owned attributes and owned ends of an AssociationClass are disjoint.
ownedAttribute->intersection(ownedEnd)->isEmpty()"""
raise NotImplementedError(
"operation disjoint_attributes_ends(...) not yet implemented"
)
| 3
| 4,074
| 0
| 354,099
| 0
| 93
| 0
| 79
| 6,667
|
81d2b8761a000e3c2e1719093c36e29ab070f551
| 97
|
py
|
Python
|
NaStyAPI/__init__.py
|
Nimi142/NaStyAPI
|
573c04b89c6dd40b2a5b8360221a9711fece1234
|
[
"Apache-2.0"
] | 2
|
2021-04-04T06:36:22.000Z
|
2021-05-03T16:00:58.000Z
|
NaStyAPI/__init__.py
|
Nimi142/NaStyAPI
|
573c04b89c6dd40b2a5b8360221a9711fece1234
|
[
"Apache-2.0"
] | null | null | null |
NaStyAPI/__init__.py
|
Nimi142/NaStyAPI
|
573c04b89c6dd40b2a5b8360221a9711fece1234
|
[
"Apache-2.0"
] | null | null | null |
__all__ = ["Nation", "Region", "Telegrams", "WorldAssembly", "World", "TradingCards", "APICall"]
| 48.5
| 96
| 0.670103
|
__all__ = ["Nation", "Region", "Telegrams", "WorldAssembly", "World", "TradingCards", "APICall"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7c29438a9f8848043860516040d82534bc2f57ea
| 28,673
|
py
|
Python
|
google/ads/googleads/v6/resources/types/campaign.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v6/resources/types/campaign.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v6/resources/types/campaign.py
|
wxxlouisa/google-ads-python
|
f24137966f6bfcb765a9b1fae79f2d23041825fe
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.resources",
marshal="google.ads.googleads.v6",
manifest={"Campaign",},
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 42.104258
| 151
| 0.672165
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.common.types import bidding
from google.ads.googleads.v6.common.types import custom_parameter
from google.ads.googleads.v6.common.types import frequency_cap
from google.ads.googleads.v6.common.types import (
real_time_bidding_setting as gagc_real_time_bidding_setting,
)
from google.ads.googleads.v6.common.types import (
targeting_setting as gagc_targeting_setting,
)
from google.ads.googleads.v6.enums.types import (
ad_serving_optimization_status as gage_ad_serving_optimization_status,
)
from google.ads.googleads.v6.enums.types import (
advertising_channel_sub_type as gage_advertising_channel_sub_type,
)
from google.ads.googleads.v6.enums.types import (
advertising_channel_type as gage_advertising_channel_type,
)
from google.ads.googleads.v6.enums.types import app_campaign_app_store
from google.ads.googleads.v6.enums.types import (
app_campaign_bidding_strategy_goal_type,
)
from google.ads.googleads.v6.enums.types import (
bidding_strategy_type as gage_bidding_strategy_type,
)
from google.ads.googleads.v6.enums.types import brand_safety_suitability
from google.ads.googleads.v6.enums.types import campaign_experiment_type
from google.ads.googleads.v6.enums.types import campaign_serving_status
from google.ads.googleads.v6.enums.types import campaign_status
from google.ads.googleads.v6.enums.types import (
location_source_type as gage_location_source_type,
)
from google.ads.googleads.v6.enums.types import (
negative_geo_target_type as gage_negative_geo_target_type,
)
from google.ads.googleads.v6.enums.types import optimization_goal_type
from google.ads.googleads.v6.enums.types import (
payment_mode as gage_payment_mode,
)
from google.ads.googleads.v6.enums.types import (
positive_geo_target_type as gage_positive_geo_target_type,
)
from google.ads.googleads.v6.enums.types import (
vanity_pharma_display_url_mode as gage_vanity_pharma_display_url_mode,
)
from google.ads.googleads.v6.enums.types import (
vanity_pharma_text as gage_vanity_pharma_text,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v6.resources",
marshal="google.ads.googleads.v6",
manifest={"Campaign",},
)
class Campaign(proto.Message):
r"""A campaign.
Attributes:
resource_name (str):
Immutable. The resource name of the campaign. Campaign
resource names have the form:
``customers/{customer_id}/campaigns/{campaign_id}``
id (int):
Output only. The ID of the campaign.
name (str):
The name of the campaign.
This field is required and should not be empty
when creating new campaigns.
It must not contain any null (code point 0x0),
NL line feed (code point 0xA) or carriage return
(code point 0xD) characters.
status (google.ads.googleads.v6.enums.types.CampaignStatusEnum.CampaignStatus):
The status of the campaign.
When a new campaign is added, the status
defaults to ENABLED.
serving_status (google.ads.googleads.v6.enums.types.CampaignServingStatusEnum.CampaignServingStatus):
Output only. The ad serving status of the
campaign.
ad_serving_optimization_status (google.ads.googleads.v6.enums.types.AdServingOptimizationStatusEnum.AdServingOptimizationStatus):
The ad serving optimization status of the
campaign.
advertising_channel_type (google.ads.googleads.v6.enums.types.AdvertisingChannelTypeEnum.AdvertisingChannelType):
Immutable. The primary serving target for ads within the
campaign. The targeting options can be refined in
``network_settings``.
This field is required and should not be empty when creating
new campaigns.
Can be set only when creating campaigns. After the campaign
is created, the field can not be changed.
advertising_channel_sub_type (google.ads.googleads.v6.enums.types.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType):
Immutable. Optional refinement to
``advertising_channel_type``. Must be a valid sub-type of
the parent channel type.
Can be set only when creating campaigns. After campaign is
created, the field can not be changed.
tracking_url_template (str):
The URL template for constructing a tracking
URL.
url_custom_parameters (Sequence[google.ads.googleads.v6.common.types.CustomParameter]):
The list of mappings used to substitute custom parameter
tags in a ``tracking_url_template``, ``final_urls``, or
``mobile_final_urls``.
real_time_bidding_setting (google.ads.googleads.v6.common.types.RealTimeBiddingSetting):
Settings for Real-Time Bidding, a feature
only available for campaigns targeting the Ad
Exchange network.
network_settings (google.ads.googleads.v6.resources.types.Campaign.NetworkSettings):
The network settings for the campaign.
hotel_setting (google.ads.googleads.v6.resources.types.Campaign.HotelSettingInfo):
Immutable. The hotel setting for the
campaign.
dynamic_search_ads_setting (google.ads.googleads.v6.resources.types.Campaign.DynamicSearchAdsSetting):
The setting for controlling Dynamic Search
Ads (DSA).
shopping_setting (google.ads.googleads.v6.resources.types.Campaign.ShoppingSetting):
The setting for controlling Shopping
campaigns.
targeting_setting (google.ads.googleads.v6.common.types.TargetingSetting):
Setting for targeting related features.
geo_target_type_setting (google.ads.googleads.v6.resources.types.Campaign.GeoTargetTypeSetting):
The setting for ads geotargeting.
local_campaign_setting (google.ads.googleads.v6.resources.types.Campaign.LocalCampaignSetting):
The setting for local campaign.
app_campaign_setting (google.ads.googleads.v6.resources.types.Campaign.AppCampaignSetting):
The setting related to App Campaign.
labels (Sequence[str]):
Output only. The resource names of labels
attached to this campaign.
experiment_type (google.ads.googleads.v6.enums.types.CampaignExperimentTypeEnum.CampaignExperimentType):
Output only. The type of campaign: normal,
draft, or experiment.
base_campaign (str):
Output only. The resource name of the base campaign of a
draft or experiment campaign. For base campaigns, this is
equal to ``resource_name``.
This field is read-only.
campaign_budget (str):
The budget of the campaign.
bidding_strategy_type (google.ads.googleads.v6.enums.types.BiddingStrategyTypeEnum.BiddingStrategyType):
Output only. The type of bidding strategy.
A bidding strategy can be created by setting either the
bidding scheme to create a standard bidding strategy or the
``bidding_strategy`` field to create a portfolio bidding
strategy.
This field is read-only.
start_date (str):
The date when campaign started.
end_date (str):
The last day of the campaign.
final_url_suffix (str):
Suffix used to append query parameters to
landing pages that are served with parallel
tracking.
frequency_caps (Sequence[google.ads.googleads.v6.common.types.FrequencyCapEntry]):
A list that limits how often each user will
see this campaign's ads.
video_brand_safety_suitability (google.ads.googleads.v6.enums.types.BrandSafetySuitabilityEnum.BrandSafetySuitability):
Output only. 3-Tier Brand Safety setting for
the campaign.
vanity_pharma (google.ads.googleads.v6.resources.types.Campaign.VanityPharma):
Describes how unbranded pharma ads will be
displayed.
selective_optimization (google.ads.googleads.v6.resources.types.Campaign.SelectiveOptimization):
Selective optimization setting for this
campaign, which includes a set of conversion
actions to optimize this campaign towards.
optimization_goal_setting (google.ads.googleads.v6.resources.types.Campaign.OptimizationGoalSetting):
Optimization goal setting for this campaign,
which includes a set of optimization goal types.
tracking_setting (google.ads.googleads.v6.resources.types.Campaign.TrackingSetting):
Output only. Campaign-level settings for
tracking information.
payment_mode (google.ads.googleads.v6.enums.types.PaymentModeEnum.PaymentMode):
Payment mode for the campaign.
optimization_score (float):
Output only. Optimization score of the
campaign.
Optimization score is an estimate of how well a
campaign is set to perform. It ranges from 0%
(0.0) to 100% (1.0), with 100% indicating that
the campaign is performing at full potential.
This field is null for unscored campaigns.
            See "About optimization score" at
            https://support.google.com/google-ads/answer/9061546.
This field is read-only.
bidding_strategy (str):
Portfolio bidding strategy used by campaign.
commission (google.ads.googleads.v6.common.types.Commission):
Commission is an automatic bidding strategy
in which the advertiser pays a certain portion
of the conversion value.
manual_cpc (google.ads.googleads.v6.common.types.ManualCpc):
Standard Manual CPC bidding strategy.
Manual click-based bidding where user pays per
click.
manual_cpm (google.ads.googleads.v6.common.types.ManualCpm):
Standard Manual CPM bidding strategy.
Manual impression-based bidding where user pays
per thousand impressions.
manual_cpv (google.ads.googleads.v6.common.types.ManualCpv):
Output only. A bidding strategy that pays a
configurable amount per video view.
maximize_conversions (google.ads.googleads.v6.common.types.MaximizeConversions):
Standard Maximize Conversions bidding
strategy that automatically maximizes number of
conversions while spending your budget.
maximize_conversion_value (google.ads.googleads.v6.common.types.MaximizeConversionValue):
Standard Maximize Conversion Value bidding
strategy that automatically sets bids to
maximize revenue while spending your budget.
target_cpa (google.ads.googleads.v6.common.types.TargetCpa):
Standard Target CPA bidding strategy that
automatically sets bids to help get as many
conversions as possible at the target cost-per-
acquisition (CPA) you set.
target_impression_share (google.ads.googleads.v6.common.types.TargetImpressionShare):
Target Impression Share bidding strategy. An
automated bidding strategy that sets bids to
achieve a desired percentage of impressions.
target_roas (google.ads.googleads.v6.common.types.TargetRoas):
Standard Target ROAS bidding strategy that
automatically maximizes revenue while averaging
a specific target return on ad spend (ROAS).
target_spend (google.ads.googleads.v6.common.types.TargetSpend):
Standard Target Spend bidding strategy that
automatically sets your bids to help get as many
clicks as possible within your budget.
percent_cpc (google.ads.googleads.v6.common.types.PercentCpc):
Standard Percent Cpc bidding strategy where
bids are a fraction of the advertised price for
some good or service.
target_cpm (google.ads.googleads.v6.common.types.TargetCpm):
A bidding strategy that automatically
optimizes cost per thousand impressions.
"""
class NetworkSettings(proto.Message):
r"""The network settings for the campaign.
Attributes:
target_google_search (bool):
Whether ads will be served with google.com
search results.
target_search_network (bool):
Whether ads will be served on partner sites in the Google
Search Network (requires ``target_google_search`` to also be
``true``).
target_content_network (bool):
Whether ads will be served on specified
placements in the Google Display Network.
Placements are specified using the Placement
criterion.
target_partner_search_network (bool):
Whether ads will be served on the Google
Partner Network. This is available only to some
select Google partner accounts.
"""
target_google_search = proto.Field(proto.BOOL, number=5, optional=True)
target_search_network = proto.Field(proto.BOOL, number=6, optional=True)
target_content_network = proto.Field(
proto.BOOL, number=7, optional=True
)
target_partner_search_network = proto.Field(
proto.BOOL, number=8, optional=True
)
class HotelSettingInfo(proto.Message):
r"""Campaign-level settings for hotel ads.
Attributes:
hotel_center_id (int):
Immutable. The linked Hotel Center account.
"""
hotel_center_id = proto.Field(proto.INT64, number=2, optional=True)
class VanityPharma(proto.Message):
r"""Describes how unbranded pharma ads will be displayed.
Attributes:
vanity_pharma_display_url_mode (google.ads.googleads.v6.enums.types.VanityPharmaDisplayUrlModeEnum.VanityPharmaDisplayUrlMode):
The display mode for vanity pharma URLs.
vanity_pharma_text (google.ads.googleads.v6.enums.types.VanityPharmaTextEnum.VanityPharmaText):
The text that will be displayed in display
URL of the text ad when website description is
the selected display mode for vanity pharma
URLs.
"""
vanity_pharma_display_url_mode = proto.Field(
proto.ENUM,
number=1,
enum=gage_vanity_pharma_display_url_mode.VanityPharmaDisplayUrlModeEnum.VanityPharmaDisplayUrlMode,
)
vanity_pharma_text = proto.Field(
proto.ENUM,
number=2,
enum=gage_vanity_pharma_text.VanityPharmaTextEnum.VanityPharmaText,
)
class DynamicSearchAdsSetting(proto.Message):
r"""The setting for controlling Dynamic Search Ads (DSA).
Attributes:
domain_name (str):
Required. The Internet domain name that this
setting represents, e.g., "google.com" or
"www.google.com".
language_code (str):
Required. The language code specifying the
language of the domain, e.g., "en".
use_supplied_urls_only (bool):
Whether the campaign uses advertiser supplied
URLs exclusively.
feeds (Sequence[str]):
The list of page feeds associated with the
campaign.
"""
domain_name = proto.Field(proto.STRING, number=6)
language_code = proto.Field(proto.STRING, number=7)
use_supplied_urls_only = proto.Field(
proto.BOOL, number=8, optional=True
)
feeds = proto.RepeatedField(proto.STRING, number=9)
class SelectiveOptimization(proto.Message):
r"""Selective optimization setting for this campaign, which
includes a set of conversion actions to optimize this campaign
towards.
Attributes:
conversion_actions (Sequence[str]):
The selected set of conversion actions for
optimizing this campaign.
"""
conversion_actions = proto.RepeatedField(proto.STRING, number=2)
class AppCampaignSetting(proto.Message):
r"""Campaign-level settings for App Campaigns.
Attributes:
bidding_strategy_goal_type (google.ads.googleads.v6.enums.types.AppCampaignBiddingStrategyGoalTypeEnum.AppCampaignBiddingStrategyGoalType):
Represents the goal which the bidding
strategy of this app campaign should optimize
towards.
app_id (str):
Immutable. A string that uniquely identifies
a mobile application.
app_store (google.ads.googleads.v6.enums.types.AppCampaignAppStoreEnum.AppCampaignAppStore):
Immutable. The application store that
distributes this specific app.
"""
bidding_strategy_goal_type = proto.Field(
proto.ENUM,
number=1,
enum=app_campaign_bidding_strategy_goal_type.AppCampaignBiddingStrategyGoalTypeEnum.AppCampaignBiddingStrategyGoalType,
)
app_id = proto.Field(proto.STRING, number=4, optional=True)
app_store = proto.Field(
proto.ENUM,
number=3,
enum=app_campaign_app_store.AppCampaignAppStoreEnum.AppCampaignAppStore,
)
class ShoppingSetting(proto.Message):
r"""The setting for Shopping campaigns. Defines the universe of
products that can be advertised by the campaign, and how this
campaign interacts with other Shopping campaigns.
Attributes:
merchant_id (int):
Immutable. ID of the Merchant Center account.
This field is required for create operations.
This field is immutable for Shopping campaigns.
sales_country (str):
Immutable. Sales country of products to
include in the campaign. This field is required
for Shopping campaigns. This field is immutable.
This field is optional for non-Shopping
campaigns, but it must be equal to 'ZZ' if set.
campaign_priority (int):
Priority of the campaign. Campaigns with
numerically higher priorities take precedence
over those with lower priorities. This field is
required for Shopping campaigns, with values
between 0 and 2, inclusive.
This field is optional for Smart Shopping
campaigns, but must be equal to 3 if set.
enable_local (bool):
Whether to include local products.
"""
merchant_id = proto.Field(proto.INT64, number=5, optional=True)
sales_country = proto.Field(proto.STRING, number=6, optional=True)
campaign_priority = proto.Field(proto.INT32, number=7, optional=True)
enable_local = proto.Field(proto.BOOL, number=8, optional=True)
class TrackingSetting(proto.Message):
r"""Campaign-level settings for tracking information.
Attributes:
tracking_url (str):
Output only. The url used for dynamic
tracking.
"""
tracking_url = proto.Field(proto.STRING, number=2, optional=True)
class GeoTargetTypeSetting(proto.Message):
r"""Represents a collection of settings related to ads
geotargeting.
Attributes:
positive_geo_target_type (google.ads.googleads.v6.enums.types.PositiveGeoTargetTypeEnum.PositiveGeoTargetType):
The setting used for positive geotargeting in
this particular campaign.
negative_geo_target_type (google.ads.googleads.v6.enums.types.NegativeGeoTargetTypeEnum.NegativeGeoTargetType):
The setting used for negative geotargeting in
this particular campaign.
"""
positive_geo_target_type = proto.Field(
proto.ENUM,
number=1,
enum=gage_positive_geo_target_type.PositiveGeoTargetTypeEnum.PositiveGeoTargetType,
)
negative_geo_target_type = proto.Field(
proto.ENUM,
number=2,
enum=gage_negative_geo_target_type.NegativeGeoTargetTypeEnum.NegativeGeoTargetType,
)
class LocalCampaignSetting(proto.Message):
r"""Campaign setting for local campaigns.
Attributes:
location_source_type (google.ads.googleads.v6.enums.types.LocationSourceTypeEnum.LocationSourceType):
The location source type for this local
campaign.
"""
location_source_type = proto.Field(
proto.ENUM,
number=1,
enum=gage_location_source_type.LocationSourceTypeEnum.LocationSourceType,
)
class OptimizationGoalSetting(proto.Message):
r"""Optimization goal setting for this campaign, which includes a
set of optimization goal types.
Attributes:
optimization_goal_types (Sequence[google.ads.googleads.v6.enums.types.OptimizationGoalTypeEnum.OptimizationGoalType]):
The list of optimization goal types.
"""
optimization_goal_types = proto.RepeatedField(
proto.ENUM,
number=1,
enum=optimization_goal_type.OptimizationGoalTypeEnum.OptimizationGoalType,
)
resource_name = proto.Field(proto.STRING, number=1)
id = proto.Field(proto.INT64, number=59, optional=True)
name = proto.Field(proto.STRING, number=58, optional=True)
status = proto.Field(
proto.ENUM,
number=5,
enum=campaign_status.CampaignStatusEnum.CampaignStatus,
)
serving_status = proto.Field(
proto.ENUM,
number=21,
enum=campaign_serving_status.CampaignServingStatusEnum.CampaignServingStatus,
)
ad_serving_optimization_status = proto.Field(
proto.ENUM,
number=8,
enum=gage_ad_serving_optimization_status.AdServingOptimizationStatusEnum.AdServingOptimizationStatus,
)
advertising_channel_type = proto.Field(
proto.ENUM,
number=9,
enum=gage_advertising_channel_type.AdvertisingChannelTypeEnum.AdvertisingChannelType,
)
advertising_channel_sub_type = proto.Field(
proto.ENUM,
number=10,
enum=gage_advertising_channel_sub_type.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType,
)
tracking_url_template = proto.Field(proto.STRING, number=60, optional=True)
url_custom_parameters = proto.RepeatedField(
proto.MESSAGE, number=12, message=custom_parameter.CustomParameter,
)
real_time_bidding_setting = proto.Field(
proto.MESSAGE,
number=39,
message=gagc_real_time_bidding_setting.RealTimeBiddingSetting,
)
network_settings = proto.Field(
proto.MESSAGE, number=14, message=NetworkSettings,
)
hotel_setting = proto.Field(
proto.MESSAGE, number=32, message=HotelSettingInfo,
)
dynamic_search_ads_setting = proto.Field(
proto.MESSAGE, number=33, message=DynamicSearchAdsSetting,
)
shopping_setting = proto.Field(
proto.MESSAGE, number=36, message=ShoppingSetting,
)
targeting_setting = proto.Field(
proto.MESSAGE,
number=43,
message=gagc_targeting_setting.TargetingSetting,
)
geo_target_type_setting = proto.Field(
proto.MESSAGE, number=47, message=GeoTargetTypeSetting,
)
local_campaign_setting = proto.Field(
proto.MESSAGE, number=50, message=LocalCampaignSetting,
)
app_campaign_setting = proto.Field(
proto.MESSAGE, number=51, message=AppCampaignSetting,
)
labels = proto.RepeatedField(proto.STRING, number=61)
experiment_type = proto.Field(
proto.ENUM,
number=17,
enum=campaign_experiment_type.CampaignExperimentTypeEnum.CampaignExperimentType,
)
base_campaign = proto.Field(proto.STRING, number=56, optional=True)
campaign_budget = proto.Field(proto.STRING, number=62, optional=True)
bidding_strategy_type = proto.Field(
proto.ENUM,
number=22,
enum=gage_bidding_strategy_type.BiddingStrategyTypeEnum.BiddingStrategyType,
)
start_date = proto.Field(proto.STRING, number=63, optional=True)
end_date = proto.Field(proto.STRING, number=64, optional=True)
final_url_suffix = proto.Field(proto.STRING, number=65, optional=True)
frequency_caps = proto.RepeatedField(
proto.MESSAGE, number=40, message=frequency_cap.FrequencyCapEntry,
)
video_brand_safety_suitability = proto.Field(
proto.ENUM,
number=42,
enum=brand_safety_suitability.BrandSafetySuitabilityEnum.BrandSafetySuitability,
)
vanity_pharma = proto.Field(proto.MESSAGE, number=44, message=VanityPharma,)
selective_optimization = proto.Field(
proto.MESSAGE, number=45, message=SelectiveOptimization,
)
optimization_goal_setting = proto.Field(
proto.MESSAGE, number=54, message=OptimizationGoalSetting,
)
tracking_setting = proto.Field(
proto.MESSAGE, number=46, message=TrackingSetting,
)
payment_mode = proto.Field(
proto.ENUM,
number=52,
enum=gage_payment_mode.PaymentModeEnum.PaymentMode,
)
optimization_score = proto.Field(proto.DOUBLE, number=66, optional=True)
bidding_strategy = proto.Field(
proto.STRING, number=67, oneof="campaign_bidding_strategy"
)
commission = proto.Field(
proto.MESSAGE,
number=49,
oneof="campaign_bidding_strategy",
message=bidding.Commission,
)
manual_cpc = proto.Field(
proto.MESSAGE,
number=24,
oneof="campaign_bidding_strategy",
message=bidding.ManualCpc,
)
manual_cpm = proto.Field(
proto.MESSAGE,
number=25,
oneof="campaign_bidding_strategy",
message=bidding.ManualCpm,
)
manual_cpv = proto.Field(
proto.MESSAGE,
number=37,
oneof="campaign_bidding_strategy",
message=bidding.ManualCpv,
)
maximize_conversions = proto.Field(
proto.MESSAGE,
number=30,
oneof="campaign_bidding_strategy",
message=bidding.MaximizeConversions,
)
maximize_conversion_value = proto.Field(
proto.MESSAGE,
number=31,
oneof="campaign_bidding_strategy",
message=bidding.MaximizeConversionValue,
)
target_cpa = proto.Field(
proto.MESSAGE,
number=26,
oneof="campaign_bidding_strategy",
message=bidding.TargetCpa,
)
target_impression_share = proto.Field(
proto.MESSAGE,
number=48,
oneof="campaign_bidding_strategy",
message=bidding.TargetImpressionShare,
)
target_roas = proto.Field(
proto.MESSAGE,
number=29,
oneof="campaign_bidding_strategy",
message=bidding.TargetRoas,
)
target_spend = proto.Field(
proto.MESSAGE,
number=27,
oneof="campaign_bidding_strategy",
message=bidding.TargetSpend,
)
percent_cpc = proto.Field(
proto.MESSAGE,
number=34,
oneof="campaign_bidding_strategy",
message=bidding.PercentCpc,
)
target_cpm = proto.Field(
proto.MESSAGE,
number=41,
oneof="campaign_bidding_strategy",
message=bidding.TargetCpm,
)
__all__ = tuple(sorted(__protobuf__.manifest))
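# --- Editor's illustrative sketch (not part of the generated file). It assumes the
# --- Campaign proto-plus message defined above; all field values are placeholders.
if __name__ == "__main__":
    example = Campaign(
        name="Example Search campaign",
        start_date="2021-01-01",
        end_date="2021-12-31",
        network_settings=Campaign.NetworkSettings(
            target_google_search=True,
            target_search_network=True,  # requires target_google_search to be True
        ),
    )
    # campaign_bidding_strategy is a oneof: setting one member clears the others.
    example.manual_cpc = bidding.ManualCpc()
    print(example)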
| 0
| 0
| 0
| 25,759
| 0
| 0
| 0
| 1,575
| 508
|
4846371389c6ecd83e61164bd3ccc3ebf91187b3
| 1,596
|
py
|
Python
|
scripts/gen_large_tests_oc.py
|
kuenishi/chainer-compiler
|
942c3b09759d0857ddc7c6bb93b81117d011ff50
|
[
"MIT"
] | null | null | null |
scripts/gen_large_tests_oc.py
|
kuenishi/chainer-compiler
|
942c3b09759d0857ddc7c6bb93b81117d011ff50
|
[
"MIT"
] | null | null | null |
scripts/gen_large_tests_oc.py
|
kuenishi/chainer-compiler
|
942c3b09759d0857ddc7c6bb93b81117d011ff50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
if __name__ == '__main__':
main()
| 28
| 71
| 0.572055
|
#!/usr/bin/python3
import chainer
import numpy as np
import onnx_chainer
import large_models
def create_test(test_name, get_fun, dtype):
np.random.seed(314)
chainer.config.dtype = dtype
model, inputs = get_fun(dtype)
output_grad = 'backprop' in test_name
test_dir = 'out/%s' % test_name
chainer.disable_experimental_feature_warning = True
onnx_chainer.export_testcase(model,
inputs,
test_dir,
output_grad=output_grad,
train=True,
output_names='loss')
def get_large_tests():
tests = []
def test(name, get_fun, kwargs=None):
# kwargs is used for testing
for dtype in (np.float32, np.float64):
output_grad = dtype == np.float64
backprop_str = '_backprop' if output_grad else ''
test_name = 'large_oc%s_%s_%s' % (backprop_str,
name, dtype.__name__)
if kwargs is None:
kwargs = {}
tests.append((test_name, get_fun, dtype, kwargs))
test('resnet50', large_models.get_resnet50)
test('resnet152', large_models.get_resnet152)
test('vgg16', large_models.get_vgg16, {'rtol': 2e-2, 'atol': 2e-2})
test('vgg19', large_models.get_vgg19, {'rtol': 2e-2, 'atol': 2e-2})
return tests
def main():
for test_name, get_fun, dtype, _ in get_large_tests():
create_test(test_name, get_fun, dtype)
if __name__ == '__main__':
main()
| 0
| 0
| 0
| 0
| 0
| 1,389
| 0
| -14
| 159
|
e4b2fb7d5295c37d01ec3eec6b1e99065efdc814
| 648
|
py
|
Python
|
setup.py
|
olavurmortensen/pedgraph
|
0e7c90bd0e4f63fa718b7af8c5c769f102bbdebc
|
[
"MIT"
] | null | null | null |
setup.py
|
olavurmortensen/pedgraph
|
0e7c90bd0e4f63fa718b7af8c5c769f102bbdebc
|
[
"MIT"
] | null | null | null |
setup.py
|
olavurmortensen/pedgraph
|
0e7c90bd0e4f63fa718b7af8c5c769f102bbdebc
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = ['neo4j-driver>=4.1.1',
'tqdm==4.48.2']
setuptools.setup(
name="pedgraph",
version="0.0.1",
author="lavur Mortensen",
author_email="[email protected]",
description="PedGraph -- Multidimensional network database for pedigree analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/olavurmortensen/pedgraph",
packages=setuptools.find_packages(),
install_requires=INSTALL_REQUIRES,
python_requires='>=3.6',
)
| 29.454545
| 86
| 0.697531
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = ['neo4j-driver>=4.1.1',
'tqdm==4.48.2']
setuptools.setup(
name="pedgraph",
version="0.0.1",
author="Ólavur Mortensen",
author_email="[email protected]",
description="PedGraph -- Multidimensional network database for pedigree analysis",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/olavurmortensen/pedgraph",
packages=setuptools.find_packages(),
install_requires=INSTALL_REQUIRES,
python_requires='>=3.6',
)
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a0bfa04c353c116af29022063768a993ed18a5fc
| 2,008
|
py
|
Python
|
Q380-v2.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
Q380-v2.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
Q380-v2.py
|
Linchin/python_leetcode_git
|
3d08ab04bbdbd2ce268f33c501fbb149662872c7
|
[
"MIT"
] | null | null | null |
"""
Q380
Insert Delete GetRandom O(1)
Medium
Array; Hash Table; Design.
Design a data structure that supports all following operations in average O(1) time.
1. insert(val): Inserts an item val to the set if not already present.
2. remove(val): Removes an item val from the set if present.
3. getRandom: Returns a random element from current set of elements.
Each element must have the same probability of being returned.
Your RandomizedSet object will be instantiated and called as such:
obj = RandomizedSet()
param_1 = obj.insert(val)
param_2 = obj.remove(val)
param_3 = obj.getRandom()
"""
randomSet = RandomizedSet()
print(randomSet.insert(1))
print(randomSet.insert(1))
print(randomSet.remove(2))
print(randomSet.insert(2))
print(randomSet.getRandom())
print(randomSet.getRandom())
| 24.790123
| 117
| 0.635956
|
"""
Q380
Insert Delete GetRandom O(1)
Medium
Array; Hash Table; Design.
Design a data structure that supports all following operations in average O(1) time.
1. insert(val): Inserts an item val to the set if not already present.
2. remove(val): Removes an item val from the set if present.
3. getRandom: Returns a random element from current set of elements.
Each element must have the same probability of being returned.
Your RandomizedSet object will be instantiated and called as such:
obj = RandomizedSet()
param_1 = obj.insert(val)
param_2 = obj.remove(val)
param_3 = obj.getRandom()
"""
import random
class RandomizedSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.array = []
self.hash = {}
def insert(self, val: int) -> bool:
"""
Inserts a value to the set. Returns true if the set did not already contain the specified element.
"""
if val in self.hash:
return False
self.array.append(val)
self.hash[val] = len(self.array) - 1
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the set. Returns true if the set contained the specified element.
"""
if val not in self.hash:
return False
ind = self.hash[val]
self.array[ind], self.array[-1] = self.array[-1], self.array[ind]
self.hash[self.array[ind]], self.hash[self.array[-1]] = self.hash[self.array[-1]], self.hash[self.array[ind]]
self.array.pop()
del self.hash[val]
return True
def getRandom(self) -> int:
"""
Get a random element from the set.
"""
        return self.array[int(random.random() * len(self.array))]
randomSet = RandomizedSet()
print(randomSet.insert(1))
print(randomSet.insert(1))
print(randomSet.remove(2))
print(randomSet.insert(2))
print(randomSet.getRandom())
print(randomSet.getRandom())
| 0
| 0
| 0
| 1,167
| 0
| 0
| 0
| -8
| 46
|
8e2c9128f4841ac28d470c168123443131eba11d
| 385
|
py
|
Python
|
RiskQuantLib/Security/Stock/stock.py
|
SyuyaMurakami/RiskQuantLib-Doc
|
2503befc24c2e422e51f8b9f468c8d8439e11c65
|
[
"MIT"
] | 1
|
2021-12-29T12:18:45.000Z
|
2021-12-29T12:18:45.000Z
|
RiskQuantLib/Security/Stock/stock.py
|
SyuyaMurakami/RiskQuantLib-Doc
|
2503befc24c2e422e51f8b9f468c8d8439e11c65
|
[
"MIT"
] | null | null | null |
RiskQuantLib/Security/Stock/stock.py
|
SyuyaMurakami/RiskQuantLib-Doc
|
2503befc24c2e422e51f8b9f468c8d8439e11c65
|
[
"MIT"
] | 1
|
2021-12-08T02:14:34.000Z
|
2021-12-08T02:14:34.000Z
|
#!/usr/bin/python
#coding = utf-8
| 20.263158
| 76
| 0.732468
|
#!/usr/bin/python
#coding = utf-8
from RiskQuantLib.Security.base import base
from RiskQuantLib.Set.Security.Stock.stock import setStock
class stock(base,setStock):
"""
stock is one of the five basic classes.
"""
def __init__(self,codeString,nameString,securityTypeString = 'Stock'):
super(stock,self).__init__(codeString,nameString,securityTypeString)
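# Editor's illustrative usage sketch (the ticker and name below are placeholders):
if __name__ == "__main__":
    s = stock("600000.SH", "Example Stock")  # securityTypeString defaults to 'Stock'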
| 0
| 0
| 0
| 218
| 0
| 0
| 0
| 59
| 68
|
d508595276fcec1298ee3044f7295cd76a1e39cb
| 333
|
py
|
Python
|
tests/test_basic.py
|
sbalnojan/tropo-mods
|
63ef70b84ff6d3fb2bbaeea94193e06e1fc64f63
|
[
"Apache-2.0"
] | null | null | null |
tests/test_basic.py
|
sbalnojan/tropo-mods
|
63ef70b84ff6d3fb2bbaeea94193e06e1fc64f63
|
[
"Apache-2.0"
] | null | null | null |
tests/test_basic.py
|
sbalnojan/tropo-mods
|
63ef70b84ff6d3fb2bbaeea94193e06e1fc64f63
|
[
"Apache-2.0"
] | null | null | null |
import tropo_mods.auto_sns as auto_sns
from troposphere import Template
test_sns_yaml = """\
Resources:
SNSTopic1:
Properties:
TopicName: my_new_topic
Type: AWS::SNS::Topic
"""
t = Template()
topic = auto_sns.AutoSNS(t, topic_name="my_new_topic", email="[email protected]")
assert test_sns_yaml == topic.print_to_yaml()
| 20.8125
| 75
| 0.72973
|
import tropo_mods.auto_sns as auto_sns
from troposphere import Template
test_sns_yaml = """\
Resources:
SNSTopic1:
Properties:
TopicName: my_new_topic
Type: AWS::SNS::Topic
"""
t = Template()
topic = auto_sns.AutoSNS(t, topic_name="my_new_topic", email="[email protected]")
assert test_sns_yaml == topic.print_to_yaml()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2e4a45056766bfcabf520edef026dcde56f9a911
| 23,761
|
py
|
Python
|
jade2/pymol_jade/PyMolScriptWriter.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T21:52:23.000Z
|
2019-12-23T21:52:23.000Z
|
jade2/pymol_jade/PyMolScriptWriter.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | null | null | null |
jade2/pymol_jade/PyMolScriptWriter.py
|
RosettaCommons/jade2
|
40affc7c4e0f1f6ee07030e72de284e3484946e7
|
[
"BSD-3-Clause"
] | 2
|
2021-11-13T01:34:15.000Z
|
2021-11-13T01:34:34.000Z
|
#!/usr/bin/python
#Author: Jared Adolf-Bryfogle
import sys
from collections import defaultdict
from jade2.basic.threading.Threader import Threader
from jade2.basic.structure import Structure
#from jade2.antibody import util as ab_util
########################################################################################################################
## Helper Functions
########################################################################################################################
def run_pymol_script(script_path, run_gui = False, delete_script = False, parellel_process = True):
"""
    Run the script at the given path.
"""
if not os.path.exists(script_path):
if os.path.exists(os.getcwd()+script_path):
script_path = os.getcwd()+script_path
elif os.path.exists(os.getcwd()+"/"+script_path):
script_path = os.getcwd()+"/"+script_path
else:
raise Exception(script_path +" does not exist...")
if run_gui:
cmd = "pymol "+script_path
else:
cmd = "pymol -c "+script_path
print("Running: "+cmd)
if parellel_process:
threader = Threader()
#threader.run_system_command(cmd)
threader.run_functions([lambda: os.system(cmd)])
else:
os.system(cmd)
if delete_script:
os.remove(script_path)
def make_pymol_session_on_top(pdb_path_list, load_as_list, script_dir, session_dir, out_name, top_num = None, native_path = None, antibody = True):
"""
Make a pymol session on a set of decoys. Usually an ordered decoy list.
:param top_dir:
:param pdb_path_list: List of PDB Paths
:param load_as_list: List of PDB Path names for pymol.
:param outdir:
:param out_name:
:param top_num:
:param native_path:
:return:
"""
if top_num:
pse_path = session_dir+"/"+out_name+"_top_"+str(top_num)+".pse"
else:
pse_path = session_dir+"/"+out_name+"_all"+".pse"
if os.path.exists(pse_path):
print("Overriding PSE: "+pse_path)
#return
if len(pdb_path_list) == 0:
print("PDB list path empty. Skipping creation of pymol session")
return
scripter = PyMolScriptWriter(script_dir)
if native_path:
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
scripter.add_load_pdbs(pdb_path_list, load_as_list)
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_show("cartoon")
scripter.add_line("center")
scripter.add_line("hide lines")
scripter.add_line("group models, model*")
if antibody:
color_cdrs_path = get_bin_path()+"/color_cdrs.pml"
scripter.add_line("@"+color_cdrs_path)
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
run_pymol_script(script_dir+"/"+"load_align_top.pml")
def make_pymol_session_on_top_ab_include_native_cdrs(pdb_path_list, load_as_list, script_dir, session_dir, out_name, cdr_dir, top_num = None, native_path = None):
"""
Make a pymol session on a set of decoys. These decoys should have REMARK CDR_origin. These origin pdbs will be aligned and included in the pymol session
:param top_dir:
:param pdb_path_list: List of PDB Paths
:param load_as_list: List of PDB Path names for pymol.
:param cdr_dir: The directory of antibody CDRs from PyIgClassify.
:return:
"""
if top_num:
pse_path = session_dir+"/"+out_name+"_top_"+str(top_num)+".pse"
else:
pse_path = session_dir+"/"+out_name+"_all"+".pse"
if os.path.exists(pse_path):
print("Overriding PSE: "+pse_path)
#return
if len(pdb_path_list) == 0:
print("PDB list path empty. Skipping creation of pymol session")
return
scripter = PyMolScriptWriter(script_dir)
if native_path:
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
scripter.add_load_pdbs(pdb_path_list, load_as_list)
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_line("group models, model*")
color_cdrs_path = get_bin_path()+"/color_cdrs.pml"
scripter.add_line("@"+color_cdrs_path)
#For each Model, we need to load and search for origin
origin_names = defaultdict(list)
sorted_origin_names = []
for pair in zip(pdb_path_list, load_as_list):
cdrs_to_align = defaultdict()
pdb_path = pair[0]
pdb_name = pair[1]
INFILE = open_file(pdb_path)
for line in INFILE:
line = line.strip()
if not line: continue
lineSP = line.split()
# REMARK L3_origin 5alcL_L3_0001 #
if len(lineSP) == 3 and lineSP[0] == "REMARK" and re.search('origin', lineSP[1]):
print(line)
cdrs_to_align[lineSP[1]] = lineSP[2]
else:
continue
INFILE.close()
#Group: L3_origin_pdb_id_model
for origin_type in cdrs_to_align:
            model_num = os.path.basename(pdb_name).split("_")[1]; # model_1_scoretype_score
cdr_name = origin_type.split("_")[0]; #L3_origin
cdr_object = Structure.CDR(cdr_name)
pdbid = cdrs_to_align[ origin_type ].split("_")[0]; #pdbid_cdr_0001
cdr_path = cdr_dir+"/"+"_".join(cdrs_to_align[origin_type].split("_")[0:-1])+".pdb"
print("CDR Path: "+ cdr_path)
if not os.path.exists(cdr_path): sys.exit("CDR Path does not exist!! "+cdr_path)
stem_cdr_name = '_'.join(["ste", "mod", model_num, pdbid, cdr_name])
cdr_cdr_name = '_'.join(["cdr", "mod", model_num, pdbid, cdr_name])
all_cdr_name = '_'.join(["all", "mod", model_num, pdbid, cdr_name])
model_group = "_".join(["model", model_num, cdr_name])
sorted_origin_names.append(model_group)
origin_names[model_group].append(stem_cdr_name)
origin_names[model_group].append(cdr_cdr_name)
origin_names[model_group].append(all_cdr_name)
scripter.add_load_pdb(cdr_path, stem_cdr_name)
scripter.add_load_pdb(cdr_path, cdr_cdr_name)
scripter.add_load_pdb(cdr_path, all_cdr_name)
#SuperImpose stem
#overhang_sele = ab_util.get_overhang_sele(scripter, cdr_object, 3)
#scripter.add_superimpose(" ".join([stem_cdr_name, "and", overhang_sele]), " ".join([pdb_name, "and", overhang_sele]))
#SuperImpose CDR-only (no Stem!)
#cdr_only_sele = ab_util.get_all_cdr_sele(cdr_object, stem=0)
#scripter.add_superimpose(" ".join([cdr_cdr_name, "and", cdr_only_sele]), " ".join([pdb_name, "and", cdr_only_sele]))
#SuperImpose Stem+CDR
#cdr_and_stem_sele = ab_util.get_all_cdr_sele(cdr_object, stem=3)
#scripter.add_superimpose(" ".join([all_cdr_name, "and", cdr_and_stem_sele]), " ".join([pdb_name, "and", cdr_and_stem_sele]))
scripter.add_show("cartoon")
scripter.add_line("zoom full_paratope")
scripter.add_line("hide lines")
for origin_type in sorted_origin_names:
scripter.add_line("group "+origin_type+", "+" ".join(origin_names[origin_type]))
for object_name in origin_names[origin_type]:
scripter.add_line("disable "+origin_type); #Make them not show when we load pymol
scripter.add_line("group origin_cdrs, "+ " ".join(sorted_origin_names))
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
run_pymol_script(script_dir+"/"+"load_align_top.pml")
def make_pymol_session_on_top_scored(pdbpaths_scores, script_dir, session_dir, out_name, top_num = -1, native_path = None, antibody=True,
parellel = True,
super = "",
run_pymol = True,
prefixes=[],
copy_models=True,
align = True,
out_prefix = ""):
"""
Make a pymol session on a set of decoys with a tuple of [[score, pdb], ... ]
    Optionally, it can be a 3-element tuple with the model name as the last entry:
[[score, pdb, model_name], ... ]
if run_pymol is False, will not run pymol.
Pymol names will be: model_n_RosettaModelNumber_score
Score will be truncated to two decimal places.
Returns configured PyMol Scripter for extra use.
:param pdbpaths_scores: tuple of [[score, pdb], ... ]
:param script_dir: Path to output PyMol script
:param session_dir: Path to output Session
:param out_name: name of the Pymol session
:param prefixes: Optional - Prefixes to use for model names. List. Must be indexed with pdbpaths_scores
:param top_num: Optional - Only output TOP N models
:param native_path: Optional - Path to any input native to add to pymol session
    :param parellel: Optional - Run in parallel (so that many pymol sessions can be created at once)
    :param super: Optional - Superimpose on THIS particular selection instead of aligning all models.
:param run_pymol: Optional - Run Pymol using script? Default true
:param out_prefix: Optional - Prefix to use for copied output models.
    :rtype: (PyMolScriptWriter, dict) - the scripter and a dict mapping each decoy basename to its new model name
"""
names = defaultdict()
out_name = out_name.replace(".pse", "")
if not os.path.exists(session_dir):
os.mkdir(session_dir)
if top_num != -1:
pse_path = session_dir+"/"+out_name+"_top_"+str(top_num)+".pse"
else:
pse_path = session_dir+"/"+out_name+"_all"+".pse"
if os.path.exists(pse_path):
print("Overriding PSE: "+pse_path)
#return
if len(pdbpaths_scores) == 0:
print("PDB list path empty. Skipping creation of pymol session")
return
print(pdbpaths_scores)
scripter = PyMolScriptWriter(script_dir)
print("Outputting script to: " +script_dir)
if native_path and os.path.exists(native_path):
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
else:
print("No native path given or it does not exist...")
i = 1
for indx, score_pdb in enumerate(pdbpaths_scores):
print(indx)
print(repr(score_pdb))
decoy = get_decoy_path(score_pdb[1])
print("Decoy path: "+decoy)
ext = get_decoy_extension(decoy)
print(repr(decoy))
model_name = score_pdb[1].split("_")[-1]
if len(score_pdb) == 3:
model_name = score_pdb[2]
out_model_name = "model_"+repr(i)+"_"+model_name+"_%.2f"%(score_pdb[0])
out_model_name = out_model_name.replace(ext, "")
if prefixes:
out_model_name = prefixes[indx]+out_model_name
scripter.add_load_pdb(decoy, out_model_name)
names[os.path.basename(score_pdb[1])] = out_model_name
if (copy_models):
out = out_model_name.replace(ext, "")+ext
if out_prefix:
out = out_prefix.strip('_')+"_"+out
os.system('cp '+decoy+' '+session_dir+"/"+out)
i+=1
if super:
scripter.add_superimpose_all_to(scripter.get_final_names()[0], super, super)
elif align:
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_show("cartoon")
scripter.add_line("center")
scripter.add_line("hide lines")
if native_path:
scripter.add_line("group models, *model*")
if antibody:
color_cdrs_path = get_bin_path()+"/pymol_scripts/color_cdrs.pml"
scripter.add_line("@"+color_cdrs_path)
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
if run_pymol:
run_pymol_script(script_dir+"/"+"load_align_top.pml", parellel_process=parellel)
return scripter,names
| 35.623688
| 162
| 0.577543
|
#!/usr/bin/python
#Author: Jared Adolf-Bryfogle
import sys
from collections import defaultdict
from jade2.basic.path import *
from jade2.basic.threading.Threader import Threader
from jade2.basic.structure import Structure
from jade2.basic.path import *
#from jade2.antibody import util as ab_util
class PyMolScriptWriter:
"""
Class to help build PyMol scripts using arbitrary lists of PDBs.
Example for loading all top models into PyMol, aligning them to the native, and labeling them:
scripter = PyMolScriptWriter(outpath)
if native_path:
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
scripter.add_load_pdbs(pdb_path_list, load_as_list)
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_show("cartoon")
scripter.add_line("center")
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
run_pymol_script(top_dir+"/"+"load_align_top.pml")
"""
def __init__(self, outdir):
self.base_dir = os.path.split(os.path.abspath(__file__))[0]
self.set_outdir(outdir)
self.reset_script()
self.colors = []
self.color_types = defaultdict()
self.vis_options = ["cartoon", "spheres", "lines", "dots", "sticks", "surface", "mesh", "nonbonded", "(hydro)"]
self._read_colors(get_database_path()+"/pymol/"+"simple_pymol_colors.txt")
self.pdbs = []
self.final_names = []
self.grouped_pdbs = defaultdict()
def _read_colors(self, path):
"""
Reads PyMOl colors text file. Loads colors.
"""
INFILE = open(path, 'r')
color_type = ""
for line in INFILE:
line = line.strip()
if not line: continue
if line.startswith("#"): continue
lineSP = line.split()
if lineSP[0] == "TYPE":
color_type = lineSP[1].lower()
self.color_types[color_type] = []
continue
self.colors.append(lineSP[0])
self.color_types[color_type].append(lineSP[0])
INFILE.close()
print("Done reading PyMol color types")
def __str__(self):
return "\n".join(self.script_lines)
def __repr__(self):
return "\n".join(self.script_lines)
def set_outdir(self, outdir):
if outdir:
self.output_dir = outdir
if not os.path.exists(outdir):
os.mkdir(outdir)
else:
self.output_dir = os.getcwd()
def write_script(self, fname=None):
if not fname:
fname = "pml_script.pml"
OUTFILE = open(self.output_dir+"/"+fname, 'w')
for line in self.script_lines:
OUTFILE.write(line+"\n")
OUTFILE.close()
return self.output_dir+"/"+fname
def save_script(self, fname = None):
return self.write_script(fname)
def reset_script(self):
self.script_lines = []
def clear(self):
self.reset_script()
self.pdbs = []
self.final_names = []
def print_script(self):
print(str(self))
####################################################################################################
## Helpful Functions
###################################################################################################
def get_color_types(self):
return list(self.color_types.keys())
def get_vis_types(self):
return self.vis_options
def get_colors_of_type(self, color_type):
color_type = color_type.lower()
return self.color_types[color_type]
def get_final_names(self):
"""
Get the final names PyMOL will use after loading PDBs.
"""
return self.final_names
def get_sele(self, chain, resid_array):
"""
        Get a selection from an array of residue IDs and a particular chain.
        If a residue ID is a two-element tuple, add a selection spanning from the first to the last element.
"""
def get_entry(resid):
if type(resid) == tuple:
if len(resid) == 2:
start = resid[0]
end = resid[1]
entry = "resi "+repr(start)+"-"+repr(end)
return entry
else:
raise Exception("Tuple for PyMolScriptWriter must be length 2!")
else:
entry = "resi "+repr(resid)
return entry
if len(resid_array) == 1:
sele = "chain "+chain+" & "+get_entry(resid_array[0])
else:
sele = "chain "+chain+" & ( "+get_entry(resid_array[0])
for resi in resid_array[1:]:
sele = sele +" | "+get_entry(resi)
sele = sele+" )"
return sele
####################################################################################################
## Build PyMol Script:
###################################################################################################
def add_line(self, line):
"""
Add an arbitrary line to the script
"""
self.script_lines.append(line)
def add_save_session(self, session_path):
"""
Add a line to save the session to a FULL path
"""
if not re.search(".pse", session_path): session_path = session_path+".pse"
self.script_lines.append("cmd.save('"+session_path+"')")
####################################################################################################
## Load PDBs and Groups:
###################################################################################################
def add_load_pdb(self, pdb_path, load_as = None, group = None):
"""
Add line to load a PDB Path into PyMol
Optionally load them as a particular name
Will then set the final names PyMol uses to the object.
"""
#print "PDB"+repr(pdb_path)
self.pdbs.append(pdb_path)
name = os.path.basename(pdb_path)
name = "".join(name.split(".")[0:-1])
basenameSP = name.split('.')
if re.search(".pdb.gz", name):
basename = "".join(basenameSP[:len(basenameSP)-2])
else:
basename = "".join(basenameSP[:len(basenameSP)-1])
if not load_as:
self.final_names.append(name)
self.script_lines.append("load "+pdb_path)
else:
self.final_names.append(load_as)
self.script_lines.append("load "+pdb_path+", "+load_as)
if group:
self.add_group_object(self.final_names[-1], group)
def add_load_pdbs(self, pdb_paths, load_as = None, group = None):
"""
Add lines to load the list of PDB paths into PyMol
Optionally load them as a particular name
Will then set the final names PyMol uses to the object.
"""
i = 0
for path in pdb_paths:
print(path)
if load_as:
self.add_load_pdb(path, load_as = load_as[i], group = group)
else:
self.add_load_pdb(path, group = group)
i+=1
def add_group_object(self, name, new_group_name):
"""
Group a single object to another. Useful for meta-groups.
"""
self.script_lines.append("group "+new_group_name+", "+name)
def add_group_objects(self, names, new_group_name):
"""
Group a set of pre-loaded names to the new group.
"""
names_str = " ".join(names)
self.script_lines.append("group "+new_group_name+", "+names_str)
def add_select(self, name, sele, group = None):
self.script_lines.append("select "+name+","+sele)
if group:
self.add_group_object(name, group)
####################################################################################################
## Alignment :
###################################################################################################
def add_superimpose(self, sele1, sele2):
"""
        Superimpose two selections using the super command.
"""
self.add_line("super "+sele1+", "+sele2)
def add_superimpose_all_to(self, model, sele1 , sele2):
for name in self.final_names:
if name !=model:
self.add_superimpose(name+" and "+sele1, model+" and "+sele2)
def add_align_all(self, sele1 = "", sele2="", limit_to_bb=True, pair_fit = False):
"""
Align all to the first model
"""
for name in self.final_names[1:]:
self.add_align_to(name, self.final_names[0], sele1, sele2, limit_to_bb, pair_fit)
def add_align_all_to(self, model, sele1 = "", sele2="", limit_to_bb=True, pair_fit = False):
"""
Align all to a particular model
"""
for name in self.final_names:
if name !=model:
self.add_align_to(name, model, sele1, sele2, limit_to_bb)
def add_align_to(self, model1, model2, sele1="", sele2 = "", limit_to_bb = True, pair_fit = False):
"""
Align one model to another, optionally specifying a selection.
Recommended to use superimpose instead
"""
m1 = get_decoy_name(model1)
m2 = get_decoy_name(model2)
align = "align "
if pair_fit:
align = "pair_fit "
bb = ""
if limit_to_bb:
bb = " & name n+ca+c+o "
if not sele1:
self.script_lines.append(align + m1 + bb+","+m2 + bb)
else:
self.script_lines.append(align + m1+" & "+sele1+bb+", "+m2 +" &"+sele2+ bb)
####################################################################################################
## Misc. :
###################################################################################################
def add_show(self, vis_type, sele=""):
"""
Show a representation. Optionally with a particular selection
"""
if not vis_type in self.vis_options:
print(("Type "+vis_type+" not a known vis_option. Options are: \n"+repr(self.vis_options)))
if not sele:
self.script_lines.append("show "+vis_type)
else:
self.script_lines.append("show "+vis_type+", "+sele)
def add_center(self, sele = None):
if sele:
self.add_line("center "+sele)
else:
self.add_line("center")
def add_hide(self, vis_type, sele=""):
"""
Hide a representation. Optionally with a particular selection.
"""
        if not vis_type in self.vis_options:
print(("Type "+vis_type+" not a known vis_option. Options are: \n"+repr(self.vis_options)))
if not sele:
self.script_lines.append("hide "+vis_type)
else:
self.script_lines.append("hide "+vis_type+", "+sele)
def add_color(self, sele, color):
"""
Add color to a selection.
sele: PyMol Selection
color: Particular color.
See Also self.colors
"""
if not color in self.colors:
sys.exit("Color not understood by PyMol: "+color+" See simple_pymol_colors for list of acceptable colors")
self.script_lines.append("color "+color+", "+sele)
def add_antibody_script(self):
"""
Add running the color cdrs pymol script. Antibody must be in AHO numbering
"""
color_cdrs_path = get_bin_path()+"/color_cdrs.pml"
self.add_line("@"+color_cdrs_path)
def run_script(self, script_outname = "pml_script.pml", delete_script = True, parellel_process = False):
"""
Save and Run the Pymol script
:param script_outname: str
"""
run_pymol_script(self.save_script(script_outname), delete_script = delete_script, parellel_process=parellel_process)
########################################################################################################################
## Helper Functions
########################################################################################################################
def run_pymol_script(script_path, run_gui = False, delete_script = False, parellel_process = True):
"""
    Run the script at the given path.
"""
if not os.path.exists(script_path):
if os.path.exists(os.getcwd()+script_path):
script_path = os.getcwd()+script_path
elif os.path.exists(os.getcwd()+"/"+script_path):
script_path = os.getcwd()+"/"+script_path
else:
raise Exception(script_path +" does not exist...")
if run_gui:
cmd = "pymol "+script_path
else:
cmd = "pymol -c "+script_path
print("Running: "+cmd)
if parellel_process:
threader = Threader()
#threader.run_system_command(cmd)
threader.run_functions([lambda: os.system(cmd)])
else:
os.system(cmd)
if delete_script:
os.remove(script_path)
def make_pymol_session_on_top(pdb_path_list, load_as_list, script_dir, session_dir, out_name, top_num = None, native_path = None, antibody = True):
"""
Make a pymol session on a set of decoys. Usually an ordered decoy list.
:param top_dir:
:param pdb_path_list: List of PDB Paths
:param load_as_list: List of PDB Path names for pymol.
:param outdir:
:param out_name:
:param top_num:
:param native_path:
:return:
"""
if top_num:
pse_path = session_dir+"/"+out_name+"_top_"+str(top_num)+".pse"
else:
pse_path = session_dir+"/"+out_name+"_all"+".pse"
if os.path.exists(pse_path):
print("Overriding PSE: "+pse_path)
#return
if len(pdb_path_list) == 0:
print("PDB list path empty. Skipping creation of pymol session")
return
scripter = PyMolScriptWriter(script_dir)
if native_path:
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
scripter.add_load_pdbs(pdb_path_list, load_as_list)
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_show("cartoon")
scripter.add_line("center")
scripter.add_line("hide lines")
scripter.add_line("group models, model*")
if antibody:
color_cdrs_path = get_bin_path()+"/color_cdrs.pml"
scripter.add_line("@"+color_cdrs_path)
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
run_pymol_script(script_dir+"/"+"load_align_top.pml")
def make_pymol_session_on_top_ab_include_native_cdrs(pdb_path_list, load_as_list, script_dir, session_dir, out_name, cdr_dir, top_num = None, native_path = None):
"""
Make a pymol session on a set of decoys. These decoys should have REMARK CDR_origin. These origin pdbs will be aligned and included in the pymol session
:param top_dir:
:param pdb_path_list: List of PDB Paths
:param load_as_list: List of PDB Path names for pymol.
:param cdr_dir: The directory of antibody CDRs from PyIgClassify.
:return:
"""
if top_num:
pse_path = session_dir+"/"+out_name+"_top_"+str(top_num)+".pse"
else:
pse_path = session_dir+"/"+out_name+"_all"+".pse"
if os.path.exists(pse_path):
print("Overriding PSE: "+pse_path)
#return
if len(pdb_path_list) == 0:
print("PDB list path empty. Skipping creation of pymol session")
return
scripter = PyMolScriptWriter(script_dir)
if native_path:
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
scripter.add_load_pdbs(pdb_path_list, load_as_list)
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_line("group models, model*")
color_cdrs_path = get_bin_path()+"/color_cdrs.pml"
scripter.add_line("@"+color_cdrs_path)
#For each Model, we need to load and search for origin
origin_names = defaultdict(list)
sorted_origin_names = []
for pair in zip(pdb_path_list, load_as_list):
cdrs_to_align = defaultdict()
pdb_path = pair[0]
pdb_name = pair[1]
INFILE = open_file(pdb_path)
for line in INFILE:
line = line.strip()
if not line: continue
lineSP = line.split()
# REMARK L3_origin 5alcL_L3_0001 #
if len(lineSP) == 3 and lineSP[0] == "REMARK" and re.search('origin', lineSP[1]):
print(line)
cdrs_to_align[lineSP[1]] = lineSP[2]
else:
continue
INFILE.close()
#Group: L3_origin_pdb_id_model
for origin_type in cdrs_to_align:
            model_num = os.path.basename(pdb_name).split("_")[1]; # model_1_scoretype_score
cdr_name = origin_type.split("_")[0]; #L3_origin
cdr_object = Structure.CDR(cdr_name)
pdbid = cdrs_to_align[ origin_type ].split("_")[0]; #pdbid_cdr_0001
cdr_path = cdr_dir+"/"+"_".join(cdrs_to_align[origin_type].split("_")[0:-1])+".pdb"
print("CDR Path: "+ cdr_path)
if not os.path.exists(cdr_path): sys.exit("CDR Path does not exist!! "+cdr_path)
stem_cdr_name = '_'.join(["ste", "mod", model_num, pdbid, cdr_name])
cdr_cdr_name = '_'.join(["cdr", "mod", model_num, pdbid, cdr_name])
all_cdr_name = '_'.join(["all", "mod", model_num, pdbid, cdr_name])
model_group = "_".join(["model", model_num, cdr_name])
sorted_origin_names.append(model_group)
origin_names[model_group].append(stem_cdr_name)
origin_names[model_group].append(cdr_cdr_name)
origin_names[model_group].append(all_cdr_name)
scripter.add_load_pdb(cdr_path, stem_cdr_name)
scripter.add_load_pdb(cdr_path, cdr_cdr_name)
scripter.add_load_pdb(cdr_path, all_cdr_name)
#SuperImpose stem
#overhang_sele = ab_util.get_overhang_sele(scripter, cdr_object, 3)
#scripter.add_superimpose(" ".join([stem_cdr_name, "and", overhang_sele]), " ".join([pdb_name, "and", overhang_sele]))
#SuperImpose CDR-only (no Stem!)
#cdr_only_sele = ab_util.get_all_cdr_sele(cdr_object, stem=0)
#scripter.add_superimpose(" ".join([cdr_cdr_name, "and", cdr_only_sele]), " ".join([pdb_name, "and", cdr_only_sele]))
#SuperImpose Stem+CDR
#cdr_and_stem_sele = ab_util.get_all_cdr_sele(cdr_object, stem=3)
#scripter.add_superimpose(" ".join([all_cdr_name, "and", cdr_and_stem_sele]), " ".join([pdb_name, "and", cdr_and_stem_sele]))
scripter.add_show("cartoon")
scripter.add_line("zoom full_paratope")
scripter.add_line("hide lines")
for origin_type in sorted_origin_names:
scripter.add_line("group "+origin_type+", "+" ".join(origin_names[origin_type]))
for object_name in origin_names[origin_type]:
scripter.add_line("disable "+origin_type); #Make them not show when we load pymol
scripter.add_line("group origin_cdrs, "+ " ".join(sorted_origin_names))
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
run_pymol_script(script_dir+"/"+"load_align_top.pml")
def make_pymol_session_on_top_scored(pdbpaths_scores, script_dir, session_dir, out_name, top_num = -1, native_path = None, antibody=True,
parellel = True,
super = "",
run_pymol = True,
prefixes=[],
copy_models=True,
align = True,
out_prefix = ""):
"""
Make a pymol session on a set of decoys with a tuple of [[score, pdb], ... ]
    Optionally, it can be a 3-element tuple with the model name as the last entry:
[[score, pdb, model_name], ... ]
if run_pymol is False, will not run pymol.
Pymol names will be: model_n_RosettaModelNumber_score
Score will be truncated to two decimal places.
Returns configured PyMol Scripter for extra use.
:param pdbpaths_scores: tuple of [[score, pdb], ... ]
:param script_dir: Path to output PyMol script
:param session_dir: Path to output Session
:param out_name: name of the Pymol session
:param prefixes: Optional - Prefixes to use for model names. List. Must be indexed with pdbpaths_scores
:param top_num: Optional - Only output TOP N models
:param native_path: Optional - Path to any input native to add to pymol session
    :param parellel: Optional - Run in parallel (so that many pymol sessions can be created at once)
    :param super: Optional - Superimpose on THIS particular selection instead of aligning all models.
:param run_pymol: Optional - Run Pymol using script? Default true
:param out_prefix: Optional - Prefix to use for copied output models.
    :rtype: (PyMolScriptWriter, dict) - the scripter and a dict mapping each decoy basename to its new model name
"""
names = defaultdict()
out_name = out_name.replace(".pse", "")
if not os.path.exists(session_dir):
os.mkdir(session_dir)
if top_num != -1:
pse_path = session_dir+"/"+out_name+"_top_"+str(top_num)+".pse"
else:
pse_path = session_dir+"/"+out_name+"_all"+".pse"
if os.path.exists(pse_path):
print("Overriding PSE: "+pse_path)
#return
if len(pdbpaths_scores) == 0:
print("PDB list path empty. Skipping creation of pymol session")
return
print(pdbpaths_scores)
scripter = PyMolScriptWriter(script_dir)
print("Outputting script to: " +script_dir)
if native_path and os.path.exists(native_path):
scripter.add_load_pdb(native_path, "native_"+os.path.basename(native_path))
else:
print("No native path given or it does not exist...")
i = 1
for indx, score_pdb in enumerate(pdbpaths_scores):
print(indx)
print(repr(score_pdb))
decoy = get_decoy_path(score_pdb[1])
print("Decoy path: "+decoy)
ext = get_decoy_extension(decoy)
print(repr(decoy))
model_name = score_pdb[1].split("_")[-1]
if len(score_pdb) == 3:
model_name = score_pdb[2]
out_model_name = "model_"+repr(i)+"_"+model_name+"_%.2f"%(score_pdb[0])
out_model_name = out_model_name.replace(ext, "")
if prefixes:
out_model_name = prefixes[indx]+out_model_name
scripter.add_load_pdb(decoy, out_model_name)
names[os.path.basename(score_pdb[1])] = out_model_name
if (copy_models):
out = out_model_name.replace(ext, "")+ext
if out_prefix:
out = out_prefix.strip('_')+"_"+out
os.system('cp '+decoy+' '+session_dir+"/"+out)
i+=1
if super:
scripter.add_superimpose_all_to(scripter.get_final_names()[0], super, super)
elif align:
scripter.add_align_all_to(scripter.get_final_names()[0])
scripter.add_show("cartoon")
scripter.add_line("center")
scripter.add_line("hide lines")
if native_path:
scripter.add_line("group models, *model*")
if antibody:
color_cdrs_path = get_bin_path()+"/pymol_scripts/color_cdrs.pml"
scripter.add_line("@"+color_cdrs_path)
scripter.add_save_session(pse_path)
scripter.write_script("load_align_top.pml")
if run_pymol:
run_pymol_script(script_dir+"/"+"load_align_top.pml", parellel_process=parellel)
return scripter,names
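# Editor's illustrative sketch (not part of the original module). The decoy paths and
# scores below are placeholders; the call shows the expected [[score, pdb], ...] input
# and only writes the .pml script without launching PyMol.
if __name__ == "__main__":
    scored_decoys = [
        [-250.75, "decoys/model_0001.pdb"],
        [-248.10, "decoys/model_0002.pdb"],
    ]
    make_pymol_session_on_top_scored(
        scored_decoys,
        script_dir="pml_scripts",
        session_dir="sessions",
        out_name="top_models",
        native_path=None,
        antibody=False,
        copy_models=False,
        run_pymol=False,  # only write the script; do not start a PyMol process here
    )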
| 0
| 0
| 0
| 11,732
| 0
| 0
| 0
| 18
| 68
|
64daa544c850e2eafe0501beb13ccb026f2670ec
| 75
|
py
|
Python
|
server/conftest.py
|
njbrunner/neighbor
|
eeffc8ff7adb4eaa1889ab058d6cd1fc95b80e1e
|
[
"MIT"
] | 1
|
2020-04-06T03:31:25.000Z
|
2020-04-06T03:31:25.000Z
|
server/conftest.py
|
njbrunner/covid-19
|
eeffc8ff7adb4eaa1889ab058d6cd1fc95b80e1e
|
[
"MIT"
] | 16
|
2020-04-17T00:44:01.000Z
|
2020-05-01T20:39:31.000Z
|
server/conftest.py
|
njbrunner/neighbor
|
eeffc8ff7adb4eaa1889ab058d6cd1fc95b80e1e
|
[
"MIT"
] | null | null | null |
"""This module alters the testing system path to provide access to app."""
| 37.5
| 74
| 0.746667
|
"""This module alters the testing system path to provide access to app."""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ab6a6ccefbec9268443801ac748475636e4151cb
| 7,072
|
py
|
Python
|
sleeplearning/lib/models/granger_amoe.py
|
a1247418/MT18_LH_human-sleep-classification
|
c4a40571390aaa14b1cc8a458100e21252fe05d2
|
[
"MIT"
] | 2
|
2018-11-16T19:39:32.000Z
|
2018-11-20T03:38:58.000Z
|
sleeplearning/lib/models/granger_amoe.py
|
a1247418/MT18_LH_human-sleep-classification
|
c4a40571390aaa14b1cc8a458100e21252fe05d2
|
[
"MIT"
] | 1
|
2019-04-03T07:39:48.000Z
|
2019-04-03T07:39:48.000Z
|
sleeplearning/lib/models/granger_amoe.py
|
a1247418/MT18_LH_human-sleep-classification
|
c4a40571390aaa14b1cc8a458100e21252fe05d2
|
[
"MIT"
] | 1
|
2019-03-04T15:46:24.000Z
|
2019-03-04T15:46:24.000Z
|
import os
import sys
root_dir = os.path.abspath(os.path.join(os.path.dirname('__file__'), '..'))
sys.path.insert(0, root_dir)
from pymongo import MongoClient
from cfg.config import mongo_url
client = MongoClient(mongo_url)
db = client.sacred
# db.collection_names(include_system_collections=False)
files = db.fs.files
| 35.898477
| 102
| 0.579893
|
import os
import sys
import gridfs
from sleeplearning.lib.models.single_chan_expert import SingleChanExpert
root_dir = os.path.abspath(os.path.join(os.path.dirname('__file__'), '..'))
sys.path.insert(0, root_dir)
from pymongo import MongoClient
from torch import nn
from torch.nn.init import xavier_normal_ as xavier_normal
from cfg.config import mongo_url
import torch
import torch.nn.functional as F
import sleeplearning.lib.base
client = MongoClient(mongo_url)
db = client.sacred
# db.collection_names(include_system_collections=False)
files = db.fs.files
def restore_clf_from_runid(db, id: int):
import tempfile
fs = gridfs.GridFS(db)
model_object = files.find_one({"filename": "artifact://runs/"+str(
id)+"/checkpoint.pth.tar"})
myfile = fs.get(model_object['_id'])
temp_path = os.environ.get('TMPDIR') if 'TMPDIR' in os.environ else \
tempfile.mkdtemp()
model_path = os.path.join(temp_path, 'tmp_model')
with open(model_path, 'wb') as f:
f.write(myfile.read())
clf = sleeplearning.lib.base.Base()
clf.restore(model_path)
return clf
class Conv2dWithBn(nn.Module):
def __init__(self, input_shape, filter_size, n_filters, stride, wd=0):
super(Conv2dWithBn, self).__init__()
self.conv1 = nn.Conv2d(input_shape, n_filters, kernel_size=filter_size,
stride=stride, bias=False,
padding=((filter_size[0] - 1) // 2, (filter_size[
1] - 1) // 2))
# fake 'SAME'
self.relu = nn.ReLU()
self.conv1_bn = nn.BatchNorm2d(n_filters)
# self.weights_init()
def weights_init(m):
for _, mi in m._modules.items():
            if isinstance(mi, nn.Conv2d) or isinstance(mi, nn.Linear):
xavier_normal(mi.weight.data)
if mi.bias is not None:
xavier_normal(mi.bias.data)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.conv1_bn(x)
return x
class GrangerAmoe(nn.Module):
def __init__(self, ms: dict):
super(GrangerAmoe, self).__init__()
self.dropout = ms['dropout']
self.num_classes = ms['nclasses']
if 'expert_ids' in ms.keys():
experts = []
for id in ms['expert_ids']:
clf = restore_clf_from_runid(db, id)
for param in clf.model.parameters():
param.requires_grad = False
experts.append(clf)
ms['experts'] = experts
emb_dim = list(experts[0].model.children())[-1].in_features
self.nchannels = ms['input_dim'][0]
assert (self.nchannels == len(experts))
self.experts_emb = [e.model for e in ms['experts']]
self.experts_emb = [nn.Sequential(*list(expert.children())[:-1]) for
expert in self.experts_emb]
self.experts_emb = nn.ModuleList(self.experts_emb).eval()
self.emb_transf_net = [nn.Sequential(
nn.Linear(emb_dim, emb_dim, bias=True),
nn.ReLU(),
) for _ in range(len(self.experts_emb))]
self.emb_transf_net = nn.ModuleList(self.emb_transf_net)
# transforms
self.u = nn.ModuleList([nn.Sequential(
nn.Linear(emb_dim * self.nchannels, emb_dim, bias=True),
nn.ReLU(),
) for _ in range(len(self.experts_emb))])
# initialize the attention vectors
self.ue = nn.Parameter(torch.zeros(self.nchannels, emb_dim))
torch.nn.init.xavier_uniform_(self.ue)
self.fcn = nn.Sequential(
nn.Linear(self.nchannels * emb_dim, self.nchannels * emb_dim // 2),
nn.ReLU(),
nn.Dropout(p=self.dropout),
# nn.Linear(num_channels * emb_dim // 2, num_channels * emb_dim // 2),
# nn.ReLU(),
# nn.Dropout(p=self.dropout),
# nn.Linear(num_channels * emb_dim // 2, num_channels * emb_dim // 2),
# nn.ReLU(),
# nn.Dropout(p=self.dropout),
nn.Linear(self.nchannels * emb_dim // 2, self.num_classes),
)
self.paux_i = nn.ModuleList([nn.Sequential(
nn.Linear(emb_dim * (self.nchannels - 1), self.num_classes,
bias=True),
nn.Softmax(dim=1)
) for _ in range(len(self.experts_emb))])
self.indices = [torch.Tensor([i for i in range(self.nchannels) if i !=
j]).long() for j in range(self.nchannels)]
self.paux_c = nn.Sequential(
nn.Linear(emb_dim * self.nchannels, self.num_classes, bias=True),
nn.Softmax(dim=1)
)
del ms['experts']
# self.weights_init()
    def weights_init(m):
        for _, mi in m._modules.items():
            if isinstance(mi, (nn.Conv2d, nn.Linear)):
                xavier_normal(mi.weight.data)
                if mi.bias is not None:
                    # xavier init needs >= 2 dims, so zero the 1-D bias instead
                    mi.bias.data.zero_()
def train(self, mode=True):
super(GrangerAmoe, self).train(mode=mode)
self.experts_emb.eval() # keep the embedding models in eval mode
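    # forward() below returns, in order: per-channel auxiliary predictions made
    # without that channel's embedding (yaux_i), an auxiliary prediction from
    # the concatenated unweighted expert embeddings (yaux_c), the main
    # attention-weighted prediction (y_att), and the per-channel attention weights.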
def forward(self, x):
#print("ue type:", self.ue.dtype)
emb = [exp(torch.unsqueeze(channel, 1).contiguous())
for (exp, channel) in zip(self.experts_emb, torch.unbind(x, 1))]
# hidden states of experts
# [bs, nchannels, emb_size]
h = torch.stack(emb, dim=1)
# [bs, nchannels*embsize]
h = h.view(h.size(0), -1)
# transformed embeddings
u = [ui(h) for ui in self.u]
# [bs, nchannels, emb_size]
u = torch.stack(u, dim=1)
# [bs, nchannels, emb_size]
transf_emb = [t(emb.contiguous()) for (t, emb) in zip(
self.emb_transf_net, emb)]
transf_emb = torch.stack(transf_emb, dim=1)
# [bs, nchannels, emb_size]
weights = torch.add(u, self.ue)
        # [bs, nchannels] after summing over the embedding dimension
weights = torch.sum(weights, 2)
# normalize weights
# [bs, nchannels]
weights = F.softmax(weights, 1)
# [bs, emb_size, nchannels]
weighted_emb = torch.mul(transf_emb.permute(0, 2, 1), weights.unsqueeze(1))
# [bs, emb_size*nchannels]
weighted_emb = weighted_emb.view(weighted_emb.size(0), -1)
# [bs, nclasses] (prediction with weighted hidden states of experts)
y_att = self.fcn(weighted_emb)
# [bs, nclasses] (prediction with unweighted hidden states of experts)
yaux_c = self.paux_c(h)
# [bs, nchannels, emb_size]
h = h.view(h.size(0), self.nchannels, -1)
# List[bs, (nchannels-1), emb_size]
hc_wo_hi = [torch.index_select(h, 1, ind) for ind in self.indices]
# List[bs, nchannels]
yaux_i = [paux_i_(hci.view(hci.size(0), -1)) for (paux_i_, hci) in zip(self.paux_i, hc_wo_hi)]
return yaux_i, yaux_c, y_att, weights
| 0
| 0
| 0
| 5,915
| 0
| 521
| 0
| 87
| 225
|
4ab43ec654b55664f667149827969ecc7a2b583c
| 1,173
|
py
|
Python
|
big_data/python_tools/big_data_tools/bokeh_tools/ex_graphs/labelset_ex.py
|
paulhtremblay/big-data
|
dfa2aa9877300a57e7a9368af59c07fcc5841b4f
|
[
"MIT"
] | null | null | null |
big_data/python_tools/big_data_tools/bokeh_tools/ex_graphs/labelset_ex.py
|
paulhtremblay/big-data
|
dfa2aa9877300a57e7a9368af59c07fcc5841b4f
|
[
"MIT"
] | 7
|
2020-06-05T18:13:25.000Z
|
2022-03-11T23:19:48.000Z
|
big_data/python_tools/big_data_tools/bokeh_tools/ex_graphs/labelset_ex.py
|
paulhtremblay/big-data
|
dfa2aa9877300a57e7a9368af59c07fcc5841b4f
|
[
"MIT"
] | 1
|
2020-11-25T18:24:37.000Z
|
2020-11-25T18:24:37.000Z
|
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label
output_file("label.html", title="label.py example")
source = ColumnDataSource(data=dict(height=[66, 71, 72, 68, 58, 62],
weight=[165, 189, 220, 141, 260, 174],
names=['Mark', 'Amir', 'Matt', 'Greg',
'Owen', 'Juan']))
p = figure(title='Dist. of 10th Grade Students at Lee High',
x_range=Range1d(140, 275))
p.scatter(x='weight', y='height', size=8, source=source)
p.xaxis[0].axis_label = 'Weight (lbs)'
p.yaxis[0].axis_label = 'Height (in)'
labels = LabelSet(x='weight', y='height', text='names', level='glyph',
x_offset=5, y_offset=5, source=source, render_mode='canvas')
citation = Label(x=70, y=70, x_units='screen', y_units='screen',
text='Collected by Luke C. 2016-04-01', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
p.add_layout(labels)
p.add_layout(citation)
show(p)
| 41.892857
| 75
| 0.602728
|
from bokeh.plotting import figure, show, output_file
from bokeh.models import ColumnDataSource, Range1d, LabelSet, Label
output_file("label.html", title="label.py example")
source = ColumnDataSource(data=dict(height=[66, 71, 72, 68, 58, 62],
weight=[165, 189, 220, 141, 260, 174],
names=['Mark', 'Amir', 'Matt', 'Greg',
'Owen', 'Juan']))
p = figure(title='Dist. of 10th Grade Students at Lee High',
x_range=Range1d(140, 275))
p.scatter(x='weight', y='height', size=8, source=source)
p.xaxis[0].axis_label = 'Weight (lbs)'
p.yaxis[0].axis_label = 'Height (in)'
labels = LabelSet(x='weight', y='height', text='names', level='glyph',
x_offset=5, y_offset=5, source=source, render_mode='canvas')
citation = Label(x=70, y=70, x_units='screen', y_units='screen',
text='Collected by Luke C. 2016-04-01', render_mode='css',
border_line_color='black', border_line_alpha=1.0,
background_fill_color='white', background_fill_alpha=1.0)
p.add_layout(labels)
p.add_layout(citation)
show(p)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
189bb5fd75bd6436fff20052f805eddc1d8a7212
| 4,516
|
py
|
Python
|
backend/size.py
|
VivianChan1998/Old-shoes
|
090f97373333a4df21399f5555dadd4c0eca6ea6
|
[
"MIT"
] | 1
|
2021-01-27T17:53:33.000Z
|
2021-01-27T17:53:33.000Z
|
backend/size.py
|
VivianChan1998/Old-shoes
|
090f97373333a4df21399f5555dadd4c0eca6ea6
|
[
"MIT"
] | 2
|
2021-01-28T07:38:49.000Z
|
2022-01-22T17:25:59.000Z
|
backend/size.py
|
VivianChan1998/Old-shoes
|
090f97373333a4df21399f5555dadd4c0eca6ea6
|
[
"MIT"
] | 1
|
2020-09-03T02:37:14.000Z
|
2020-09-03T02:37:14.000Z
|
import sys
# import cv2
# import config_own
import logging
########## To-Do ##########
### optimization ### warping ###
### the shot-raw-img is needed to be warpped
### or the coin and shoes will be in inaccurate scales
# def img_warping():
# to scale the main_obj size by a NT$10 coin
# NT$10 coin's diameter = 26mm
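# A minimal sketch of that scaling (the function name is illustrative; it assumes
# the Azure analysis JSON lists the reference coin first and the shoe second
# under "objects"):
def scale_by_coin(analysis_json):
    coin_px = analysis_json["objects"][0]["rectangle"]["w"]
    shoe_px = analysis_json["objects"][1]["rectangle"]["w"]
    return (shoe_px / coin_px) * 26  # NT$10 coin diameter = 26 mm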
######### TO-DO ##########
### /// ###
#def scaling_type_transform():
if __name__ == "__main__":
try:
# load the needed keys
subscription_key = sys.argv[2] #####################################
endpoint = sys.argv[3] #####################################
# shot_cv2() # take pics #####################################################
input_json = img_transfer_json(endpoint)
obj_scale = scaling(input_json)
print( "{}".format(obj_scale))
except:
logging.exception("Message")
| 32.257143
| 142
| 0.579717
|
import os
from os.path import basename
import sys
import json
import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# import cv2
import numpy as np
# import config_own
import logging
def shot_cv2():
# set the img storage path
img_path = config_own.SHOT_CV2_DIR
# cpature image by cv2
cam = cv2.VideoCapture(1)
cv2.namedWindow("image capture")
img_counter = 0
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
cv2.imshow("image capture", frame)
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k%256 == 32:
# SPACE pressed
img_name = "opencv_frameshot_{}.jpg".format(img_counter)
cv2.imwrite( img_path + img_name, frame)
print("{} written!".format(img_name))
img_counter += 1
break
cam.release()
cv2.destroyAllWindows()
########## To-Do ##########
### optimization ### warping ###
### the shot-raw-img is needed to be warpped
### or the coin and shoes will be in inaccurate scales
# def img_warping():
def img_transfer_json(endpoint):
img_counter = 0
# # Set the COMPUTER_SUBSCRIPTION_KEY & COMPUTER_VISION_ENDPOINT
# if 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:
# subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']
# else:
# print("\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
# if 'COMPUTER_VISION_ENDPOINT' in os.environ:
# endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
# else:
# print("\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**")
# sys.exit()
analyze_url = endpoint + "vision/v3.0/analyze"
while True:
try: # if there are still images remained to be analyzed
# # Set image_path
# image_dir = config_own.SHOT_CV2_DIR
# image_name = "opencv_frameshot_{}.jpg".format(img_counter)
# image_path = image_dir + image_name
image_path = sys.argv[1] ###################################
# Read the image into a byte array
image_data = open(image_path, "rb").read()
headers = {'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'}
params = {'visualFeatures': 'Brands,Objects'}
response = requests.post(
analyze_url, headers=headers, params=params, data=image_data)
response.raise_for_status()
# Description of the image
analysis = response.json() # the JSON return value of the image
# print(analysis)
# split the image name to rename the JSON file
base = os.path.basename(image_path)
file_name = os.path.splitext(base)[0]
# # write into the JSON file
# JSON_dir = config_own.JSON_DIR
# with open( JSON_dir + file_name + '.json', 'w', encoding='utf-8') as f:
# json.dump(analysis, f, ensure_ascii=False, indent=4)
# k = cv2.waitKey(1)
# img_counter += 1
# return value: coin's width & object's width
return analysis
except FileNotFoundError: # if there is no img remained to be analyzed
break
if k%256 == 27:
break
# to scale the main_obj size by a NT$10 coin
# NT$10 coin's diameter = 26mm
def scaling(input_json):
ruler = input_json["objects"][0]["rectangle"]["w"]
main_obj = input_json["objects"][1]["rectangle"]["w"]
main_obj_scale = (main_obj/ruler)*26
return main_obj_scale
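# e.g. a coin spanning 80 px and a shoe spanning 800 px gives (800 / 80) * 26 = 260 mm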
######### TO-DO ##########
### Conversion between Taiwanese/European/US/Japanese shoe sizes ###
#def scaling_type_transform():
if __name__ == "__main__":
try:
# load the needed keys
subscription_key = sys.argv[2] #####################################
endpoint = sys.argv[3] #####################################
# shot_cv2() # take pics #####################################################
input_json = img_transfer_json(endpoint)
obj_scale = scaling(input_json)
print( "{}".format(obj_scale))
except:
logging.exception("Message")
| 33
| 0
| 0
| 0
| 0
| 3,400
| 0
| -13
| 244
|
d64e97cef206d1a3eab0c9ff04a0f95768cec03b
| 500
|
py
|
Python
|
algorithms/samename-03-1.py
|
lsm4446/study_python
|
d05077b319c98007af26c92f69f5d59fe33483d0
|
[
"BSD-2-Clause"
] | 1
|
2020-02-17T01:25:35.000Z
|
2020-02-17T01:25:35.000Z
|
algorithms/samename-03-1.py
|
lsm4446/study_python
|
d05077b319c98007af26c92f69f5d59fe33483d0
|
[
"BSD-2-Clause"
] | 2
|
2021-03-31T19:32:47.000Z
|
2021-12-13T20:33:30.000Z
|
algorithms/samename-03-1.py
|
lsm4446/study_python
|
d05077b319c98007af26c92f69f5d59fe33483d0
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Created on 2018. 4. 26.
@author: lsm
'''
name = ["", "Tom", "Jerry", "Mike", "", "Tom"]
print(find_same_name(name))
name = ["", "Tom", "Jerry", "Mike", "", "Mike"]
print(find_same_name(name))
name = ["", "Tom", "Jerry", "Mike", "Mike", "Jerry", "tom"]
print(find_same_name(name))
| 21.73913
| 62
| 0.53
|
'''
Created on 2018. 4. 26.
@author: lsm
'''
def find_same_name(a):
n = len(a)
result = set()
for i in range(0, n-1):
for j in range(i+1, n):
if a[i] == a[j]:
result.add(a[i])
return result
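# The nested loops compare every pair (O(n^2)); the same result can be had in
# O(n) with collections.Counter: {k for k, c in Counter(a).items() if c > 1}.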
name = ["이상면", "Tom", "Jerry", "Mike", "이상면", "Tom"]
print(find_same_name(name))
name = ["이상면", "Tom", "Jerry", "Mike", "이상면", "Mike"]
print(find_same_name(name))
name = ["이상면", "Tom", "Jerry", "Mike", "Mike", "Jerry", "tom"]
print(find_same_name(name))
| 45
| 0
| 0
| 0
| 0
| 175
| 0
| 0
| 22
|
52e4d41b65b85280389783fdde8527c4a6dd8a7c
| 7,689
|
py
|
Python
|
curriculums.py
|
EdwardJTL/Edit3D
|
bbb6364aeb5ea17c12c0c23578268641c066ebca
|
[
"MIT"
] | null | null | null |
curriculums.py
|
EdwardJTL/Edit3D
|
bbb6364aeb5ea17c12c0c23578268641c066ebca
|
[
"MIT"
] | null | null | null |
curriculums.py
|
EdwardJTL/Edit3D
|
bbb6364aeb5ea17c12c0c23578268641c066ebca
|
[
"MIT"
] | null | null | null |
"""
To easily reproduce experiments, and avoid passing several command line arguments, we implemented
a curriculum utility. Parameters can be set in a curriculum dictionary.
Curriculum Schema:
Numerical keys in the curriculum specify an upsample step. When the current step matches the upsample step,
    the values in the corresponding dict will be updated in the curriculum. Common curriculum values specified at upsamples:
batch_size: Batch Size.
num_steps: Number of samples along ray.
img_size: Generated image resolution.
batch_split: Integer number over which to divide batches and aggregate sequentially. (Used due to memory constraints)
    gen_lr: Generator learning rate.
disc_lr: Discriminator learning rate.
fov: Camera field of view
ray_start: Near clipping for camera rays.
ray_end: Far clipping for camera rays.
fade_steps: Number of steps to fade in new layer on discriminator after upsample.
h_stddev: Stddev of camera yaw in radians.
v_stddev: Stddev of camera pitch in radians.
h_mean: Mean of camera yaw in radians.
v_mean: Mean of camera pitch in radians.
sample_dist: Type of camera pose distribution. (gaussian | spherical_uniform | uniform)
topk_interval: Interval over which to fade the top k ratio.
topk_v: Minimum fraction of a batch to keep during top k training.
betas: Beta parameters for Adam.
unique_lr: Whether to use reduced LRs for mapping network.
weight_decay: Weight decay parameter.
r1_lambda: R1 regularization parameter.
latent_dim: Latent dim for Siren network in generator.
grad_clip: Grad clipping parameter.
model: Siren architecture used in generator. (SPATIALSIRENBASELINE | TALLSIREN)
generator: Generator class. (ImplicitGenerator3d)
discriminator: Discriminator class. (ProgressiveEncoderDiscriminator | ProgressiveDiscriminator)
dataset: Training dataset. (CelebA | Carla | Cats)
clamp_mode: Clamping function for Siren density output. (relu | softplus)
    z_dist: Latent vector distribution. (gaussian | uniform)
hierarchical_sample: Flag to enable hierarchical_sampling from NeRF algorithm. (Doubles the number of sampled points)
    z_lambda: Weight for experimental latent code positional consistency loss.
pos_lambda: Weight parameter for experimental positional consistency loss.
last_back: Flag to fill in background color with last sampled color on ray.
"""
import math
LSUN = {
0: {
"batch_size": 20,
"num_steps": 12,
"img_size": 32,
"batch_split": 1,
"gen_lr": 0.0002,
"disc_lr": 0.002,
},
int(150e3): {
"batch_size": 10,
"num_steps": 12,
"img_size": 64,
"batch_split": 1,
"gen_lr": 5e-5,
"disc_lr": 5e-4,
},
int(400e3): {
"batch_size": 1,
"num_steps": 48,
"img_size": 128,
"batch_split": 1,
"gen_lr": 10e-6,
"disc_lr": 10e-5,
},
int(600e3): {},
# int(55e3): {'batch_size': 1, 'num_steps': 48, 'img_size': 128, 'batch_split': 5, 'gen_lr': 10e-6, 'disc_lr': 10e-5},
# int(200e3): {},
"dataset_path": "/h/edwardl/datasets/LSUN/cars/combined/*.webp",
# "dataset_path": "/h/edwardl/datasets/carla/images/*.png",
"fov": 30,
"ray_start": 0.7,
"ray_end": 1.3,
"fade_steps": 10000,
"sample_dist": "spherical_uniform",
"h_stddev": math.pi,
"v_stddev": math.pi / 4 * 85 / 90,
"h_mean": math.pi * 0.5,
"v_mean": math.pi / 4 * 85 / 90,
"topk_interval": 1000,
"topk_v": 1,
"betas": (0, 0.9),
"unique_lr": False,
"weight_decay": 0,
"r1_lambda": 10,
"latent_dim": 256,
"grad_clip": 10,
"generator": "CIPSGeneratorNerfINR",
"discriminator": "MultiScaleAuxDiscriminatorConfig",
"INR": "CIPSNetINRConfig",
"siren": "ShallowSIRENConfig",
"inr_mapping": "INRMultiHeadMappingConfig",
"siren_mapping": "SirenMultiHeadMappingConfig",
"dataset": "LSUNCars",
# "dataset": "Carla",
"white_back": True,
"clamp_mode": "relu",
"z_dist": "gaussian",
"hierarchical_sample": True,
"z_lambda": 0,
"pos_lambda": 0,
"learnable_dist": False,
"use_amp_G": False,
"use_amp_D": False,
"forward_points": 256,
"train_aux_img": True,
"aux_img_interval": 1,
"d_reg_interval": 1,
"batch_size_eval": 16,
"grad_points": 256,
}
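# A minimal sketch of how the integer keys above are resolved (the function name
# is illustrative; the repository's own helper was elided from this copy): take
# the largest stage key that does not exceed the current step, then merge in the
# global string-keyed settings.
def resolve_metadata(curriculum, current_step):
    meta = {}
    for step in sorted((k for k in curriculum if isinstance(k, int)), reverse=True):
        if step <= current_step:
            meta.update(curriculum[step])  # most recent stage only
            break
    meta.update({k: v for k, v in curriculum.items() if not isinstance(k, int)})
    return meta
# e.g. resolve_metadata(LSUN, int(200e3))["img_size"] == 64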
CelebA = {
0: {
"batch_size": 4,
"num_steps": 12,
"img_size": 32,
"batch_split": 1,
"gen_lr": 0.0002,
"disc_lr": 0.002,
},
int(50e3): {
"batch_size": 10,
"num_steps": 12,
"img_size": 64,
"batch_split": 1,
"gen_lr": 5e-5,
"disc_lr": 5e-4,
},
int(300e3): {},
"dataset_path": "/scratch/ssd002/datasets/celeba/Img/img_align_celeba/*.jpg",
"fov": 12,
"ray_start": 0.88,
"ray_end": 1.12,
"fade_steps": 10000,
"sample_dist": "gaussian",
"h_stddev": 0.3,
"v_stddev": 0.155,
"h_mean": math.pi * 0.5,
"v_mean": math.pi / 4 * 85 / 90,
"topk_interval": 1000,
"topk_v": 1,
"betas": (0, 0.9),
"unique_lr": False,
"weight_decay": 0,
"r1_lambda": 10,
"latent_dim": 256,
"grad_clip": 10,
"generator": "CIPSGeneratorNerfINR",
"discriminator": "MultiScaleAuxDiscriminatorConfig",
"INR": "CIPSNetINRConfig",
"siren": "ShallowSIRENConfig",
"inr_mapping": "INRMultiHeadMappingConfig",
"siren_mapping": "SirenMultiHeadMappingConfig",
"dataset": "CelebA",
"white_back": True,
"clamp_mode": "relu",
"z_dist": "gaussian",
"hierarchical_sample": True,
"z_lambda": 0,
"pos_lambda": 0,
"learnable_dist": False,
"use_amp_G": False,
"use_amp_D": False,
"forward_points": 256,
"train_aux_img": True,
"aux_img_interval": 1,
"d_reg_interval": 1,
"batch_size_eval": 16,
"grad_points": 256,
}
| 33.723684
| 125
| 0.642216
|
"""
To easily reproduce experiments, and avoid passing several command line arguments, we implemented
a curriculum utility. Parameters can be set in a curriculum dictionary.
Curriculum Schema:
Numerical keys in the curriculum specify an upsample step. When the current step matches the upsample step,
    the values in the corresponding dict will be updated in the curriculum. Common curriculum values specified at upsamples:
batch_size: Batch Size.
num_steps: Number of samples along ray.
img_size: Generated image resolution.
batch_split: Integer number over which to divide batches and aggregate sequentially. (Used due to memory constraints)
    gen_lr: Generator learning rate.
disc_lr: Discriminator learning rate.
fov: Camera field of view
ray_start: Near clipping for camera rays.
ray_end: Far clipping for camera rays.
fade_steps: Number of steps to fade in new layer on discriminator after upsample.
h_stddev: Stddev of camera yaw in radians.
v_stddev: Stddev of camera pitch in radians.
h_mean: Mean of camera yaw in radians.
v_mean: Mean of camera pitch in radians.
sample_dist: Type of camera pose distribution. (gaussian | spherical_uniform | uniform)
topk_interval: Interval over which to fade the top k ratio.
topk_v: Minimum fraction of a batch to keep during top k training.
betas: Beta parameters for Adam.
unique_lr: Whether to use reduced LRs for mapping network.
weight_decay: Weight decay parameter.
r1_lambda: R1 regularization parameter.
latent_dim: Latent dim for Siren network in generator.
grad_clip: Grad clipping parameter.
model: Siren architecture used in generator. (SPATIALSIRENBASELINE | TALLSIREN)
generator: Generator class. (ImplicitGenerator3d)
discriminator: Discriminator class. (ProgressiveEncoderDiscriminator | ProgressiveDiscriminator)
dataset: Training dataset. (CelebA | Carla | Cats)
clamp_mode: Clamping function for Siren density output. (relu | softplus)
    z_dist: Latent vector distribution. (gaussian | uniform)
hierarchical_sample: Flag to enable hierarchical_sampling from NeRF algorithm. (Doubles the number of sampled points)
    z_lambda: Weight for experimental latent code positional consistency loss.
pos_lambda: Weight parameter for experimental positional consistency loss.
last_back: Flag to fill in background color with last sampled color on ray.
"""
import math
def next_upsample_step(curriculum, current_step):
# Return the epoch when it will next upsample
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata["img_size"]
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if (
curriculum_step > current_step
and curriculum[curriculum_step].get("img_size", 512) > current_size
):
return curriculum_step
return float("Inf")
def last_upsample_step(curriculum, current_step):
# Returns the start epoch of the current stage, i.e. the epoch
# it last upsampled
current_metadata = extract_metadata(curriculum, current_step)
current_size = current_metadata["img_size"]
for curriculum_step in sorted([cs for cs in curriculum.keys() if type(cs) == int]):
if (
curriculum_step <= current_step
and curriculum[curriculum_step]["img_size"] == current_size
):
return curriculum_step
return 0
def get_current_step(curriculum, epoch):
step = 0
for update_epoch in curriculum["update_epochs"]:
if epoch >= update_epoch:
step += 1
return step
def extract_metadata(curriculum, current_step):
return_dict = {}
for curriculum_step in sorted(
[cs for cs in curriculum.keys() if type(cs) == int], reverse=True
):
if curriculum_step <= current_step:
for key, value in curriculum[curriculum_step].items():
return_dict[key] = value
break
for key in [k for k in curriculum.keys() if type(k) != int]:
return_dict[key] = curriculum[key]
return return_dict
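# Example: extract_metadata(LSUN, int(200e3)) returns the int(150e3) stage
# (batch_size 10, img_size 64, ...) merged with the global string-keyed
# settings, since 150e3 is the largest stage key not exceeding the step.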
LSUN = {
0: {
"batch_size": 20,
"num_steps": 12,
"img_size": 32,
"batch_split": 1,
"gen_lr": 0.0002,
"disc_lr": 0.002,
},
int(150e3): {
"batch_size": 10,
"num_steps": 12,
"img_size": 64,
"batch_split": 1,
"gen_lr": 5e-5,
"disc_lr": 5e-4,
},
int(400e3): {
"batch_size": 1,
"num_steps": 48,
"img_size": 128,
"batch_split": 1,
"gen_lr": 10e-6,
"disc_lr": 10e-5,
},
int(600e3): {},
# int(55e3): {'batch_size': 1, 'num_steps': 48, 'img_size': 128, 'batch_split': 5, 'gen_lr': 10e-6, 'disc_lr': 10e-5},
# int(200e3): {},
"dataset_path": "/h/edwardl/datasets/LSUN/cars/combined/*.webp",
# "dataset_path": "/h/edwardl/datasets/carla/images/*.png",
"fov": 30,
"ray_start": 0.7,
"ray_end": 1.3,
"fade_steps": 10000,
"sample_dist": "spherical_uniform",
"h_stddev": math.pi,
"v_stddev": math.pi / 4 * 85 / 90,
"h_mean": math.pi * 0.5,
"v_mean": math.pi / 4 * 85 / 90,
"topk_interval": 1000,
"topk_v": 1,
"betas": (0, 0.9),
"unique_lr": False,
"weight_decay": 0,
"r1_lambda": 10,
"latent_dim": 256,
"grad_clip": 10,
"generator": "CIPSGeneratorNerfINR",
"discriminator": "MultiScaleAuxDiscriminatorConfig",
"INR": "CIPSNetINRConfig",
"siren": "ShallowSIRENConfig",
"inr_mapping": "INRMultiHeadMappingConfig",
"siren_mapping": "SirenMultiHeadMappingConfig",
"dataset": "LSUNCars",
# "dataset": "Carla",
"white_back": True,
"clamp_mode": "relu",
"z_dist": "gaussian",
"hierarchical_sample": True,
"z_lambda": 0,
"pos_lambda": 0,
"learnable_dist": False,
"use_amp_G": False,
"use_amp_D": False,
"forward_points": 256,
"train_aux_img": True,
"aux_img_interval": 1,
"d_reg_interval": 1,
"batch_size_eval": 16,
"grad_points": 256,
}
CelebA = {
0: {
"batch_size": 4,
"num_steps": 12,
"img_size": 32,
"batch_split": 1,
"gen_lr": 0.0002,
"disc_lr": 0.002,
},
int(50e3): {
"batch_size": 10,
"num_steps": 12,
"img_size": 64,
"batch_split": 1,
"gen_lr": 5e-5,
"disc_lr": 5e-4,
},
int(300e3): {},
"dataset_path": "/scratch/ssd002/datasets/celeba/Img/img_align_celeba/*.jpg",
"fov": 12,
"ray_start": 0.88,
"ray_end": 1.12,
"fade_steps": 10000,
"sample_dist": "gaussian",
"h_stddev": 0.3,
"v_stddev": 0.155,
"h_mean": math.pi * 0.5,
"v_mean": math.pi / 4 * 85 / 90,
"topk_interval": 1000,
"topk_v": 1,
"betas": (0, 0.9),
"unique_lr": False,
"weight_decay": 0,
"r1_lambda": 10,
"latent_dim": 256,
"grad_clip": 10,
"generator": "CIPSGeneratorNerfINR",
"discriminator": "MultiScaleAuxDiscriminatorConfig",
"INR": "CIPSNetINRConfig",
"siren": "ShallowSIRENConfig",
"inr_mapping": "INRMultiHeadMappingConfig",
"siren_mapping": "SirenMultiHeadMappingConfig",
"dataset": "CelebA",
"white_back": True,
"clamp_mode": "relu",
"z_dist": "gaussian",
"hierarchical_sample": True,
"z_lambda": 0,
"pos_lambda": 0,
"learnable_dist": False,
"use_amp_G": False,
"use_amp_D": False,
"forward_points": 256,
"train_aux_img": True,
"aux_img_interval": 1,
"d_reg_interval": 1,
"batch_size_eval": 16,
"grad_points": 256,
}
| 0
| 0
| 0
| 0
| 0
| 1,616
| 0
| 0
| 92
|
12bef3917108852eeaabbac5c934d0b48f2924ea
| 6,488
|
py
|
Python
|
tests/transmitters.py
|
LeMagnesium/stolas-p2p
|
f5a48c12b8422c58ba62ed89491bcf2a19441dbf
|
[
"Unlicense"
] | null | null | null |
tests/transmitters.py
|
LeMagnesium/stolas-p2p
|
f5a48c12b8422c58ba62ed89491bcf2a19441dbf
|
[
"Unlicense"
] | null | null | null |
tests/transmitters.py
|
LeMagnesium/stolas-p2p
|
f5a48c12b8422c58ba62ed89491bcf2a19441dbf
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
#
from sys import argv
import stolas.stolas
from stolas.betterui import pprint as print
if __name__ == '__main__':
if len(argv) == 1:
print("Tell me?")
pass
if argv[1] == "cluster":
stolas_cluster()
elif argv[1] == "gigacluster":
stolas_gigacluster()
elif argv[1] == "simple":
stolas_simple()
elif argv[1] == "transmission":
test_transmission()
else:
print("\_()_/")
| 30.603774
| 146
| 0.679408
|
#!/usr/bin/python3
# -*- encoding: utf-8 -*-
#
from sys import argv
import random
import os.path
import hashlib
import time
import stolas.stolas
import stolas.protocol
from stolas.betterui import pprint as print
from network import manual_stolas_prompt
from common import network_collapse, mean
def network_random(port, quantity = None):
ports = []
cport = port
objects = []
if quantity == None:
quantity = random.randrange(5,7)
for n in range(quantity):
while True:
try:
stols = stolas.stolas.Stolas(port = cport, virtual=True)
stols.start()
if len(ports) > 0:
rport = random.choice(ports)
pid = stols.networker.peer_add(("127.0.0.1", rport))
print("Created model {0}/{1}...".format(n+1, quantity), end = "\r")
ports.append(cport)
cport += 1
objects.append(stols)
break
except OSError:
cport += 1
continue
# Assert that every single object is connected to another one
assert(len([x for x in objects if len(x.networker.peers) == 0]) == 0)
return objects
def create_ctrlfile():
controlfile = "{0}_cluster.ctrl".format(hashlib.sha1(str(time.time()+random.randrange(-10000, 10000)).encode("utf8")).hexdigest()[:8])
print("Creating control file : {0}".format(controlfile))
open(controlfile, "w")
print("~<s:bright]Remove the control file at any time to collapse the cluster.~<s:reset_all]")
return controlfile
def stolas_cluster():
rport = random.randrange(1024, 65400)
controlfile = create_ctrlfile()
print("Starting from port ~<s:bright]~<f:red]{0}~<s:reset_all]".format(rport))
cluster = network_random(rport)
print("~<s:bright]Network is ready! ~<f:green]\u2713~<s:reset_all]")
mm = [x.port for x in cluster if x.is_alive()]
while len(mm) > 0 and os.path.isfile(controlfile):
print("AvgInt: {0:.2f}; FiPort: {1}".format(sum([len(m.networker.peers) for m in cluster if m.is_alive()])/len(mm), mm[0]), end = " \r")
time.sleep(1)
mm = [x.port for x in cluster if x.is_alive()]
#print([m for m in cluster if m.is_alive()][0].networker.peers)
if os.path.isfile(controlfile):
os.remove(controlfile)
print("\n~<s:bright]~<f:blue]Cluster collapsing~<s:reset_all]")
network_collapse(cluster)
print("Done")
def stolas_gigacluster():
rport = random.randrange(1024, 65400)
controlfile = create_ctrlfile()
print("Starting from port ~<s:bright]~<f:red]{0}~<s:reset_all]".format(rport))
cluster = network_random(rport, random.randrange(15,20))
print("~<s:bright]Network is ready! ~<f:green]\u2713~<s:reset_all]")
mm = [x.port for x in cluster if x.is_alive()]
while len(mm) > 0 and os.path.isfile(controlfile):
print("AvgInt: {0:.2f}; FiPort: {1}".format(sum([len(m.networker.peers) for m in cluster if m.is_alive()])/len(mm), mm[0]), end = " \r")
time.sleep(1)
mm = [x.port for x in cluster if x.is_alive()]
#print([m for m in cluster if m.is_alive()][0].networker.peers)
if os.path.isfile(controlfile):
os.remove(controlfile)
print("\n~<s:bright]~<f:blue]Cluster collapsing~<s:reset_all]")
network_collapse(cluster)
print("Done")
def stolas_simple():
slobj = stolas.stolas.Stolas(logdown = True)
slobj.start()
print("Port is {0}".format(slobj.port))
manual_stolas_prompt(slobj)
slobj.stop()
slobj.join()
print("Done")
def generate_random_payload():
return os.urandom(random.randrange(10, 65000))
def cluster_average_integration(cluster):
return mean([len(x.networker.peers) for x in cluster if x.is_alive()])
def test_transmission():
print("~<s:bright]Starting Message Transmission Test~<s:reset_all]")
controlfile = create_ctrlfile()
sender = stolas.stolas.Stolas()
receiver = stolas.stolas.Stolas()
print("\t=> Ends created ~<sf:bright,green]\u2713~<s:reset_all]")
sender.start()
receiver.start()
print("\t=> Ends started ~<sf:bright,green]\u2713~<s:reset_all]")
cluster = network_random(sender.port+1, random.randrange(10,12))
print("\n\t=> Done creating the cluster ~<sf:bright,green]\u2713~<s:reset_all]")
sender.networker.peer_add(("localhost", random.choice(cluster).port))
receiver.networker.peer_add(("localhost", random.choice(cluster).port))
print("\t=> Connected the Receiver and Sender")
i, sint, cint, mint, rint = 0, 0, 0, 0, 0
while not sender.networker.integrated or not receiver.networker.integrated or len([x for x in cluster if x.networker.integrated]) < len(cluster):
print("[{0}|{1:.2f}|{2}] Integrating{3}{4}".format(
sint, mint, rint,
'.' * i,
' ' * (5-i)
), end = "\r")
sint = len(sender.networker.peers)
mint = min([len(x.networker.peers) for x in cluster + [sender] if x.is_alive()])
rint = len(receiver.networker.peers)
i = (i+1)%6
time.sleep(0.5)
print("=> Integrated ~<s:bright]~<f:green]\u2713~<s:reset_all]" + 17 * ' ')
assert(receiver.networker.integrated)
assert(sender.networker.integrated)
assert(len([True for x in cluster if x.networker.integrated]) == len(cluster))
ttlt = 120
globalpayload = generate_random_payload()
msgobj = stolas.protocol.Message(ttl = ttlt, channel = "")
msgobj.set_payload(globalpayload)
print("Sending out: {0}".format(msgobj))
sender.message_broadcast(msgobj)
then = time.time()
i = 1
worked_out_fine = False
while True:
if len(receiver.mpile) > 0:
print(" " * 10, end = "\r")
print("\t=> Message Received ~<f:green]~<s:bright]\u2713~<s:reset_all] ")
worked_out_fine = True
break
print("[{0}|{1:.2f}|{2}>{3}] Waiting{4}{5}".format(
sint, cint, rint, len([x for x in cluster + [receiver] if len(x.mpile) > 0]),
'.' * i,
' ' * (5-i)
), end = "\r")
time.sleep(0.5)
sint = len(sender.networker.peers)
cint = mean([len(x.networker.peers) for x in cluster + [sender] if x.is_alive()])
rint = len(receiver.networker.peers)
i = (i+1)%5
if time.time() - then >= ttlt or not os.path.isfile(controlfile):
print("~<s:bright]~<f:red]Failed sending the message \u2717~<s:reset_all]")
print("\t=> Leaving anyways ~<s:bright]~<f:red]\u2717~<s:reset_all]")
break
network_collapse(cluster)
sender.stop()
sender.join()
receiver.stop()
receiver.join()
if os.path.isfile(controlfile):
os.remove(controlfile)
print("Done")
return worked_out_fine # We're a unit test
if __name__ == '__main__':
if len(argv) == 1:
print("Tell me?")
pass
if argv[1] == "cluster":
stolas_cluster()
elif argv[1] == "gigacluster":
stolas_gigacluster()
elif argv[1] == "simple":
stolas_simple()
elif argv[1] == "transmission":
test_transmission()
else:
print("¯\_(ツ)_/¯")
| 7
| 0
| 0
| 0
| 0
| 5,698
| 0
| 8
| 339
|
dfb39c7d8e6a3e2dae7e009e6ab22ce4fe0a79a6
| 8,436
|
py
|
Python
|
Neural Networks/deepnnScript.py
|
Muthu2093/Machine-Learning-Applications
|
bb171ff2bfdcb5af64403ae7f63fe96572d63963
|
[
"MIT"
] | null | null | null |
Neural Networks/deepnnScript.py
|
Muthu2093/Machine-Learning-Applications
|
bb171ff2bfdcb5af64403ae7f63fe96572d63963
|
[
"MIT"
] | null | null | null |
Neural Networks/deepnnScript.py
|
Muthu2093/Machine-Learning-Applications
|
bb171ff2bfdcb5af64403ae7f63fe96572d63963
|
[
"MIT"
] | null | null | null |
'''
Comparing single layer MLP with deep MLP (using TensorFlow)
'''
import tensorflow as tf
import timeit
start = timeit.default_timer()
# Create model
# Add more hidden layers to create deeper networks
# Remember to connect the final hidden layer to the out_layer
# Do not change this
# Parameters
learning_rate = 0.0001
training_epochs = 100
batch_size = 100
# Construct model
pred,x,y = create_multilayer_perceptron()
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
timer = 1
# Initializing the variables
init = tf.global_variables_initializer()
# load data
train_features, train_labels, valid_features, valid_labels, test_features, test_labels = preprocess()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
print(timer)
timer = timer +1
avg_cost = 0.
total_batch = int(train_features.shape[0] / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = train_features[i * batch_size: (i + 1) * batch_size], train_labels[i * batch_size: (i + 1) * batch_size]
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
# Compute average loss
avg_cost += c / total_batch
print("Optimization Finished!")
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({x: test_features, y: test_labels}))
stop = timeit.default_timer()
print('\n Time Taken: ' + str(stop - start))
| 40.951456
| 135
| 0.642959
|
'''
Comparing single layer MLP with deep MLP (using TensorFlow)
'''
import tensorflow as tf
import numpy as np
import pickle
from scipy.io import loadmat  # used by preprocess() below
import timeit
start = timeit.default_timer()
# Create model
# Add more hidden layers to create deeper networks
# Remember to connect the final hidden layer to the out_layer
def create_multilayer_perceptron():
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
#n_hidden_2 = 256 # 2nd layer number of features
#n_hidden_3 = 256
#n_hidden_4 = 256
#n_hidden_5 = 256
#n_hidden_6 = 256
#n_hidden_7 = 256
n_input = 784 # data input
n_classes = 2
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
#'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
#'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
#'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
#'h5': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_5])),
#'h6': tf.Variable(tf.random_normal([n_hidden_5, n_hidden_6])),
#'h7': tf.Variable(tf.random_normal([n_hidden_6, n_hidden_7])),
'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
#'b2': tf.Variable(tf.random_normal([n_hidden_2])),
#'b3': tf.Variable(tf.random_normal([n_hidden_3])),
#'b4': tf.Variable(tf.random_normal([n_hidden_4])),
#'b5': tf.Variable(tf.random_normal([n_hidden_5])),
#'b6': tf.Variable(tf.random_normal([n_hidden_6])),
#'b7': tf.Variable(tf.random_normal([n_hidden_7])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
#layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
#layer_2 = tf.nn.relu(layer_2)
# Hidden layer with RELU activation
#layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
#layer_3 = tf.nn.relu(layer_3)
# Hidden layer with RELU activation
#layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
#layer_4 = tf.nn.relu(layer_4)
# Hidden layer with RELU activation
#layer_5 = tf.add(tf.matmul(layer_4, weights['h5']), biases['b5'])
#layer_5 = tf.nn.relu(layer_5)
# Hidden layer with RELU activation
#layer_6 = tf.add(tf.matmul(layer_5, weights['h6']), biases['b6'])
#layer_6 = tf.nn.relu(layer_6)
# Hidden layer with RELU activation
#layer_7 = tf.add(tf.matmul(layer_6, weights['h7']), biases['b7'])
#layer_7 = tf.nn.relu(layer_7)
# Output layer with linear activation
out_layer = tf.matmul(layer_1, weights['out']) + biases['out']
return out_layer,x,y
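# Note: create_multilayer_perceptron returns raw logits; the softmax is applied
# inside tf.nn.softmax_cross_entropy_with_logits when the loss is built below.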
# Do not change this
def preprocess():
mat = loadmat('mnist_all.mat') # loads the MAT object as a Dictionary
# Pick a reasonable size for validation data
# ------------Initialize preprocess arrays----------------------#
train_preprocess = np.zeros(shape=(50000, 784))
validation_preprocess = np.zeros(shape=(10000, 784))
test_preprocess = np.zeros(shape=(10000, 784))
train_label_preprocess = np.zeros(shape=(50000,))
validation_label_preprocess = np.zeros(shape=(10000,))
test_label_preprocess = np.zeros(shape=(10000,))
# ------------Initialize flag variables----------------------#
train_len = 0
validation_len = 0
test_len = 0
train_label_len = 0
validation_label_len = 0
# ------------Start to split the data set into 6 arrays-----------#
for key in mat:
# -----------when the set is training set--------------------#
if "train" in key:
label = key[-1] # record the corresponding label
tup = mat.get(key)
sap = range(tup.shape[0])
tup_perm = np.random.permutation(sap)
tup_len = len(tup) # get the length of current training set
tag_len = tup_len - 1000 # defines the number of examples which will be added into the training set
# ---------------------adding data to training set-------------------------#
train_preprocess[train_len:train_len + tag_len] = tup[tup_perm[1000:], :]
train_len += tag_len
train_label_preprocess[train_label_len:train_label_len + tag_len] = label
train_label_len += tag_len
# ---------------------adding data to validation set-------------------------#
validation_preprocess[validation_len:validation_len + 1000] = tup[tup_perm[0:1000], :]
validation_len += 1000
validation_label_preprocess[validation_label_len:validation_label_len + 1000] = label
validation_label_len += 1000
# ---------------------adding data to test set-------------------------#
elif "test" in key:
label = key[-1]
tup = mat.get(key)
sap = range(tup.shape[0])
tup_perm = np.random.permutation(sap)
tup_len = len(tup)
test_label_preprocess[test_len:test_len + tup_len] = label
test_preprocess[test_len:test_len + tup_len] = tup[tup_perm]
test_len += tup_len
# ---------------------Shuffle,double and normalize-------------------------#
train_size = range(train_preprocess.shape[0])
train_perm = np.random.permutation(train_size)
train_data = train_preprocess[train_perm]
train_data = np.double(train_data)
train_data = train_data / 255.0
train_label = train_label_preprocess[train_perm]
validation_size = range(validation_preprocess.shape[0])
vali_perm = np.random.permutation(validation_size)
validation_data = validation_preprocess[vali_perm]
validation_data = np.double(validation_data)
validation_data = validation_data / 255.0
validation_label = validation_label_preprocess[vali_perm]
test_size = range(test_preprocess.shape[0])
test_perm = np.random.permutation(test_size)
test_data = test_preprocess[test_perm]
test_data = np.double(test_data)
test_data = test_data / 255.0
test_label = test_label_preprocess[test_perm]
# Feature selection
# Your code here.
#train_data1 = np.delete(train_data,np.where(np.amax(train_data,axis=0)==0),1)
#test_data1 = np.delete(test_data,np.where(np.amax(train_data,axis=0)==0),1)
#validation_data1 = np.delete(validation_data,np.where(np.amax(train_data,axis=0)==0),1)
print('preprocess done')
return train_data, train_label, validation_data, validation_label, test_data, test_label
# Parameters
learning_rate = 0.0001
training_epochs = 100
batch_size = 100
# Construct model
pred,x,y = create_multilayer_perceptron()
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
timer = 1
# Initializing the variables
init = tf.global_variables_initializer()
# load data
train_features, train_labels, valid_features, valid_labels, test_features, test_labels = preprocess()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
print(timer)
timer = timer +1
avg_cost = 0.
total_batch = int(train_features.shape[0] / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = train_features[i * batch_size: (i + 1) * batch_size], train_labels[i * batch_size: (i + 1) * batch_size]
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
# Compute average loss
avg_cost += c / total_batch
print("Optimization Finished!")
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Accuracy:", accuracy.eval({x: test_features, y: test_labels}))
stop = timeit.default_timer()
print('\n Time Taken: ' + str(stop - start))
| 0
| 0
| 0
| 0
| 0
| 6,505
| 0
| -11
| 88
|
bd6b41c6a191fa6db37444744515df734b167e4a
| 405
|
py
|
Python
|
pytorch_projects/common_pytorch/base_modules/avg_pool_head.py
|
KGMSFT/integral-human-pose
|
d3ad4117ed71c580d2ab17987e15f9b2c3318a3b
|
[
"MIT"
] | 472
|
2018-07-20T04:01:04.000Z
|
2022-01-20T07:28:06.000Z
|
pytorch_projects/common_pytorch/base_modules/avg_pool_head.py
|
KGMSFT/integral-human-pose
|
d3ad4117ed71c580d2ab17987e15f9b2c3318a3b
|
[
"MIT"
] | 44
|
2018-09-17T06:39:13.000Z
|
2021-09-10T06:22:27.000Z
|
pytorch_projects/common_pytorch/base_modules/avg_pool_head.py
|
KGMSFT/integral-human-pose
|
d3ad4117ed71c580d2ab17987e15f9b2c3318a3b
|
[
"MIT"
] | 88
|
2018-07-22T13:10:16.000Z
|
2022-03-21T11:21:38.000Z
|
import torch.nn as nn
| 31.153846
| 64
| 0.624691
|
import torch.nn as nn
class AvgPoolHead(nn.Module):
def __init__(self, in_channels, out_channels, fea_map_size):
super(AvgPoolHead, self).__init__()
self.avgpool = nn.AvgPool2d(fea_map_size, stride=1)
self.fc = nn.Linear(in_channels, out_channels)
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
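# Usage sketch (numbers are hypothetical): for a 2048-channel 8x8 feature map,
#   head = AvgPoolHead(2048, 51, fea_map_size=8)
#   out = head(torch.randn(4, 2048, 8, 8))  # -> [4, 51]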
| 0
| 0
| 0
| 361
| 0
| 0
| 0
| 0
| 23
|
4a1ceaf38773fa0905170c8e89ab1c5dd2a537ec
| 1,049
|
py
|
Python
|
lib_collection/sort/quick_3_string.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/sort/quick_3_string.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/sort/quick_3_string.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
keys = [
'she',
'sells',
'seashells',
'by',
'the',
'seashore',
'the',
'shells',
'she',
'sells',
'are',
'surely',
'seashells',
]
expected = sorted(keys[:])
assert keys != expected
sort(keys)
assert keys == expected
| 16.919355
| 49
| 0.414681
|
def sort(keys):
_sort(keys, 0, len(keys)-1, 0)
def _sort(keys, lo, hi, start):
if hi <= lo:
return
lt = lo
gt = hi
v = get_r(keys[lt], start)
i = lt + 1
while i <= gt:
c = get_r(keys[i], start)
if c < v:
keys[lt], keys[i] = keys[i], keys[lt]
lt += 1
i += 1
elif c > v:
keys[i], keys[gt] = keys[gt], keys[i]
gt -= 1
else:
i += 1
_sort(keys, lo, lt-1, start)
if v >= 0:
_sort(keys, lt, gt, start+1)
_sort(keys, gt+1, hi, start)
def get_r(key, i):
if i < len(key):
return ord(key[i])
return -1
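# get_r returns -1 past the end of a key, so a shorter string sorts before any
# longer string sharing its prefix; _sort recurses on the <, ==, > partitions
# and advances the character index only for the == block.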
if __name__ == '__main__':
keys = [
'she',
'sells',
'seashells',
'by',
'the',
'seashore',
'the',
'shells',
'she',
'sells',
'are',
'surely',
'seashells',
]
expected = sorted(keys[:])
assert keys != expected
sort(keys)
assert keys == expected
| 0
| 0
| 0
| 0
| 0
| 604
| 0
| 0
| 68
|
18817fe28e3e3d36c37cd1aa559511462ada2e60
| 505
|
py
|
Python
|
src/sdanalysis/dynamics/__init__.py
|
malramsay64/statdyn-analysis
|
2cc9ea1c5386b7fc203cb068d29e8d0aa626ee43
|
[
"MIT"
] | 6
|
2018-03-13T22:43:11.000Z
|
2021-07-07T17:46:28.000Z
|
src/sdanalysis/dynamics/__init__.py
|
malramsay64/statdyn-analysis
|
2cc9ea1c5386b7fc203cb068d29e8d0aa626ee43
|
[
"MIT"
] | 174
|
2018-02-06T05:50:12.000Z
|
2020-08-01T19:32:55.000Z
|
src/sdanalysis/dynamics/__init__.py
|
malramsay64/statdyn-analysis
|
2cc9ea1c5386b7fc203cb068d29e8d0aa626ee43
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright 2019 Malcolm Ramsay <[email protected]>
#
# Distributed under terms of the MIT license.
"""Module for reading and processing input files."""
from ._util import TrackedMotion
from .dynamics import Dynamics
from .relaxations import LastMolecularRelaxation, MolecularRelaxation, Relaxations
__all__ = [
"Dynamics",
"TrackedMotion",
"LastMolecularRelaxation",
"MolecularRelaxation",
"Relaxations",
]
| 21.956522
| 82
| 0.724752
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 Malcolm Ramsay <[email protected]>
#
# Distributed under terms of the MIT license.
"""Module for reading and processing input files."""
from ._util import TrackedMotion
from .dynamics import Dynamics
from .relaxations import LastMolecularRelaxation, MolecularRelaxation, Relaxations
__all__ = [
"Dynamics",
"TrackedMotion",
"LastMolecularRelaxation",
"MolecularRelaxation",
"Relaxations",
]
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4433baac982fb2d87a67e95a552e5dba24c69fd0
| 1,000
|
py
|
Python
|
backend/fedmix_backend/datastore.py
|
FEDMix/fedmix-viewer
|
3116d7c85aa73016d5657c2c084cddb7fb64e399
|
[
"Apache-2.0"
] | null | null | null |
backend/fedmix_backend/datastore.py
|
FEDMix/fedmix-viewer
|
3116d7c85aa73016d5657c2c084cddb7fb64e399
|
[
"Apache-2.0"
] | 3
|
2020-10-19T09:27:42.000Z
|
2020-10-23T11:26:52.000Z
|
backend/fedmix_backend/datastore.py
|
FEDMix/fedmix-viewer
|
3116d7c85aa73016d5657c2c084cddb7fb64e399
|
[
"Apache-2.0"
] | null | null | null |
import logging
logger = logging.getLogger(__name__)
| 32.258065
| 77
| 0.598
|
import json
import logging
import os
logger = logging.getLogger(__name__)
class Datastore:
def __init__(self, path, remote_url):
self.path = path
self.remote_url = remote_url
self.datasets = dict()
self.load_datasets()
def load_datasets(self):
print(f"Loading datasets from {self.path} relative to {os.getcwd()}")
try:
with os.scandir(self.path) as directories:
self.abspath = os.path.abspath(self.path)
for directory in directories:
if directory.is_dir():
if 'manifest.json' in os.listdir(directory.path):
self.load_dataset(directory)
except FileNotFoundError as error:
print("Could not find data directory!")
raise error
def load_dataset(self, directory):
with open(os.path.join(directory, 'manifest.json')) as file:
self.datasets[directory.name] = json.load(file)
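# Usage sketch (paths are hypothetical): Datastore('data', 'http://localhost:8000')
# scans ./data and stores each subdirectory's parsed manifest.json in
# self.datasets, keyed by the directory name.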
| 0
| 0
| 0
| 901
| 0
| 0
| 0
| -22
| 67
|
a25b7aed1e40cc5fb4a9fe65324df3c25be54409
| 6,669
|
py
|
Python
|
kohokoho.py
|
Moonlit-Solutions/kohokoho
|
27f7c0c45cf7662b79f5fd75402cbab2098423d0
|
[
"Apache-2.0"
] | null | null | null |
kohokoho.py
|
Moonlit-Solutions/kohokoho
|
27f7c0c45cf7662b79f5fd75402cbab2098423d0
|
[
"Apache-2.0"
] | 22
|
2021-01-24T06:42:55.000Z
|
2021-08-09T14:25:06.000Z
|
kohokoho.py
|
ayushsubedi/term-obfuscate-dataset
|
ac07d46a60114fadf084ade4e1dbf7dc0370bf07
|
[
"Apache-2.0"
] | 2
|
2021-02-01T14:46:34.000Z
|
2021-02-18T03:40:02.000Z
|
from faker import Faker
fake = Faker('en')
| 31.757143
| 78
| 0.575948
|
import click
import string
import random
import time
import pandas as pd
from faker import Faker
fake = Faker('en')
class anon(object):
'''Initialize a df as an anon object.
Args:
df: pandas dataframe
Returns:
anon object
'''
def __init__(self, df):
self.original = df.copy()
self.df = df
def anon_name(self, col):
''' Replace entries in name column with fake names generated
from faker.
Args:
col: column containing name data
Returns:
anon object with replaced name col
'''
unique = self.df[col].unique()
map_dict = {_: fake.name() for _ in unique}
self.df[col] = self.df[col].map(map_dict)
def anon_id(self, col):
''' Replace entries in id column with fake uuid generated
from faker.
Args:
col: column containing id data
Returns:
anon object with replaced id col
'''
unique = self.df[col].unique()
map_dict = {_: fake.uuid4() for _ in unique}
self.df[col] = self.df[col].map(map_dict)
def anon_discrete_num(self, col):
        ''' Replace whole-number entries in the column with random whole
        numbers rescaled to a similar range.
Args:
col: column containing whole number data
Returns:
anon object with replaced discrete col
'''
X_std = (self.df[col] - self.df[col].min()) / (
self.df[col].max() - self.df[col].min())
X_scaled = (X_std * (10 - 1) + 1)
X_scaled_randomized = (X_scaled * random.randint(1, 10)).astype(int)
self.df[col] = X_scaled_randomized
def anon_continuous_num(self, col):
        ''' Replace continuous-valued entries in the column with random
        continuous numbers rescaled to a similar range.
Args:
col: column containing continuous number data
Returns:
anon object with replaced continuous col
'''
X_std = (self.df[col] - self.df[col].min()) / (
self.df[col].max() - self.df[col].min())
X_scaled = (X_std * (10 - 1) + 1)
X_scaled_randomized = round(X_scaled * random.randint(1, 10), 3)
self.df[col] = X_scaled_randomized
def anon_category(self, col):
''' Replace entries in column with categorical data to
anonymized category.
Args:
col: column containing categorical data
Returns:
anon object with replaced categorical col
'''
unique = self.df[col].unique()
rand_ = random.randint(0, 1000)
map_dict = {
category: "Category_" + str(rand_) + " " + str(i)
for i, category in enumerate(unique)
}
self.df[col] = self.df[col].map(map_dict)
def anon_date(self, col):
''' Replace entries in date column with random date
in the same range.
Args:
col: column containing date data
Returns:
anon object with replaced date col
'''
self.df[col] = pd.to_datetime(
self.df[col], infer_datetime_format=True)
start_date = self.df[col].min()
end_date = self.df[col].max()
map_list = [fake.date_between(
start_date=start_date,
end_date=end_date) for i in range(self.df.shape[0])]
self.df[col] = map_list
def anon_email(self, col):
''' Replace entries in email column with random emails.
Args:
col: column containing email
Returns:
anon object with replaced email col
'''
unique = self.df[col].unique()
map_dict = {_: (''.join(random.choices(
string.ascii_lowercase + string.digits,
k=12)))+'@anonemail.com' for _ in unique}
self.df[col] = self.df[col].map(map_dict)
def save_anon_csv(self):
'''Save anon object to a csv file'''
self.df.to_csv(str(time.time())+'kohokoho.csv', index=False)
def anon_df(self):
return self.df
def _df(self):
return self.original
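# Usage sketch outside the CLI (file and column names are hypothetical):
#   koho = anon(pd.read_csv('people.csv'))
#   koho.anon_name('full_name')
#   koho.anon_email('email')
#   koho.save_anon_csv()  # writes <timestamp>kohokoho.csv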
@click.command()
@click.option(
'--csv',
prompt='Enter location of CSV',
help='Enter a valid filepath or buffer')
def cli(csv):
df = pd.read_csv(csv)
koho_df = anon(df)
click.echo('Columns info: '+str(df.info()))
# name
name_col = click.prompt(
'Enter column/s which stores names, each column separated by a comma',
type=str,
default='')
if (name_col != ''):
for col in name_col.split(","):
koho_df.anon_name(col.strip())
# id
id_col = click.prompt(
'Enter column/s which stores id, each column separated by a comma',
type=str,
default='')
if (id_col != ''):
for col in id_col.split(","):
koho_df.anon_id(col.strip())
# continuous values
continuous_col = click.prompt(
'''Enter column/s which stores continuous numbers,
each column separated by a comma''',
type=str,
default='')
if (continuous_col != ''):
for col in continuous_col.split(","):
koho_df.anon_continuous_num(col)
# discrete_col
discrete_col = click.prompt(
'''Enter column/s which stores discrete numbers,
each column separated by a comma''',
type=str,
default='')
if (discrete_col != ''):
for col in discrete_col.split(","):
koho_df.anon_discrete_num(col)
# category
category_col = click.prompt(
'''Enter column/s which stores categorical values,
each column separated by a comma''',
type=str,
default='')
if (category_col != ''):
for col in category_col.split(","):
koho_df.anon_category(col)
# date
date_col = click.prompt(
'Enter column which stores dates, each column separated by a comma',
type=str,
default='')
if (date_col != ''):
for col in date_col.split(","):
            koho_df.anon_date(col.strip())
# email
email_col = click.prompt(
'Enter column which stores email, each column separated by a comma',
type=str,
default='')
if (email_col != ''):
for col in email_col.split(","):
            koho_df.anon_email(col.strip())
# original dataset
click.echo('Original dataset')
click.echo(koho_df._df().head(10))
# final dataset
click.echo('Kohoko dataset')
click.echo(koho_df.anon_df().head(10))
# save anon dataset
if click.confirm('Do you want to save the anonymized csv?'):
koho_df.save_anon_csv()
click.echo('Done!')
| 0
| 2,533
| 0
| 3,970
| 0
| 0
| 0
| -37
| 156
|
48d27e7c7f04241eaba6ebba6f4280a5b806a2d3
| 1,741
|
py
|
Python
|
input_test.py
|
Forsenlol/Reinforced-training-simulating-the-work-of-neural-synapses
|
21e70c3eb5fac8984adb78771bf25f1e5aef823a
|
[
"MIT"
] | null | null | null |
input_test.py
|
Forsenlol/Reinforced-training-simulating-the-work-of-neural-synapses
|
21e70c3eb5fac8984adb78771bf25f1e5aef823a
|
[
"MIT"
] | null | null | null |
input_test.py
|
Forsenlol/Reinforced-training-simulating-the-work-of-neural-synapses
|
21e70c3eb5fac8984adb78771bf25f1e5aef823a
|
[
"MIT"
] | null | null | null |
import gym
import numpy as np
import operator
env = gym.make('MountainCar-v0')
possible_actions = env.action_space.n
print('Possible actions are {}'.format(possible_actions))
epsilonlearn = EpsilonGreedy()
for episode in range(epsilonlearn.episodes):
observation = env.reset()
while True:
env.render()
action = epsilonlearn.select_action(observation)
next_observation, reward, done, _ = env.step(action)
epsilonlearn.update_all(action, reward)
observation = next_observation
if done:
break
env.close()
| 28.540984
| 69
| 0.577254
|
import gym
import numpy as np
import operator
env = gym.make('MountainCar-v0')
possible_actions = env.action_space.n
print('Possible actions are {}'.format(possible_actions))
class EpsilonGreedy():
def __init__(self, episodes=1000, epsilon=0.2):
self.episodes = episodes
self.epsilon = epsilon
self.values = {0: 0.0, 1: 0.0, 2: 0.0}
self.counts = {0: 0, 1: 0, 2: 0}
def explore(self):
        return np.random.choice(list(self.counts.keys()))
def exploit(self):
return max(self.values.items(), \
key=operator.itemgetter(1))[0]
def select_action(self, observation):
if np.random.uniform(0, 1) < self.epsilon:
return self.explore()
else:
return self.exploit()
def update_counts(self, action):
self.counts[action] = self.counts[action] + 1
def update_values(self, action, reward):
current_value = self.values[action]
n = self.counts[action]
self.values[action] = ((n - 1) / float(n)) * \
current_value + (1 / float(n)) * reward
def update_all(self, action, reward):
self.update_counts(action)
self.update_values(action, reward)
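# update_values keeps a running mean without storing past rewards:
# new_mean = ((n - 1) / n) * old_mean + (1 / n) * reward.
# e.g. after rewards -1, -1, 0 for one action the mean goes -1, -1, -2/3.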
epsilonlearn = EpsilonGreedy()
for episode in range(epsilonlearn.episodes):
observation = env.reset()
while True:
env.render()
action = epsilonlearn.select_action(observation)
next_observation, reward, done, _ = env.step(action)
epsilonlearn.update_all(action, reward)
observation = next_observation
if done:
break
env.close()
| 0
| 0
| 0
| 1,134
| 0
| 0
| 0
| 0
| 23
|
c1732f33ed39b706bb238f90f7327e90eb8aadf9
| 6,424
|
py
|
Python
|
tests/utils_tests.py
|
lukegb/ehacktivities
|
bc04d02eaf36a106b943ce0cb8395e85a780f6fc
|
[
"MIT"
] | 1
|
2016-04-30T00:19:13.000Z
|
2016-04-30T00:19:13.000Z
|
tests/utils_tests.py
|
lukegb/ehacktivities
|
bc04d02eaf36a106b943ce0cb8395e85a780f6fc
|
[
"MIT"
] | null | null | null |
tests/utils_tests.py
|
lukegb/ehacktivities
|
bc04d02eaf36a106b943ce0cb8395e85a780f6fc
|
[
"MIT"
] | null | null | null |
# vim: set fileencoding=utf-8
from eactivities import utils as utils
| 31.336585
| 84
| 0.452055
|
# vim: set fileencoding=utf-8
import unittest
import decimal
from eactivities import utils as utils
class TestFormatYear(unittest.TestCase):
def test_basic(self):
self.assertEqual(utils.format_year(2013), "13-14")
self.assertEqual(utils.format_year(2012), "12-13")
def test_wraparound(self):
self.assertEqual(utils.format_year(2000), "00-01")
self.assertEqual(utils.format_year(1999), "99-00")
class TestSplitAccountBracket(unittest.TestCase):
def test_basic(self):
self.assertEqual(
utils.split_account_bracket("General (00)"),
{'id': '00', 'name': 'General'}
)
self.assertEqual(
utils.split_account_bracket("Event 1 (50)"),
{'id': '50', 'name': 'Event 1'}
)
class TestMungeValue(unittest.TestCase):
def test_no_vat(self):
self.assertEqual(
utils.munge_value({
'gross': decimal.Decimal('500')
}),
{
'gross': decimal.Decimal('500')
}
)
self.assertEqual(
utils.munge_value({
'net': decimal.Decimal('500')
}),
{
'net': decimal.Decimal('500')
}
)
self.assertEqual(
utils.munge_value({
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('560')
}),
{
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('560')
}
)
def test_vat_net_gross(self):
self.assertEqual(
utils.munge_value({
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('560'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.12')
}
}),
{
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('560'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.12')
}
}
)
self.assertEqual(
utils.munge_value({
'net': decimal.Decimal('700'),
'gross': decimal.Decimal('560'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.12')
}
}),
{
'net': decimal.Decimal('700'),
'gross': decimal.Decimal('560'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.12')
}
}
)
def test_vat_net(self):
self.assertEqual(
utils.munge_value({
'net': decimal.Decimal('500'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.2')
}
}),
{
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('600'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.2')
}
}
)
self.assertEqual(
utils.munge_value({
'net': decimal.Decimal('500'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.175')
}
}),
{
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('587.50'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.175')
}
}
)
def test_vat_gross(self):
self.assertEqual(
utils.munge_value({
'gross': decimal.Decimal('600'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.2')
}
}),
{
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('600'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.2')
}
}
)
self.assertEqual(
utils.munge_value({
'gross': decimal.Decimal('587.50'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.175')
}
}),
{
'net': decimal.Decimal('500'),
'gross': decimal.Decimal('587.50'),
'vat': {
'rate': 'P1',
'value': decimal.Decimal('1.175')
}
}
)
class TestSplitRole(unittest.TestCase):
def test_simple(self):
self.assertEqual(utils.split_role("A (BCD)"), ("A", "BCD"))
self.assertEqual(utils.split_role("BCD (A)"), ("BCD", "A"))
self.assertEqual(utils.split_role("A ((BCD))"), ("A", "(BCD)"))
class TestFormatPrice(unittest.TestCase):
def test_simple(self):
self.assertEqual(utils.format_price('132.58'), decimal.Decimal('132.58'))
self.assertEqual(utils.format_price('67'), decimal.Decimal('67.00'))
self.assertEqual(utils.format_price('-79'), decimal.Decimal('-79.00'))
def test_extraneous_characters(self):
self.assertEqual(utils.format_price(u'£24.33'), decimal.Decimal('24.33'))
self.assertEqual(utils.format_price(u'£24.93p'), decimal.Decimal('24.93'))
self.assertEqual(utils.format_price(u'-£14.33p'), decimal.Decimal('-14.33'))
class TestQuantizeDecimal(unittest.TestCase):
def test_simple(self):
d = decimal.Decimal
self.assertEqual(utils.quantize_decimal(d(143.2)), 143)
self.assertEqual(utils.quantize_decimal(d(143.9)), 144)
self.assertEqual(utils.quantize_decimal(d(144.2)), 144)
self.assertEqual(utils.quantize_decimal(d(144.9)), 145)
def test_rounding(self):
d = decimal.Decimal
self.assertEqual(utils.quantize_decimal(d(144.4)), 144)
self.assertEqual(utils.quantize_decimal(d(144.5)), 145)
self.assertEqual(utils.quantize_decimal(d(143.4)), 143)
self.assertEqual(utils.quantize_decimal(d(143.5)), 144)
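Read together, the TestMungeValue cases pin down the contract: when a VAT rate is present, gross = net * vat['value'], whichever of net/gross is missing gets derived, and if both are already present they are left untouched even when inconsistent. A sketch of an implementation that satisfies exactly these assertions; it is inferred from the tests, not copied from the eactivities package:

import decimal

def munge_value(value):
    vat = value.get('vat')
    if vat is not None:
        rate = vat['value']                      # already a decimal.Decimal in the tests
        if 'net' in value and 'gross' not in value:
            value['gross'] = value['net'] * rate
        elif 'gross' in value and 'net' not in value:
            value['net'] = value['gross'] / rate
    return value

assert munge_value({'net': decimal.Decimal('500'),
                    'vat': {'rate': 'P1', 'value': decimal.Decimal('1.2')}})['gross'] == 600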
| 6
| 0
| 0
| 6,175
| 0
| 0
| 0
| -13
| 183
|
0ee0a90813c8c9a90a1985052ea498c3076c177b
| 2,480
|
py
|
Python
|
tests/dataset_mock.py
|
fufunoyu/mindspore
|
704e367ada35653e8144eb0528c714f4b0231508
|
[
"Apache-2.0"
] | null | null | null |
tests/dataset_mock.py
|
fufunoyu/mindspore
|
704e367ada35653e8144eb0528c714f4b0231508
|
[
"Apache-2.0"
] | null | null | null |
tests/dataset_mock.py
|
fufunoyu/mindspore
|
704e367ada35653e8144eb0528c714f4b0231508
|
[
"Apache-2.0"
] | 1
|
2021-05-10T03:30:36.000Z
|
2021-05-10T03:30:36.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''Remove after MindData merge to MindSpore '''
| 27.252747
| 78
| 0.645968
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''Remove after MindData merge to MindSpore '''
import numpy as np
from mindspore import Tensor
class MindData:
""" Stub for MindData """
def __init__(self, size=None, batch_size=None, repeat_count=1,
np_types=None, output_shapes=None, input_indexs=()):
self._size = size
self._batch_size = batch_size
self._repeat_count = repeat_count
self._np_types = np_types
self._output_shapes = output_shapes
self._input_indexs = input_indexs
self._iter_num = 0
def get_dataset_size(self):
return self._size
def get_repeat_count(self):
return self._repeat_count
def get_batch_size(self):
return self._batch_size
def output_types(self):
return self._np_types
def output_shapes(self):
return self._output_shapes
@property
def input_indexs(self):
return self._input_indexs
def device_que(self, send_epoch_end=True):
self.queue_name = '6ba41974-209e-11ea-88b0-a24efeb2c736'
self.send_epoch_end = send_epoch_end
return self
def create_tuple_iterator(self):
return self.__iter__()
def send(self, num_epochs=-1):
pass
def stop_send(self):
pass
def continue_send(self):
pass
def __len__(self):
return self._size
def __iter__(self):
return self
def __next__(self):
if self._size < self._iter_num:
raise StopIteration
self._iter_num += 1
next_value = []
for shape, typ in zip(self._output_shapes, self._np_types):
next_value.append(Tensor(np.ndarray(shape, typ)))
return tuple(next_value)
def next(self):
return self.__next__()
def reset(self):
self._iter_num = 0
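For orientation, the stub can be driven like a real dataset: it yields tuples of Tensors with the configured shapes until roughly size iterations have been consumed. The shapes and dtypes below are invented, and the snippet only runs where MindSpore itself is importable:

dataset = MindData(size=3, batch_size=32,
                   np_types=(np.float32, np.int32),
                   output_shapes=((32, 3, 224, 224), (32,)),
                   input_indexs=(0, 1))
for image, label in dataset:
    print(image.shape, label.shape)     # (32, 3, 224, 224) (32,)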
| 0
| 50
| 0
| 1,642
| 0
| 0
| 0
| 4
| 68
|
c78fff6a1b742a2097292cc1762ed16a15045646
| 2,885
|
py
|
Python
|
onnx/backend/test/case/node/softmax.py
|
pchandrasekaran1595/onnx
|
10da6f2e919c8515877e227a41cd44e86ae0bb2d
|
[
"Apache-2.0"
] | 12,820
|
2017-09-07T07:00:24.000Z
|
2022-03-31T14:41:57.000Z
|
onnx/backend/test/case/node/softmax.py
|
pchandrasekaran1595/onnx
|
10da6f2e919c8515877e227a41cd44e86ae0bb2d
|
[
"Apache-2.0"
] | 3,213
|
2017-09-07T17:48:17.000Z
|
2022-03-31T19:44:57.000Z
|
onnx/backend/test/case/node/softmax.py
|
pchandrasekaran1595/onnx
|
10da6f2e919c8515877e227a41cd44e86ae0bb2d
|
[
"Apache-2.0"
] | 2,922
|
2017-09-07T07:46:00.000Z
|
2022-03-31T15:55:24.000Z
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
| 28.284314
| 65
| 0.507452
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def softmax(x, axis=-1): # type: (np.ndarray, int) -> np.ndarray
x_max = np.max(x, axis=axis, keepdims=True)
tmp = np.exp(x - x_max)
s = np.sum(tmp, axis=axis, keepdims=True)
return tmp / s
class Softmax(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
)
x = np.array([[-1, 0, 1]]).astype(np.float32)
# expected output [[0.09003058, 0.24472848, 0.66524094]]
y = softmax(x, axis=1)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_example')
@staticmethod
def export_softmax_axis(): # type: () -> None
x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]
).astype(np.float32)
# expected output
# [[0.032058604 0.08714432 0.23688284 0.6439143 ]
# [0.032058604 0.08714432 0.23688284 0.6439143 ]]
y = softmax(x)
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_large_number')
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=0,
)
y = softmax(x, axis=0)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_axis_0')
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=1,
)
y = softmax(x, axis=1)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_axis_1')
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=2,
)
y = softmax(x, axis=2)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_axis_2')
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=-1,
)
y = softmax(x, axis=-1)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_negative_axis')
# default axis is -1
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_default_axis')
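Subtracting the per-row maximum before exponentiating is the usual stability trick: it cancels algebraically, since exp(x - m) / sum(exp(x - m)) equals exp(x) / sum(exp(x)), but it keeps every exponent at or below zero so rows like the 10000-range one cannot overflow. A standalone check of that property, restating the softmax defined above and reusing the same inputs as export_softmax_axis:

import numpy as np

def softmax(x, axis=-1):
    x_max = np.max(x, axis=axis, keepdims=True)
    tmp = np.exp(x - x_max)
    return tmp / np.sum(tmp, axis=axis, keepdims=True)

x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]], dtype=np.float32)
y = softmax(x)
print(y.sum(axis=-1))           # both rows sum to 1
print(np.allclose(y[0], y[1]))  # True: shifting a row by a constant changes nothing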
| 0
| 2,318
| 0
| -1
| 0
| 185
| 0
| -12
| 205
|
8caa6e6d97d8417d27e29aa9cf0782bc090cde87
| 5,604
|
py
|
Python
|
gorilla3d/nn/modules/sparse/unet.py
|
Gorilla-Lab-SCUT/gorilla-3d
|
399ed8616781a0fbc462f655c0e80c258c5a5207
|
[
"MIT"
] | 6
|
2021-08-30T14:52:05.000Z
|
2022-02-28T19:37:14.000Z
|
gorilla3d/nn/modules/sparse/unet.py
|
Gorilla-Lab-SCUT/gorilla-3d
|
399ed8616781a0fbc462f655c0e80c258c5a5207
|
[
"MIT"
] | null | null | null |
gorilla3d/nn/modules/sparse/unet.py
|
Gorilla-Lab-SCUT/gorilla-3d
|
399ed8616781a0fbc462f655c0e80c258c5a5207
|
[
"MIT"
] | 3
|
2021-12-25T02:17:51.000Z
|
2022-02-23T21:53:16.000Z
|
import torch.nn as nn
try:
    import spconv
    from spconv.modules import SparseModule
except:
    pass
| 37.36
| 77
| 0.470914
|
import functools
from collections import OrderedDict
from typing import Callable, Dict, List, Optional, Union
import gorilla
import torch
import torch.nn as nn
try:
import spconv
from spconv.modules import SparseModule
except:
pass
from .block import ResidualBlock, VGGBlock, AsymResidualBlock
class UBlock(nn.Module):
def __init__(
self,
nPlanes: List[int],
norm_fn: Union[Dict, Callable] = functools.partial(nn.BatchNorm1d,
eps=1e-4,
momentum=0.1),
block_reps: int = 2,
block: Union[str, Callable] = ResidualBlock,
indice_key_id: int = 1,
normalize_before: bool = True,
return_blocks: bool = False,
):
super().__init__()
self.return_blocks = return_blocks
self.nPlanes = nPlanes
# process block and norm_fn caller
if isinstance(block, str):
area = ["residual", "vgg", "asym"]
assert block in area, f"block must be in {area}, but got {block}"
if block == "residual":
block = ResidualBlock
elif block == "vgg":
block = VGGBlock
elif block == "asym":
block = AsymResidualBlock
if isinstance(norm_fn, Dict):
norm_caller = gorilla.nn.get_torch_layer_caller(
norm_fn.pop("type"))
norm_fn = functools.partial(norm_caller, **norm_fn)
blocks = {
f"block{i}": block(nPlanes[0],
nPlanes[0],
norm_fn,
normalize_before=normalize_before,
indice_key=f"subm{indice_key_id}")
for i in range(block_reps)
}
blocks = OrderedDict(blocks)
self.blocks = spconv.SparseSequential(blocks)
if len(nPlanes) > 1:
if normalize_before:
self.conv = spconv.SparseSequential(
norm_fn(nPlanes[0]), nn.ReLU(),
spconv.SparseConv3d(nPlanes[0],
nPlanes[1],
kernel_size=2,
stride=2,
bias=False,
indice_key=f"spconv{indice_key_id}"))
else:
self.conv = spconv.SparseSequential(
spconv.SparseConv3d(nPlanes[0],
nPlanes[1],
kernel_size=2,
stride=2,
bias=False,
indice_key=f"spconv{indice_key_id}"),
norm_fn(nPlanes[1]), nn.ReLU())
self.u = UBlock(nPlanes[1:],
norm_fn,
block_reps,
block,
indice_key_id=indice_key_id + 1,
normalize_before=normalize_before,
return_blocks=return_blocks)
if normalize_before:
self.deconv = spconv.SparseSequential(
norm_fn(nPlanes[1]), nn.ReLU(),
spconv.SparseInverseConv3d(
nPlanes[1],
nPlanes[0],
kernel_size=2,
bias=False,
indice_key=f"spconv{indice_key_id}"))
else:
self.deconv = spconv.SparseSequential(
spconv.SparseInverseConv3d(
nPlanes[1],
nPlanes[0],
kernel_size=2,
bias=False,
indice_key=f"spconv{indice_key_id}"),
norm_fn(nPlanes[0]), nn.ReLU())
blocks_tail = {}
for i in range(block_reps):
blocks_tail[f"block{i}"] = block(
nPlanes[0] * (2 - i),
nPlanes[0],
norm_fn,
indice_key=f"subm{indice_key_id}",
normalize_before=normalize_before)
blocks_tail = OrderedDict(blocks_tail)
self.blocks_tail = spconv.SparseSequential(blocks_tail)
def forward(self, input, previous_outputs: Optional[List] = None):
output = self.blocks(input)
identity = spconv.SparseConvTensor(output.features, output.indices,
output.spatial_shape,
output.batch_size)
if len(self.nPlanes) > 1:
output_decoder = self.conv(output)
if self.return_blocks:
output_decoder, previous_outputs = self.u(
output_decoder, previous_outputs)
else:
output_decoder = self.u(output_decoder)
output_decoder = self.deconv(output_decoder)
output.features = torch.cat(
(identity.features, output_decoder.features), dim=1)
output = self.blocks_tail(output)
if self.return_blocks:
# NOTE: to avoid the residual bug
if previous_outputs is None:
previous_outputs = []
previous_outputs.append(output)
return output, previous_outputs
else:
return output
| 0
| 0
| 0
| 5,271
| 0
| 0
| 0
| 78
| 209
|
72cbdd0bcaa996954661c5bd76f6d40be1176bc5
| 1,602
|
py
|
Python
|
canary/app.py
|
jrxFive/canary
|
725b7fc812c06de26056fe6eee9d155abf3e0c47
|
[
"MIT"
] | null | null | null |
canary/app.py
|
jrxFive/canary
|
725b7fc812c06de26056fe6eee9d155abf3e0c47
|
[
"MIT"
] | null | null | null |
canary/app.py
|
jrxFive/canary
|
725b7fc812c06de26056fe6eee9d155abf3e0c47
|
[
"MIT"
] | null | null | null |
import falcon
import skyline
import outlier
import backend
api = falcon.API()
# Skyline Resources
skyline_MedianAbsoluteDeviation = skyline.MedianAbsoluteDeviation()
skyline_Grubbs = skyline.Grubbs()
skyline_FirstHourAverage = skyline.FirstHourAverage()
skyline_HistogramBins = skyline.HistogramBins()
skyline_LeastSquares = skyline.LeastSquares()
skyline_MeanSubtractionCumulation = skyline.MeanSubtractionCumulation()
skyline_StddevFromAverage = skyline.StddevFromAverage()
skyline_StddevFromMovingAverage = skyline.StddevFromMovingAverage()
# Outlier Resources
outlier_Tukey = outlier.Tukey()
# Backend Resources
backend_AvailableBackends = backend.AvailableBackends()
# Skyline routes
api.add_route('/v1/algos/skyline/medianabsolutedeviation',
skyline_MedianAbsoluteDeviation)
api.add_route('/v1/algos/skyline/grubbs',
skyline_Grubbs)
api.add_route('/v1/algos/skyline/firsthouraverage',
skyline_FirstHourAverage)
api.add_route('/v1/algos/skyline/histogrambins',
skyline_HistogramBins)
api.add_route('/v1/algos/skyline/leastsquares',
skyline_LeastSquares)
api.add_route('/v1/algos/skyline/meansubtractioncumulation',
skyline_MeanSubtractionCumulation)
api.add_route('/v1/algos/skyline/stddevfromaverage',
skyline_StddevFromAverage)
api.add_route('/v1/algos/skyline/stddevfrommovingaverage',
skyline_StddevFromMovingAverage)
# Outlier routes
api.add_route('/v1/algos/outliers/tukey', outlier_Tukey)
# Backend routes
api.add_route('/v1/backends', backend_AvailableBackends)
| 29.127273
| 71
| 0.779026
|
import falcon
import skyline
import outlier
import backend
api = falcon.API()
# Skyline Resources
skyline_MedianAbsoluteDeviation = skyline.MedianAbsoluteDeviation()
skyline_Grubbs = skyline.Grubbs()
skyline_FirstHourAverage = skyline.FirstHourAverage()
skyline_HistogramBins = skyline.HistogramBins()
skyline_LeastSquares = skyline.LeastSquares()
skyline_MeanSubtractionCumulation = skyline.MeanSubtractionCumulation()
skyline_StddevFromAverage = skyline.StddevFromAverage()
skyline_StddevFromMovingAverage = skyline.StddevFromMovingAverage()
# Outlier Resources
outlier_Tukey = outlier.Tukey()
# Backend Resources
backend_AvailableBackends = backend.AvailableBackends()
# Skyline routes
api.add_route('/v1/algos/skyline/medianabsolutedeviation',
skyline_MedianAbsoluteDeviation)
api.add_route('/v1/algos/skyline/grubbs',
skyline_Grubbs)
api.add_route('/v1/algos/skyline/firsthouraverage',
skyline_FirstHourAverage)
api.add_route('/v1/algos/skyline/histogrambins',
skyline_HistogramBins)
api.add_route('/v1/algos/skyline/leastsquares',
skyline_LeastSquares)
api.add_route('/v1/algos/skyline/meansubtractioncumulation',
skyline_MeanSubtractionCumulation)
api.add_route('/v1/algos/skyline/stddevfromaverage',
skyline_StddevFromAverage)
api.add_route('/v1/algos/skyline/stddevfrommovingaverage',
skyline_StddevFromMovingAverage)
# Outlier routes
api.add_route('/v1/algos/outliers/tukey', outlier_Tukey)
# Backend routes
api.add_route('/v1/backends', backend_AvailableBackends)
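Because falcon.API() returns a plain WSGI application, the module above can be served directly. A minimal way to bring it up locally for manual testing; the host, port and use of wsgiref are assumptions, and any WSGI server such as gunicorn would work equally well, assuming the canary package is importable:

from wsgiref.simple_server import make_server
from canary.app import api     # the module shown above

with make_server('127.0.0.1', 8000, api) as httpd:
    print('serving canary on http://127.0.0.1:8000 ...')
    httpd.serve_forever()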
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1a09c3a99ab397cf25966358037555ec10ce96b5
| 1,645
|
py
|
Python
|
dthm4kaiako/events/migrations/0003_auto_20190305_2035.py
|
taskmaker1/dthm4kaiako
|
681babc10b3223b5ae7fdf19b98c53d2bef4ea1a
|
[
"MIT"
] | 3
|
2018-12-10T07:03:02.000Z
|
2021-04-12T02:18:30.000Z
|
dthm4kaiako/events/migrations/0003_auto_20190305_2035.py
|
taskmaker1/dthm4kaiako
|
681babc10b3223b5ae7fdf19b98c53d2bef4ea1a
|
[
"MIT"
] | 566
|
2018-09-30T02:54:28.000Z
|
2022-03-28T01:20:01.000Z
|
dthm4kaiako/events/migrations/0003_auto_20190305_2035.py
|
taskmaker1/dthm4kaiako
|
681babc10b3223b5ae7fdf19b98c53d2bef4ea1a
|
[
"MIT"
] | 3
|
2019-04-04T19:53:39.000Z
|
2021-05-16T02:04:46.000Z
|
# Generated by Django 2.1.5 on 2019-03-05 07:35
| 42.179487
| 397
| 0.594529
|
# Generated by Django 2.1.5 on 2019-03-05 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20190305_0753'),
]
operations = [
migrations.AddField(
model_name='location',
name='city',
field=models.CharField(default='Christchurch', help_text='Town or city, for example: Christchurch', max_length=200),
),
migrations.AddField(
model_name='location',
name='region',
field=models.PositiveSmallIntegerField(choices=[(1, 'Northland'), (2, 'Auckland'), (3, 'Waikato'), (4, 'Bay of Plenty'), (5, 'Gisborne'), (6, "Hawke's Bay"), (7, 'Taranaki'), (8, 'Manawatu-Wanganui'), (9, 'Wellington'), (10, 'Tasman'), (11, 'Nelson'), (12, 'Marlborough'), (13, 'West Coast'), (14, 'Canterbury'), (15, 'Otago'), (16, 'Southland'), (17, 'Chatman Islands')], default=14),
),
migrations.AddField(
model_name='location',
name='room',
field=models.CharField(blank=True, help_text='Name of room or space, for example: Room 134', max_length=200),
),
migrations.AddField(
model_name='location',
name='street_address',
field=models.CharField(blank=True, help_text='Street address location, for example: 12 High Street', max_length=200),
),
migrations.AlterField(
model_name='location',
name='name',
field=models.CharField(help_text='Name of location, for example: Middleton Grange School', max_length=200),
),
]
| 0
| 0
| 0
| 1,531
| 0
| 0
| 0
| 19
| 46
|
6799a09b7e3c82d3322add9429550909c8f992c3
| 2,009
|
py
|
Python
|
electricitymap/contrib/config/__init__.py
|
neilfulwiler/electricitymap-contrib
|
bcfd819eb24fc0530861e44acdb828f3ef635fe0
|
[
"MIT"
] | 1
|
2022-02-08T22:52:09.000Z
|
2022-02-08T22:52:09.000Z
|
electricitymap/contrib/config/__init__.py
|
eliagroup/electricitymap-contrib
|
66e1e5976bf7add639c24b61386b329b15955075
|
[
"MIT"
] | null | null | null |
electricitymap/contrib/config/__init__.py
|
eliagroup/electricitymap-contrib
|
66e1e5976bf7add639c24b61386b329b15955075
|
[
"MIT"
] | null | null | null |
import json
from pathlib import Path
from typing import Dict, List, NewType, Tuple
ZoneKey = NewType("ZoneKey", str)
Point = NewType("Point", Tuple[float, float])
BoundingBox = NewType("BoundingBox", List[Point])
CONFIG_DIR = Path(__file__).parent.parent.parent.parent.joinpath("config").resolve()
# Read JSON files
ZONES_CONFIG = json.load(open(CONFIG_DIR.joinpath("zones.json")))
EXCHANGES_CONFIG = json.load(open(CONFIG_DIR.joinpath("exchanges.json")))
CO2EQ_PARAMETERS_ALL = json.load(open(CONFIG_DIR.joinpath("co2eq_parameters_all.json")))
CO2EQ_PARAMETERS_LIFECYCLE = {
**CO2EQ_PARAMETERS_ALL,
**json.load(open(CONFIG_DIR.joinpath("co2eq_parameters_lifecycle.json")))
}
CO2EQ_PARAMETERS_DIRECT = {
**CO2EQ_PARAMETERS_ALL,
**json.load(open(CONFIG_DIR.joinpath("co2eq_parameters_direct.json")))
}
CO2EQ_PARAMETERS = CO2EQ_PARAMETERS_LIFECYCLE # Global LCA is the default
# Prepare zone bounding boxes
ZONE_BOUNDING_BOXES: Dict[ZoneKey, BoundingBox] = {}
for zone_id, zone_config in ZONES_CONFIG.items():
if "bounding_box" in zone_config:
ZONE_BOUNDING_BOXES[zone_id] = zone_config["bounding_box"]
# Prepare zone neighbours
ZONE_NEIGHBOURS: Dict[ZoneKey, List[ZoneKey]] = {}
for k, v in EXCHANGES_CONFIG.items():
zone_names = k.split("->")
pairs = [(zone_names[0], zone_names[1]), (zone_names[1], zone_names[0])]
for zone_name_1, zone_name_2 in pairs:
if zone_name_1 not in ZONE_NEIGHBOURS:
ZONE_NEIGHBOURS[zone_name_1] = set()
ZONE_NEIGHBOURS[zone_name_1].add(zone_name_2)
# we want neighbors to always be in the same order
for zone, neighbors in ZONE_NEIGHBOURS.items():
ZONE_NEIGHBOURS[zone] = sorted(neighbors)
| 40.18
| 88
| 0.735192
|
import json
from pathlib import Path
from typing import Dict, List, NewType, Tuple
ZoneKey = NewType("ZoneKey", str)
Point = NewType("Point", Tuple[float, float])
BoundingBox = NewType("BoundingBox", List[Point])
CONFIG_DIR = Path(__file__).parent.parent.parent.parent.joinpath("config").resolve()
# Read JSON files
ZONES_CONFIG = json.load(open(CONFIG_DIR.joinpath("zones.json")))
EXCHANGES_CONFIG = json.load(open(CONFIG_DIR.joinpath("exchanges.json")))
CO2EQ_PARAMETERS_ALL = json.load(open(CONFIG_DIR.joinpath("co2eq_parameters_all.json")))
CO2EQ_PARAMETERS_LIFECYCLE = {
**CO2EQ_PARAMETERS_ALL,
**json.load(open(CONFIG_DIR.joinpath("co2eq_parameters_lifecycle.json")))
}
CO2EQ_PARAMETERS_DIRECT = {
**CO2EQ_PARAMETERS_ALL,
**json.load(open(CONFIG_DIR.joinpath("co2eq_parameters_direct.json")))
}
CO2EQ_PARAMETERS = CO2EQ_PARAMETERS_LIFECYCLE # Global LCA is the default
# Prepare zone bounding boxes
ZONE_BOUNDING_BOXES: Dict[ZoneKey, BoundingBox] = {}
for zone_id, zone_config in ZONES_CONFIG.items():
if "bounding_box" in zone_config:
ZONE_BOUNDING_BOXES[zone_id] = zone_config["bounding_box"]
# Prepare zone neighbours
ZONE_NEIGHBOURS: Dict[ZoneKey, List[ZoneKey]] = {}
for k, v in EXCHANGES_CONFIG.items():
zone_names = k.split("->")
pairs = [(zone_names[0], zone_names[1]), (zone_names[1], zone_names[0])]
for zone_name_1, zone_name_2 in pairs:
if zone_name_1 not in ZONE_NEIGHBOURS:
ZONE_NEIGHBOURS[zone_name_1] = set()
ZONE_NEIGHBOURS[zone_name_1].add(zone_name_2)
# we want neighbors to always be in the same order
for zone, neighbors in ZONE_NEIGHBOURS.items():
ZONE_NEIGHBOURS[zone] = sorted(neighbors)
def emission_factors(zone_key: ZoneKey):
override = CO2EQ_PARAMETERS["emissionFactors"]["zoneOverrides"].get(zone_key, {})
defaults = CO2EQ_PARAMETERS["emissionFactors"]["defaults"]
merged = {**defaults, **override}
return dict([(k, (v or {}).get("value")) for (k, v) in merged.items()])
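The emission_factors() helper layers a zone's overrides on top of the defaults and then keeps only each entry's "value". The same merge, spelled out with invented numbers that are not real electricityMap factors:

defaults = {"coal": {"value": 820}, "wind": {"value": 11}}
override = {"coal": {"value": 700}}                  # pretend zone-specific override
merged = {**defaults, **override}
print({k: (v or {}).get("value") for k, v in merged.items()})
# -> {'coal': 700, 'wind': 11}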
| 0
| 0
| 0
| 0
| 0
| 282
| 0
| 0
| 23
|
afafff9e14db07eceecd082a992b541d798cad5f
| 2,200
|
py
|
Python
|
tests/conftest.py
|
RussellJQA/test-statsroyale
|
376d00d2bd998de7c58b07df06f1fe7dc82816d1
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
RussellJQA/test-statsroyale
|
376d00d2bd998de7c58b07df06f1fe7dc82816d1
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
RussellJQA/test-statsroyale
|
376d00d2bd998de7c58b07df06f1fe7dc82816d1
|
[
"MIT"
] | null | null | null |
"""
This module contains shared fixtures.
"""
# pip installed
# scope="session" means "Run fixture 1x per session"
# (1x before entire test suite)
# scope="function" [the default] means "Run fixture 1x for each test case" )"
| 31.428571
| 77
| 0.711818
|
"""
This module contains shared fixtures.
"""
import json
from pathlib import Path
# pip installed
import pytest # installed with webdriver_manager
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager
# scope="session" means "Run fixture 1x per session"
# (1x before entire test suite)
@pytest.fixture(scope="session")
def config():
# NOTE: pytest --fixtures test_cards.py lists its available fixtures
# A docstring (as below) for the fixture will be included in the list
"""Load/update configuration parameters to pass to WebDriver instance"""
# Read the file
with open(Path("tests") / "config.json") as config_file:
config = json.load(config_file)
# Assert values are acceptable
assert config["browser"] in ["Firefox", "Chrome"]
assert isinstance(config["implicit_wait"], int)
assert config["implicit_wait"] > 0
# Return config so it can be used
return config
# scope="function" [the default] means "Run fixture 1x for each test case" )"
@pytest.fixture
def browser(config):
"""Yield WebDriver instance with the specified configuration"""
# Setup
# This section known as Arrange (in the Arrange-Act-Assert paradigm)
# This section known as Given (in the Given-When-Then paradigm)
# Initialize the WebDriver instance
if config["browser"] == "Chrome":
b = webdriver.Chrome(ChromeDriverManager().install())
elif config["browser"] == "Firefox":
b = webdriver.Firefox(executable_path=GeckoDriverManager().install())
else:
raise Exception(f'Browser "{config["browser"]}" is not supported')
b.maximize_window() # Needed so that searched-for elements are visible
# Wait up to specified number of seconds for elements to appear
b.implicitly_wait(config["implicit_wait"])
# Run the test: Return the WebDriver instance for the test
# This section known as Act (in the Arrange-Act-Assert paradigm)
# This section known as When (in the Given-When-Then paradigm)
yield b
# Teardown/Cleanup
# Quit the WebDriver instance for the cleanup
b.quit()
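A test module consumes these fixtures simply by naming them as parameters; pytest builds config once per its scope and a fresh browser per test. A hypothetical example, where the file name, target URL and assertion are placeholders rather than part of this repository:

# tests/test_smoke.py (hypothetical)
def test_homepage_loads(browser, config):
    browser.get("https://statsroyale.com")          # assumed target site
    assert browser.title != ""                      # placeholder assertion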
| 0
| 1,693
| 0
| 0
| 0
| 0
| 0
| 64
| 213
|
b82068f563050b399255df26391497881d98b3a5
| 12,152
|
py
|
Python
|
pypureclient/flashblade/FB_2_2/api/rdl_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/flashblade/FB_2_2/api/rdl_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/flashblade/FB_2_2/api/rdl_api.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# python 2 and python 3 compatibility library
from .. import models
| 37.622291
| 199
| 0.632242
|
# coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.2, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class RDLApi(object):
def __init__(self, api_client):
self.api_client = api_client
def api22_rapid_data_locking_get_with_http_info(
self,
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.RapidDataLockingResponse
"""Get the status of the Rapid Data Locking feature.
Displays the status of the Rapid Data Locking feature.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api22_rapid_data_locking_get_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: RapidDataLockingResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.2/rapid-data-locking', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RapidDataLockingResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api22_rapid_data_locking_patch_with_http_info(
self,
rapid_data_locking=None, # type: models.RapidDataLocking
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.RapidDataLockingResponse
"""Modifies the Rapid Data Locking feature.
Modifies the Rapid Data Locking feature. Note that the feature can only be enabled if there are no file systems nor buckets created on the array. Once enabled, the feature cannot be modified.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api22_rapid_data_locking_patch_with_http_info(rapid_data_locking, async_req=True)
>>> result = thread.get()
:param RapidDataLocking rapid_data_locking: (required)
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: RapidDataLockingResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
# verify the required parameter 'rapid_data_locking' is set
if rapid_data_locking is None:
raise TypeError("Missing the required parameter `rapid_data_locking` when calling `api22_rapid_data_locking_patch`")
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'rapid_data_locking' in params:
body_params = params['rapid_data_locking']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.2/rapid-data-locking', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RapidDataLockingResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api22_rapid_data_locking_rotate_post_with_http_info(
self,
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> None
"""Rotates the external keys on the associated EKM appliance.
Rotates the external keys on the associated EKM appliance.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api22_rapid_data_locking_rotate_post_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.2/rapid-data-locking/rotate', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
def api22_rapid_data_locking_test_get_with_http_info(
self,
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.TestResultResponse
"""Displays Rapid Data Locking test results.
Displays a detailed result of a Rapid Data Locking test.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.api22_rapid_data_locking_test_get_with_http_info(async_req=True)
>>> result = thread.get()
:param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
:param bool _return_http_data_only: Returns only data field.
:param bool _preload_content: Response is converted into objects.
:param int _request_timeout: Total request timeout in seconds.
It can also be a tuple of (connection time, read time) timeouts.
:return: TestResultResponse
If the method is called asynchronously,
returns the request thread.
"""
params = {k: v for k, v in six.iteritems(locals()) if v is not None}
# Convert the filter into a string
if params.get('filter'):
params['filter'] = str(params['filter'])
if params.get('sort'):
params['sort'] = [str(_x) for _x in params['sort']]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type(
['application/json'])
# Authentication setting
auth_settings = ['AuthorizationHeader']
return self.api_client.call_api(
'/api/2.2/rapid-data-locking/test', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TestResultResponse',
auth_settings=auth_settings,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
collection_formats=collection_formats,
)
| 0
| 0
| 0
| 11,690
| 0
| 0
| 0
| -11
| 90
|
b9c448bf2426d47f5d9150b6f0c8d6cbf069d50b
| 6,296
|
py
|
Python
|
cwharaj/cwharaj/parser/opensooq_parser.py
|
trujunzhang/djzhang-targets
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
[
"MIT"
] | 2
|
2018-12-03T16:30:55.000Z
|
2019-04-03T13:29:20.000Z
|
cwharaj/cwharaj/parser/opensooq_parser.py
|
trujunzhang/djzhang-targets
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
[
"MIT"
] | null | null | null |
cwharaj/cwharaj/parser/opensooq_parser.py
|
trujunzhang/djzhang-targets
|
c2e327acde9d51f0455e7243f17d93d74b579501
|
[
"MIT"
] | 1
|
2019-04-03T13:29:25.000Z
|
2019-04-03T13:29:25.000Z
|
# coding=utf-8
| 40.883117
| 121
| 0.630083
|
# coding=utf-8
import logging
from cwharaj.items import Ad, CacheItem, City, Member, OpensooqPhone
from cwharaj.parser.base_parser import BaseParser
from cwharaj.parser.utils.harajs_comments import HarajsComments
from cwharaj.parser.utils.harajs_section import HarajsSection
from cwharaj.parser.utils.timer_opensooq_comment_date_util import OpensooqCommentDateUtil
class OpensooqParse(BaseParser):
def __init__(self):
from cwharaj.scraped_websites import WebsiteTypes
self.url_from = WebsiteTypes.opensooq.value
super(OpensooqParse, self).__init__()
# Here, we store items from newest to oldest,
# so the first item fetched from the database is the oldest.
def parse_paginate(self, url, hxs, cache_db, history_db):
links = hxs.xpath('//*[@id="gridPostListing"]/li')
logging.debug("Get rows count from the opensooq: {}.".format(len(links)))
for idx, link in enumerate(links):
Li_selector = '//*[@id="gridPostListing"]/li[' + str(idx + 1) + ']'
href = self.get_value_from_response_with_urljoin(hxs,
Li_selector + '/div/div[@class="rectLiDetails"]/h3/a/@href',
url)
from cwharaj.utils.crawl_utils import CrawlUtils
_ID = CrawlUtils.get_model_id_by_url_from(href, self.url_from)
# If the link already exists in the history database, ignore it.
if history_db.check_history_exist(_ID):
# logging.debug(" item exist {} on the history database".format(_ID))
continue
item = CacheItem.get_default(model_id=_ID, url=href, url_from=self.url_from)
cache_db.save_cache(item, idx)
# here, must sleep a second.
# time.sleep(1)
def parse(self, url, hxs, item_db):
from cwharaj.utils.crawl_utils import CrawlUtils
_ID = CrawlUtils.get_model_id_by_url_from(url, self.url_from)
# ADs User
# memberName len(list) = 2
_memberName = self.get_value_response(hxs, '//*[@class="userDet tableCell vTop"]/strong/a/text()')
# member_timeregister is the join-date text, e.g. 'تاريخ الانضمام 08/10/2015' ("Join date 08/10/2015" in Arabic)
member_timeregister = self.get_value_response(hxs, '//span[@class="joinDate"]/text()')
_ads_city = self.get_value_response(hxs,
'//*[@class="sellerAddress"]/span[@class="sellerAddressText"]/a/text()')
# ADs
_ads_title = self.get_value_response(hxs, '//*[@class="postTitleCont"]/div/h1/text()')
_image_link = self.get_pictures(hxs, '//*[@class="galleryLeftList fLeft"]/ul/li/a/img/@src')
time_added = self.get_value_response(hxs, '//*[@class="postDate fRight"]/text()')
_ads_body = self.get_all_value_response(hxs, '//*[@class="postDesc"]/p/text()')
_sections = self.get_section(self.get_value_response(hxs, '//*[@class="breadcrumbs"]'))
# Fixing the empty page.
if (_ads_title == '') and (len(_sections) == 0):
logging.debug(" The empty page on the opensooq")
return {
"id_ads": _ID,
"url_from": self.url_from
}
section_item = HarajsSection(_sections, item_db).get_section_item_for_opensooq()
# Replace "\n","\r"
_ads_title = _ads_title.replace("\n", "").replace("\r", "").strip()
# ====
# Save to relative database
# ====
# Opensooq serves the contact as a base64-encoded image,
# so first request it via AJAX.
ads_contact = ''
phone_number_base64 = self.query_phone_number_base64_image(hxs)
if phone_number_base64:
opensooq_phone_id = item_db.save_opensooq_phone(OpensooqPhone.get_default(phone_number_base64))
# opensooq's contact is a specialized format.
ads_contact = Ad.get_opensooq_phone(opensooq_phone_id)
time_added = OpensooqCommentDateUtil().get_time_for_opensooq_time_added(time_added)
member_timeregister = OpensooqCommentDateUtil().get_time_for_opensooq_member_timeregister(member_timeregister)
city_id = item_db.save_city(City.get_default(_ads_city))
_His_announcement_id = item_db.save_member(
Member.get_default(user_name=_memberName, timeregister=member_timeregister, phone=ads_contact))
item = Ad.get_default(
section_item=section_item,
ads_title=_ads_title,
city_id=city_id,
ads_contact=ads_contact,
ads_body=_ads_body,
image_link=_image_link,
His_announcement_id=_His_announcement_id,
url_from=self.url_from,
Time_added=time_added,
type_ads_or=1, _close_ads=0
)
id_ads = item_db.save_ad(item)
# Scrape all comments for the ad.
HarajsComments(self, item_db, id_ads).save_for_opensooq(hxs)
return item
def query_phone_number_base64_image(self, hxs):
"""
The Opensooq phone number is served as a base64-encoded image,
so we fetch the base64 data via an AJAX call.
:param hxs:
:return:
"""
phone_data_id = self.get_value_response(hxs, '//*[@class="phoneNumber table getPhoneNumber"]/@data-id')
phone_data_type = self.get_value_response(hxs, '//*[@class="phoneNumber table getPhoneNumber"]/@data-type')
if phone_data_id:
ajax_url = "https://sa.opensooq.com/ar/post/get-phone-number?model_id={}&model_type={}".format(
phone_data_id, phone_data_type)
return self.ajax(ajax_url)
def get_pictures(self, hxs, selector):
_pictures = hxs.xpath(selector).extract()
list = []
for picture in _pictures:
list.append(picture.replace('75x75', '563x400'))
return ",".join(list)
def get_section(self, section_panel):
from BeautifulSoup import BeautifulSoup
soup = BeautifulSoup(section_panel)
_As = soup.findAll('a', {'property': 'v:title'})
sections = []
for a in _As:
sections.append(a.text.replace("\n", "").replace("\r", "").strip().encode('utf-8'))
return sections
| 24
| 0
| 0
| 5,894
| 0
| 0
| 0
| 218
| 156
|
7a3b01013fde2ee3d3ab666efa9cbbf280de529f
| 5,695
|
py
|
Python
|
vxt/view/task/factory.py
|
veeso/voice-xtractor
|
8382dd889ed665741c3c66b65321cc1d36c6a767
|
[
"MIT"
] | 1
|
2022-01-11T01:31:36.000Z
|
2022-01-11T01:31:36.000Z
|
vxt/view/task/factory.py
|
veeso/vxt
|
8382dd889ed665741c3c66b65321cc1d36c6a767
|
[
"MIT"
] | null | null | null |
vxt/view/task/factory.py
|
veeso/vxt
|
8382dd889ed665741c3c66b65321cc1d36c6a767
|
[
"MIT"
] | null | null | null |
# VXT
# Developed by Christian Visintin
#
# MIT License
# Copyright (c) 2021 Christian Visintin
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# vxt
# tasks
| 38.47973
| 87
| 0.689201
|
# VXT
# Developed by Christian Visintin
#
# MIT License
# Copyright (c) 2021 Christian Visintin
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from ..context import Context
from ..args import CliArgs
from .task import Task
from typing import Optional, Type
# vxt
from vxt.audio.audio import Audio
# tasks
from .audio.amplify import AmplifyTask
from .audio.delete_track import DeleteTrackTask
from .audio.export import ExportTask
from .audio.normalize import NormalizeTask
from .audio.play import PlayTask, PlayChunkTask
from .audio.rename_track import RenameTracksTask
from .audio.split_silence import SplitSilenceTask
from .audio.split_track import SplitTrackTask
from .speech2text.manual import ManualSpeechTask
from .speech2text.speech import SpeechTask
class TaskFactory(object):
"""A task factory"""
@staticmethod
def make(task: Type[Task], ctx: Context, cli_args: CliArgs) -> Task:
"""Make a Task from class name and current context"""
if task == AmplifyTask:
return TaskFactory.__amplify_task(ctx, cli_args.get("dB").as_int())
elif task == DeleteTrackTask:
return TaskFactory.__delete_track_trask(ctx)
elif task == ExportTask:
return TaskFactory.__export_task(ctx, cli_args.get("format").as_str())
elif task == NormalizeTask:
return TaskFactory.__normalize_task(ctx)
elif task == PlayTask:
return TaskFactory.__play_task(ctx)
elif task == PlayChunkTask:
return TaskFactory.__play_chunk_task(
ctx,
cli_args.get_or("start", None).as_int(),
cli_args.get_or("end", None).as_int(),
)
elif task == RenameTracksTask:
return TaskFactory.__rename_track_task(ctx)
elif task == SplitSilenceTask:
return TaskFactory.__split_silence_task(
ctx,
cli_args.get("min_silence_len").as_int(),
cli_args.get("silence_threshold").as_int(),
cli_args.get("keep_silence").as_int(),
)
elif task == SplitTrackTask:
return TaskFactory.__split_track_task(ctx, cli_args.get("offset").as_int())
elif task == SpeechTask:
return TaskFactory.__speech_task(ctx)
elif task == ManualSpeechTask:
return TaskFactory.__manual_speech_task(
ctx, cli_args.get("speech").as_str()
)
else:
raise NotImplementedError
@staticmethod
def __amplify_task(ctx: Context, dB: int) -> AmplifyTask:
return AmplifyTask(TaskFactory.__get_audio(ctx), dB)
@staticmethod
def __delete_track_trask(ctx: Context) -> DeleteTrackTask:
return DeleteTrackTask(ctx.playlist, ctx.cursor)
@staticmethod
def __export_task(ctx: Context, format: Optional[str]) -> ExportTask:
return ExportTask(TaskFactory.__get_audio(ctx), format, ctx.config.output_dir)
@staticmethod
def __normalize_task(ctx: Context) -> NormalizeTask:
return NormalizeTask(TaskFactory.__get_audio(ctx))
@staticmethod
def __play_task(ctx: Context) -> PlayTask:
return PlayTask(TaskFactory.__get_audio(ctx))
@staticmethod
def __play_chunk_task(
ctx: Context, start: Optional[int], end: Optional[int]
) -> PlayChunkTask:
return PlayChunkTask(TaskFactory.__get_audio(ctx), start, end)
@staticmethod
def __rename_track_task(ctx: Context) -> RenameTracksTask:
return RenameTracksTask(ctx.playlist, ctx.config.output_fmt)
@staticmethod
def __split_silence_task(
ctx: Context, min_silence_len: int, silence_threshold: int, keep_silence: int
) -> SplitSilenceTask:
return SplitSilenceTask(
TaskFactory.__get_audio(ctx),
min_silence_len,
silence_threshold,
keep_silence,
)
@staticmethod
def __split_track_task(ctx: Context, offset: int) -> SplitTrackTask:
return SplitTrackTask(
ctx.playlist, ctx.cursor, offset, ctx.config.engine, ctx.config.language
)
@staticmethod
def __speech_task(ctx: Context) -> SpeechTask:
return SpeechTask(ctx.config.engine, ctx.playlist, ctx.config.language)
@staticmethod
def __manual_speech_task(ctx: Context, speech: str) -> ManualSpeechTask:
return ManualSpeechTask(TaskFactory.__get_audio(ctx), speech)
@staticmethod
def __get_audio(ctx: Context) -> Audio:
if ctx.playlist.length > 0:
return ctx.playlist.get(ctx.cursor)
else:
# If playlist is empty, edit audio source
return ctx.source
| 0
| 3,524
| 0
| 381
| 0
| 0
| 0
| 270
| 354
|
c0a838d1742e11f233a011458ca579ef6d49d4db
| 2,243
|
py
|
Python
|
bag8/utils.py
|
peopledoc/bag8
|
6bd8a00ac258297b94eac6c71f4efe275c416203
|
[
"MIT"
] | 4
|
2015-05-27T13:59:00.000Z
|
2016-12-11T20:34:00.000Z
|
bag8/utils.py
|
peopledoc/bag8
|
6bd8a00ac258297b94eac6c71f4efe275c416203
|
[
"MIT"
] | 64
|
2015-04-27T08:33:14.000Z
|
2016-07-15T13:33:11.000Z
|
bag8/utils.py
|
peopledoc/bag8
|
6bd8a00ac258297b94eac6c71f4efe275c416203
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import re
from functools import partial
from subprocess import Popen
from subprocess import PIPE
RE_WORD = re.compile('\W')
call = partial(Popen, stdout=PIPE, stderr=PIPE)
| 20.962617
| 66
| 0.631743
|
from __future__ import absolute_import, division, print_function
import os
import re
import socket
import sys
from itertools import count
from functools import partial
from subprocess import Popen
from subprocess import PIPE
from time import sleep
import click
from distutils.spawn import find_executable
from compose.cli.docker_client import docker_client
from bag8.exceptions import CheckCallFailed, WaitLinkFailed
RE_WORD = re.compile('\W')
call = partial(Popen, stdout=PIPE, stderr=PIPE)
def check_call(args, exit=True, **kwargs):
proc = call(args, **kwargs)
out, err = proc.communicate()
if not proc.returncode:
return out, err, proc.returncode
if exit:
click.echo(out)
click.echo(err)
sys.exit(proc.returncode)
else:
raise CheckCallFailed(out + '\n' + err)
def exec_(args):
# byebye!
os.execv(find_executable(args[0]), args)
def wait_(host, port, max_retry=10, retry_interval=1):
counter = count()
while counter.next() < max_retry:
try:
return socket.socket().connect((host, int(port)))
except socket.error:
click.echo('wait for {}:{}'.format(host, port))
sleep(retry_interval)
raise WaitLinkFailed("can't link to {}:{}".format(host, port))
def confirm(msg):
click.echo('')
click.echo(msg)
click.echo('proceed ?')
char = None
while char not in ['y', 'n']:
click.echo('Yes (y) or no (n) ?')
char = click.getchar()
# Yes
if char == 'y':
return True
def inspect(container, client=None):
client = client or docker_client()
return client.inspect_container(container)
def simple_name(text):
return RE_WORD.sub('', text)
def write_conf(path, content, bak_path=None):
# keep
if bak_path:
call(['cp', path, bak_path])
cmd = [
'sudo',
'--reset-timestamp',
'tee',
path,
]
# confirm
if not confirm('`{0}` ?'.format(' '.join(cmd))):
return
process = call(cmd, stdin=PIPE)
process.stdin.write(content)
process.stdin.close()
exit_code = process.wait()
if exit_code != 0:
raise Exception('Failed to update {0}'.format(path))
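For orientation, check_call() returns (stdout, stderr, returncode) on success and either exits or raises CheckCallFailed otherwise, while wait_() polls a TCP port until it answers. A small usage sketch, assuming bag8 and its dependencies are importable in the Python 2 environment this module targets and that a POSIX echo is available; the command and port are arbitrary:

from __future__ import print_function
from bag8.utils import check_call, wait_

out, err, code = check_call(['echo', 'hello'])      # succeeds, so no exit
print(code, out.strip())

wait_('localhost', 5432, max_retry=3)               # raises WaitLinkFailed if nothing listens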
| 0
| 0
| 0
| 0
| 0
| 1,573
| 0
| 57
| 365
|
400708af3eb122277da5836ea70cf914223ea9b3
| 1,387
|
py
|
Python
|
src/c3_price/main.py
|
KlimaDAO/discord-bots
|
96fefab99e27abccd0b2d5c9d3812f8ad60e94d6
|
[
"MIT"
] | 2
|
2022-01-24T19:58:44.000Z
|
2022-03-07T20:16:52.000Z
|
src/c3_price/main.py
|
KlimaDAO/discord-bots
|
96fefab99e27abccd0b2d5c9d3812f8ad60e94d6
|
[
"MIT"
] | 8
|
2021-11-29T19:38:01.000Z
|
2022-03-29T19:19:39.000Z
|
src/c3_price/main.py
|
KlimaDAO/discord-bots
|
96fefab99e27abccd0b2d5c9d3812f8ad60e94d6
|
[
"MIT"
] | 12
|
2021-10-06T20:23:08.000Z
|
2022-03-31T23:51:03.000Z
|
import os
from ..utils import get_discord_client, get_eth_web3, get_polygon_web3, load_abi
BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"]
# Initialized Discord client
client = get_discord_client()
# Initialize web3
web3 = get_polygon_web3()
web3_eth = get_eth_web3()
# Load ABI
c3_abi = load_abi('erc20_token.json')
client.run(BOT_TOKEN)
| 26.169811
| 78
| 0.674838
|
import os
from discord.ext import tasks
from ..constants import C3_ADDRESS, FRAX_DECIMALS, \
C3_DECIMALS, FRAX_C3_POOL
from ..contract_info import token_supply, uni_v2_pool_price
from ..utils import get_discord_client, get_eth_web3, \
get_polygon_web3, load_abi, \
update_nickname, update_presence, \
prettify_number
BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"]
# Initialized Discord client
client = get_discord_client()
# Initialize web3
web3 = get_polygon_web3()
web3_eth = get_eth_web3()
# Load ABI
c3_abi = load_abi('erc20_token.json')
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
if not update_info.is_running():
update_info.start()
@tasks.loop(seconds=300)
async def update_info():
price = uni_v2_pool_price(web3, FRAX_C3_POOL, FRAX_DECIMALS - C3_DECIMALS)
supply = token_supply(web3, C3_ADDRESS, c3_abi, C3_DECIMALS)
if price is not None and supply is not None:
price_text = f'${price:,.3f} C3'
print(price_text)
success = await update_nickname(client, price_text)
if not success:
return
supply_text = f'Supply: {prettify_number(supply)}'
success = await update_presence(client, supply_text)
if not success:
return
client.run(BOT_TOKEN)
| 0
| 686
| 0
| 0
| 0
| 0
| 0
| 244
| 114
|
1d1b2e85d0c1358607cd2dd62f5bab1dffbce3a4
| 191
|
py
|
Python
|
setup.py
|
kempei/hx711py
|
1256669450f757e46c859c8b21b40e278f31fef3
|
[
"Apache-2.0"
] | 1
|
2022-03-24T13:50:48.000Z
|
2022-03-24T13:50:48.000Z
|
setup.py
|
kempei/hx711py-jetsonnano
|
1256669450f757e46c859c8b21b40e278f31fef3
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
kempei/hx711py-jetsonnano
|
1256669450f757e46c859c8b21b40e278f31fef3
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
setup(
name='hx711',
version='0.1',
description='HX711 Python Library for Jetson Nano',
py_modules=['hx711'],
install_requires=['logzero']
)
| 19.1
| 55
| 0.670157
|
from setuptools import setup
setup(
name='hx711',
version='0.1',
description='HX711 Python Library for Jetson Nano',
py_modules=['hx711'],
install_requires=['logzero']
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2b8e9a78122675bae02f4d3f6416c2c2d0b63adb
| 168
|
py
|
Python
|
snippets/delete_from_multi_tables.py
|
hit9/skylark
|
5b7a14e401196e025117b095a7c5e68e551e547a
|
[
"BSD-2-Clause",
"MIT"
] | 114
|
2015-01-06T06:12:30.000Z
|
2021-08-25T06:17:05.000Z
|
snippets/delete_from_multi_tables.py
|
keng-king/skylark
|
5b7a14e401196e025117b095a7c5e68e551e547a
|
[
"BSD-2-Clause",
"MIT"
] | 10
|
2015-03-23T17:05:13.000Z
|
2017-03-24T11:50:18.000Z
|
snippets/delete_from_multi_tables.py
|
keng-king/skylark
|
5b7a14e401196e025117b095a7c5e68e551e547a
|
[
"BSD-2-Clause",
"MIT"
] | 59
|
2015-01-21T14:56:23.000Z
|
2021-09-05T01:24:37.000Z
|
from models import User, Post
# delete user from post, user where post.user_id = user.id
query = (Post & User).delete(User)  # mysql supports this; sqlite3 does not
| 33.6
| 77
| 0.744048
|
from models import User, Post
# delete user from post, user where post.user_id = user.id
query = (Post & User).delete(User)  # mysql supports this; sqlite3 does not
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b0e41ddb7c1863a92a19e81a28f9239f50f770ff
| 868
|
py
|
Python
|
Semproject/Courseoutline/choices.py
|
sllash2000/CourseOutline
|
d44a2a7d49695d988f8ffa6eb5407011029ece25
|
[
"MIT"
] | null | null | null |
Semproject/Courseoutline/choices.py
|
sllash2000/CourseOutline
|
d44a2a7d49695d988f8ffa6eb5407011029ece25
|
[
"MIT"
] | null | null | null |
Semproject/Courseoutline/choices.py
|
sllash2000/CourseOutline
|
d44a2a7d49695d988f8ffa6eb5407011029ece25
|
[
"MIT"
] | null | null | null |
FacultyNameChoices = [
('FAH','Arts And Humanities'),('FBA','Business Admin'),
('FEd','Education'),('FRS','Religous Studies'),('FOS','Science'),
('FIT','Information Technology'),('FON','Nursing')]
CourseCategory = [
('Core Course','Core Course'),('General Education Course','General Education Course'),
('Major Required Course','Major Required Course'),('Major Elective Course','Major Elective Course')
]
HoursChoices = [
(1,'1'),(2,'2'),(3,'3'),(4,'4')
]
Credits = [
('1','1'),('2','2'),('3','3'),('4','4')
]
ResourcesTypes = [
('Text Book','Text Book'),('Internet Resources','Internet Resources'),
('Research Paper','Reserach Paper')
]
EvaluationTypes = [
('Assignments','Assignments'),('Quizzes','Quizzes'),('Attendance','Attendance'),
('Midterm Exam','Midterm Exam'),('Final Exam','Final Exam'),('Projects','Projects')
]
| 39.454545
| 103
| 0.615207
|
FacultyNameChoices = [
('FAH','Arts And Humanities'),('FBA','Business Admin'),
('FEd','Education'),('FRS','Religous Studies'),('FOS','Science'),
('FIT','Information Technology'),('FON','Nursing')]
CourseCategory = [
('Core Course','Core Course'),('General Education Course','General Education Course'),
('Major Required Course','Major Required Course'),('Major Elective Course','Major Elective Course')
]
HoursChoices = [
(1,'1'),(2,'2'),(3,'3'),(4,'4')
]
Credits = [
('1','1'),('2','2'),('3','3'),('4','4')
]
ResourcesTypes = [
('Text Book','Text Book'),('Internet Resources','Internet Resources'),
    ('Research Paper','Research Paper')
]
EvaluationTypes = [
('Assignments','Assignments'),('Quizzes','Quizzes'),('Attendance','Attendance'),
('Midterm Exam','Midterm Exam'),('Final Exam','Final Exam'),('Projects','Projects')
]
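A minimal sketch of how these choice lists would typically be attached to a Django model; the model and field names below are illustrative, not taken from the project:
from django.db import models

class CourseOutline(models.Model):
    # choices reference the lists defined above
    faculty = models.CharField(max_length=3, choices=FacultyNameChoices)
    category = models.CharField(max_length=64, choices=CourseCategory)
    hours = models.IntegerField(choices=HoursChoices)           # integer keys -> IntegerField
    credits = models.CharField(max_length=1, choices=Credits)   # string keys -> CharField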
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
960ae6c2cee95810d30359e79da59c8a97171c61
| 894
|
py
|
Python
|
diagnose.py
|
youngdon95/start_over
|
48352c42752f6dd24c82250f57c5ee1434352688
|
[
"MIT"
] | null | null | null |
diagnose.py
|
youngdon95/start_over
|
48352c42752f6dd24c82250f57c5ee1434352688
|
[
"MIT"
] | null | null | null |
diagnose.py
|
youngdon95/start_over
|
48352c42752f6dd24c82250f57c5ee1434352688
|
[
"MIT"
] | null | null | null |
import xlrd
from datetime import date
import random
workbook = xlrd.open_workbook('diagnose.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
file = open("diagnoseInsert.txt","w")
empNum = 2
for x in range(0, 30884):
year = random.choice(range(1990, 2018))
month = random.choice(range(1, 13))
day = random.choice(range(1, 29))
disease=worksheet.cell(x, 0).value
pid=worksheet.cell(x, 1).value
pid = int(pid)
pidStr = str(pid)
disease=disease.encode('utf-8')
pidStr=pidStr.encode('utf-8')
date_diagnosed = date(year,month, day)
eid = "Doc" + str(empNum)
patient = "P-" + str(pidStr)
file.write("Insert into diagnose" +" " + "values ('"+(eid)+"', '"+(disease)+"','"+(patient)+"','"+str(date_diagnosed)+"');\n")
if empNum <30:
empNum += 2
else: empNum = 2
file.close()
| 26.294118
| 130
| 0.619687
|
import xlrd
from datetime import date
from datetime import datetime
import random
workbook = xlrd.open_workbook('diagnose.xlsx')
worksheet = workbook.sheet_by_name('Sheet1')
file = open("diagnoseInsert.txt","w")
empNum = 2
for x in range(0, 30884):
year = random.choice(range(1990, 2018))
month = random.choice(range(1, 13))
day = random.choice(range(1, 29))
disease=worksheet.cell(x, 0).value
pid=worksheet.cell(x, 1).value
pid = int(pid)
pidStr = str(pid)
disease=disease.encode('utf-8')
pidStr=pidStr.encode('utf-8')
date_diagnosed = date(year,month, day)
eid = "Doc" + str(empNum)
patient = "P-" + str(pidStr)
file.write("Insert into diagnose" +" " + "values ('"+(eid)+"', '"+(disease)+"','"+(patient)+"','"+str(date_diagnosed)+"');\n")
if empNum <30:
empNum += 2
else: empNum = 2
file.close()
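Note that under Python 3 the .encode('utf-8') calls turn disease and pidStr into bytes, so the concatenation passed to file.write raises TypeError; a Python 3-safe sketch of that write, assuming the encode calls are simply dropped:
# disease stays a str and pid an int, so plain f-string formatting works
row = (
    f"Insert into diagnose values "
    f"('{eid}', '{disease}','{patient}','{date_diagnosed}');\n"
)
file.write(row)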
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 22
|
60a92fc32f9436c820e1f78c7327d9e09aea64ef
| 881
|
py
|
Python
|
day9/student_sqlcloud.py
|
dikshaa1702/ml
|
c35f279b8fa7544517ca713c2c1e55f08270d4c3
|
[
"Apache-2.0"
] | 1
|
2019-06-13T13:52:09.000Z
|
2019-06-13T13:52:09.000Z
|
day9/student_sqlcloud.py
|
dikshaa1702/ml
|
c35f279b8fa7544517ca713c2c1e55f08270d4c3
|
[
"Apache-2.0"
] | null | null | null |
day9/student_sqlcloud.py
|
dikshaa1702/ml
|
c35f279b8fa7544517ca713c2c1e55f08270d4c3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 11:50:16 2019
@author: DiPu
"""
import mysql.connector
con=mysql.connector.connect(user="diksha",password='diksha1702',
host='db4free.net',database='diksha')
c=con.cursor()
c.execute("""CREATE TABLE student(
name text,
age int,
roll_no int,
branch text
)""")
c.execute("INSERT INTO student VALUES ('nEHA',21, 18, 'cs')")
c.execute("INSERT INTO student VALUES ('PRATIK',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('pooja',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('smita',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('saurav',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('gaurav',21, 18, 'ee')")
c.execute("INSERT INTO student VALUES ('Ria',21, 18, 'ee')")
c.execute("SELECT * FROM student")
print ( c.fetchall() )
| 29.366667
| 65
| 0.624291
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 16 11:50:16 2019
@author: DiPu
"""
import mysql.connector
con=mysql.connector.connect(user="diksha",password='diksha1702',
host='db4free.net',database='diksha')
c=con.cursor()
c.execute("""CREATE TABLE student(
name text,
age int,
roll_no int,
branch text
)""")
c.execute("INSERT INTO student VALUES ('nEHA',21, 18, 'cs')")
c.execute("INSERT INTO student VALUES ('PRATIK',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('pooja',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('smita',21, 18, 'IT')")
c.execute("INSERT INTO student VALUES ('saurav',21, 18, 'ec')")
c.execute("INSERT INTO student VALUES ('gaurav',21, 18, 'ee')")
c.execute("INSERT INTO student VALUES ('Ria',21, 18, 'ee')")
c.execute("SELECT * FROM student")
print ( c.fetchall() )
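The seven literal INSERT statements above can also be written as one parameterised executemany call (supported by mysql.connector), with an explicit commit so the rows persist; a sketch using the cursor c and connection con created above:
rows = [
    ('nEHA', 21, 18, 'cs'), ('PRATIK', 21, 18, 'IT'), ('pooja', 21, 18, 'ec'),
    ('smita', 21, 18, 'IT'), ('saurav', 21, 18, 'ec'), ('gaurav', 21, 18, 'ee'),
    ('Ria', 21, 18, 'ee'),
]
# %s placeholders let the driver handle quoting and escaping
c.executemany("INSERT INTO student VALUES (%s, %s, %s, %s)", rows)
con.commit()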
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7e88360483551114fdce570e13880bcbf47a3724
| 5,775
|
py
|
Python
|
Optic/WrapperBl2Seq.py
|
CGATOxford/Optic
|
2df92e953b5139ff4e5c383cb4383e6367cd47f1
|
[
"MIT"
] | null | null | null |
Optic/WrapperBl2Seq.py
|
CGATOxford/Optic
|
2df92e953b5139ff4e5c383cb4383e6367cd47f1
|
[
"MIT"
] | null | null | null |
Optic/WrapperBl2Seq.py
|
CGATOxford/Optic
|
2df92e953b5139ff4e5c383cb4383e6367cd47f1
|
[
"MIT"
] | 1
|
2020-03-31T22:55:50.000Z
|
2020-03-31T22:55:50.000Z
|
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
WrapperBl2Seq.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import sys
from CGAT import Experiment as Experiment
from CGAT import FastaIterator as FastaIterator
if __name__ == "__main__":
    parser = Experiment.OptionParser(
version="%prog version: $Id: WrapperBl2Seq.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-f", "--input-file", dest="input_filename", type="string",
help="input filename. If '-', stdin is used [default=%default].",
metavar="FILE")
parser.add_option("-o", "--output-file", dest="output_filename", type="string",
help="output filename for codon usage. If '-', output is stdout [default=%default].",
metavar="FILE")
parser.add_option("-e", "--error-file", dest="error_filename", type="string",
help="output filename for error messages. If '-', output is stderr [default=%default].",
metavar="FILE")
parser.set_defaults(
input_filename="-",
output_filename="-",
error_filename="/dev/null",
)
(options, args) = Experiment.Start(parser)
wrapper = Bl2Seq()
if options.input_filename == "-":
file_stdin = sys.stdin
else:
file_stdin = open(options.input_filename, "r")
if options.output_filename:
if options.output_filename == "-":
file_stdout = sys.stdout
else:
file_stdout = open(options.output_filename, "w")
if options.error_filename:
if options.error_filename == "-":
file_stderr = sys.stderr
else:
file_stderr = open(options.error_filename, "w")
wrapper.RunOnFile(file_stdin, file_stdout, file_stderr)
if file_stdin and file_stdin != sys.stdin:
file_stdin.close()
if file_stdout and file_stdout != sys.stdout:
file_stdout.close()
if file_stderr and file_stderr != sys.stderr:
file_stderr.close()
Experiment.Stop()
| 29.020101
| 110
| 0.571602
|
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 Andreas Heger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
WrapperBl2Seq.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import os
import sys
import string
import tempfile
import subprocess
from CGAT import Experiment as Experiment
from CGAT import FastaIterator as FastaIterator
class Bl2SeqError(Exception):
pass
class Bl2Seq:
mOptions = ""
mExecutable = "bl2seq"
mStderr = sys.stderr
def __init__(self, options=""):
self.mOptions = options
def CreateTemporaryFiles(self):
"""create temporary files."""
self.mTempDirectory = tempfile.mkdtemp()
self.mFilenameTempInput = self.mTempDirectory + "/input"
self.mFilenameTempOutput = self.mTempDirectory + "/output"
def DeleteTemporaryFiles(self):
"""clean up."""
os.remove(self.mFilenameTempInput)
os.remove(self.mFilenameTempOutput)
os.rmdir(self.mTempDirectory)
def SetStderr(self, file=None):
"""set file for dumping stderr."""
self.mStderr = file
def WriteOutput(self, lines, filename_output=None):
"""write output to file.
If file is not given, lines are written to stdout.
"""
if filename_output:
outfile = open(filename_output, "w")
else:
outfile = sys.stdout
outfile.write(string.join(lines, ""))
if filename_output:
outfile.close()
def ParseResult(self, trace_file=None, information_file=None):
result = AdaptiveCAIResult()
result.Read(trace_file, information_file)
return result
def RunOnFile(self, infile, outfile, errfile):
self.CreateTemporaryFiles()
statement = string.join((self.mExecutable,
self.mFilenameTempInput,
self.mFilenameTempOutput),
" ")
i = FastaIterator.FastaIterator(infile)
outfile.write("GENE\tBl2Seq\n")
while 1:
f = i.next()
if f is None:
break
file = open(self.mFilenameTempInput, "w")
file.write(">%s\n%s" % (f.title, f.sequence))
file.close()
s = subprocess.Popen(statement,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.mTempDirectory,
close_fds=True)
(out, err) = s.communicate()
if s.returncode != 0:
                raise Bl2SeqError("Error in calculating Bl2Seq\n%s" % err)
d = open(self.mFilenameTempOutput).readlines()[2][:-1]
enc = d.split(" ")[2]
outfile.write((string.join((f.title, enc), "\t")) + "\n")
errfile.write(err)
self.DeleteTemporaryFiles()
if __name__ == "__main__":
    parser = Experiment.OptionParser(
version="%prog version: $Id: WrapperBl2Seq.py 2781 2009-09-10 11:33:14Z andreas $")
parser.add_option("-f", "--input-file", dest="input_filename", type="string",
help="input filename. If '-', stdin is used [default=%default].",
metavar="FILE")
parser.add_option("-o", "--output-file", dest="output_filename", type="string",
help="output filename for codon usage. If '-', output is stdout [default=%default].",
metavar="FILE")
parser.add_option("-e", "--error-file", dest="error_filename", type="string",
help="output filename for error messages. If '-', output is stderr [default=%default].",
metavar="FILE")
parser.set_defaults(
input_filename="-",
output_filename="-",
error_filename="/dev/null",
)
(options, args) = Experiment.Start(parser)
wrapper = Bl2Seq()
if options.input_filename == "-":
file_stdin = sys.stdin
else:
file_stdin = open(options.input_filename, "r")
if options.output_filename:
if options.output_filename == "-":
file_stdout = sys.stdout
else:
file_stdout = open(options.output_filename, "w")
if options.error_filename:
if options.error_filename == "-":
file_stderr = sys.stderr
else:
file_stderr = open(options.error_filename, "w")
wrapper.RunOnFile(file_stdin, file_stdout, file_stderr)
if file_stdin and file_stdin != sys.stdin:
file_stdin.close()
if file_stdout and file_stdout != sys.stdout:
file_stdout.close()
if file_stderr and file_stderr != sys.stderr:
file_stderr.close()
Experiment.Stop()
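A minimal sketch of driving the wrapper programmatically instead of through the option parser; it assumes a bl2seq executable is on PATH, and the file names are illustrative:
import sys
wrapper = Bl2Seq()
with open("input.fasta") as fasta_in, open("scores.tsv", "w") as scores_out:
    # RunOnFile reads FASTA records from the first handle and writes one
    # "GENE<TAB>Bl2Seq" line per record to the second.
    wrapper.RunOnFile(fasta_in, scores_out, sys.stderr)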
| 0
| 0
| 0
| 2,592
| 0
| 0
| 0
| -30
| 134
|
13088a8ac517f64c48d773cb8a5a92fb091eb78b
| 1,728
|
py
|
Python
|
src/prefect/cli/__init__.py
|
jamestwebber/prefect
|
410c4ac37d2595ab61007742883687f5e284821d
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/cli/__init__.py
|
jamestwebber/prefect
|
410c4ac37d2595ab61007742883687f5e284821d
|
[
"Apache-2.0"
] | null | null | null |
src/prefect/cli/__init__.py
|
jamestwebber/prefect
|
410c4ac37d2595ab61007742883687f5e284821d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
| 25.791045
| 80
| 0.696759
|
#!/usr/bin/env python
import click
import json
import logging
import os
import requests
import sys
import prefect
@click.group()
def cli():
"""
The Prefect CLI
"""
pass
@cli.command()
def make_user_config():
"""
Generates a user configuration file
"""
user_config_path = prefect.config.get("user_config_path")
if not user_config_path:
raise ValueError("No user config path set!")
elif os.path.isfile(user_config_path):
raise ValueError("A file already exists at {}".format(user_config_path))
os.makedirs(os.path.dirname(user_config_path), exist_ok=True)
with open(user_config_path, "w") as user_config:
user_config.write(
"# This is a user configuration file.\n"
"# Settings placed here will overwrite Prefect's defaults."
)
click.secho("Config created at {}".format(user_config_path), fg="green")
@cli.command()
@click.argument("environment_file", type=click.Path(exists=True))
@click.option("--runner_kwargs", default={})
def run(environment_file, runner_kwargs):
"""
Run a flow from an environment file.
"""
schema = prefect.serialization.environment.EnvironmentSchema()
with open(environment_file, "r") as f:
environment = schema.load(json.load(f))
click.echo(environment.run(runner_kwargs=runner_kwargs))
@cli.command()
@click.argument("environment_metadata")
def create_environment(environment_metadata):
"""
Call the setup and execute functions for a given environment.
"""
schema = prefect.serialization.environment.EnvironmentSchema()
environment = schema.load(json.loads(environment_metadata))
environment.setup()
environment.execute()
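A minimal sketch of exercising the group in-process with click's test runner; invoking the command object directly sidesteps the question of whether the registered name is make_user_config or make-user-config (which depends on the click version):
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(make_user_config)  # command object defined above
print(result.exit_code, result.output)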
| 0
| 1,516
| 0
| 0
| 0
| 0
| 0
| -62
| 247
|
159bef3318bfb6c684b7d77703c8e85be28688b9
| 411
|
py
|
Python
|
year2019/python/day1/day1.py
|
3schwartz/AdventOfCode
|
32f259c4e20c3c4834718411f1053b6a11f71c86
|
[
"MIT"
] | null | null | null |
year2019/python/day1/day1.py
|
3schwartz/AdventOfCode
|
32f259c4e20c3c4834718411f1053b6a11f71c86
|
[
"MIT"
] | null | null | null |
year2019/python/day1/day1.py
|
3schwartz/AdventOfCode
|
32f259c4e20c3c4834718411f1053b6a11f71c86
|
[
"MIT"
] | null | null | null |
from day1_func import get_fuel
lines = open('../../data/day1_data.txt').read().strip().split('\n')
fuels = [get_fuel(int(line)) for line in lines]
print(f'Part 1: {sum(fuels)}')
total_sum = 0
for line in lines:
last_value = int(line)
while True:
last_value = get_fuel(last_value)
if last_value <= 0:
break
total_sum += last_value
print(f"Part 2: {total_sum}")
| 19.571429
| 67
| 0.620438
|
from day1_func import get_fuel
lines = open('../../data/day1_data.txt').read().strip().split('\n')
fuels = [get_fuel(int(line)) for line in lines]
print(f'Part 1: {sum(fuels)}')
total_sum = 0
for line in lines:
last_value = int(line)
while True:
last_value = get_fuel(last_value)
if last_value <= 0:
break
total_sum += last_value
print(f"Part 2: {total_sum}")
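get_fuel is imported from day1_func but not shown; a sketch consistent with the Advent of Code 2019 day 1 formula (fuel = floor(mass / 3) - 2), offered as an assumption rather than the author's file:
def get_fuel(mass: int) -> int:
    # integer division already floors for non-negative masses
    return mass // 3 - 2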
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f1d40cad2f2fbe51cb27d3a15929184262878d47
| 518
|
py
|
Python
|
postgis_helpers/__init__.py
|
aaronfraint/postgis-helpers
|
99f4f9ae50c1197fe6d2d0fe42884c06d2c3589c
|
[
"MIT"
] | 1
|
2021-02-25T21:52:24.000Z
|
2021-02-25T21:52:24.000Z
|
postgis_helpers/__init__.py
|
aaronfraint/postgis-helpers
|
99f4f9ae50c1197fe6d2d0fe42884c06d2c3589c
|
[
"MIT"
] | null | null | null |
postgis_helpers/__init__.py
|
aaronfraint/postgis-helpers
|
99f4f9ae50c1197fe6d2d0fe42884c06d2c3589c
|
[
"MIT"
] | 1
|
2021-02-26T00:33:29.000Z
|
2021-02-26T00:33:29.000Z
|
__VERSION__ = "0.2.2"
# _console.print(":globe_showing_americas:", justify="left")
# _console.print(":globe_showing_europe-africa:", justify="center")
# _console.print(":globe_showing_asia-australia:", justify="right")
# _console.print(f"-> postGIS-helpers version {__VERSION__}\n\n")
| 37
| 78
| 0.797297
|
from .PgSQL import PostgreSQL
from .config_helpers import make_config_file, read_config_file, configurations
from .geopandas_helpers import spatialize_point_dataframe
from .raw_data import DataSource
from .console import _console
__VERSION__ = "0.2.2"
# _console.print(":globe_showing_americas:", justify="left")
# _console.print(":globe_showing_europe-africa:", justify="center")
# _console.print(":globe_showing_asia-australia:", justify="right")
# _console.print(f"-> postGIS-helpers version {__VERSION__}\n\n")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 111
|
35abc541d0d7ce086f795d6703dfd297577c9cb7
| 424
|
py
|
Python
|
client/delivery/urls.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | 6
|
2019-11-21T10:09:49.000Z
|
2021-06-19T09:52:59.000Z
|
client/delivery/urls.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
client/delivery/urls.py
|
daniel-waruo/e-commerse-api
|
6b080039398fb4099a34335317d649dd67783f63
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from .views import CreateDeliveryInfo, DeliveryInfoApi, ListDeliveryInfo
app_name = 'delivery'
urlpatterns = [
path('delivery-information/add', CreateDeliveryInfo.as_view(), name='add_delivery_info'),
path('delivery-information/<int:pk>', DeliveryInfoApi.as_view(), name='delivery_info'),
path('delivery-information/list', ListDeliveryInfo.as_view(), name='list_delivery_info')
]
| 35.333333
| 93
| 0.768868
|
from django.urls import path
from .views import CreateDeliveryInfo, DeliveryInfoApi, ListDeliveryInfo
app_name = 'delivery'
urlpatterns = [
path('delivery-information/add', CreateDeliveryInfo.as_view(), name='add_delivery_info'),
path('delivery-information/<int:pk>', DeliveryInfoApi.as_view(), name='delivery_info'),
path('delivery-information/list', ListDeliveryInfo.as_view(), name='list_delivery_info')
]
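Because the urlconf sets app_name = 'delivery', the routes are reversed through that namespace; a short sketch (the pk value is illustrative, and the resulting paths assume this urls.py is included at the project root):
from django.urls import reverse

add_url = reverse('delivery:add_delivery_info')                    # /delivery-information/add
detail_url = reverse('delivery:delivery_info', kwargs={'pk': 7})   # /delivery-information/7
list_url = reverse('delivery:list_delivery_info')                  # /delivery-information/list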
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
976b5d264ccc2ded21f52e33be4c2ab378f4c30d
| 2,709
|
py
|
Python
|
python/day11/day11.py
|
secworks/advent_of_code_2020
|
b90e4e1d27c1e4b597a08cac8ff13e63686769f2
|
[
"BSD-2-Clause"
] | null | null | null |
python/day11/day11.py
|
secworks/advent_of_code_2020
|
b90e4e1d27c1e4b597a08cac8ff13e63686769f2
|
[
"BSD-2-Clause"
] | null | null | null |
python/day11/day11.py
|
secworks/advent_of_code_2020
|
b90e4e1d27c1e4b597a08cac8ff13e63686769f2
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# day11.py
# -------
# Solutions to Advent of Code 2020, day 11.
# https://adventofcode.com/2020/day/11
#
#=======================================================================
import sys
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# Implement the seating update logic.
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-------------------------------------------------------------------
if __name__=="__main__":
print("Advent of Code 2020, day 11")
print("==========================")
problem1("day11_example.txt")
problem2("day11_input.txt")
sys.exit(0)
#=======================================================================
#=======================================================================
| 27.927835
| 72
| 0.298265
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#=======================================================================
#
# day11.py
# -------
# Solutions to Advent of Code 2020, day 11.
# https://adventofcode.com/2020/day/11
#
#=======================================================================
import sys
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def get_input(filename):
l = []
with open(filename,'r') as f:
for line in f:
l.append(line.strip())
return l
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def compare_seatings(s1, s2):
assert len(s1) == len(s2), "Error: Size of seatings are not equal."
for i in range(len(s1)):
if s1[i] != s2[i]:
return False
return True
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def occupy_seat(seat, left, right):
    if seat == ".":
        return False
    if seat == "L":
        tmp = True
        for s in left:
            if s == "L" or s == ".":
                return False
#-------------------------------------------------------------------
# Implement the seating update logic.
#-------------------------------------------------------------------
def update_seating(seating):
tmp = seating[:]
w = len(seating[1])
print("Width of seating row: %d" % (w))
# Handle the middle 1..(w-2) seats
for i in range(1, w - 2):
        pass  # per-seat update rule not implemented yet
return tmp
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def problem1(filename):
print("Problem 1")
print("---------")
seating = get_input(filename)
new_seating = update_seating(seating)
print(compare_seatings(seating, new_seating))
print("")
#-------------------------------------------------------------------
#-------------------------------------------------------------------
def problem2(filename):
print("Problem 2")
print("---------")
print("")
#-------------------------------------------------------------------
#-------------------------------------------------------------------
if __name__=="__main__":
print("Advent of Code 2020, day 11")
print("==========================")
problem1("day11_example.txt")
problem2("day11_input.txt")
sys.exit(0)
#=======================================================================
#=======================================================================
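The unfinished update_seating above stops short of the actual rule; a sketch of the neighbour count it would need, assuming the usual Advent of Code 2020 day 11 rules (an empty seat with no occupied neighbours fills, an occupied seat with four or more occupied neighbours empties):
def count_occupied_neighbours(seating, r, c):
    # Count '#' in the eight cells around (r, c), staying inside the grid.
    occupied = 0
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            if dr == 0 and dc == 0:
                continue
            rr, cc = r + dr, c + dc
            if 0 <= rr < len(seating) and 0 <= cc < len(seating[rr]):
                occupied += seating[rr][cc] == "#"
    return occupied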
| 0
| 0
| 0
| 0
| 0
| 908
| 0
| 0
| 132
|
7eb7e67de869f3bb153180f2431ce5d9de6c4ecb
| 389
|
py
|
Python
|
tests/edgestats_test.py
|
devjack/edgestats
|
d668cfdc4a6962c0f02a76916fc58d43605d46b2
|
[
"MIT"
] | null | null | null |
tests/edgestats_test.py
|
devjack/edgestats
|
d668cfdc4a6962c0f02a76916fc58d43605d46b2
|
[
"MIT"
] | null | null | null |
tests/edgestats_test.py
|
devjack/edgestats
|
d668cfdc4a6962c0f02a76916fc58d43605d46b2
|
[
"MIT"
] | null | null | null |
import unittest
if __name__ == '__main__':
unittest.main()
| 27.785714
| 69
| 0.699229
|
import unittest
import gzip
from edgestats import EdgeStats
class EdgestatsTest(unittest.TestCase):
def test_can_parse_log_file(self):
with gzip.open('tests/data/simple_unzip_test.gz', 'rb') as f:
file_content = f.read()
expected = b"Simple gzipped string\n"
self.assertEqual(expected, file_content)
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
| 258
| 0
| 0
| 0
| 0
| 67
|
73dd5e51df3dee9bc1040211fdb75399402c8941
| 15,096
|
py
|
Python
|
sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/models/_service_fabric_managed_clusters_management_client_enums.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/models/_service_fabric_managed_clusters_management_client_enums.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/servicefabricmanagedclusters/azure-mgmt-servicefabricmanagedclusters/azure/mgmt/servicefabricmanagedclusters/models/_service_fabric_managed_clusters_management_client_enums.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
| 42.285714
| 102
| 0.72602
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class Access(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The network traffic is allowed or denied.
"""
ALLOW = "allow"
DENY = "deny"
class ClusterState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The current state of the cluster.
"""
#: Indicates that the cluster resource is created and the resource provider is waiting for Service
#: Fabric VM extension to boot up and report to it.
WAITING_FOR_NODES = "WaitingForNodes"
#: Indicates that the Service Fabric runtime is being installed on the VMs. Cluster resource will
#: be in this state until the cluster boots up and system services are up.
DEPLOYING = "Deploying"
#: Indicates that the cluster is upgrading to establishes the cluster version. This upgrade is
#: automatically initiated when the cluster boots up for the first time.
BASELINE_UPGRADE = "BaselineUpgrade"
#: Indicates that the cluster is being upgraded with the user provided configuration.
UPGRADING = "Upgrading"
#: Indicates that the last upgrade for the cluster has failed.
UPGRADE_FAILED = "UpgradeFailed"
#: Indicates that the cluster is in a stable state.
READY = "Ready"
class ClusterUpgradeCadence(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Indicates when new cluster runtime version upgrades will be applied after they are released. By
default is Wave0.
"""
#: Cluster upgrade starts immediately after a new version is rolled out. Recommended for Test/Dev
#: clusters.
WAVE0 = "Wave0"
#: Cluster upgrade starts 7 days after a new version is rolled out. Recommended for Pre-prod
#: clusters.
WAVE1 = "Wave1"
#: Cluster upgrade starts 14 days after a new version is rolled out. Recommended for Production
#: clusters.
WAVE2 = "Wave2"
class ClusterUpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The upgrade mode of the cluster when new Service Fabric runtime version is available.
"""
#: The cluster will be automatically upgraded to the latest Service Fabric runtime version,
#: **clusterUpgradeCadence** will determine when the upgrade starts after the new version becomes
#: available.
AUTOMATIC = "Automatic"
#: The cluster will not be automatically upgraded to the latest Service Fabric runtime version.
#: The cluster is upgraded by setting the **clusterCodeVersion** property in the cluster resource.
MANUAL = "Manual"
class Direction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network security rule direction.
"""
INBOUND = "inbound"
OUTBOUND = "outbound"
class DiskType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Managed data disk type. IOPS and throughput are given by the disk size, to see more information
go to https://docs.microsoft.com/en-us/azure/virtual-machines/disks-types.
"""
#: Standard HDD locally redundant storage. Best for backup, non-critical, and infrequent access.
STANDARD_LRS = "Standard_LRS"
#: Standard SSD locally redundant storage. Best for web servers, lightly used enterprise
#: applications and dev/test.
STANDARD_SSD_LRS = "StandardSSD_LRS"
#: Premium SSD locally redundant storage. Best for production and performance sensitive workloads.
PREMIUM_LRS = "Premium_LRS"
class FailureAction(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The compensating action to perform when a Monitored upgrade encounters monitoring policy or
health policy violations. Invalid indicates the failure action is invalid. Rollback specifies
that the upgrade will start rolling back automatically. Manual indicates that the upgrade will
switch to UnmonitoredManual upgrade mode.
"""
#: Indicates that a rollback of the upgrade will be performed by Service Fabric if the upgrade
#: fails.
ROLLBACK = "Rollback"
#: Indicates that a manual repair will need to be performed by the administrator if the upgrade
#: fails. Service Fabric will not proceed to the next upgrade domain automatically.
MANUAL = "Manual"
class IPAddressType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The IP address type.
"""
#: IPv4 address type.
I_PV4 = "IPv4"
#: IPv6 address type.
I_PV6 = "IPv6"
class ManagedClusterAddOnFeature(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Available cluster add-on features
"""
#: Dns service.
DNS_SERVICE = "DnsService"
#: Backup and restore service.
BACKUP_RESTORE_SERVICE = "BackupRestoreService"
#: Resource monitor service.
RESOURCE_MONITOR_SERVICE = "ResourceMonitorService"
class ManagedClusterVersionEnvironment(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
WINDOWS = "Windows"
class ManagedIdentityType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of managed identity for the resource.
"""
#: Indicates that no identity is associated with the resource.
NONE = "None"
#: Indicates that system assigned identity is associated with the resource.
SYSTEM_ASSIGNED = "SystemAssigned"
#: Indicates that user assigned identity is associated with the resource.
USER_ASSIGNED = "UserAssigned"
#: Indicates that both system assigned and user assigned identity are associated with the
#: resource.
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
class ManagedResourceProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The provisioning state of the managed resource.
"""
NONE = "None"
CREATING = "Creating"
CREATED = "Created"
UPDATING = "Updating"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELED = "Canceled"
DELETING = "Deleting"
DELETED = "Deleted"
OTHER = "Other"
class MoveCost(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Specifies the move cost for the service.
"""
#: Zero move cost. This value is zero.
ZERO = "Zero"
#: Specifies the move cost of the service as Low. The value is 1.
LOW = "Low"
#: Specifies the move cost of the service as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the move cost of the service as High. The value is 3.
HIGH = "High"
class NodeTypeSkuScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Node type capacity scale type.
"""
#: Node count is not adjustable in any way (e.g. it is fixed).
NONE = "None"
#: The user must manually scale out/in.
MANUAL = "Manual"
#: Automatic scale is allowed.
AUTOMATIC = "Automatic"
class NsgProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Network protocol this rule applies to.
"""
HTTP = "http"
HTTPS = "https"
TCP = "tcp"
UDP = "udp"
ICMP = "icmp"
AH = "ah"
ESP = "esp"
class OsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Cluster operating system, the default will be Windows
"""
#: Indicates os is Windows.
WINDOWS = "Windows"
class PartitionScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Indicates that the partition is based on string names, and is a SingletonPartitionScheme
#: object, The value is 0.
SINGLETON = "Singleton"
#: Indicates that the partition is based on Int64 key ranges, and is a
#: UniformInt64RangePartitionScheme object. The value is 1.
UNIFORM_INT64_RANGE = "UniformInt64Range"
#: Indicates that the partition is based on string names, and is a NamedPartitionScheme object.
#: The value is 2.
NAMED = "Named"
class PrivateEndpointNetworkPolicies(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or Disable apply network policies on private end point in the subnet.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class PrivateLinkServiceNetworkPolicies(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enable or Disable apply network policies on private link service in the subnet.
"""
ENABLED = "enabled"
DISABLED = "disabled"
class ProbeProtocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""the reference to the load balancer probe used by the load balancing rule.
"""
TCP = "tcp"
HTTP = "http"
HTTPS = "https"
class Protocol(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The reference to the transport protocol used by the load balancing rule.
"""
TCP = "tcp"
UDP = "udp"
class RollingUpgradeMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The mode used to monitor health during a rolling upgrade. The values are Monitored, and
UnmonitoredAuto.
"""
#: The upgrade will stop after completing each upgrade domain and automatically monitor health
#: before proceeding. The value is 0.
MONITORED = "Monitored"
#: The upgrade will proceed automatically without performing any health monitoring. The value is
#: 1.
UNMONITORED_AUTO = "UnmonitoredAuto"
class ServiceCorrelationScheme(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The service correlation scheme.
"""
#: Aligned affinity ensures that the primaries of the partitions of the affinitized services are
#: collocated on the same nodes. This is the default and is the same as selecting the Affinity
#: scheme. The value is 0.
ALIGNED_AFFINITY = "AlignedAffinity"
#: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same
#: nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will
#: be collocated. The value is 1.
NON_ALIGNED_AFFINITY = "NonAlignedAffinity"
class ServiceKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The kind of service (Stateless or Stateful).
"""
#: Does not use Service Fabric to make its state highly available or reliable. The value is 0.
STATELESS = "Stateless"
#: Uses Service Fabric to make its state or part of its state highly available and reliable. The
#: value is 1.
STATEFUL = "Stateful"
class ServiceLoadMetricWeight(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Determines the metric weight relative to the other metrics that are configured for this
service. During runtime, if two metrics end up in conflict, the Cluster Resource Manager
prefers the metric with the higher weight.
"""
#: Disables resource balancing for this metric. This value is zero.
ZERO = "Zero"
#: Specifies the metric weight of the service load as Low. The value is 1.
LOW = "Low"
#: Specifies the metric weight of the service load as Medium. The value is 2.
MEDIUM = "Medium"
#: Specifies the metric weight of the service load as High. The value is 3.
HIGH = "High"
class ServicePackageActivationMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The activation Mode of the service package
"""
#: Indicates the application package activation mode will use shared process.
SHARED_PROCESS = "SharedProcess"
#: Indicates the application package activation mode will use exclusive process.
EXCLUSIVE_PROCESS = "ExclusiveProcess"
class ServicePlacementPolicyType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The type of placement policy for a service fabric service. Following are the possible values.
"""
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or
#: upgrade domain cannot be used for placement of this service. The value is 0.
INVALID_DOMAIN = "InvalidDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the
#: service must be placed in a specific domain. The value is 1.
REQUIRED_DOMAIN = "RequiredDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the
#: Primary replica for the partitions of the service should be located in a particular domain as
#: an optimization. The value is 2.
PREFERRED_PRIMARY_DOMAIN = "PreferredPrimaryDomain"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will
#: disallow placement of any two replicas from the same partition in the same domain at any time.
#: The value is 3.
REQUIRED_DOMAIN_DISTRIBUTION = "RequiredDomainDistribution"
#: Indicates that the ServicePlacementPolicyDescription is of type
#: ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all
#: replicas of a particular partition of the service should be placed atomically. The value is 4.
NON_PARTIALLY_PLACE_SERVICE = "NonPartiallyPlaceService"
class ServiceScalingMechanismKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling mechanism for adding or removing instances of stateless service partition.
#: The value is 0.
SCALE_PARTITION_INSTANCE_COUNT = "ScalePartitionInstanceCount"
#: Represents a scaling mechanism for adding or removing named partitions of a stateless service.
#: The value is 1.
ADD_REMOVE_INCREMENTAL_NAMED_PARTITION = "AddRemoveIncrementalNamedPartition"
class ServiceScalingTriggerKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Enumerates the ways that a service can be partitioned.
"""
#: Represents a scaling trigger related to an average load of a metric/resource of a partition.
#: The value is 0.
AVERAGE_PARTITION_LOAD_TRIGGER = "AveragePartitionLoadTrigger"
#: Represents a scaling policy related to an average load of a metric/resource of a service. The
#: value is 1.
AVERAGE_SERVICE_LOAD_TRIGGER = "AverageServiceLoadTrigger"
class SkuName(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""Sku Name.
"""
#: Basic requires a minimum of 3 nodes and allows only 1 node type.
BASIC = "Basic"
#: Requires a minimum of 5 nodes and allows 1 or more node type.
STANDARD = "Standard"
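Because the members mix in str and use azure.core's CaseInsensitiveEnumMeta, they compare directly against the wire values; a small usage sketch (treating the case-insensitive lookup as the metaclass's documented purpose):
# str mixin: members compare equal to the raw strings the service sends
assert Access.ALLOW == "allow"
assert ClusterState.READY == "Ready"
# The metaclass additionally makes name lookup tolerant of casing,
# e.g. Access["allow"] and Access["ALLOW"] resolve to the same member.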
| 0
| 0
| 0
| 13,837
| 0
| 0
| 0
| 34
| 757
|
0f8896b2d56b1d6c742f1e75296d6e1c654c2549
| 10,751
|
py
|
Python
|
solarforecastarbiter/io/reference_observations/srml.py
|
wholmgren/solarforecastarbiter-core
|
e692c7e142f24c0253e4288a6ac760e10ba41dbd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
solarforecastarbiter/io/reference_observations/srml.py
|
wholmgren/solarforecastarbiter-core
|
e692c7e142f24c0253e4288a6ac760e10ba41dbd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
solarforecastarbiter/io/reference_observations/srml.py
|
wholmgren/solarforecastarbiter-core
|
e692c7e142f24c0253e4288a6ac760e10ba41dbd
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import logging
import json
from urllib import error
from pkg_resources import resource_filename, Requirement
import pandas as pd
from pvlib import iotools
from requests.exceptions import HTTPError
from solarforecastarbiter.datamodel import Observation, SolarPowerPlant
from solarforecastarbiter.io.reference_observations import (common, default_forecasts)
DEFAULT_SITEFILE = resource_filename(
Requirement.parse('solarforecastarbiter'),
'solarforecastarbiter/io/reference_observations/'
'srml_reference_sites.json')
# maps the desired variable names to those returned by pvlib.iotools
srml_variable_map = {
'ghi_': 'ghi',
'dni_': 'dni',
'dhi_': 'dhi',
'wind_speed_': 'wind_speed',
'temp_air_': 'air_temperature',
}
# maps SolarForecastArbiter interval_label to the SRML infix which
# designates the time resolution of each file. The list of file types
# is tried in order, so file types starting with 'P' designating
# processed data are listed first, such that if processed data exists
# we retrieve that first.
FILE_TYPE_MAP = {
1: ['PO', 'RO'],
5: ['PF', 'RF'],
15: ['PQ', 'RQ'],
60: ['PH', 'RH'],
}
logger = logging.getLogger('reference_data')
def adjust_site_parameters(site):
"""Inserts modeling parameters for sites with pv measurments
Parameters
----------
site: dict
Returns
-------
dict
Copy of inputs plus a new key 'modeling_parameters'.
"""
return common.apply_json_site_parameters(DEFAULT_SITEFILE, site)
def request_data(site, year, month):
"""Makes a request for each file type until successful or we
run out of filetypes.
Parameters
----------
site: :py:class:`solarforecastarbiter.datamodel.Site`
year: int
The year of the data to request.
month: int
The month of the data to request.
Returns
-------
DataFrame
A month of SRML data.
"""
extra_params = common.decode_extra_parameters(site)
station_code = extra_params['network_api_abbreviation']
interval_length = extra_params['observation_interval_length']
file_types = FILE_TYPE_MAP[interval_length]
for file_type in file_types:
# The list file_types are listed with processed data
# file types first. On a successful retrieval we return
# the month of data, otherwise we log info and continue
# until we've exhausted the list.
try:
srml_month = iotools.read_srml_month_from_solardat(
station_code, year, month, file_type)
except error.URLError:
logger.warning(f'Could not retrieve {file_type} for SRML data '
f'for site {site.name} on {year}/{month} .')
logger.debug(f'Site abbreviation: {station_code}')
continue
except pd.errors.EmptyDataError:
logger.warning(f'SRML returned an empty file for station '
f'{site.name} on {year}/{month}.')
continue
else:
return srml_month
logger.warning(f'Could not retrieve data for site {site.name} on '
f'{year}/{month}.')
def fetch(api, site, start, end):
"""Retrieve observation data for a srml site between start and end.
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An APISession with a valid JWT for accessing the Reference Data
user.
site : :py:class:`solarforecastarbiter.datamodel.Site`
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for. Must include timezone.
end : datetime
The end of the period to request data for. Must include timezone.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
Raises
------
TypeError
If start and end have different timezones, or if they do not include a
timezone.
"""
month_dfs = []
start_year = start.year
start_month = start.month
# Retrieve each month file necessary
if start.tzinfo != end.tzinfo:
raise TypeError('start and end cannot have different timezones')
while start_year * 100 + start_month <= end.year * 100 + end.month:
logger.info(f'Requesting data for SRML site {site.name}'
f' for {start_year}-{start_month}')
srml_month = request_data(site, start_year, start_month)
if srml_month is not None:
month_dfs.append(srml_month)
start_month += 1
if start_month > 12:
start_month = 1
start_year += 1
try:
all_period_data = pd.concat(month_dfs)
except ValueError:
logger.warning(f'No data available for site {site.name} '
f'from {start} to {end}.')
return pd.DataFrame()
var_columns = [col for col in all_period_data.columns
if '_flag' not in col]
power_columns = [col for col in var_columns
if col.startswith('5')]
# adjust power from watts to megawatts
for column in power_columns:
all_period_data[column] = all_period_data[column] / 1000000
all_period_data = all_period_data.loc[start:end, var_columns]
# remove possible trailing NaNs, it is necessary to do this after slicing
# because SRML data has nighttime data prefilled with 0s through the end of
# the month. This may not be effective if a given site has more than a 24
# hour lag, which will cause last_valid_index to return the latest
# timestamp just before sunrise, but will suffice for the typical lag on
# the order of hours.
all_period_data = all_period_data[:all_period_data.last_valid_index()]
return all_period_data
def initialize_site_observations(api, site):
"""Creates an observation at the site for each variable in
an SRML site's file.
Parameters
----------
api: :py:class:`solarforecastarbiter.io.api.APISession`
    site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Observations.
Notes
-----
Since variables are labelled with an integer instrument
number, Observations are named with their variable and
instrument number found in the source files.
e.g. A SRML file contains two columns labelled, 1001, and
1002. These columns represent GHI at instrument 1 and
instrument 2 respectively. The `pvlib.iotools` package
converts these to 'ghi_1' and 'ghi_2' for us. We use these
labels to differentiate between measurements recorded by
different instruments.
"""
# Request ~month old data at initialization to ensure we get a response.
start = pd.Timestamp.utcnow() - pd.Timedelta('30 days')
end = start
try:
extra_params = common.decode_extra_parameters(site)
except ValueError:
        logger.warning('Cannot create reference observations at SRML site '
f'{site.name}, missing required parameters.')
return
# use site name without network here to build
# a name with the original column label rather than
# the SFA variable
site_name = common.site_name_no_network(site)
try:
site_df = fetch(api, site, start, end)
except error.HTTPError:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
else:
if site_df is None:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
for variable in srml_variable_map.keys():
matches = [col for col in site_df.columns
if col.startswith(variable)]
for match in matches:
observation_extra_parameters = extra_params.copy()
observation_extra_parameters.update({
'network_data_label': match})
try:
# Here, we pass a name with match instead of variable
# to differentiate between multiple observations of
# the same variable
common.create_observation(
api, site, srml_variable_map[variable],
name=f'{site_name} {match}',
interval_label='beginning',
extra_params=observation_extra_parameters)
except HTTPError as e:
logger.error(
f'Failed to create {variable} observation at Site '
f'{site.name}. Error: {e.response.text}')
with open(DEFAULT_SITEFILE) as fp:
obs_metadata = json.load(fp)['observations']
for obs in obs_metadata:
obs_site_extra_params = json.loads(obs['site']['extra_parameters'])
if obs_site_extra_params['network_api_id'] == extra_params[
'network_api_id']:
obs['site'] = site
observation = Observation.from_dict(obs)
common.check_and_post_observation(api, observation)
def initialize_site_forecasts(api, site):
"""
    Create forecasts for each variable measured at the site
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Forecasts.
"""
variables = list(srml_variable_map.values())
if isinstance(site, SolarPowerPlant):
variables += ['ac_power', 'dc_power']
common.create_forecasts(
api, site, variables,
default_forecasts.TEMPLATE_FORECASTS)
def update_observation_data(api, sites, observations, start, end):
"""Post new observation data to a list of SRML Observations
from start to end.
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
sites: list of :py:class:`solarforecastarbiter.datamodel.Site`
List of all reference sites as Objects
observations: list of :py:class:`solarforecastarbiter.datamodel.Observation`
List of all reference observations as Objects
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
""" # noqa
srml_sites = common.filter_by_networks(sites, 'UO SRML')
for site in srml_sites:
common.update_site_observations(api, fetch, site, observations,
start, end)
| 36.692833
| 80
| 0.646172
|
import logging
import json
from urllib import error
from pkg_resources import resource_filename, Requirement
import pandas as pd
from pvlib import iotools
from requests.exceptions import HTTPError
from solarforecastarbiter.datamodel import Observation, SolarPowerPlant
from solarforecastarbiter.io.reference_observations import (
common, default_forecasts)
DEFAULT_SITEFILE = resource_filename(
Requirement.parse('solarforecastarbiter'),
'solarforecastarbiter/io/reference_observations/'
'srml_reference_sites.json')
# maps the desired variable names to those returned by pvlib.iotools
srml_variable_map = {
'ghi_': 'ghi',
'dni_': 'dni',
'dhi_': 'dhi',
'wind_speed_': 'wind_speed',
'temp_air_': 'air_temperature',
}
# maps SolarForecastArbiter interval_label to the SRML infix which
# designates the time resolution of each file. The list of file types
# is tried in order, so file types starting with 'P' designating
# processed data are listed first, such that if processed data exists
# we retrieve that first.
FILE_TYPE_MAP = {
1: ['PO', 'RO'],
5: ['PF', 'RF'],
15: ['PQ', 'RQ'],
60: ['PH', 'RH'],
}
logger = logging.getLogger('reference_data')
def adjust_site_parameters(site):
"""Inserts modeling parameters for sites with pv measurments
Parameters
----------
site: dict
Returns
-------
dict
Copy of inputs plus a new key 'modeling_parameters'.
"""
return common.apply_json_site_parameters(DEFAULT_SITEFILE, site)
def request_data(site, year, month):
"""Makes a request for each file type until successful or we
run out of filetypes.
Parameters
----------
site: :py:class:`solarforecastarbiter.datamodel.Site`
year: int
The year of the data to request.
month: int
The month of the data to request.
Returns
-------
DataFrame
A month of SRML data.
"""
extra_params = common.decode_extra_parameters(site)
station_code = extra_params['network_api_abbreviation']
interval_length = extra_params['observation_interval_length']
file_types = FILE_TYPE_MAP[interval_length]
for file_type in file_types:
# The list file_types are listed with processed data
# file types first. On a successful retrieval we return
# the month of data, otherwise we log info and continue
# until we've exhausted the list.
try:
srml_month = iotools.read_srml_month_from_solardat(
station_code, year, month, file_type)
except error.URLError:
logger.warning(f'Could not retrieve {file_type} for SRML data '
f'for site {site.name} on {year}/{month} .')
logger.debug(f'Site abbreviation: {station_code}')
continue
except pd.errors.EmptyDataError:
logger.warning(f'SRML returned an empty file for station '
f'{site.name} on {year}/{month}.')
continue
else:
return srml_month
logger.warning(f'Could not retrieve data for site {site.name} on '
f'{year}/{month}.')
def fetch(api, site, start, end):
"""Retrieve observation data for a srml site between start and end.
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An APISession with a valid JWT for accessing the Reference Data
user.
site : :py:class:`solarforecastarbiter.datamodel.Site`
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for. Must include timezone.
end : datetime
The end of the period to request data for. Must include timezone.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
Raises
------
TypeError
If start and end have different timezones, or if they do not include a
timezone.
"""
month_dfs = []
start_year = start.year
start_month = start.month
# Retrieve each month file necessary
if start.tzinfo != end.tzinfo:
raise TypeError('start and end cannot have different timezones')
while start_year * 100 + start_month <= end.year * 100 + end.month:
logger.info(f'Requesting data for SRML site {site.name}'
f' for {start_year}-{start_month}')
srml_month = request_data(site, start_year, start_month)
if srml_month is not None:
month_dfs.append(srml_month)
start_month += 1
if start_month > 12:
start_month = 1
start_year += 1
try:
all_period_data = pd.concat(month_dfs)
except ValueError:
logger.warning(f'No data available for site {site.name} '
f'from {start} to {end}.')
return pd.DataFrame()
var_columns = [col for col in all_period_data.columns
if '_flag' not in col]
power_columns = [col for col in var_columns
if col.startswith('5')]
# adjust power from watts to megawatts
for column in power_columns:
all_period_data[column] = all_period_data[column] / 1000000
all_period_data = all_period_data.loc[start:end, var_columns]
# remove possible trailing NaNs, it is necessary to do this after slicing
# because SRML data has nighttime data prefilled with 0s through the end of
# the month. This may not be effective if a given site has more than a 24
# hour lag, which will cause last_valid_index to return the latest
# timestamp just before sunrise, but will suffice for the typical lag on
# the order of hours.
all_period_data = all_period_data[:all_period_data.last_valid_index()]
return all_period_data
def initialize_site_observations(api, site):
"""Creates an observation at the site for each variable in
an SRML site's file.
Parameters
----------
api: :py:class:`solarforecastarbiter.io.api.APISession`
    site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Observations.
Notes
-----
Since variables are labelled with an integer instrument
number, Observations are named with their variable and
instrument number found in the source files.
e.g. A SRML file contains two columns labelled, 1001, and
1002. These columns represent GHI at instrument 1 and
instrument 2 respectively. The `pvlib.iotools` package
converts these to 'ghi_1' and 'ghi_2' for us. We use these
labels to differentiate between measurements recorded by
different instruments.
"""
# Request ~month old data at initialization to ensure we get a response.
start = pd.Timestamp.utcnow() - pd.Timedelta('30 days')
end = start
try:
extra_params = common.decode_extra_parameters(site)
except ValueError:
        logger.warning('Cannot create reference observations at SRML site '
f'{site.name}, missing required parameters.')
return
# use site name without network here to build
# a name with the original column label rather than
# the SFA variable
site_name = common.site_name_no_network(site)
try:
site_df = fetch(api, site, start, end)
except error.HTTPError:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
else:
if site_df is None:
logger.error('Could not find data to create observations '
f'for SRML site {site_name}.')
return
for variable in srml_variable_map.keys():
matches = [col for col in site_df.columns
if col.startswith(variable)]
for match in matches:
observation_extra_parameters = extra_params.copy()
observation_extra_parameters.update({
'network_data_label': match})
try:
# Here, we pass a name with match instead of variable
# to differentiate between multiple observations of
# the same variable
common.create_observation(
api, site, srml_variable_map[variable],
name=f'{site_name} {match}',
interval_label='beginning',
extra_params=observation_extra_parameters)
except HTTPError as e:
logger.error(
f'Failed to create {variable} observation at Site '
f'{site.name}. Error: {e.response.text}')
with open(DEFAULT_SITEFILE) as fp:
obs_metadata = json.load(fp)['observations']
for obs in obs_metadata:
obs_site_extra_params = json.loads(obs['site']['extra_parameters'])
if obs_site_extra_params['network_api_id'] == extra_params[
'network_api_id']:
obs['site'] = site
observation = Observation.from_dict(obs)
common.check_and_post_observation(api, observation)
def initialize_site_forecasts(api, site):
"""
Create a forecasts for each variable measured at the site
Parameters
----------
api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
site : :py:class:`solarforecastarbiter.datamodel.Site`
The site object for which to create Forecasts.
"""
variables = list(srml_variable_map.values())
if isinstance(site, SolarPowerPlant):
variables += ['ac_power', 'dc_power']
common.create_forecasts(
api, site, variables,
default_forecasts.TEMPLATE_FORECASTS)
def update_observation_data(api, sites, observations, start, end):
"""Post new observation data to a list of SRML Observations
    from start to end.
    Parameters
    ----------
    api : :py:class:`solarforecastarbiter.io.api.APISession`
An active Reference user session.
sites: list of :py:class:`solarforecastarbiter.datamodel.Site`
List of all reference sites as Objects
observations: list of :py:class:`solarforecastarbiter.datamodel.Observation`
List of all reference observations as Objects
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
""" # noqa
srml_sites = common.filter_by_networks(sites, 'UO SRML')
for site in srml_sites:
common.update_site_observations(api, fetch, site, observations,
start, end)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 5
| 0
|
23ff69637f82addae0325c842c00f5a222080a6a
| 2,369
|
py
|
Python
|
tests/test_estimators.py
|
astrojose9/fulmar
|
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
|
[
"MIT"
] | null | null | null |
tests/test_estimators.py
|
astrojose9/fulmar
|
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
|
[
"MIT"
] | null | null | null |
tests/test_estimators.py
|
astrojose9/fulmar
|
62a79fb9b7ab01e5b7b3acadaca8e4f0db0e0e2f
|
[
"MIT"
] | null | null | null |
# sys.path.insert(0, os.path.abspath('/home/jrodrigues/Documents/PhD/fulmar'))
from fulmar.estimators import (estimate_planet_mass, estimate_semi_amplitude)
from fulmar.utils import (FulmarWarning)
import astropy.units as u
import numpy.testing as npt
from astropy.units import UnitConversionError
import pytest
def test_estimate_planet_mass():
"""test if estimate_planet_mass behaves as expected"""
npt.assert_equal(estimate_planet_mass(
1, 'Earth').value, 1) # * u.earthMass)
npt.assert_almost_equal(estimate_planet_mass(
1, 'Neptune').value, 0.29706202) # * u.earthMass)
npt.assert_almost_equal(estimate_planet_mass(
1, 5514).value, 1) # * u.earthMass)
with pytest.raises(TypeError, match='`astropy.units.Quantity` or float'):
estimate_planet_mass('string', 'Earth')
with pytest.raises(ValueError, match="Accepted str values for rho_p"):
estimate_planet_mass(1, 'Uranus')
with pytest.raises(UnitConversionError):
estimate_planet_mass(1 * u.s, 'neptune')
def test_estimate_semi_amplitude():
"""test if estime_semi_amplitude behaves as exected"""
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, 1).value, 0.08948015) # * u.m / u.s)
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, R_planet=1, rho_planet='earth').value, 0.08948015) # * u.m / u.s)
npt.assert_equal(estimate_semi_amplitude(
365, 1, 1, inc=0).value, 0)
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, 1, ecc=0.5).value, 0.12654404) # * u.m / u.s)
with pytest.raises(TypeError, match='`astropy.units.Quantity` or float'):
estimate_planet_mass('1 earthRad', 'earth')
estimate_semi_amplitude('1 year', 1, 1)
estimate_semi_amplitude(365, '1 solMass', 1)
estimate_semi_amplitude(365, 1, M_planet='1 earthMass')
estimate_semi_amplitude(365, 1, M_planet=1 * u.earthMass, inc='90 deg')
with pytest.raises(ValueError, match='required when M_planet is not'):
estimate_semi_amplitude(365, 1)
estimate_semi_amplitude(365, 1, R_planet=1)
estimate_semi_amplitude(365, 1, rho_planet=1)
with pytest.warns(FulmarWarning, match='overrides'):
estimate_semi_amplitude(365, 1, 1, R_planet=1, rho_planet='earth')
| 36.446154
| 82
| 0.699451
|
import os
import sys
# sys.path.insert(0, os.path.abspath('/home/jrodrigues/Documents/PhD/fulmar'))
from fulmar.estimators import (
estimate_planet_mass,
estimate_semi_amplitude
)
from fulmar.utils import (
FulmarWarning
)
import astropy.units as u
import numpy as np
import numpy.testing as npt
from astropy.units import UnitConversionError
import pytest
def test_estimate_planet_mass():
"""test if estimate_planet_mass behaves as expected"""
npt.assert_equal(estimate_planet_mass(
1, 'Earth').value, 1) # * u.earthMass)
npt.assert_almost_equal(estimate_planet_mass(
1, 'Neptune').value, 0.29706202) # * u.earthMass)
npt.assert_almost_equal(estimate_planet_mass(
1, 5514).value, 1) # * u.earthMass)
with pytest.raises(TypeError, match='`astropy.units.Quantity` or float'):
estimate_planet_mass('string', 'Earth')
with pytest.raises(ValueError, match="Accepted str values for rho_p"):
estimate_planet_mass(1, 'Uranus')
with pytest.raises(UnitConversionError):
estimate_planet_mass(1 * u.s, 'neptune')
def test_estimate_semi_amplitude():
"""test if estime_semi_amplitude behaves as exected"""
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, 1).value, 0.08948015) # * u.m / u.s)
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, R_planet=1, rho_planet='earth').value, 0.08948015) # * u.m / u.s)
npt.assert_equal(estimate_semi_amplitude(
365, 1, 1, inc=0).value, 0)
npt.assert_almost_equal(estimate_semi_amplitude(
365, 1, 1, ecc=0.5).value, 0.12654404) # * u.m / u.s)
with pytest.raises(TypeError, match='`astropy.units.Quantity` or float'):
estimate_planet_mass('1 earthRad', 'earth')
estimate_semi_amplitude('1 year', 1, 1)
estimate_semi_amplitude(365, '1 solMass', 1)
estimate_semi_amplitude(365, 1, M_planet='1 earthMass')
estimate_semi_amplitude(365, 1, M_planet=1 * u.earthMass, inc='90 deg')
with pytest.raises(ValueError, match='required when M_planet is not'):
estimate_semi_amplitude(365, 1)
estimate_semi_amplitude(365, 1, R_planet=1)
estimate_semi_amplitude(365, 1, rho_planet=1)
with pytest.warns(FulmarWarning, match='overrides'):
estimate_semi_amplitude(365, 1, 1, R_planet=1, rho_planet='earth')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -10
| 66
|
745adf17e78917104f0f56ad44e17b5a7a1b9a7e
| 25,642
|
py
|
Python
|
rwb/images/__init__.py
|
boakley/robotframework-workbench
|
92f15845d6fa4baedd4f3c4346fb8ff5cf9149a6
|
[
"Apache-2.0"
] | 11
|
2015-03-09T01:53:21.000Z
|
2021-03-29T08:33:05.000Z
|
rwb/images/__init__.py
|
boakley/robotframework-workbench
|
92f15845d6fa4baedd4f3c4346fb8ff5cf9149a6
|
[
"Apache-2.0"
] | 1
|
2016-08-24T06:20:11.000Z
|
2016-08-24T06:20:11.000Z
|
rwb/images/__init__.py
|
boakley/robotframework-workbench
|
92f15845d6fa4baedd4f3c4346fb8ff5cf9149a6
|
[
"Apache-2.0"
] | 5
|
2016-03-03T15:27:09.000Z
|
2019-03-26T13:05:32.000Z
|
'''
This data was automatically generated by script img2data.py.
These images are part of the FAMFAMFAM silk icons set which is
provided under a creative commons license. For more information see
http://www.famfamfam.com/lab/icons/silk/
'''
data = {
'cancel': '''
R0lGODlhEAAQAPcAAAAAAAAAMwAAZgAAmQAAzAAA/wArAAArMwArZgArmQArzAAr
/wBVAABVMwBVZgBVmQBVzABV/wCAAACAMwCAZgCAmQCAzACA/wCqAACqMwCqZgCq
mQCqzACq/wDVAADVMwDVZgDVmQDVzADV/wD/AAD/MwD/ZgD/mQD/zAD//zMAADMA
MzMAZjMAmTMAzDMA/zMrADMrMzMrZjMrmTMrzDMr/zNVADNVMzNVZjNVmTNVzDNV
/zOAADOAMzOAZjOAmTOAzDOA/zOqADOqMzOqZjOqmTOqzDOq/zPVADPVMzPVZjPV
mTPVzDPV/zP/ADP/MzP/ZjP/mTP/zDP//2YAAGYAM2YAZmYAmWYAzGYA/2YrAGYr
M2YrZmYrmWYrzGYr/2ZVAGZVM2ZVZmZVmWZVzGZV/2aAAGaAM2aAZmaAmWaAzGaA
/2aqAGaqM2aqZmaqmWaqzGaq/2bVAGbVM2bVZmbVmWbVzGbV/2b/AGb/M2b/Zmb/
mWb/zGb//5kAAJkAM5kAZpkAmZkAzJkA/5krAJkrM5krZpkrmZkrzJkr/5lVAJlV
M5lVZplVmZlVzJlV/5mAAJmAM5mAZpmAmZmAzJmA/5mqAJmqM5mqZpmqmZmqzJmq
/5nVAJnVM5nVZpnVmZnVzJnV/5n/AJn/M5n/Zpn/mZn/zJn//8wAAMwAM8wAZswA
mcwAzMwA/8wrAMwrM8wrZswrmcwrzMwr/8xVAMxVM8xVZsxVmcxVzMxV/8yAAMyA
M8yAZsyAmcyAzMyA/8yqAMyqM8yqZsyqmcyqzMyq/8zVAMzVM8zVZszVmczVzMzV
/8z/AMz/M8z/Zsz/mcz/zMz///8AAP8AM/8AZv8Amf8AzP8A//8rAP8rM/8rZv8r
mf8rzP8r//9VAP9VM/9VZv9Vmf9VzP9V//+AAP+AM/+AZv+Amf+AzP+A//+qAP+q
M/+qZv+qmf+qzP+q///VAP/VM//VZv/Vmf/VzP/V////AP//M///Zv//mf//zP//
/wAAAAAAAAAAAAAAACH5BAEAAPwALAAAAAAQABAAAAiWAPcJHEiwYEFpCBMiNLhP
WjZz4CB+A5dN2sGH2TJm+7ax4kCHEOlx3EgPHEeLDc1loydwokB6G1EJlEYRHMt6
+1hW/IaSpreN+/ThzIYq5kyKGffV07ePpzSeMzl+UypU6aunMhtSdCcwI0t606A2
3PjN3VVXK2NO+/iKIzZp0xB+Q4Xt4re7te4WZSgNVV+EfhkKLhgQADs=
''',
'cog': '''
R0lGODlhEAAQAOYAAElJSU5OTlFRUVJSUlNTU1hYWFtbW2FhYWJiYmRkZGtra21t
bW5ubm9vb3FxcXl5eYCAgIGBgYKCgoODg4WFhYeHh4mJiYyMjI+Pj5ycnJ6enqCg
oKGhoaOjo6Wlpaampqenp6ioqKqqqqurq6ysrLCwsLGxsbKysrW1tbe3t7m5ubq6
ury8vL29vb6+vr+/v8DAwMHBwcLCwsPDw8TExMXFxcbGxsfHx8jIyMnJycrKysvL
y8zMzM3Nzc7Ozs/Pz9DQ0NHR0dLS0tTU1NXV1dbW1tjY2NnZ2dvb29zc3N3d3d7e
3uLi4uTk5OXl5efn5+np6e3t7QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAAFIALAAAAAAQABAAAAfDgFKCgzs3NIOIiVI3Tk0k
ioJBPjpSMktKJ1IkIIhASVFIMi5FQyUkO08piDRJQ0dIpEdCOzgPDohDPDkrGRwy
NjEpFoI4NDBGPSwSghgzMj0XFRM0UEU5Ph6IJDQrNkoKL0xCNj0miCEyKTBCBx0Y
Gz82PBrMMSwqCQUEgiQ1MTU3RICI4QKFCEQjPhCpsSNIjhs8arTYwQARiyUfJlCg
IWMBgw9CIAxA1CCBlAmFEEgpEAAAJCkRWpww8DJRAQEjEwUCADs=
''',
'control_play': '''
R0lGODlhEAAQAPZDAHNzc3NzdHV1dXV1dnZ2d3h4eHh4eXl5eo6OjpCQkJKSkpOT
k5aWlpeXl5mZmZycnJ2dnaKioqenp6qqqqurq7Ozs7m5ucDAwMLCwsPDw8TExMbG
xsnJyc3Nzc7OztHR0dLS0tPT09TU1NXV1dbW1tfX19jY2N3d3eHh4eLi4uPj4+Tk
5OXl5ebm5ufn5+jo6Onp6erq6uvr6+zs7O3t7e7u7u/v7/Dw8PHx8fLy8vPz8/T0
9PX19ff39/j4+Pr6+vv7+/z8/P7+/v///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAEQALAAAAAAQABAAAAebgESCg4SFhiYliSEhhoMm
MjtAQDouHY0mPUM9OjqZPRyFJkBDNjYYNjIymhaDJaqlNgIYqS5DJxWCJZ2pMgIB
FjIuPTYUuUClqQIFBx0uLkATgiHHvMoCNiouOhHSnS7BvjYuKTopEIIdKUPOLgDi
KSlAGw6Dp+su2PBDKfSEFjZDKGkDwq/RhGacUlhY0EgQBAYMFiBA0LAioUAAOw==
''',
'cut': '''
R0lGODlhEAAQAOZuAOnt8VaOvKnE19zp9Vum2Pv8/aTB1qXC12Cq3KbC2KrF2KvG
2ZK20eTt84WryVyj0mCFroetyGe372ex5Zy804Oqx9Dg8OLm6aXN77PF0cTW57fH
0ujs79vm7qC+1k14r8vc567I3nWiyl+m1lF6r1qi0mGdxmWz6s7e7cDU5ubq7V+K
uIOow4apwU16svDy9P39/vf5++Hr+FOQwdzn7q7H2uTs8qa4w12QvGOVv12m2KjE
16fD2fr8/WKr3UN2sqPA1puxwFWEtNPi8Zu93Ozv8VF6sHeewWOy50F3tWyewNjk
7cfU3OLo7F6fy8HN1Fmax2aw57TN4myhxF2CtJm62Haavf3+/p6+1oSkut3p9aPA
2Hejwd/p8Ed4s/H2+UV6uGms2mCt5HWavGa27Ofs74CoxkB4t/j5+pS30ff5+ZO1
zrDJ2unw9f///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAG4ALAAAAAAQABAAAAeWgG6Cg24dET2EiYRsaTGD
V4qDNR5fgktmkYILIQ1uNFhrmW4CKV02PFttogYoOwkaIKJuQEMMAwqiMG4HWlIU
mWVWOUcOFhUFkQA4CA8nVTIsT5FjI0wbYkQYEjo3ijNcbi1RIhMQUz9qiQFhSmRC
XlBuQWcqiRlOBCscLiUXWUkvFAFoIogKEhM+jMhyg4YEmA9FCAUCADs=
''',
'disk': '''
R0lGODlhEAAQAPemADZqu3uj4nmi4Xqj4/f6/XSd3PH2/Ojw+vf6/nuk4mWNyXeg
3tvn9zpuvTZru3qj4jRntDdsu+Hs+TJhp3qj4Xih4Huj4dnl97vQ77rQ7r3Q7Nvm
9+7z+3We3r7R7NHf9vL2/Pb6/UBrrbjO74SjzmWMyER0v9vn+Njl9jZqtzlsvOrx
++Xt+jJjrF6Jyevx+36o5/f7/snc9Iqn0sfZ9G2Sy+nx+unw+nSe3TJhqqnC546r
1WqTz2iQzXCVzYCq6WmQynGZ2N3o+HyayKS+5NHg97HF4mWNyn6o6OLs+Zq13TJh
qVWCxpWw2r7S8GqSzfP4/czd9bzO58LV8jJiqjhsunKb2Xef3nybydDf9kJ0wDNj
rXaf3vj6/u3y+zVot/P3/TRmsjtuvUN1wHqk40N0vTZqujVotYWl1kJzvcXY8nqi
4G2W046r2GySyzFgqDxpq+/0/HOb2nii4Heg35+64dHg9nKc2zJiqzhru3mYxzVo
tnOb2TRms9/p+H2m5k99w3af3Xuk47PK7aa94e70+/b6/meOya3G642r2YGezHCZ
1nqi4jhsu+rw+vD1/DNkrzJhqOPt+Xqi4Tptu2aNyXSc2t/p+TNlsGGGvH2n5zNk
rq3F6u70/MPV77bM7jRlsfb5/WSMyMLcv4jAYvj7/v///wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKYALAAAAAAQABAAAAj/AAklcjGmQQQHAACoEEPJ
hAIpptDYKVIqRqhHjryEIvDCj4cnRph8+GEohCgOK0CUMpDkRAYNZbRkQWLAgCgb
hQiEOiBkAwVPKRpEgXHgQKUbULrEucTgwhwdeyLI0CRBgqROBMCwaIpiAREIDmj8
EaXgkJsaQEqIWmupTh8AagQNIJMggAUBdLjguFNACSgAUwYMCBAggYAKCzoUkMOn
CSQATh48oGC3wpVABawEWbRjixkMjEqJHk2azQw8eUYIQECqtevWoXiQyNHo0yQE
o3Lrzh2qh6JIVQatYf3adagjWCYAQsSJtPNSQ/SIaOMjzZczEMJg2tSCypI3E+Bk
AgoIADs=
''',
'disk_multiple': '''
R0lGODlhEAAQAPcBAAAAAP//////////////////////////////////////////
/////////////////////26X1Hue1HGY0IKk1miOzWmQzXWa0HOZ0WKLyCBarf//
/////////////////////2WNzLDN8////7PH4////////////////6G/6mCJyf//
/////////////////////1uGx57A732i2Xqd04Cj1Y+u2nea0neb0nec0nGX0GKL
yCBarf///////////////12IyKG/73WZ0bjS9P///7vN5v7///////L2++3x+KG/
6mCJyf///////////////2WNypm46l+JyZu97W6X1Hue1HGY0IKk1miOzWmQzXWa
0HOZ0WKLyCBarf///////2GLyZK15mGLy5687mWNzLDN8////7PH4///////////
/////6G/6mCJyf///////1SBxJe26nOYzqG+6luGx57A7////26TzP////////f7
//H4/4yv5GGKx////////1F/w5q272+WzJG21l2IyKG/7/r8/fv8/v39/vz9/vr7
/fv8/YWo3VN/wf///////1WDxrrO72aOx5y84GWNypm46n6l3YCm3Xyj23qg2Xmg
2Xif2Hie2F2Ev////////zNouliEw1OAxZay7mGLyZK15oGn4oGn4X2j3nuh3Hmf
23ee2XOa1Fd+u////////////////1WDxrrO71SBxJe26urz8+bx7ebx7+bw7+Xx
7e3183mc1URwsP///////////////zNouliEw1F/w5q27+jz6oS/UZjJb5nJcYS/
VOn05Huc3Tppqv///////////////////////1WDxrrO7///3cfuh9f0otf2osfu
jP//4IWi3T5qq////////////////////////zNouliEw2iL03CP4WyN3G2L2m6K
12yLzURtqy5fpv//////////////////////////////////////////////////
/////////////////yH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'folder': '''
R0lGODlhEAAQANUAANiGLNiILdiyLNmOM9myNNqUNdqbNdqjNtqsNdu2POHCXuLD
YOPHa+XKdObNeenBc+vKa+7OiO7PdO/Qeu/Sgu/UlPDLJvHNLvHOMPHPafHQbPHS
cvLQOvLTfPLWg/PTRfPVTPPYjvTXVfTYXPTbnvTck/Tdp/XaZPXbavXgn/bedPbg
fPbhrPbpyffig/fji/jkjvjlk/jqwvnonPnpovrsrPrts/vvufvyyvvz2vv26fzz
0/767v778/7+/gAAACH5BAkAAD8ALAAAAAAQABAAAAaLwJ9wSCwaj8gkkaBYOBWJ
JMFHpeoYWIajQRAierBSisXrmXk8H8LbK5U8EAiFMpFIdOsfQleJPFpmZ2l5B1SB
OTEuKigjOwdCBz04NzUzLyuMIB87BkIGPTY0iSonIiAcGDidPwU8ooqlHxwXFjgF
QgM5JiQlIR4dHRsaGTIDQgAByAHLzMnJStDRQQA7
''',
'page_add': '''
R0lGODlhEAAQAPetANPl/cLc+O3z+NHk/FWQyjZrvPr7/fH1+dPl/M3j/DdrGbLS
ldDk+426ZGS+/Mff+tbn/tfo/s3j+1GNyDZqvMfg+tzq9uvy+Ozy99jo/szh+z1w
IJGugVOMKNXn/Y+8Z0+MyM/k/DltIlW0+G+Ud1O0+FqSykuGx4e5YO/0+kd8fLHW
kme9/LHTkNPm/EiEyI+7akyHyEJ8w9fm8maOTzpvIsjg++zy+NLm/NTm/VWPye30
+Z7X/8Pd+bTY9oy8ZZu4prnO6Pj7/5jX/87j+46tht/s98Td+brW9UiAw7TUlbXU
84GrYVuUzOjx+EyGxvL2+t/p9Ex7Mcnl+nSi0lWRysvi+32y4qDY/4e6YFa092W+
++/0+dfn/tbo/ury+lWQy8jg+WmW3Gmdz8nh+9Pn/cjg+lKNyM3m/IS24lOy+EBx
wEiEx/D0+E2IyJ+8rdXm/dDmunOYYL3a9maNzGGKSmK9/NXl/lS190aAxjdrITVq
u5Cx3fP3+3yq12i//d/r9V2ISou6Yszj+32w4cbf+uzz+ZbW/7vW9d7r9lePLc/j
/O70+MHb+E18xdTm/ISo1fj7/UB3wlKz98Xf+W7C/ZvW/6rA4muY3FCMyLzZ9t7q
9rPS8UuGyOPt9+nv9e30+LjW9Mrh+jZqZMni++bw97bUnOvz+vD0+e30+rvY9tbn
/f///////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAK0ALAAAAAAQABAAAAj/AFsF8dOEgMEqYHSM4WOg
lcNWVAQwAiWg4g0MM65gaujQBJQKZEjZoNQolBAfhMRwPHMgAREJVsIEQBIJzZQ0
kBxOOLCIQYgEoo5oioKhz6ckOlG5eFRmgAQzARAt4dTmicNMXLqs8pADh4YHAeao
ShHDIYgdGTJEgABggIYKPQKk6uTQjSEvEVZtBcBg0INCX144PHEBgt5VcNq+kUOj
jgqHbJzcAQAAAYIBQJgoiQNDEYdWeUqN0IKnhJpJgVqsYPXjw4ZWMjxVwsLD0pBD
Ukyx2r1AQStJgP6w2OLAgZ0agrKwQuG6laMLRhJtsmDhVJEODRY06PD5Ep01BcJT
C6CwZ5QeBSJItAoIADs=
''',
'page_copy': '''
R0lGODlhEAAQAPemAJK75E57yuDt/9zq/8vi/+ry+9fn/9vq/9Ll/83i/NXn/tPm
/G2X0u71+2SM0PH3/9rq/rzZ9+zz/MLc+efx7+fw+vn6/W2X02yX0pO75FyGzV6H
z9fn/OPv+tbn/FaFy6O+4ejy/G+Y0m6Z08jf+muW0unz/dHl/dDk/HCV053D6laB
zmqV0dPn/PP5/vb4+8rh+vL4/8Ti+lKEyr7Z9vP4/pfE7cLc+tLl/NXn//D2/urz
/e31/evz++vy+7TT9czm++jz/sfg+9Hk+3ye1nKk022T0cXe+PT4//X6/pa03K3F
5vP4/+r0+7vX9kZ2yJe04vP4+8Te+fj6/FaDzNTm/ViHzFeDzJa+5f7//5G75FWF
zefx++jy793p9e70/Yi76s3j/HSb1OTv+qK94vH3/trp/W6T0+nx++30/P////L3
/9vq/cvj+4ip3erz8YSm2vD2/M/j++Hs9uvz+nad1u/2/ufy/VeGzHCa1FiDy6/R
8/f5/MTd+ZPB7O/2+5G65O71/Mrg+vL3/lSDymqV08zh+ujx+uz1/drq/057y87k
/HKh0tfo/dbn/VWBynal06HJ7vX5/Ja/5sXe/FmHzPD3/9Hk/FOCz+fy+nqk1O31
/m+Y02SM0W6Z0uv0/Njo/GmS1czi+1aCy9np/97s/////wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKYALAAAAAAQABAAAAjuAOHUueBphIg8SyyYWsjQ
lBg1iHh84tLEBhQ+DRdyspNgEQwaSYBEApHRFINBC1rIOTKngSRNJRnUAMVhgSEV
kwAAKMIIEpkppjAggQCh0RAsWUzsCNEhExg3L0rESMTGjIcMmygJmfDDhQw/Slgw
KSWg1AAAOhKEIRHBC50ohArFIWsWQBkcl0T1cbJnTKVQb+gO0LJGgYITgqREuGPF
SKAeBSSkAWQph6MqKNrcCIKHyJkNDjo5mPGA1AFSBhAQePCh5JYGplGr/kOlJCYK
sVMT6LKi5CgJhyqgKVDAxxc9JVNceRJAUYDnjzQEBAA7
''',
'page_find': '''
R0lGODlhEAAQAPcBAAAAAP///0R9wU2LyE+MyE+MyE+MyE+MyE+NyE+MyU+Lx0uJ
xTt2xDxos////////////////0mFxOzy9+70+O30+O3z+O3z+O3z+Ozy+Ozy99fm
8n2y4mWU2ztos////////////0uIxfL2+sfg+snh+8ni+8jg+8Xf+cHb+LjW9Pj7
/7TY9n2w4WSS2z5rtP///3uHkmJtd2VtdbXI3V5mb1libMvi+8jg+cLc+LrW9fj7
/c3m/Mnl+oS24kJ5v2NsdYCMmIOPnEdMUqe3y0dNVHmFkH+Mmsbd9cTd+bzZ9t/p
9Ozy9/P3++nv9UV+wn2JlVZeZm94hTtBSnqGlUBKVnSDkkBHT7vP5Mjg+sLc+LvW
9bXU87PS8fD0+EaCxDQ5Pk1UW5CgsFFdaWV0hk5YY4iYqkpRWm55hczh+8ff+sLc
+L3a9rvY9u/0+kaDxlZeZlhhaUhPVkZJTXeAjDc7QDxCRjM4PSUoLNHk/Mzh+8fg
+sPd+cLc+O30+kWCxmNsda+5xI2ap3N6gLTD1lxjbKSvu4uYpkZNU9Pl/dDk+8zj
+8ff+sbf+ury+kKAxl9ocau2wWZweVZcYpypul1mbqWxvWp0fisuM9Pl/dHk/M3i
/Mvh+8nh+/L3+z98xVRbY42ap15ncDo/RZGcrU5VXH+KllZeZhgZG9Hk/M7i/Mzi
/Mng+8jh+/f6/Tt2wTg9QjxCSDE+S09UWEKGtiAwPSQ1QRsrNxUsO1Kz91Kz91Gy
90+y90+y9/X5/DZvv////////zx4wePt927C/aDY/57X/5vW/5jX/5bW/5XW/5TV
/5PU/2W++/T3+zFnu////////zhxv9/r9Wi//We9/GW++2S+/GS+/GK9/GO9+2G8
+2C+/GK8/Pj7/S1kuf///////zNpvOrx+N/s997r9t7q9tzq9tzq9uvz+uvz+ury
+vP3/PT4/P3+/ipgt////////zRquzBlujFmuzBmujBmujBmujBlui9lui5luS5l
uS5kuS5kuSxity5ityH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_paste': '''
R0lGODlhEAAQAPepAE57yn+s19mpX8vi/9Ll/9fn/9vq/4Sv2dGXU/3utdOaVtel
XP7vttXn/ury+9GVU7zZ9/PbgPDUddafWX2p1vH3/9yya82MTenx++nNk+nNke70
/enLjp5sLe31/tHl/d3Mtujy/N+dTtDk/Mvj+8rg+tyWQleGzKR1N+DJlcTi+qV2
O+/2+/H3/t2YRIGt13Gb09bn/WmU04i76tuVQevy++fy/erz/YGt2NLl/FiHzPb4
++G5gFaBzvL3/9ywaFWByoGu18Te+enz/bTT9dCSUEZ2yNWgWdyvZt3p9d2zbO71
++vg0/7//92ye7vX9u/apVaFy7eSY26Y0t61b1SDyvLirk57y9agWs3j/NimXc3i
/MLc+tXn/9qqddObVtmpYObGiebEiOfw+nCgzGePs8zi+9Hk/N6bS/D2/s6OTtyX
ROvSmOjz/vD3/7GIVfHdqtTm/ejx+uHSvliDy/j6/NqpdNquZdOcV+K/hOfJi9mq
YKuAStyueOPv+mePyeTv+tilXc+SUKFuLufx78fg++jy71mHzNyWQ9itdMXe/HCV
0/P5/vLhsPP4+7yab3Gfza/R89+2cJ1qKu7l2tqqYH2p2leDzNyVQsjf+uK9ee3W
npPB7Ovz+s+RUOfy+laDzMLc+eC5dFyGzcTd+dSeU+zz/Nnp/5hjIP///wAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAKkALAAAAAAQABAAAAj/AFOlooSqYMEOAhMKZCIF
FQoGESIwGITqEQiFK1JYEZFAgoQEaDhkmKQQVSNNLtYgMoGJRqkwqErCESVJiYUD
QQIEIAOpDIw6qVBBofIDjIAXTYbcCOHn0wwZO1BtsoCkkoADHhQVCkWEkQpOU1Cx
ubNHy4IDabZkyQQhSSdHVVBpEBAIywQcLXKcMUPqSSRAh1DpWXAEj4IAPho0+FBC
CAQbOlCJmfAFwYMAbrrEiDOCBJc2J1DlUYBAkCcKFU4ZOFWAwIAKUVDxeFBEzQUK
S1Szds0C1JtETvp4sWOJkO7WAwz1mMPHIKo/puSMweDAQY0NdBQKXHTJCIArAMID
AxkVEAA7
''',
'page_white': '''
R0lGODlhEAAQAMQAAJSUlJWVlZmZmebm5ufn5+np6erq6uvr6+zs7O3t7e/v7/Dw
8PHx8fLy8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAABgALAAAAAAQABAAAAVjICaOZDlO1qWuE2BaDyQz
SnERQXlJlCQ5C8Ml8hCQLpSkJNI4rC7H1MUygTAQhgF09JxWG4rEVtT1RhyNMaZ8
qVAiETXbkpzIV5Z8pTKxH/EWe4J3gHl5hGwqJBSJKhQmkCUhADs=
''',
'page_white_add': '''
R0lGODlhEAAQAPcNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAP///////////////wAAAPj4+Pv7+/z8/Pz8/Pz8/Pz8/Pz8/Pz8/Pj4+JSU
lAAAAAAAAP///////////wAAAPv7+/T09PX19fX19fX19fHx8e/v7+np6fz8/Ofn
55WVlQAAAAAAAP///wAAAAAAAPz8/Pf39/n5+ff39/f39/Pz8/Dw8Orq6vz8/Pb2
9vT09JmZmQAAAP///wAAAAAAAPz8/Pn5+fn5+fn5+ff39/b29vLy8uvr6/z8/Pz8
/Pz8/Pz8/AAAAAAAAAAAAAAAAPz8/Pv7+/z8/Pz8/Pv7+/j4+PX19fHx8ezs7Orq
6ubm5vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/j4+PX19fLy8u/v
7+3t7fz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pv7+/j4+Pb29vPz
8/Ly8vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pr6+vn5+fb2
9vb29vz8/AAAAAAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pv7+7bIq3WZ
YGaOT2GKSjxiJgAAAAAAAAAAAPz8/Pz8/Pz8/Pz8/Pv7+/v7+/v7+7HEpYGrYbTU
ldDmuo+7alePLTdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pv7+/v7+/r6+mKLSrHTkLHW
kv///4y8ZY+8ZzdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/v7+0x7MbbUnP//
/////////7LSlTdrGQAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/D9xIou6Yoe6
YP///4e5YI+8ZzdrGf///wAAAPn5+fz8/Pz8/Pz8/Pz8/Pz8/Pz8/JaxhlOMKI26
ZLLSlY26ZFOMKDdrGf///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC5aFTZq
GTdrGTZqGTJhF////yH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_white_copy': '''
R0lGODlhEAAQANUgAPn5+fLy8sjIyMTExMLCwurq6vPz8/f39+/v78zMzJ6enuvr
6/X19crKysfHx/T09MbGxrm5ucDAwOLi4sXFxeHh4e3t7dHR0f39/fb29vr6+t/f
3/j4+Pv7+8nJyfz8/P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAACAALAAAAAAQABAAAAaGwErDQyRuMKCkEtTQfJ4f
DuG4THo+BwOi8MkMNlXQlZMJLJ4dAXJ57XAYgUvEoRAYkdePOyNxQqVHeXpSWFpc
XhuCHwADUWVnT0RQHxoUg3AWXJJ6HRoQaGRaH5toDlAdABkPo4lFAgqTGgAHo1UY
ApOdTh5heR2/v7VVilAACWGtRUQJE0EAOw==
''',
'page_white_find': '''
R0lGODlhEAAQAPcBAAAAAP///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAP///////////////wAAAPj4+Pv7+/z8/Pz8/Pz8/Pz8/Pz8/Pz8/Pj4
+JSUlAAAAAAAAP///////////wAAAPv7+/T09PX19fX19fX19fHx8e/v7+np6fz8
/Ofn55WVlQAAAAAAAP///3WAi2NsdWZtddHV2V9nb2Jma/f39/Pz8/Dw8Orq6vz8
/Pb29vT09JmZmQAAAGNsdYCMmIOPnEhMUsTGyUdNVHuGkJOWmPHx8fLy8uvr6/z8
/Pz8/Pz8/Pz8/AAAAH2JlVZeZm94hTtBSn2IlUBKVnSDkkBHT+Hh4vX19fHx8ezs
7Orq6ubm5vz8/AAAADQ5Pk1UW5CgsFFdaWV0hk5YY4iYqkpRWoKDhPj4+PX19fLy
8u/v7+3t7fz8/AAAAFZeZlhhaUhPVkhKTYqKizk8QDxCRjM4PSorLPv7+/j4+Pb2
9vPz8/Ly8vz8/AAAAGNsda+5xI2ap3h8gNPU1F5kbKSvu4uYpk9RU/z8/Pr6+vn5
+fb29vb29vz8/AAAAF9ocau2wWZweVldYre4uF1mbqWxvWp0fi4wM/z8/Pv7+/n5
+fn5+fj4+Pz8/AAAAFRbY42ap15ncDtARaurrE5VXH+KllZeZhgZG/v7+/r6+vr6
+vj4+Pj4+Pz8/AAAADg9QjxCSCwxNVVXWbi5uTk7PT0/QjQ2Nzs8PPr6+vr6+vr6
+vr6+vr6+vz8/AAAAP///wAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pv7+/v7+/v7+/v7
+/v7+/v7+/z8/AAAAP///wAAAAAAAPz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8
/Pz8/Pz8/Pz8/AAAAP///////wAAAPn5+fz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8/Pz8
/Pz8/Pz8/Pn5+QAAAP///////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAAAALAAAAAAQABAAAAj+AAEEEDCAQAEDBxAkULCA
QQMHDyBEkDCBQgULFzBk0LCBQwcPH0CEEDGCRAkTJ1CkULGCRQsXL2DEkDGDRg0b
N3Dk0LGDRw8fP4AEETKESBEjR5AkUbKESRMnT6BEkTKFShUrV7Bk0bKFSxcvX8CE
ETOGTBkzZ9CkUbOGTRs3b+DEkTOHTh07d/Dk0bOHTx8/fwAFEjSIUCFDhxAlUrSI
USNHjyBFkjSJUiVLlzBl0rSJUydPn0CFEjWKVClTp1ClUrWKVStXr2DFkjWLVi1b
t3Dl0rWLVy9fv4AFEzaMWDFjx5AlU7aMWTNnz6BFkzaNWjVr17Bl07aNWzdv38Al
hRM3jlw5c+fQpVO3jl07d+/gxZM3j149e/fw5dO3j18/f/8EBAA7
''',
'page_white_paste': '''
R0lGODlhEAAQAOZyAN/f3/b29vj4+NegU/n5+fr6+tmnXNefUv3utfPz8+rq6v7v
ts6LRszMzPPbgPf39/Ly8u/v7+C1cKqATal9QZdjINCPSPDUdXhOGf39/daeUsLC
wtCSStOXTeS9fKR1N86NSKFuLreSY+O7etSYTvT09NqbSt+dTt6yadeiYuS/fqh7
P6Z3PaNwOZ5sLeK5dtyraNejV9yuaaV0OuLi4t2uZevg09GUS9elYuC0btmmZPX1
9dyWQtyVQtmlWs2HQ6p+QaV2O96bS96xa9WdUdSbUNWdUMyJRe7l2tGTStyXRO3t
7cuFQuvr66uESd3MttyrYrGIVduqYdqoZdikWOHSvs+OSNOYTuG3c9acUNikYtWc
UMTExKyESdqUQdqoXvn49t+zbdyWQ9KTS51qKr+TUd2xbNuzcryab+S/f9KWTaFq
NKJtNquASvv7+8nJyZhjIPz8/P///wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAHIALAAAAAAQABAAAAfUgHJySHCFhS6CiYI2InAf
Cw4OCyFwaE+KQWcqJwgXFwhCORJkinBpKCZKYjw9XkRmFaUeNVBfBmVdThQTGG8A
GXJwI1I+AwdAYHHKcQIbv3AvBlQDGhRxDwkRCnEBXABwWDEDRkUrzAEQTctvcBIH
WSRqLHFuAjsQS9vsYVtXNxwzlNULkC0OuyEdxlgA0WKZGwIBShiEIyOJBQZH2CyL
U4DAg4kwrDD4wWTNRjcFChiMgmOKDi0pJgh0Q9ONwSptDFXAsHEZgQaKBAF4Q7To
mwY0AgEAOw==
''',
'page_white_stack': '''
R0lGODlhEAAQAMQAAAAAAGhoaJOTk5SUlJiYmLi4uL29vcLCwsPDw8bGxsfHx9bW
1urq6uvr6+3t7e/v7/Ly8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAA
AAAAAAAAAAAAAAAAACH5BAkAABsALAAAAAAQABAAAAVp4CaOZCliWppVgjlq1WIU
yhG4m2YhqFq5GgwCJqMdTJpMQsdLpSyDV0YRHFYiD4aGQhAlqUodBdJweqffpGUC
cWjPS7SagtWcwVTVhSKxjwBJS04YFxV+UnkqGCg4KhmPGYclTpQXOJchADs=
''',
'page_white_text': '''
R0lGODlhEAAQANUAAJSUlJWVlZmZmaWlpaqqqqysrK6urq+vr7GxsbS0tLi4uLq6
ury8vL29vb6+vsDAwMLCwsPDw8bGxsfHx8jIyMrKysvLy83Nzc7Ozs/Pz9PT09TU
1NXV1dbW1tfX19nZ2dvb293d3ebm5ufn5+np6erq6uzs7O3t7e/v7/Dw8PHx8fLy
8vPz8/T09PX19fb29vf39/j4+Pn5+fr6+vv7+/z8/AAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAADYALAAAAAAQABAAAAaAQJtwSCwOY7SachkDGGkt
l1SFItVGgWINJoPBWKlS7dUSEGuyxyJxIAyWtXOyRou5VKaSKD5UTiAOCgkIBgUn
fEJwSnUvLCuINkoYFRIRDw0MCy+QiosyMjGcNR0aGRcWFBQSoWdLNDQzsbGiISAf
HhwbukmtnXBEMr5LMkbFRUEAOw==
''',
'table': '''
R0lGODlhEAAQANUAAEJ3u1OEw1yMyV2KxWOSzmmW0W6a03Ke1nSg13uk2ny+dn6o
3YGp3oLCfIWt4Iiv4onGgo2y5I7Jh5K25pPLi5a66ZjOkJu86p3QlJ/A7aPUmqXC
6qvG6K7I6bPL6brP6rvQ69nj8OTr9erw9+7y+PP2+v///wAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAACcALAAAAAAQABAAAAaSwJNwSCwKN5vM5VKZTCIP
ByOROJau2OwVIbyUNBqMhSKBNBqKkqFbMrnf8FKBjazbNyWCcFLamDhvgG4jAkIR
JRyJHB0eHB6PIYUnhx0mHoFvIQFCDiWPn42OHZpCDCSXIG8fbSaRQgskILKzoCED
QgmxJqlugq23JwgkIyIiIcfIx8AHBgUEAgIB0tMBAEbXREEAOw==
''',
'table_gear': '''
R0lGODlhEAAQANUAAFaHx1h5o1tbW12Nyl6BrmOSzmVlZWmW0WxsbG6a03JycnKG
n3Ke1nSg13uk2ny+dn6o3YGp3oLCfIWFhYWt4Iiv4omJiYnGgo2y5I7Jh5K25pOT
k5PLi5a66ZjOkJqampu86p3QlJ6jq5/A7aKioqPUmqXC6qurq6vG6K7J6bHF37PM
6rW1tbbBzrfE1bu7u7vQ68LCwsrKytHX4NTU1NnZ2dzl8uHh4eTr9ejo6Orw9+7y
+PP2+v7+/gAAAAAAACH5BAkAAD4ALAAAAAAQABAAAAa1QJ9wSCz6TKYRCNTRaDAV
SsThEJp42KwW2xCCeKVSyMPJXCSSBy/h5fXe8Djv0Eba79eCUMMz9VBwgC8sOgNC
GDwoiigpLi8qMS8zIiQ+iCk9K28vNS80nTQWPhQ8K6amLTQnExYsMhs+ETuaMD2u
ORNvHzcsExA7MMEwJCw3H6YfNB8GDsA9tT0yMi8fHywxCD4NOzo4ODY2JDLiMSTY
PgwJBwUDAwALCAQkFgECAkZFCvdDQQA7
''',
'table_multiple': '''
R0lGODlhEAAQANUAAEJ3u1OExFyMyV2KxWOSzmmW0W6Z03Ke1nSg13uk23y+dn+o
3ICp3YLCfIWs4Iew2oiv4onGgouz3I2y45K435O25pPLi5a455u96pu/457B5Z/A
7aLD46PUmqTC6qTLxqTLyavG6K7I6a7K8LHK6Nnj8OXs9unv+Orw9+3y+PP2+vX4
+////wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAkAAC0ALAAAAAAQABAAAAaXwJZwSCwKPZ4NpjKBOBiJ
RNGjqlqtiCJG9eFoMhTJY7KIDrejlXqtOqmywi0mBeIomU6otKLCrEJXgVkTfSlb
HR0WEQ0NCioGLYQXK1sslpcsKgUtDioVhipIokgqBC0IKXx8HiwhmCgCLQcGDiaE
IbghJLslsUKohCKtmCUBQ7Odu7kkIsVFqCgmJdPUA0WzBQQB29wAQQA7
''',
'zoom_in': '''
R0lGODlhEAAQAOZ8APTr4uny+vn7/T6DN+jx+dSwcPf6/fbv5L6HTeHJuFOeS1yo
Uu/1+zV5MPTs3Ony+YvGg+nXpdKuhPn7/t3Ckd7EjebRryprJuTOrNi5i72FTMqf
ZTJ0LNKubTBxK+jVo97Eo8OSW9KtbPHl2N/Fj/D2+2OyWfLn2ePMmb+LUOXPqde1
fffw5d3DkdCoatm7jMGOWHa3bd7Dpuzz+ovHhePNu/P4/ODHky5vKcyhZ2WnXmGw
V8+oY2usY9Grg8GPWs2mYsiaYMmbYc6nY/H3/J7RlZ/Sl9/Fo+bRrjN2LubRudGq
dsORVvH2++LLuYbFfbyEUffx7eTMrPHm2LmASMqgb/r29JdhRprPkl+tVoLCffPo
2rZ7Uffv5de2fezcv+71+/L3/ESLPefTuqxlP82naN/Ep9a1f8mbY82kcdq7gK5t
SKbVnZDKiM+pZdKtd+z0+k2WRV6rVOfToLd5Ute3fVqbU2e2XPjx7byDT+ry+uvz
+v///wAAAAAAAAAAACH5BAEAAHwALAAAAAAQABAAAAe6gHyCg4SFgw4tHW5DLi9b
hnxfBXUWLAcYbzljhQ4FKgYMentNAkdoU4QUXgZ7BA8BemACaRKEIkglrrB7e2Fm
IYQ8XXuwonc7CwAphEAHM3qie1lsCgAIhGVSRLwmcjFFPWIDhBlLAgxwC0ZYT20Q
DYQnGyATNgpxOjR2STg1hEpBqsgAAGCAFg4oKuTBQ2iEjx8INDTwcOFDBDVkokAS
5AQGiTk3hFzZKCgBlBVnmHAhWXINFTpW+AQCADs=
''',
'zoom_out': '''
R0lGODlhEAAQAOZ0APTr4u/1+/n7/eny+uzz+tSwcPbv5Ojx+fRFSO71++waI/Ts
3O4mLvdUVvpjYvxvbff6/fE1Or6HTeny+ez0+sGPWvjx7c2mYuPMmdKtd9Grg/D2
+/Hl2PHm2MORVunXpbyEUc6nY9/Fj9a1f8mbY/H3/OfTuuoRHL+LUObRrvPo2vfw
5d3Dkd7DptCoavn7/va2rvjy782kceDHk+LLueHJuNGqdvfv5eDHtvjIv/54dNu/
h9i5i8qfZdm7jPH2+7uGUtKubde2fd7EjfSrpN7Eo9KuhM+oY7FyRffx7ebRuejV
o8mbYeXPqbyDT8GNU9q7gN/Ep82naPL3/PfAt+zcv7uBTN3CkeTMrM+pZePNu8GO
WL2FTMOSW5dhRrNzS/Ln2bmASMqgb/P4/KxlP+bRr9y+pNa0eefToNe1faVcM8ia
YNe3fdKtbN/Fo8yhZ+TOrPOgm+ry+uvz+v///wAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAACH5BAEAAHQALAAAAAAQABAAAAe4gHSCg4SFgwssQVkhLj4q
hnRVBWxlKwZwGW8mhQsFTRABcnM/Am4kHYRXQhBzBxMDcgkCMkaEbSkbrrBzc1NR
XYRHN3OwonMECQAohBcGBHLGBBQBABKEUlglvDoPDg0IEQyEPDYCARQPOVQwRHEK
hGA9RS9j3uAMCidahEprYi0AAJh5cgbDECcWCHHQUEECFyBWdiz5AIVMEkiCaGwR
gWYGEy8YBdUAkWaEByQhBeFQE+ZLDDqBAAA7
''',
}
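# Usage note (an assumption, not part of the generated icon data): Tk's
# PhotoImage accepts base64-encoded GIF data directly through its ``data``
# argument, so these entries can become widget icons without temporary files.
if __name__ == '__main__':  # minimal sketch of how the dict might be consumed
    import tkinter as tk
    root = tk.Tk()
    icon = tk.PhotoImage(data=data['folder'])
    tk.Label(root, image=icon).pack()
    root.mainloop()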
| 61.198091
| 73
| 0.77338
|
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
28a6cd67583aaea23b8d40e9061ec596cdb2ce3c
| 34,063
|
py
|
Python
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | 2
|
2021-05-29T12:56:05.000Z
|
2021-10-31T04:56:32.000Z
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | 1
|
2021-01-29T13:12:28.000Z
|
2021-01-30T16:14:04.000Z
|
chapisha/create/create.py
|
whythawk/chapisha
|
ddaa028a48d10ff5396e18d1c0ae01fd56c9f465
|
[
"BSD-3-Clause"
] | null | null | null |
"""
.. module:: create
:synopsis: Import a Word `docx` document, define its metadata, cover and rights, and publish it as an EPUB3.
.. moduleauthor:: Gavin Chait <github.com/turukawa>
CreateWork
==========
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and define its
metadata, cover and publishing rights. Currently does not support `odt` since `Pandoc` seems to lose any embedded
graphics.
.. note:: This process will overwrite any existing EPUB3 file of the same name, if it already exists.
Workflow
--------
There are two main publication approaches, stateless and non-stateless. A stateless approach assumes you may be
starting each step discretely (perhaps via a set of one-time network calls). A non-stateless approach maintains
state, so you can complete the process in one step.
The *stateless* publication process runs as follows:
* Set the working directory on creation,
* Define and validate the metadata required for the creative work,
* Copy the `docx` file to import into the working directory,
* Copy the cover image to import into the working directory,
* Define and add any contributors, such as cover artist,
* Update the creative work's publication rights,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective of this workflow is to support what may be a stateless process i.e. the individual steps first bring all
the data required to produce the creative work into a project directory, and then produces it. State does not need
to be maintained between steps.
The *non-stateless* process runs as follows:
* Define and validate the metadata required for the creative work,
* Supply the `docx` file as a base64 string,
* Copy the cover image as a base64 string,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective in a non-stateless workflow is to minimise disruption, and store the minimum amount of information. Only
the epub itself will be saved, and then only because Pandoc does not support a memory-only epub build.
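As a rough, single-pass sketch of the non-stateless flow above (the `set_dedication`, `build` and `validate`
method names are assumptions used only to illustrate the sequence; `set_metadata`, `set_document` and `set_cover`
are documented below):
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory, metadata=metadata, stateless=True)
work.set_document(docx_base64)    # base64 string of the source docx
work.set_cover(cover_base64)      # base64 string of the cover image
work.set_dedication("An optional dedication.")  # assumed method name
work.build()                      # assumed method name
work.validate()                   # assumed method name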
Build your work
---------------
Import **Chapisha** and create a work:
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory)
Where `directory` is the complete path to where you would like the EPUB created. If you want a stateless workflow,
set the `stateless` boolean to `True`. If you already have the `metadata` (perhaps via a web form), you can skip
several steps and pick up again for setting the files and images.
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory, metadata=metadata, stateless=True)
Set metadata
^^^^^^^^^^^^
`Dublin Core <https://www.dublincore.org/specifications/dublin-core/dces/>`_ is a vocabulary of fifteen properties for
use in resource description. Four of them - `title`, `identifier`, `language` and `rights` - are required. The
`language` code is defined by the `ISO 639-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ standard
(e.g. `en` for English, or `fr` for French).
Metadata properties:
* `identifier`: UUID, DOI or ISBN of the creative work. A UUID will be generated if not included.
* `title`: Name given to the creative work.
* `language`: Specify the language of the creative work. Two letter code defined by ISO 639-1.
* `creator`: Name of a person, organisation, etc. responsible for the creation of the work. May be more than one.
* `work_uri`: The URI for your creative work.
* `contributor`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work. May be more than one.
* `date`: The publication date of the creative work. Provide in ISO format, YYYY-MM-DD.
* `subject`: The subject, or tag, of the creative work. May be more than one.
* `publisher`: Name of a person, organisation, etc. responsible for making the creative work available.
* `publisher_uri`: The URI for the publisher of your creative work.
* `rights`: A short, single-sentence statement of copyright and publication terms for the creative work, e.g. 'All rights reserved.' or 'Attribution-NonCommercial-ShareAlike 4.0 International.'
* `long_rights`: Lengthier description and information about copyright held in and over the creative work. Formatted as you wish it to appear.
* `description`: A short, single-sentence summary of the creative work.
* `long_description`: The pitch, or jacket-cover, description of the creative work.
Create a paired dictionary of these properties. For example:
.. code-block:: python
METADATA = {
"identifier": "isbn:9780993191459",
"title": "Usan Abasi's Lament",
"description": "Years after the events of \"Lament for the Fallen\", Isaiah tells of the myth of Usan Abasi, who was punished by the Sky God to spend eternity in the form of a brass bowl and imprisoned within a vast termite mountain. Now the ceremony which ensures that Usan Abasi remains dormant has failed, and his ancient evil awakes. A free, stand-alone short-story set in the city of Ewuru and linking \"Lament for the Fallen\" to a forthcoming novel.",
"language": "en",
"creator": ["Gavin Chait"],
"rights": "All rights reserved.",
"long_rights": ["The right of the creator to be identified as the author of the Work has been asserted by them in accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright gives creators space to explore and provides for their long-term ability to sustain themselves from their work. Thank you for buying this work and for complying with copyright laws by not reproducing, scanning, or distributing any part of it without permission. Your support will contribute to future works by the creator."],
"publisher": "Qwyre Publishing",
"publisher_uri": "https://qwyre.com",
"work-uri": "https://gavinchait.com",
"date": "2017-07-23",
"subject": ["science fiction", "african mythology"]
}
Set the metadata:
.. code-block:: python
work.set_metadata(METADATA)
Set document
^^^^^^^^^^^^
Most writers still use `Microsoft Word <https://www.microsoft.com/en-us/microsoft-365/word>`_ as their default work tool.
There are certainly other word processors, but this is the one most people will work with if they intend to be
professionally published as publishers still expect Word `docx` files for editing and markup.
**Chapisha** will create your cover, rights and dedication pages, as well as the table of contents. Your `docx` file
must contain **only** the creative content you wish included in that table of contents. Your document must also be
correctly marked up to ensure proper chapter creation.
EPUB documents will be read on multiple and diverse electronic devices. Don't expect any page number-dependent
formatting to survive. Instead:
* Each chapter must have a title, formatted as `Heading 1`, with lower-level headings formatted for each heading type.
* There must be no title page, contents, or anything else. Chapter 1 starts at the top of the first line of the document.
* Page numbers and other page-specific information will be lost.
* Fonts or typographic formats and alignment will be lost, although `bold` and `italics` will be maintained.
* Images will be maintained.
Once the work is built you can enhance its styling. However, there are still limits in the EPUB3 standard in comparison
to a printed work.
.. code-block:: python
work.set_document(source)
Where `source` is any of the complete path to the source `docx` file, a `bytes` file import, or a `base64` string.
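As a sketch, each of the three accepted forms could be produced as follows (the file path is a placeholder, and only one of the three calls is needed in practice):
.. code-block:: python
    import base64
    from pathlib import Path
    # 1. Complete path to the source docx
    work.set_document(Path("/path/to/my-work.docx"))
    # 2. Bytes from an opened file
    with open("/path/to/my-work.docx", "rb") as f:
        work.set_document(f.read())
    # 3. Base64-encoded string (e.g. received from a web form)
    with open("/path/to/my-work.docx", "rb") as f:
        work.set_document(base64.b64encode(f.read()).decode("utf-8"))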
Set cover
^^^^^^^^^
There is, unfortunately, no standardisation on the image size, dimensions or resolution required for an EPUB. However,
a recommendation is an image (`.jpeg`, `.jpg` or `.png`) of 1,600 by 2,400 pixels, and less than 5MB in size. You will
need to create your image (or have someone create it for you) exactly as you wish it to appear on the cover. Nothing
will be added, removed, or changed.
Please also ensure you have the appropriate rights to use the image on your cover. There are more than sufficient
services providing openly-licensed, or even public domain, work for you to use.
.. note:: You can optionally add the image contributor details here, or on the next step. Do not do it in both or the contributor information will be repeated.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.set_cover(source, contributor=CONTRIBUTOR)
Where `source` is the complete path to the image file, a `bytes` file import, or a `base64` string.
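For example, to set the cover from a local file and leave the artist acknowledgement to the `add_contributor` step described next (the path is a placeholder):
.. code-block:: python
    from pathlib import Path
    work.set_cover(Path("/path/to/cover.jpg"))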
Add contributors
^^^^^^^^^^^^^^^^
You may have numerous contributors you wish to acknowledge. Fields are:
* `role`: Contributor identity, based on a specified list of `artist`, `editor` or `translator`.
* `name`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work.
* `terms`: Information about copyright held by the rights-holder in and over their contribution to the creative work. Formatted as you wish it to appear.
* `year`: The year of the contribution or publication of the contributor's work.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.add_contributor(CONTRIBUTOR)
Call `add_contributor` as many times as you have people or organisations to acknowledge.
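For instance, to acknowledge both an editor and the cover artist (the editor entry is illustrative):
.. code-block:: python
    CONTRIBUTORS = [
        {
            "role": "editor",
            "name": "An Example Editor",
            "terms": "Edited under commission.",
            "year": "2021"
        },
        {
            "role": "artist",
            "name": "Rodd Halstead",
            "terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
            "year": "2006"
        }
    ]
    for contributor in CONTRIBUTORS:
        work.add_contributor(contributor)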
Set rights
^^^^^^^^^^
This refers to the `long_rights` you can set, and which you may wish to adjust for presentation on the colophon page.
There is obviously a broad range of rights with which you can release your creative work. Here are two examples which
you can modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Example code:
.. code-block:: python
RIGHTS = [
"You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.",
"In return: You may not use the material for commercial purposes. You must give appropriate credit, provide a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits."
]
work.set_rights(RIGHTS)
Rights terms can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Set dedication
^^^^^^^^^^^^^^
Most creators have a dedication for their work in mind - usually to apologise for all the late nights and impoverishing
returns on their creative efforts.
This is optional, but you can include a dedication page. Each item in the list will be set on a different paragraph.
.. code-block:: python
dedication = [
"For those who leave.",
"For those who remain.",
"For the wings and tail.",
"But most, for her"
]
work.set_dedication(dedication)
The dedication can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Build
^^^^^
The build function is straightforward. Once everything is in place:
.. code-block:: python
work.build()
You will find your EPUB in the directory you specified.
Validate
^^^^^^^^
If you have any doubts as to whether your EPUB is standards compliant, run the validation. This tests the `epub` file
against the standards maintained by the `DAISY Consortium <http://validator.idpf.org/>`_. You can check the file online
at that link. It's the same test.
.. code-block:: python
work.validate()
Output will be `True` or `False`.
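A minimal guard after building might therefore be:
.. code-block:: python
    work.build()
    if not work.validate():
        raise RuntimeError("EPUB3 validation failed")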
"""
| 46.661644
| 551
| 0.648622
|
"""
.. module:: create
:synopsis: Import a Word `docx` document, define its metadata, cover and rights, and publish it as an EPUB3.
.. moduleauthor:: Gavin Chait <github.com/turukawa>
CreateWork
==========
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and define its
metadata, cover and publishing rights. Currently does not support `odt` since `Pandoc` seems to lose any embedded
graphics.
.. note:: This process will overwrite any existing EPUB3 file of the same name, if it already exists.
Workflow
--------
There are two main publication approaches, stateless and non-stateless. A stateless approach assumes you may be
starting each step discretely (perhaps via a set of one-time network calls), persisting working data to disk between
steps. A non-stateless approach maintains state in memory, so you can complete the process in one step.
The *stateless* publication process runs as follows:
* Set the working directory on creation,
* Define and validate the metadata required for the creative work,
* Copy the `docx` file to import into the working directory,
* Copy the cover image to import into the working directory,
* Define and add any contributors, such as cover artist,
* Update the creative work's publication rights,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective of this workflow is to support what may be a stateless process i.e. the individual steps first bring all
the data required to produce the creative work into a project directory, and then produces it. State does not need
to be maintained between steps.
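A complete stateless run, end to end, might look like the sketch below, using the `METADATA`, `CONTRIBUTOR`, `RIGHTS` and `dedication` examples defined in the sections that follow (file paths are placeholders):
.. code-block:: python
    from pathlib import Path
    from chapisha.create import CreateWork
    work = CreateWork(directory, stateless=True)
    work.set_metadata(METADATA)
    work.set_document(Path("/path/to/my-work.docx"))
    work.set_cover(Path("/path/to/cover.jpg"), contributor=CONTRIBUTOR)
    work.set_rights(RIGHTS)
    work.set_dedication(dedication)
    work.build()
    work.validate()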
The *non-stateless* process runs as follows:
* Define and validate the metadata required for the creative work,
* Supply the `docx` file as a base64 string,
* Copy the cover image as a base64 string,
* Add in an optional dedication,
* Build the creative work,
* Validate the work is EPUB3 standards compliant.
The objective in a non-stateless workflow is to minimise disruption, and store the minimum amount of information. Only
the epub itself will be saved, and then only because Pandoc does not support a memory-only epub build.
Build your work
---------------
Import **Chapisha** and create a work:
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory)
Where `directory` is the complete path to where you would like the EPUB created. If you want a stateless workflow,
set the `stateless` boolean to `True`. If you already have the `metadata` (perhaps via a web form), you can skip
several steps and pick up again for setting the files and images.
.. code-block:: python
from chapisha.create import CreateWork
work = CreateWork(directory, metadata=metadata, stateless=True)
Set metadata
^^^^^^^^^^^^
`Dublin Core <https://www.dublincore.org/specifications/dublin-core/dces/>`_ is a vocabulary of fifteen properties for
use in resource description. Four of them - `title`, `identifier`, `language` and `rights` - are required. The
`language` code is defined by the `ISO 639-1 <https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes>`_ standard
(e.g. `en` for English, or `fr` for French).
Metadata properties:
* `identifier`: UUID, DOI or ISBN of the creative work. A UUID will be generated if not included.
* `title`: Name given to the creative work.
* `language`: Specify the language of the creative work. Two letter code defined by ISO 639-1.
* `creator`: Name of a person, organisation, etc. responsible for the creation of the work. May be more than one.
* `work_uri`: The URI for your creative work.
* `contributor`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work. May be more than one.
* `date`: The publication date of the creative work. Provide in ISO format, YYYY-MM-DD.
* `subject`: The subject, or tag, of the creative work. May be more than one.
* `publisher`: Name of a person, organisation, etc. responsible for making the creative work available.
* `publisher_uri`: The URI for the publisher of your creative work.
* `rights`: A short, single-sentence statement of copyright and publication terms for the creative work, e.g. 'All rights reserved.' or 'Attribution-NonCommercial-ShareAlike 4.0 International.'
* `long_rights`: Lengthier description and information about copyright held in and over the creative work. Formatted as you wish it to appear.
* `description`: A short, single-sentence summary of the creative work.
* `long_description`: The pitch, or jacket-cover, description of the creative work.
Create a dictionary pairing these properties with their values. For example:
.. code-block:: python
METADATA = {
"identifier": "isbn:9780993191459",
"title": "Usan Abasi's Lament",
"description": "Years after the events of \"Lament for the Fallen\", Isaiah tells of the myth of Usan Abasi, who was punished by the Sky God to spend eternity in the form of a brass bowl and imprisoned within a vast termite mountain. Now the ceremony which ensures that Usan Abasi remains dormant has failed, and his ancient evil awakes. A free, stand-alone short-story set in the city of Ewuru and linking \"Lament for the Fallen\" to a forthcoming novel.",
"language": "en",
"creator": ["Gavin Chait"],
"rights": "All rights reserved.",
"long_rights": ["The right of the creator to be identified as the author of the Work has been asserted by them in accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright gives creators space to explore and provides for their long-term ability to sustain themselves from their work. Thank you for buying this work and for complying with copyright laws by not reproducing, scanning, or distributing any part of it without permission. Your support will contribute to future works by the creator."],
"publisher": "Qwyre Publishing",
"publisher_uri": "https://qwyre.com",
"work-uri": "https://gavinchait.com",
"date": "2017-07-23",
"subject": ["science fiction", "african mythology"]
}
Set the metadata:
.. code-block:: python
work.set_metadata(METADATA)
Set document
^^^^^^^^^^^^
Most writers still use `Microsoft Word <https://www.microsoft.com/en-us/microsoft-365/word>`_ as their default work tool.
There are certainly other word processors, but this is the one most people will work with if they intend to be
professionally published as publishers still expect Word `docx` files for editing and markup.
**Chapisha** will create your cover, rights and dedication pages, as well as the table of contents. Your `docx` file
must contain **only** the creative content you wish included in that table of contents. Your document must also be
correctly marked up to ensure proper chapter creation.
EPUB documents will be read on multiple and diverse electronic devices. Don't expect any page number-dependent
formatting to survive. Instead:
* Each chapter must have a title, formatted as `Heading 1`, with lower-level headings formatted for each heading type.
* There must be no title page, contents, or anything else. Chapter 1 starts at the top of the first line of the document.
* Page numbers and other page-specific information will be lost.
* Fonts or typographic formats and alignment will be lost, although `bold` and `italics` will be maintained.
* Images will be maintained.
Once the work is built you can enhance its styling. However, there are still limits in the EPUB3 standard in comparison
to a printed work.
.. code-block:: python
work.set_document(source)
Where `source` is any of the complete path to the source `docx` file, a `bytes` file import, or a `base64` string.
Set cover
^^^^^^^^^
There is, unfortunately, no standardisation on the image size, dimensions or resolution required for an EPUB. However,
a recommendation is an image (`.jpeg`, `.jpg` or `.png`) of 1,600 by 2,400 pixels, and less than 5MB in size. You will
need to create your image (or have someone create it for you) exactly as you wish it to appear on the cover. Nothing
will be added, removed, or changed.
Please also ensure you have the appropriate rights to use the image on your cover. There are more than sufficient
services providing openly-licensed, or even public domain, work for you to use.
.. note:: You can optionally add the image contributor details here, or on the next step. Do not do it in both or the contributor information will be repeated.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.set_cover(source, contributor=CONTRIBUTOR)
Where `source` is the complete path to the image file, a `bytes` file import, or a `base64` string.
Add contributors
^^^^^^^^^^^^^^^^
You may have numerous contributors you wish to acknowledge. Fields are:
* `role`: Contributor identity, based on a specified list of `artist`, `editor` or `translator`.
* `name`: Name of a person, organisation, etc. that played a secondary role - such as an editor - in the creation of the work.
* `terms`: Information about copyright held by the rights-holder in and over their contribution to the creative work. Formatted as you wish it to appear.
* `year`: The year of the contribution or publication of the contributor's work.
Example code:
.. code-block:: python
CONTRIBUTOR = {
"role": "artist",
"name": "Rodd Halstead",
"terms": "Cover image 'Red Maple Fruit (Samara)' photograph. All rights reserved. Used under licence.",
"year": "2006"
}
work.add_contributor(CONTRIBUTOR)
Call `add_contributor` as many times as you have people or organisations to acknowledge.
Set rights
^^^^^^^^^^
This refers to the `long_rights` you can set, and which you may wish to adjust for presentation on the colophon page.
There is obviously a broad range of rights with which you can release your creative work. Here are two examples which
you can modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Example code:
.. code-block:: python
RIGHTS = [
"You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.",
"In return: You may not use the material for commercial purposes. You must give appropriate credit, provide a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits."
]
work.set_rights(RIGHTS)
Rights terms can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Set dedication
^^^^^^^^^^^^^^
Most creators have a dedication for their work in mind - usually to apologise for all the late nights and impoverishing
returns on their creative efforts.
This is optional, but you can include a dedication page. Each item in the list will be set on a different paragraph.
.. code-block:: python
dedication = [
"For those who leave.",
"For those who remain.",
"For the wings and tail.",
"But most, for her"
]
work.set_dedication(dedication)
The dedication can be one line of text, or several. If several, each line must be provided as a separate term in a `list`.
Build
^^^^^
The build function is straightforward. Once everything is in place:
.. code-block:: python
work.build()
You will find your EPUB in the directory you specified.
Validate
^^^^^^^^
If you have any doubts as to whether your EPUB is standards compliant, run the validation. This tests the `epub` file
against the standards maintained by the `DAISY Consortium <http://validator.idpf.org/>`_. You can check the file online
at that link. It's the same test.
.. code-block:: python
work.validate()
Output will be `True` or `False`.
"""
import pypandoc
from bs4 import BeautifulSoup
from epubcheck import EpubCheck
from typing import Optional, Literal, List
from urllib.parse import urlparse
from pathlib import Path
import os
import re
import base64
import filetype
from ..models.metadata import WorkMetadata, Contributor
from ..models.matter import Matter, MatterPartition
from ..helpers import pages, formats, coreio as _c
from ..helpers.updatezipfile import UpdateZipFile
class CreateWork:
"""
Publish a standards compliant EPUB3 creative work from a source Microsoft Word `docx` document, and
define its metadata, cover and publishing rights.
If the EPUB file already exists, then publishing this work will overwrite it.
On instantiation, checks `directory` to see if `DEFAULT_METADATA_SETTINGS` is present, loading the required data,
or replacing with specified defaults.
"""
def __init__(self,
directory: Optional[str] = None,
metadata: Optional[WorkMetadata] = None,
stateless: bool = False):
"""
Initialise the CreateWork class.
Parameters
----------
directory: str
A directory path where you would like to save your work.
metadata: WorkMetadata
A model defined by a dictionary of terms.
stateless: bool
Whether your workflow is stateless (default False).
"""
self.stateless = stateless
self.directory = Path(directory)
if self.stateless:
_c.check_path(self.directory)
# Load metadata settings, if exists
try:
_c.check_source(self.directory / _c.DEFAULT_METADATA_SETTINGS)
self.metadata = WorkMetadata(_c.load_json(self.directory / _c.DEFAULT_METADATA_SETTINGS))
self.work_name = self.directory.name # Since will be `.../work-name/`
except FileNotFoundError:
self.metadata = None
self.work_name = None
# Construct the metadata, if it is provided
if metadata:
if isinstance(metadata, WorkMetadata):
metadata = metadata.dict()
self.set_metadata(metadata)
self.source_path = _c.get_helper_path() / "data"
# Set default cover and work bytes
self.work = None
self.cover = None
self.dedication = None
############################################################################
# GATHER WORKING DATA
############################################################################
def get_metadata_schema(self) -> dict:
"""
Return the standard Dublin Core schema permitted for the EPUB3 standard.
Returns
-------
dict
"""
return self.metadata.schema()
def set_metadata(self, metadata: WorkMetadata) -> bool:
"""
Validate metadata values for the permitted Dublin Core schema terms, along with additional metadata. The full
schema, with descriptions, and requirements, is listed by `get_metadata_schema`.
.. note:: The terms `identifier`, `title`, `creator`, `rights` and `language` are required. A random UUID will be assigned if none is provided.
Parameters
----------
metadata: WorkMetadata
A model defined by a dictionary of terms.
Returns
-------
bool
"""
# Dict snake_case fields need to be hyphenated for import
# This as a result of alias names in model
if isinstance(metadata, dict):
for k in [k for k in metadata.keys()]:
hyphenated = "-".join(k.split("_"))
metadata[hyphenated] = metadata.pop(k)
# Rename 'isodate' if it exists
if "isodate" in metadata:
metadata["date"] = metadata.pop("isodate")
# Fix "long-rights" if needed
if "long-rights" in metadata:
metadata["long-rights"] = formats.get_text_paragraphs(metadata["long-rights"])
# Create a temporary WorkMetadata model to hold updated metadata
updated_metadata = WorkMetadata(**metadata)
# And update the original data
# https://fastapi.tiangolo.com/tutorial/body-updates/#partial-updates-with-patch
if self.metadata:
self.metadata = self.metadata.copy(update=updated_metadata.dict(exclude_unset=True))
else:
self.metadata = updated_metadata
work_name = "-".join(["".join([e for e in w if e.isalnum()])
for w in self.metadata.title.lower().split(" ")])
# Set the working directory, if it isn't already, and save metadata there
if not self.work_name:
self.work_name = work_name
self.directory = self.directory / work_name
# If stateless, save the metadata to the working folder
if self.stateless:
_c.check_path(self.directory)
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
return True
def _get_validated_bytes(self,
source: [Path, bytes],
base_type: Optional[Literal["cover", "work"]] = None) -> bytes:
"""
Validate a source file, and return a bytes version.
Parameters
----------
source: Path, bytes or base64 string
Filename to open, base64 string, or bytes from an opened file
base_type: Optional, str
Must be one of "cover" or "work" for interpreting base64 mime type
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
Returns
-------
bytes
"""
if not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
if isinstance(source, Path):
try:
_c.check_source(source)
with open(source, "rb") as f:
source = f.read()
except FileNotFoundError:
e = F"`{source}` is not a valid file source."
raise FileNotFoundError(e)
if isinstance(source, str) and base_type:
# Base64 string, remove any provided mime type
source_type = re.search(_c.DEFAULT_BASE64_TYPES[base_type], source)
if source_type:
source = source.replace(source_type.group(0), "")
source = base64.b64decode(source)
if not isinstance(source, bytes):
e = F"File is not valid."
raise FileNotFoundError(e)
return source
def set_document(self, source: [Path, bytes, str]):
"""
Import source `docx` document and, if stateless, save to the working directory. If you're finding errors in
the build step, it could be you need to convert your base64 string to "utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path, bytes, or str
Filename to open, bytes from an opened file, or a base64 string
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting source document."
raise PermissionError(e)
source = self._get_validated_bytes(source, base_type = "work")
if self.stateless:
with open(self.directory / F"{self.work_name}.docx", "wb") as w:
w.write(source)
else:
self.work = source
def set_cover(self,
source: [Path, bytes],
contributor: Optional[Contributor] = None):
"""
Import cover image and, if stateless, save to the working directory, along with any rights and contributor
information. If you're finding errors in the build step, it could be you need to convert your base64 string to
"utf-8" (`source.decode("utf-8")`).
Parameters
----------
source: Path or bytes
Filename to open, including path, or bytes for file
contributor: Contributor
Optional, string indicating contributor name for cover image.
Raises
------
PermissionError: if metadata not yet validated.
FileNotFoundError: if the source is not valid.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting cover."
raise PermissionError(e)
# Cover contributor
if contributor:
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
# Cover image
source = self._get_validated_bytes(source, base_type = "cover")
if self.stateless:
kind = filetype.guess(source).extension
with open(self.directory / F"cover.{kind}", "wb") as w:
w.write(source)
_c.save_json(self.metadata.dict(by_alias=True),
self.directory / _c.DEFAULT_METADATA_SETTINGS,
overwrite=True)
else:
self.cover = source
def add_contributor(self, contributor: Contributor):
"""
Add a contributor to the list of those supporting the creation of the work. `contributor` is defined as a dict:
.. code-block:: python
contributor = {
"role": "artist",
"name": "Great Artist",
"year": "2021",
"terms": "Public Domain."
}
Parameters
----------
contributor: Contributor
Include the types of contributor who supported the creation of the work. `role`: `artist`, `editor`, `translator`.
Raises
------
PermissionError: if metadata not yet validated.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before adding contributors, or add the contributors when you set the metadata."
raise PermissionError(e)
if self.metadata.contributor is None:
self.metadata.contributor = []
self.metadata.contributor.append(Contributor(**contributor))
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
def set_dedication(self, dedication: [str, list[str]]):
"""
Set dedication page for creative work. Provide as a string, unless it is on multiple paragraphs.
Parameters
----------
dedication: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting dedication."
raise PermissionError(e)
self.dedication = pages.create_dedication_xhtml(dedication)
if self.stateless:
with open(self.directory / F"dedication.xhtml", "w") as w:
w.write(self.dedication)
def set_rights(self, rights: [str, list[str]]):
"""
Set publication `long_rights` for creative work. Provide as a string, or list of strings if it is on multiple
paragraphs.
There are multiple appropriate rights, and two examples are below. Modify as you require.
* Commercial copyright with all rights reserved:
The right of the creator to be identified as the author of the Work has been asserted by them in
accordance with the Copyright, Designs and Patents Act 1988. This creator supports copyright. Copyright
gives creators space to explore and provides for their long-term ability to sustain themselves from
their work. Thank you for buying this work and for complying with copyright laws by not reproducing,
scanning, or distributing any part of it without permission. Your support will contribute to future
works by the creator.
* Commercial copyright but licenced for distribution under Attribution-NonCommercial-ShareAlike 4.0 International (`CC BY-NC-SA 4.0 <https://creativecommons.org/licenses/by-nc-sa/4.0/>`_):
You are free to copy and redistribute the Work in any medium or format, and remix, transform, and build
upon the Work. The creator cannot revoke these freedoms as long as you follow the license terms.
In return: You may not use the material for commercial purposes. You must give appropriate credit, provide
a link to this license, and indicate if changes were made. You may do so in any reasonable manner, but not
in any way that suggests the creator endorses you or your use. If you remix, transform, or build upon the
material, you must distribute your contributions under the same license as the original. You may not apply
legal terms or technological measures that legally restrict others from doing anything the license
permits.
Parameters
----------
rights: str or list of str
Provide as a string, or list of strings for multiple paragraphs.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before setting rights."
raise PermissionError(e)
if isinstance(rights, str):
rights = [rights]
self.metadata.long_rights = rights
_c.save_json(self.metadata.dict(by_alias=True), self.directory / _c.DEFAULT_METADATA_SETTINGS, overwrite=True)
############################################################################
# BUILD CREATIVE WORK
############################################################################
def build(self):
"""
Automatically build the creative work as a standards compliant EPUB3. Save to the root directory.
"""
if not self.work_name or not self.metadata:
e = "`set_metadata` before building creative work."
raise PermissionError(e)
epub_path = self.directory.parent / F"{self.work_name}.epub"
# Generate the initial creative content using Pandoc
# pypandoc can't handle PosixPaths ...
if self.stateless:
pypandoc.convert_file(str(self.directory / F"{self.work_name}.docx"),
format="docx",
to="epub3",
outputfile=str(epub_path))
else:
# Maybe one day Pandoc can return an epub object and we won't save the interim file
pypandoc.convert_text(self.work,
format="docx",
to="epub3",
outputfile=str(epub_path))
# Generate the epub version
with UpdateZipFile(epub_path, "a") as w:
# REMOVES
REMOVES = ["EPUB/styles/stylesheet1.css", "EPUB/text/title_page.xhtml", "EPUB/nav.xhtml"]
# DEFAULT COMPONENTS
DEFAULT = [(self.source_path / "css" / "core.css", "EPUB/css/core.css"),
(self.source_path / "images" / "logo.svg", "EPUB/images/logo.svg"),
(self.source_path / "xhtml" / "onix.xml", "EPUB/onix.xml"),
(self.source_path / "xhtml" / "container.xml", "META-INF/container.xml")]
for default_file, write_file in DEFAULT:
w.write(default_file, write_file)
# DEFAULT FONTS
for f in os.listdir(self.source_path / "fonts"):
w.write(self.source_path / "fonts" / f, F"EPUB/fonts/{f}")
# ADD titlepage.xhtml
w.writestr("EPUB/text/titlepage.xhtml", pages.create_titlepage_xhtml(self.metadata))
# ADD colophon.xhtml
w.writestr("EPUB/text/colophon.xhtml", pages.create_colophon_xhtml(self.metadata))
# ADD cover.img
if self.stateless:
for image_path in [self.directory / F"cover.{t}" for t in ["jpg", "jpeg", "png", "gif", "svg"]]:
if image_path.exists():
w.write(image_path, F"EPUB/images/{image_path.name}")
elif self.cover:
t = filetype.guess(self.cover).extension
w.writestr(F"EPUB/images/cover.{t}", self.cover)
# GET DEDICATION and CHAPTERS
spine = []
# check if the path to dedication exists, if it does, add it to the work and spine
if (self.directory / "dedication.xhtml").exists() or self.dedication:
if self.dedication:
w.writestr("EPUB/text/dedication.xhtml", self.dedication)
else:
w.write(self.directory / "dedication.xhtml", "EPUB/text/dedication.xhtml")
spine = [Matter(partition="frontmatter", content="dedication", title="Dedication")]
CHAPTERS = [f for f in w.namelist() if f.startswith("EPUB/text/ch")]
CHAPTERS.sort()
self.metadata.word_count = 0
for chapter in CHAPTERS:
file_as = F"EPUB/text/chapter-{chapter.split('.')[0][-1]}.xhtml"
try:
chapter_xml = w.read(chapter)
except KeyError:
continue
if file_as != chapter:
# If delete and then re-add same file, causes ZipFile confusion
REMOVES.append(chapter)
# Restructure chapter xml into standard format
chapter_xml = pages.restructure_chapter(chapter_xml)
chapter_title = chapter_xml.title.string
# Count the words (XHTML and HTML treated differently by BeautifulSoup, so first extract `section`)
words = BeautifulSoup(str(chapter_xml.section), "lxml").get_text()
self.metadata.word_count += len(words.replace("\n", " ").replace(" ", " ").strip().split())
w.writestr(file_as, str(chapter_xml))
spine.append(Matter(partition=MatterPartition.body, title=chapter_title))
# PANDOC MAY STILL ADD IMAGES FOUND IN THE WORK WHICH WE NEED TO DISCOVER AND ADD TO THE MANIFEST
# NOTE, these are not only to be added to the manifest, but the folder renamed as well
image_manifest = [f.replace("EPUB/", "") for f in w.namelist() if f.startswith("EPUB/images/")]
for img in [f for f in w.namelist() if f.startswith("EPUB/media/")]:
REMOVES.append(img)
new_img = img.replace("/media/", "/images/")
try:
old_img = w.read(img)
w.writestr(new_img, old_img)
except KeyError:
continue
image_manifest.append(new_img.replace("EPUB/", ""))
# ADD content.opf
w.writestr("EPUB/content.opf", pages.create_content_opf(self.metadata, image_manifest, spine))
# ADD toc.ncx
w.writestr("EPUB/toc.ncx", pages.create_toc_ncx(self.metadata, spine))
# ADD toc.xhtml
w.writestr("EPUB/toc.xhtml", pages.create_toc_xhtml(self.metadata, spine))
# PERFORM REMOVES
for remove in REMOVES:
try:
w.remove_file(remove)
except KeyError:
continue
def validate(self) -> bool:
"""
Validate the creative work as a standards compliant EPUB3.
"""
epub_path = self.directory.parent / F"{self.work_name}.epub"
_c.check_source(epub_path)
result = EpubCheck(epub_path)
return result.valid
| 0
| 0
| 0
| 19,679
| 0
| 0
| 0
| 131
| 333
|
f014873bcb18c5403755ad32e29f145b9b136a1d
| 3,167
|
py
|
Python
|
test/test_tarrecords.py
|
NVlabs/dlinputs
|
fbce290b7b8c5f3b00e9197c55a13b0a5a0f7953
|
[
"BSD-3-Clause"
] | 38
|
2017-10-18T05:44:25.000Z
|
2021-06-20T02:14:13.000Z
|
test/test_tarrecords.py
|
NVlabs/dlinputs
|
fbce290b7b8c5f3b00e9197c55a13b0a5a0f7953
|
[
"BSD-3-Clause"
] | 1
|
2017-12-07T20:14:18.000Z
|
2018-05-07T01:00:34.000Z
|
test/test_tarrecords.py
|
NVlabs/dlinputs
|
fbce290b7b8c5f3b00e9197c55a13b0a5a0f7953
|
[
"BSD-3-Clause"
] | 10
|
2018-01-07T15:19:17.000Z
|
2020-12-01T20:42:37.000Z
|
from __future__ import unicode_literals
from imp import reload
from dlinputs import tarrecords
reload(tarrecords)
# get_ipython().system(u'tar -ztvf testdata/imagenet-000000.tgz | sed 7q')
# get_ipython().system(u'tar xvf testdata/imagenet-000000.tgz 10.png')
# get_ipython().system(u'file 10.png')
| 31.989899
| 79
| 0.641932
|
from __future__ import unicode_literals
import glob
import pdb
from builtins import range
from imp import reload
from io import open
import numpy as np
from dlinputs import tarrecords
reload(tarrecords)
def test_tardata():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tardata(stream)
samples = list(data)
assert samples[0] == ('10.cls', b'304'), samples[0]
assert {2} == set([len(x) for x in samples])
def test_group_by_keys():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tardata(stream)
data = tarrecords.group_by_keys()(data)
samples = list(data)
keys = list(samples[0].keys())
assert 'png' in keys
assert 'cls' in keys
# get_ipython().system(u'tar -ztvf testdata/imagenet-000000.tgz | sed 7q')
# get_ipython().system(u'tar xvf testdata/imagenet-000000.tgz 10.png')
# get_ipython().system(u'file 10.png')
def test_decoder():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tardata(stream)
data = tarrecords.group_by_keys()(data)
data = tarrecords.decoder()(data)
samples = list(data)
# print samples[0].keys()
keys = list(samples[0].keys())
assert 'png' in keys
assert 'cls' in keys
def test_tariterator1():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tariterator1(stream)
samples = list(data)
assert len(samples) == 47
assert samples[0]["__key__"] == "10", samples[0]["__key__"]
assert set(samples[3].keys()) == set(
"__key__ png cls xml wnid".split()), list(samples[3].keys())
assert samples[-1]["png"].shape == (400, 300, 3)
def test_tariterator():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tariterator(stream)
samples = list(data)
assert len(samples) == 47
for i in range(len(samples)):
assert samples[i]["png"].dtype == np.dtype(
'f'), samples[i]["png"].dtype
assert np.amin(samples[i]["png"]) >= 0, np.amin(samples[i]["png"])
assert np.amin(samples[i]["png"]) <= 1, np.amax(samples[i]["png"])
assert samples[0]["__key__"] == "10"
assert set(samples[3].keys()) == set(
"__key__ __source__ cls png xml wnid".split()), list(samples[3].keys())
assert samples[-1]["png"].shape == (400, 300, 3)
def test_TarWriter():
stream = open("testdata/imagenet-000000.tgz", mode='rb')
data = tarrecords.tariterator(stream, decode=False)
samples = list(data)
stream = open("/tmp/test.tgz", "wb")
sink = tarrecords.TarWriter(stream, encode=False)
for sample in samples:
sink.write(sample)
sink.close()
stream.close()
# Check if test.tgz was created
assert len(glob.glob("/tmp/test.tgz")) == 1
stream = open("/tmp/test.tgz", mode='rb')
data = tarrecords.tariterator(stream)
samples = list(data)
assert len(samples) == 47
# assert samples[0]["__key__"].decode() == "10"
assert set(samples[3].keys()) == set(
"__key__ __source__ cls png xml wnid".split()), list(samples[3].keys())
assert samples[-1]["png"].shape == (400, 300, 3)
| 0
| 0
| 0
| 0
| 0
| 2,631
| 0
| -21
| 250
|
6a64971723ab828855f7055bd04df9ded8f9d292
| 54,054
|
py
|
Python
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-hacluster-0/charm/hooks/utils.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 17
|
2016-07-07T23:39:17.000Z
|
2020-05-06T14:03:54.000Z
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-hacluster-0/charm/hooks/utils.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 111
|
2021-10-01T18:18:17.000Z
|
2022-03-29T12:23:20.000Z
|
tests/unit/fake_data_root/vault/var/lib/juju/agents/unit-vault-hacluster-0/charm/hooks/utils.py
|
KellenRenshaw/hotsos
|
e3fc51ab7f8af606a5846a3486a7fda23d761583
|
[
"Apache-2.0"
] | 20
|
2016-11-03T04:04:09.000Z
|
2021-01-04T20:40:43.000Z
|
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import pcmk
import json
import os
import re
import subprocess
import socket
import time
import xml.etree.ElementTree as ET
import itertools
from charmhelpers.core.strutils import (bool_from_string)
from charmhelpers.core.hookenv import (log, TRACE, DEBUG, ERROR, INFO, WARNING, leader_get, leader_set, relation_get, relation_set, related_units, relation_ids, config, unit_get, status_set)
from charmhelpers.core import unitdata
from charmhelpers.contrib.openstack.utils import (set_unit_paused, clear_unit_paused, is_unit_paused_set, is_unit_upgrading_set)
from charmhelpers.contrib.openstack.ha.utils import (assert_charm_supports_dns_ha)
from charmhelpers.core.host import (mkdir, rsync, service_start, service_stop, service_running, write_file, lsb_release, init_is_systemd, CompareHostReleases)
from charmhelpers.fetch import (apt_install, add_source, apt_update)
from charmhelpers.contrib.network import ip as utils
TEMPLATES_DIR = 'templates'
COROSYNC_CONF = '/etc/corosync/corosync.conf'
COROSYNC_DEFAULT = '/etc/default/corosync'
COROSYNC_AUTHKEY = '/etc/corosync/authkey'
COROSYNC_HACLUSTER_ACL_DIR = '/etc/corosync/uidgid.d'
COROSYNC_HACLUSTER_ACL = COROSYNC_HACLUSTER_ACL_DIR + '/hacluster'
COROSYNC_CONF_FILES = [
COROSYNC_DEFAULT,
COROSYNC_AUTHKEY,
COROSYNC_CONF,
COROSYNC_HACLUSTER_ACL,
]
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']
PCMKR_CONFIG_DIR = '/etc/pacemaker'
PCMKR_AUTHKEY = PCMKR_CONFIG_DIR + '/authkey'
PCMKR_MAX_RETRIES = 3
PCMKR_SLEEP_SECS = 5
SYSTEMD_OVERRIDES_DIR = '/etc/systemd/system/{}.service.d'
SYSTEMD_OVERRIDES_FILE = '{}/overrides.conf'
MAAS_DNS_CONF_DIR = '/etc/maas_dns'
STONITH_CONFIGURED = 'stonith-configured'
def nulls(data):
"""Returns keys of values that are null (but not bool)"""
return [k for k in data.keys()
if not isinstance(data[k], bool) and not data[k]]
def emit_systemd_overrides_file():
"""Generate the systemd overrides file
With Start and Stop timeout values
Note: (David Ames) Bug#1654403 Work around
May be removed if bug is resolved
If timeout value is set to -1 pass infinity
"""
if not init_is_systemd():
return
stop_timeout = int(config('service_stop_timeout'))
if stop_timeout < 0:
stop_timeout = 'infinity'
start_timeout = int(config('service_start_timeout'))
if start_timeout < 0:
start_timeout = 'infinity'
systemd_overrides_context = {'service_stop_timeout': stop_timeout,
'service_start_timeout': start_timeout,
}
for service in ['corosync', 'pacemaker']:
overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
if not os.path.isdir(overrides_dir):
os.mkdir(overrides_dir)
write_file(path=overrides_file,
content=render_template('systemd-overrides.conf',
systemd_overrides_context))
# Update systemd with the new information
subprocess.check_call(['systemctl', 'daemon-reload'])
def get_pcmkr_key():
"""Return the pacemaker auth key"""
return config('pacemaker_key') or config('corosync_key')
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) < "trusty":
msg = "IPv6 is not supported in the charms for Ubuntu " \
"versions less than Trusty 14.04"
status_set('blocked', msg)
raise Exception(msg)
def get_ipv6_addr():
"""Exclude any ip addresses configured or managed by corosync."""
excludes = []
for rid in relation_ids('ha'):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
for res in resources.values():
if 'ocf:heartbeat:IPv6addr' in res:
res_params = parse_data(rid, unit, 'resource_params')
res_p = res_params.get(res)
if res_p:
for k, v in res_p.values():
if utils.is_ipv6(v):
log("Excluding '%s' from address list" % v,
level=DEBUG)
excludes.append(v)
return utils.get_ipv6_addr(exc_list=excludes)[0]
def get_node_flags(flag):
"""Nodes which have advertised the given flag.
:param flag: Flag to check peers relation data for.
:type flag: str
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
hosts = []
if config('prefer-ipv6'):
hosts.append(get_ipv6_addr())
else:
hosts.append(unit_get('private-address'))
for relid in relation_ids('hanode'):
for unit in related_units(relid):
if relation_get(flag, rid=relid, unit=unit):
hosts.append(relation_get('private-address',
rid=relid,
unit=unit))
hosts.sort()
return hosts
def get_cluster_nodes():
"""Nodes which have advertised that they are ready to join the cluster.
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
return get_node_flags('ready')
def get_member_ready_nodes():
"""List of nodes which have advertised that they have joined the cluster.
:returns: List of IPs of nodes that have joined the cluster.
:rtype: List
"""
return get_node_flags('member_ready')
def parse_data(relid, unit, key):
"""Helper to detect and parse json or ast based relation data"""
_key = 'json_{}'.format(key)
data = relation_get(_key, unit, relid) or relation_get(key, unit, relid)
if data:
try:
return json.loads(data)
except (TypeError, ValueError):
return ast.literal_eval(data)
return {}
def configure_monitor_host():
"""Configure extra monitor host for better network failure detection"""
log('Checking monitor host configuration', level=DEBUG)
monitor_host = config('monitor_host')
if monitor_host:
if not pcmk.crm_opt_exists('ping'):
log('Implementing monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
monitor_interval = config('monitor_interval')
cmd = ('crm -w -F configure primitive ping '
'ocf:pacemaker:ping params host_list="%s" '
'multiplier="100" op monitor interval="%s" ' %
(monitor_host, monitor_interval))
pcmk.commit(cmd)
cmd = ('crm -w -F configure clone cl_ping ping '
'meta interleave="true"')
pcmk.commit(cmd)
else:
log('Reconfiguring monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
cmd = ('crm -w -F resource param ping set host_list="%s"' %
monitor_host)
else:
if pcmk.crm_opt_exists('ping'):
log('Disabling monitor host configuration', level=DEBUG)
pcmk.commit('crm -w -F resource stop ping')
pcmk.commit('crm -w -F configure delete ping')
def configure_cluster_global(failure_timeout, cluster_recheck_interval=60):
"""Configure global cluster options
:param failure_timeout: Duration in seconds (measured from the most recent
failure) to wait before resetting failcount to 0.
:type failure_timeout: int
:param cluster_recheck_interval: Duration in seconds for the polling
interval at which the cluster checks for
changes in the resource parameters,
constraints or other cluster options.
:type cluster_recheck_interval: int
"""
log('Applying global cluster configuration', level=DEBUG)
# NOTE(lathiat) quorum in a two-node scenario is handled by
# corosync two_node=1. In this case quorum is required for
# initial cluster startup but not if a node was previously in
# contact with the full cluster.
log('Configuring no-quorum-policy to stop', level=DEBUG)
cmd = "crm configure property no-quorum-policy=stop"
pcmk.commit(cmd)
cmd = ('crm configure rsc_defaults $id="rsc-options" '
'resource-stickiness="100" '
'failure-timeout={}'.format(failure_timeout))
pcmk.commit(cmd)
log('Configuring cluster-recheck-interval to {} seconds'.format(
cluster_recheck_interval), level=DEBUG)
cmd = "crm configure property cluster-recheck-interval={}".format(
cluster_recheck_interval)
pcmk.commit(cmd)
def remove_legacy_maas_stonith_resources():
"""Remove maas stoniths resources using the old name."""
stonith_resources = pcmk.crm_maas_stonith_resource_list()
for resource_name in stonith_resources:
pcmk.commit(
'crm -w -F resource stop {}'.format(resource_name))
pcmk.commit(
'crm -w -F configure delete {}'.format(resource_name))
def configure_null_stonith_resource(stonith_hostnames):
"""Create null stonith resource for the given hostname.
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote node as.
:type stonith_hostname: List
"""
ctxt = {
'stonith_plugin': 'stonith:null',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-null',
'resource_params': (
"params hostlist='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
# NOTE (gnuoy): Not enabling the global stonith-enabled setting as it
# does not make sense to have stonith-enabled when the only resources
# are null resources, so defer enabling stonith-enabled to the 'real'
# stonith resources.
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def configure_maas_stonith_resource(stonith_hostnames):
"""Create maas stonith resource for the given hostname.
:param stonith_hostnames: The hostnames that the stonith management system
refers to the remote node as.
:type stonith_hostname: List
"""
ctxt = {
'stonith_plugin': 'stonith:external/maas',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-maas',
'url': config('maas_url'),
'apikey': config('maas_credentials'),
'resource_params': (
"params url='{url}' apikey='{apikey}' hostnames='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def enable_stonith():
"""Enable stonith via the global property stonith-enabled.
:raises: EnableStonithFailed
"""
log('Enabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=true",
failure_is_fatal=True)
except subprocess.CalledProcessError as e:
raise EnableStonithFailed(e)
def disable_stonith(failure_is_fatal=True):
"""Disable stonith via the global property stonith-enabled.
:param failure_is_fatal: Whether to raise exception if command fails.
:type failure_is_fatal: bool
:raises: DisableStonithFailed
"""
log('Disabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=false",
failure_is_fatal=failure_is_fatal)
except subprocess.CalledProcessError as e:
raise DisableStonithFailed(e)
def get_ip_addr_from_resource_params(params):
"""Returns the IP address in the resource params provided
:return: the IP address in the params or None if not found
"""
reg_ex = r'.* ip_address="([a-fA-F\d\:\.]+)".*'
res = re.search(reg_ex, params)
return res.group(1) if res else None
def need_resources_on_remotes():
"""Whether to run resources on remote nodes.
Check the 'enable-resources' setting across the remote units. If it is
absent or inconsistent then raise a ValueError.
:returns: Whether to run resources on remote nodes
:rtype: bool
:raises: ValueError
"""
responses = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
data = parse_data(relid, unit, 'enable-resources')
# parse_data returns {} if key is absent.
if type(data) is bool:
responses.append(data)
if len(set(responses)) == 1:
run_resources_on_remotes = responses[0]
else:
msg = "Inconsistent or absent enable-resources setting {}".format(
responses)
log(msg, level=WARNING)
raise ValueError(msg)
return run_resources_on_remotes
def set_cluster_symmetry():
"""Set the cluster symmetry.
By default the cluster is an Opt-out cluster (equivalent to
symmetric-cluster=true); this means that any resource can run anywhere
unless a node explicitly opts out. When using pacemaker-remotes there may
be hundreds of nodes and if they are not prepared to run resources the
cluster should be switched to an Opt-in cluster.
"""
try:
symmetric = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate desired symmetric-cluster setting'
log(msg, level=WARNING)
return
log('Configuring symmetric-cluster: {}'.format(symmetric), level=DEBUG)
cmd = "crm configure property symmetric-cluster={}".format(
str(symmetric).lower())
pcmk.commit(cmd, failure_is_fatal=True)
def add_score_location_rule(res_name, node, location_score):
"""Add or update a location rule that uses a score.
:param res_name: Resource that this location rule controls.
:type res_name: str
:param node: Node that this location rule relates to.
:type node: str
:param location_score: The score to give this location.
:type location_score: int
"""
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
pcmk.crm_update_location(
loc_constraint_name,
res_name,
location_score,
node)
def add_location_rules_for_local_nodes(res_name):
"""Add location rules for running resource on local nodes.
Add location rules allowing the given resource to run on local nodes (eg
not remote nodes).
:param res_name: Resource name to create location rules for.
:type res_name: str
"""
for node in pcmk.list_nodes():
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
if not pcmk.crm_opt_exists(loc_constraint_name):
cmd = 'crm -w -F configure location {} {} 0: {}'.format(
loc_constraint_name,
res_name,
node)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
def add_location_rules_for_pacemaker_remotes(res_names):
"""Add location rules for pacemaker remote resources on local nodes.
Add location rules allowing the pacemaker remote resource to run on a local
node. Use location score rules to spread resources out.
:param res_names: Pacemaker remote resource names.
:type res_names: List[str]
"""
res_names = sorted(res_names)
nodes = sorted(pcmk.list_nodes())
prefered_nodes = list(zip(res_names, itertools.cycle(nodes)))
for res_name in res_names:
for node in nodes:
location_score = 0
if (res_name, node) in prefered_nodes:
location_score = 200
add_score_location_rule(
res_name,
node,
location_score)
def configure_pacemaker_remote(remote_hostname, remote_ip):
"""Create a resource corresponding to the pacemaker remote node.
:param remote_hostname: Remote hostname used for registering remote node.
:type remote_hostname: str
:param remote_ip: Remote IP used for registering remote node.
:type remote_ip: str
:returns: Name of resource for pacemaker remote node.
:rtype: str
"""
resource_name = remote_hostname
if not pcmk.is_resource_present(resource_name):
cmd = (
"crm configure primitive {} ocf:pacemaker:remote "
"params server={} reconnect_interval=60 "
"op monitor interval=30s").format(resource_name,
remote_ip)
pcmk.commit(cmd, failure_is_fatal=True)
return resource_name
def cleanup_remote_nodes(remote_nodes):
"""Cleanup pacemaker remote resources
Remove all status records of the resource and
probe the node afterwards.
:param remote_nodes: List of resource names associated with remote nodes
:type remote_nodes: list
"""
for res_name in remote_nodes:
cmd = 'crm resource cleanup {}'.format(res_name)
# Resource cleanups seem to fail occasionally even on healthy nodes
        # Bug #1822962. Given this cleanup task is just housekeeping, log
# the message if a failure occurs and move on.
if pcmk.commit(cmd, failure_is_fatal=False) == 0:
log(
'Cleanup of resource {} succeeded'.format(res_name),
level=DEBUG)
else:
log(
'Cleanup of resource {} failed'.format(res_name),
level=WARNING)
def configure_pacemaker_remote_stonith_resource():
"""Create a maas stonith resource for the pacemaker-remotes.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = []
stonith_resource = {}
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
stonith_hostname = parse_data(relid, unit, 'stonith-hostname')
if stonith_hostname:
hostnames.append(stonith_hostname)
if hostnames:
stonith_resource = configure_maas_stonith_resource(hostnames)
return stonith_resource
def configure_peer_stonith_resource():
"""Create a null stonith resource for lxd containers.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = [get_hostname()]
stonith_resource = {}
for relid in relation_ids('hanode'):
for unit in related_units(relid):
stonith_hostname = relation_get('hostname', unit, relid)
if stonith_hostname:
hostnames.append(stonith_hostname)
stonith_resource = configure_null_stonith_resource(hostnames)
return stonith_resource
def configure_pacemaker_remote_resources():
"""Create resources corresponding to the pacemaker remote nodes.
Create resources, location constraints and stonith resources for pacemaker
remote node.
:returns: resource dict {res_name: res_type, ...}
:rtype: dict
"""
log('Checking for pacemaker-remote nodes', level=DEBUG)
resources = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
remote_hostname = parse_data(relid, unit, 'remote-hostname')
remote_ip = parse_data(relid, unit, 'remote-ip')
if remote_hostname:
resource_name = configure_pacemaker_remote(
remote_hostname,
remote_ip)
resources.append(resource_name)
cleanup_remote_nodes(resources)
return {name: 'ocf:pacemaker:remote' for name in resources}
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
"""Add location rules as needed for resources, clones and groups
    If remote nodes should not run resources then add location rules to
    enable them on local nodes.
:param resources: Resource definitions
:type resources: dict
:param clones: Clone definitions
:type clones: dict
:param groups: Group definitions
:type groups: dict
"""
clones = clones or {}
groups = groups or {}
try:
resources_on_remote = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate whether resources should run on remotes'
log(msg, level=WARNING)
return
if resources_on_remote:
msg = ('Resources are permitted to run on remotes, no need to create '
'location constraints')
log(msg, level=WARNING)
return
pacemaker_remotes = []
for res_name, res_type in resources.items():
if res_name not in list(clones.values()) + list(groups.values()):
if res_type == 'ocf:pacemaker:remote':
pacemaker_remotes.append(res_name)
else:
add_location_rules_for_local_nodes(res_name)
add_location_rules_for_pacemaker_remotes(pacemaker_remotes)
for cl_name in clones:
add_location_rules_for_local_nodes(cl_name)
# Limit clone resources to only running on X number of nodes where X
# is the number of local nodes. Otherwise they will show as offline
# on the remote nodes.
node_count = len(pcmk.list_nodes())
cmd = ('crm_resource --resource {} --set-parameter clone-max '
'--meta --parameter-value {}').format(cl_name, node_count)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
for grp_name in groups:
add_location_rules_for_local_nodes(grp_name)
def restart_corosync_on_change():
"""Simple decorator to restart corosync if any of its config changes"""
return wrap
def try_pcmk_wait():
"""Try pcmk.wait_for_pcmk()
Log results and set status message
"""
try:
pcmk.wait_for_pcmk()
log("Pacemaker is ready", level=TRACE)
except pcmk.ServicesNotUp as e:
status_msg = "Pacemaker is down. Please manually start it."
status_set('blocked', status_msg)
full_msg = "{} {}".format(status_msg, e)
log(full_msg, ERROR)
raise pcmk.ServicesNotUp(full_msg)
def services_running():
"""Determine if both Corosync and Pacemaker are running
Both from the operating system perspective and with a functional test
@returns boolean
"""
pacemaker_status = service_running("pacemaker")
corosync_status = service_running("corosync")
log("Pacemaker status: {}, Corosync status: {}"
"".format(pacemaker_status, corosync_status),
level=DEBUG)
if not (pacemaker_status and corosync_status):
# OS perspective
return False
# Functional test of pacemaker. This will raise if pacemaker doesn't get
# fully ready in time:
pcmk.wait_for_pcmk()
return True
def validated_restart_corosync(retries=10):
"""Restart and validate Corosync and Pacemaker are in fact up and running.
@param retries: number of attempts to restart the services before giving up
@raises pcmk.ServicesNotUp if after retries services are still not up
"""
for restart in range(retries):
try:
if restart_corosync():
log("Corosync and Pacemaker are validated as up and running.",
INFO)
return
else:
log("Corosync or Pacemaker not validated as up yet, retrying",
WARNING)
except pcmk.ServicesNotUp:
log("Pacemaker failed to start, retrying", WARNING)
continue
msg = ("Corosync and/or Pacemaker failed to restart after {} retries"
"".format(retries))
log(msg, ERROR)
status_set('blocked', msg)
raise pcmk.ServicesNotUp(msg)
def validate_dns_ha():
"""Validate the DNS HA
Assert the charm will support DNS HA
Check MAAS related configuration options are properly set
:raises MAASConfigIncomplete: if maas_url and maas_credentials are not set
"""
# Will raise an exception if unable to continue
assert_charm_supports_dns_ha()
if config('maas_url') and config('maas_credentials'):
return True
else:
msg = ("DNS HA is requested but the maas_url or maas_credentials "
"settings are not set")
raise MAASConfigIncomplete(msg)
def setup_maas_api():
"""Install MAAS PPA and packages for accessing the MAAS API.
"""
add_source(config('maas_source'), config('maas_source_key'))
apt_update(fatal=True)
apt_install('python3-maas-client', fatal=True)
def setup_ocf_files():
    """Set up OCF resource agent files
"""
# TODO (thedac) Eventually we want to package the OCF files.
# Bundle with the charm until then.
mkdir('/usr/lib/ocf/resource.d/ceph')
mkdir('/usr/lib/ocf/resource.d/maas')
# Xenial corosync is not creating this directory
mkdir('/etc/corosync/uidgid.d')
rsync('files/ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd')
rsync('files/ocf/maas/dns', '/usr/lib/ocf/resource.d/maas/dns')
rsync('files/ocf/maas/maas_dns.py', '/usr/lib/heartbeat/maas_dns.py')
rsync('files/ocf/maas/maasclient/', '/usr/lib/heartbeat/maasclient/')
rsync(
'files/ocf/maas/maas_stonith_plugin.py',
'/usr/lib/stonith/plugins/external/maas')
def write_maas_dns_address(resource_name, resource_addr):
"""Writes the specified IP address to the resource file for MAAS dns.
:param resource_name: the name of the resource the address belongs to.
This is the name of the file that will be written in /etc/maas_dns.
:param resource_addr: the IP address for the resource. This will be
written to the resource_name file.
"""
mkdir(MAAS_DNS_CONF_DIR)
write_file(os.path.join(MAAS_DNS_CONF_DIR, resource_name),
content=resource_addr)
def needs_maas_dns_migration():
"""Determines if the MAAS DNS ocf resources need migration.
:return: True if migration is necessary, False otherwise.
"""
try:
subprocess.check_call(['grep', 'OCF_RESOURCE_INSTANCE',
'/usr/lib/ocf/resource.d/maas/dns'])
return True
except subprocess.CalledProcessError:
# check_call will raise an exception if grep doesn't find the string
return False
def is_in_standby_mode(node_name):
"""Check if node is in standby mode in pacemaker
@param node_name: The name of the node to check
@returns boolean - True if node_name is in standby mode
"""
out = (subprocess
.check_output(['crm', 'node', 'status', node_name])
.decode('utf-8'))
root = ET.fromstring(out)
standby_mode = False
for nvpair in root.iter('nvpair'):
if (nvpair.attrib.get('name') == 'standby' and
nvpair.attrib.get('value') == 'on'):
standby_mode = True
return standby_mode
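# NOTE (illustrative sketch, not part of the charm): is_in_standby_mode()
# scans the XML emitted by `crm node status` for a standby nvpair. The
# snippet below runs against a hypothetical, minimal piece of such output.
def _example_standby_xml_parse():
    out = ('<node id="1001" uname="node-1">'
           '<instance_attributes id="nodes-1001">'
           '<nvpair id="nodes-1001-standby" name="standby" value="on"/>'
           '</instance_attributes></node>')
    root = ET.fromstring(out)
    assert any(nv.attrib.get('name') == 'standby' and
               nv.attrib.get('value') == 'on'
               for nv in root.iter('nvpair'))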
def get_hostname():
"""Return the hostname of this unit
@returns hostname
"""
return socket.gethostname()
def enter_standby_mode(node_name, duration='forever'):
"""Put this node into standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'standby', node_name, duration])
def leave_standby_mode(node_name):
"""Take this node out of standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'online', node_name])
def node_has_resources(node_name):
"""Check if this node is running resources
@param node_name: The name of the node to check
@returns boolean - True if node_name has resources
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
has_resources = False
for resource in root.iter('resource'):
for child in resource:
if child.tag == 'node' and child.attrib.get('name') == node_name:
has_resources = True
return has_resources
def node_is_dc(node_name):
"""Check if this node is the designated controller.
@param node_name: The name of the node to check
@returns boolean - True if node_name is the DC
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
for current_dc in root.iter("current_dc"):
if current_dc.attrib.get('name') == node_name:
return True
return False
def set_unit_status():
"""Set the workload status for this unit
@returns None
"""
status_set(*assess_status_helper())
def resume_unit():
"""Resume services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
leave_standby_mode(node_name)
if is_in_standby_mode(node_name):
messages.append("Node still in standby mode")
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
else:
clear_unit_paused()
set_unit_status()
def pause_unit():
"""Pause services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
enter_standby_mode(node_name)
if not is_in_standby_mode(node_name):
messages.append("Node not in standby mode")
# some resources may take some time to be migrated out from the node. So 3
    # retries are made with a 5-second wait between each one.
i = 0
ready = False
has_resources = False
while i < PCMKR_MAX_RETRIES and not ready:
if node_has_resources(node_name):
has_resources = True
i += 1
time.sleep(PCMKR_SLEEP_SECS)
else:
ready = True
has_resources = False
if has_resources:
messages.append("Resources still running on unit")
status, message = assess_status_helper()
# New status message will indicate the resource is not running
if status != 'active' and 'not running' not in message:
messages.append(message)
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
else:
set_unit_paused()
status_set("maintenance",
"Paused. Use 'resume' action to resume normal service.")
def assess_status_helper():
"""Assess status of unit
@returns status, message - status is workload status and message is any
corresponding messages
"""
if config('stonith_enabled') in ['true', 'True', True]:
return(
'blocked',
'stonith_enabled config option is no longer supported')
if config('no_quorum_policy'):
if config('no_quorum_policy').lower() not in ['ignore', 'freeze',
'stop', 'suicide']:
return(
'blocked',
'Invalid no_quorum_policy specified')
if is_unit_upgrading_set():
return ("blocked",
"Ready for do-release-upgrade. Set complete when finished")
if is_waiting_unit_series_upgrade_set():
return ("blocked",
"HA services shutdown, peers are ready for series upgrade")
if is_unit_paused_set():
return ("maintenance",
"Paused. Use 'resume' action to resume normal service.")
node_count = int(config('cluster_count'))
status = 'active'
message = 'Unit is ready and clustered'
try:
try_pcmk_wait()
except pcmk.ServicesNotUp:
message = 'Pacemaker is down'
status = 'blocked'
for relid in relation_ids('hanode'):
if len(related_units(relid)) + 1 < node_count:
status = 'blocked'
message = ("Insufficient peer units for ha cluster "
"(require {})".format(node_count))
# if the status was not changed earlier, we verify the maintenance status
try:
if status == 'active':
prop = pcmk.get_property('maintenance-mode').strip()
except pcmk.PropertyNotFound:
        # the property is not in the output of 'crm configure show xml', so we use
# the default value for this property. For crmsh>=2.2.0 the default
# value is automatically provided by show-property or get-property.
prop = 'false'
if (status == 'active' and prop == 'true'):
# maintenance mode enabled in pacemaker
status = 'maintenance'
message = 'Pacemaker in maintenance mode'
for resource in get_resources().keys():
if not pcmk.is_resource_present(resource):
return ("waiting",
"Resource: {} not yet configured".format(resource))
if not pcmk.crm_res_running_on_node(resource, get_hostname()):
return ("blocked",
"Resource: {} not running".format(resource))
return status, message
def ocf_file_exists(res_name, resources,
                    RES_ROOT='/usr/lib/ocf/resource.d'):
    """Determine whether the OCF file exists, allowing for multiple OCF
    files with the same name in different directories.
@param res_name: The name of the ocf resource to check
@param resources: ocf resources
@return: boolean - True if the ocf resource exists
"""
res_type = None
for key, val in resources.items():
if res_name == key:
if len(val.split(':')) > 2:
res_type = val.split(':')[1]
ocf_name = res_name.replace('res_', '').replace('_', '-')
ocf_file = os.path.join(RES_ROOT, res_type, ocf_name)
if os.path.isfile(ocf_file):
return True
return False
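# NOTE (illustrative sketch, not part of the charm): the lookup above derives
# the on-disk path from the resource name and its 'ocf:<provider>:<agent>'
# type. A standalone example with a hypothetical resource definition:
def _example_ocf_lookup_path():
    res_name = 'res_nova_consoleauth'
    res_type = 'ocf:openstack:nova-consoleauth'.split(':')[1]
    ocf_name = res_name.replace('res_', '').replace('_', '-')
    assert os.path.join('/usr/lib/ocf/resource.d', res_type, ocf_name) == \
        '/usr/lib/ocf/resource.d/openstack/nova-consoleauth'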
def kill_legacy_ocf_daemon_process(res_name):
"""Kill legacy ocf daemon process
@param res_name: The name of the ocf process to kill
"""
ocf_name = res_name.replace('res_', '').replace('_', '-')
reg_expr = r'([0-9]+)\s+[^0-9]+{}'.format(ocf_name)
cmd = ['ps', '-eo', 'pid,cmd']
ps = subprocess.check_output(cmd).decode('utf-8')
res = re.search(reg_expr, ps, re.MULTILINE)
if res:
pid = res.group(1)
subprocess.call(['sudo', 'kill', '-9', pid])
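# NOTE (illustrative sketch, not part of the charm): the PID is captured by
# matching digits followed by a digit-free command string that ends in the
# ocf name. The `ps -eo pid,cmd` line below is hypothetical:
def _example_legacy_ocf_pid_match():
    ps_line = ' 1234 /usr/bin/python /usr/lib/ocf/resource.d/maas/maas-dns'
    res = re.search(r'([0-9]+)\s+[^0-9]+{}'.format('maas-dns'),
                    ps_line, re.MULTILINE)
    assert res is not None and res.group(1) == '1234'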
def maintenance_mode(enable):
"""Enable/disable pacemaker's maintenance mode"""
log('Setting maintenance-mode to %s' % enable, level=INFO)
try:
current_state = pcmk.get_property('maintenance-mode').strip().lower()
except pcmk.PropertyNotFound:
current_state = 'false'
current_state = True if current_state == 'true' else False
log('Is maintenance-mode currently enabled? %s' % current_state,
level=DEBUG)
if current_state != enable:
pcmk.set_property('maintenance-mode', str(enable).lower())
else:
log('Desired value for maintenance-mode is already set', level=DEBUG)
def get_resources():
"""Get resources from the HA relation
:returns: dict of resources
"""
resources = {}
for rid in relation_ids("ha"):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
return resources
def set_waiting_unit_series_upgrade():
"""Set the unit to a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=true in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', True)
def clear_waiting_unit_series_upgrade():
"""Clear the unit from a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=false in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', False)
def is_waiting_unit_series_upgrade_set():
"""Return the state of the kv().get('waiting-unit-series-upgrade').
    To help with units that don't have HookData() (testing), return False
    if the lookup raises.
"""
with unitdata.HookData()() as t:
kv = t[0]
if not kv.get('waiting-unit-series-upgrade'):
return False
return kv.get('waiting-unit-series-upgrade')
def get_series_upgrade_notifications(relid):
"""Check peers for notifications that they are upgrading their series.
Returns a dict of the form {unit_name: target_series, ...}
:param relid: Relation id to check for notifications.
:type relid: str
:returns: dict
"""
notifications = {}
for unit in related_units(relid):
relation_data = relation_get(rid=relid, unit=unit)
for key, value in relation_data.items():
if key.startswith('series_upgrade_of_'):
notifications[unit] = value
log("Found series upgrade notifications: {}".format(notifications), DEBUG)
return notifications
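# NOTE (illustrative sketch, not part of the charm): only relation keys with
# the series-upgrade prefix are collected. With hypothetical relation data:
def _example_series_upgrade_filter():
    relation_data = {'hostname': 'node-1', 'series_upgrade_of_0': 'xenial'}
    found = {k: v for k, v in relation_data.items()
             if k.startswith('series_upgrade_of_')}
    assert found == {'series_upgrade_of_0': 'xenial'}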
def disable_ha_services():
"""Shutdown and disable HA services."""
log("Disabling HA services", INFO)
for svc in ['corosync', 'pacemaker']:
disable_lsb_services(svc)
if service_running(svc):
service_stop(svc)
def enable_ha_services():
"""Startup and enable HA services."""
log("Enabling HA services", INFO)
for svc in ['pacemaker', 'corosync']:
enable_lsb_services(svc)
if not service_running(svc):
service_start(svc)
def notify_peers_of_series_upgrade():
"""Notify peers which release this unit is upgrading from."""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: ubuntu_rel}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def clear_series_upgrade_notification():
    """Remove the series upgrade notification from peers."""
log("Removing upgrade notification from peers")
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: None}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def set_stonith_configured(is_configured):
"""Set the STONITH_CONFIGURED state.
    :param is_configured: Whether the STONITH resources have been configured.
    :type is_configured: bool
"""
leader_set({STONITH_CONFIGURED: is_configured})
def is_stonith_configured():
"""Get the STONITH_CONFIGURED state.
:returns: State of STONITH_CONFIGURED state.
:rtype: bool
"""
configured = leader_get(STONITH_CONFIGURED) or 'False'
return bool_from_string(configured)
def get_hanode_hostnames():
"""Hostnames of nodes in the hanode relation.
:returns: List of hostnames of nodes in the hanode relation.
:rtype: List
"""
hanode_hostnames = [get_hostname()]
for relid in relation_ids('hanode'):
for unit in related_units(relid):
hostname = relation_get('hostname', rid=relid, unit=unit)
if hostname:
hanode_hostnames.append(hostname)
hanode_hostnames.sort()
return hanode_hostnames
def update_node_list():
"""Determine and delete unexpected nodes from the corosync ring.
:returns: Set of pcmk nodes not part of Juju hanode relation
:rtype: Set[str]
:raises: RemoveCorosyncNodeFailed
"""
pcmk_nodes = set(pcmk.list_nodes())
juju_nodes = set(get_hanode_hostnames())
diff_nodes = pcmk_nodes.difference(juju_nodes)
log("pcmk_nodes[{}], juju_nodes[{}], diff[{}]"
"".format(pcmk_nodes, juju_nodes, diff_nodes),
DEBUG)
for old_node in diff_nodes:
try:
pcmk.set_node_status_to_maintenance(old_node)
pcmk.delete_node(old_node)
except subprocess.CalledProcessError as e:
raise RemoveCorosyncNodeFailed(old_node, e)
return diff_nodes
| 33.161963
| 79
| 0.643024
|
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import pcmk
import json
import os
import re
import subprocess
import socket
import fcntl
import struct
import time
import xml.etree.ElementTree as ET
import itertools
from base64 import b64decode
from charmhelpers.core.strutils import (
bool_from_string,
)
from charmhelpers.core.hookenv import (
local_unit,
log,
TRACE,
DEBUG,
ERROR,
INFO,
WARNING,
leader_get,
leader_set,
relation_get,
relation_set,
related_units,
relation_ids,
config,
unit_get,
status_set,
)
from charmhelpers.core import unitdata
from charmhelpers.contrib.openstack.utils import (
get_host_ip,
set_unit_paused,
clear_unit_paused,
is_unit_paused_set,
is_unit_upgrading_set,
)
from charmhelpers.contrib.openstack.ha.utils import (
assert_charm_supports_dns_ha
)
from charmhelpers.core.host import (
mkdir,
rsync,
service_start,
service_stop,
service_running,
write_file,
file_hash,
lsb_release,
init_is_systemd,
CompareHostReleases,
)
from charmhelpers.fetch import (
apt_install,
add_source,
apt_update,
)
from charmhelpers.contrib.hahelpers.cluster import (
peer_ips,
)
from charmhelpers.contrib.network import ip as utils
import netifaces
from netaddr import IPNetwork
import jinja2
TEMPLATES_DIR = 'templates'
COROSYNC_CONF = '/etc/corosync/corosync.conf'
COROSYNC_DEFAULT = '/etc/default/corosync'
COROSYNC_AUTHKEY = '/etc/corosync/authkey'
COROSYNC_HACLUSTER_ACL_DIR = '/etc/corosync/uidgid.d'
COROSYNC_HACLUSTER_ACL = COROSYNC_HACLUSTER_ACL_DIR + '/hacluster'
COROSYNC_CONF_FILES = [
COROSYNC_DEFAULT,
COROSYNC_AUTHKEY,
COROSYNC_CONF,
COROSYNC_HACLUSTER_ACL,
]
SUPPORTED_TRANSPORTS = ['udp', 'udpu', 'multicast', 'unicast']
PCMKR_CONFIG_DIR = '/etc/pacemaker'
PCMKR_AUTHKEY = PCMKR_CONFIG_DIR + '/authkey'
PCMKR_MAX_RETRIES = 3
PCMKR_SLEEP_SECS = 5
SYSTEMD_OVERRIDES_DIR = '/etc/systemd/system/{}.service.d'
SYSTEMD_OVERRIDES_FILE = '{}/overrides.conf'
MAAS_DNS_CONF_DIR = '/etc/maas_dns'
STONITH_CONFIGURED = 'stonith-configured'
class MAASConfigIncomplete(Exception):
pass
class RemoveCorosyncNodeFailed(Exception):
def __init__(self, node_name, called_process_error):
msg = 'Removing {} from the cluster failed. {} output={}'.format(
node_name, called_process_error, called_process_error.output)
super(RemoveCorosyncNodeFailed, self).__init__(msg)
class EnableStonithFailed(Exception):
def __init__(self, called_process_error):
msg = 'Enabling STONITH failed. {} output={}'.format(
called_process_error, called_process_error.output)
super(EnableStonithFailed, self).__init__(msg)
class DisableStonithFailed(Exception):
def __init__(self, called_process_error):
msg = 'Disabling STONITH failed. {} output={}'.format(
called_process_error, called_process_error.output)
super(DisableStonithFailed, self).__init__(msg)
def disable_upstart_services(*services):
for service in services:
with open("/etc/init/{}.override".format(service), "wt") as override:
override.write("manual")
def enable_upstart_services(*services):
for service in services:
path = '/etc/init/{}.override'.format(service)
if os.path.exists(path):
os.remove(path)
def disable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'remove'])
def enable_lsb_services(*services):
for service in services:
subprocess.check_call(['update-rc.d', '-f', service, 'defaults'])
def get_iface_ipaddr(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8919, # SIOCGIFADDR
struct.pack('256s', iface[:15])
)[20:24])
def get_iface_netmask(iface):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x891b, # SIOCGIFNETMASK
struct.pack('256s', iface[:15])
)[20:24])
def get_netmask_cidr(netmask):
netmask = netmask.split('.')
binary_str = ''
for octet in netmask:
binary_str += bin(int(octet))[2:].zfill(8)
return str(len(binary_str.rstrip('0')))
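# NOTE (illustrative sketch, not part of the charm): get_netmask_cidr()
# counts the leading one-bits of a dotted-quad netmask, e.g.:
def _example_netmask_to_cidr():
    assert get_netmask_cidr('255.255.255.0') == '24'
    assert get_netmask_cidr('255.255.0.0') == '16'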
def get_network_address(iface):
if iface:
iface = str(iface)
network = "{}/{}".format(get_iface_ipaddr(iface),
get_netmask_cidr(get_iface_netmask(iface)))
ip = IPNetwork(network)
return str(ip.network)
else:
return None
def get_ipv6_network_address(iface):
# Behave in same way as ipv4 get_network_address() above if iface is None.
if not iface:
return None
try:
ipv6_addr = utils.get_ipv6_addr(iface=iface)[0]
all_addrs = netifaces.ifaddresses(iface)
for addr in all_addrs[netifaces.AF_INET6]:
if ipv6_addr == addr['addr']:
network = "{}/{}".format(addr['addr'], addr['netmask'])
return str(IPNetwork(network).network)
except ValueError:
msg = "Invalid interface '%s'" % iface
status_set('blocked', msg)
raise Exception(msg)
msg = "No valid network found for interface '%s'" % iface
status_set('blocked', msg)
raise Exception(msg)
def get_corosync_id(unit_name):
# Corosync nodeid 0 is reserved so increase all the nodeids to avoid it
off_set = 1000
return off_set + int(unit_name.split('/')[1])
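# NOTE (illustrative sketch, not part of the charm): the nodeid is the Juju
# unit number plus a fixed offset, so the reserved nodeid 0 is never emitted.
def _example_corosync_id():
    assert get_corosync_id('hacluster/3') == 1003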
def nulls(data):
"""Returns keys of values that are null (but not bool)"""
return [k for k in data.keys()
if not isinstance(data[k], bool) and not data[k]]
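# NOTE (illustrative sketch, not part of the charm): booleans are deliberately
# excluded, so an explicit False is not reported as a missing value.
def _example_nulls():
    assert nulls({'a': '', 'b': False, 'c': 'x'}) == ['a']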
def get_corosync_conf():
if config('prefer-ipv6'):
ip_version = 'ipv6'
bindnetaddr = get_ipv6_network_address
else:
ip_version = 'ipv4'
bindnetaddr = get_network_address
transport = get_transport()
# NOTE(jamespage) use local charm configuration over any provided by
# principle charm
conf = {
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': transport,
}
# NOTE(jamespage): only populate multicast configuration if udp is
# configured
if transport == 'udp':
conf.update({
'corosync_bindnetaddr': bindnetaddr(config('corosync_bindiface')),
'corosync_mcastport': config('corosync_mcastport'),
'corosync_mcastaddr': config('corosync_mcastaddr')
})
if config('prefer-ipv6'):
conf['nodeid'] = get_corosync_id(local_unit())
if config('netmtu'):
conf['netmtu'] = config('netmtu')
if config('debug'):
conf['debug'] = config('debug')
if not nulls(conf):
log("Found sufficient values in local config to populate "
"corosync.conf", level=DEBUG)
return conf
conf = {}
for relid in relation_ids('ha'):
for unit in related_units(relid):
conf = {
'ip_version': ip_version,
'ha_nodes': get_ha_nodes(),
'transport': transport,
}
            # NOTE(jamespage): only populate multicast configuration if udp is
# configured
if transport == 'udp':
bindiface = relation_get('corosync_bindiface',
unit, relid)
conf.update({
'corosync_bindnetaddr': bindnetaddr(bindiface),
'corosync_mcastport': relation_get('corosync_mcastport',
unit, relid),
'corosync_mcastaddr': config('corosync_mcastaddr'),
})
if config('prefer-ipv6'):
conf['nodeid'] = get_corosync_id(local_unit())
if config('netmtu'):
conf['netmtu'] = config('netmtu')
if config('debug'):
conf['debug'] = config('debug')
# Values up to this point must be non-null
if nulls(conf):
continue
return conf
missing = [k for k, v in conf.items() if v is None]
log('Missing required configuration: %s' % missing)
return None
def emit_systemd_overrides_file():
"""Generate the systemd overrides file
With Start and Stop timeout values
Note: (David Ames) Bug#1654403 Work around
May be removed if bug is resolved
    If the timeout value is set to -1, pass 'infinity'.
"""
if not init_is_systemd():
return
stop_timeout = int(config('service_stop_timeout'))
if stop_timeout < 0:
stop_timeout = 'infinity'
start_timeout = int(config('service_start_timeout'))
if start_timeout < 0:
start_timeout = 'infinity'
systemd_overrides_context = {'service_stop_timeout': stop_timeout,
'service_start_timeout': start_timeout,
}
for service in ['corosync', 'pacemaker']:
overrides_dir = SYSTEMD_OVERRIDES_DIR.format(service)
overrides_file = SYSTEMD_OVERRIDES_FILE.format(overrides_dir)
if not os.path.isdir(overrides_dir):
os.mkdir(overrides_dir)
write_file(path=overrides_file,
content=render_template('systemd-overrides.conf',
systemd_overrides_context))
# Update systemd with the new information
subprocess.check_call(['systemctl', 'daemon-reload'])
def emit_corosync_conf():
corosync_conf_context = get_corosync_conf()
if corosync_conf_context:
write_file(path=COROSYNC_CONF,
content=render_template('corosync.conf',
corosync_conf_context))
return True
return False
def get_pcmkr_key():
"""Return the pacemaker auth key"""
return config('pacemaker_key') or config('corosync_key')
def emit_base_conf():
if not os.path.isdir(COROSYNC_HACLUSTER_ACL_DIR):
os.mkdir(COROSYNC_HACLUSTER_ACL_DIR)
if not os.path.isdir(PCMKR_CONFIG_DIR):
os.mkdir(PCMKR_CONFIG_DIR)
corosync_default_context = {'corosync_enabled': 'yes'}
write_file(path=COROSYNC_DEFAULT,
content=render_template('corosync',
corosync_default_context))
write_file(path=COROSYNC_HACLUSTER_ACL,
content=render_template('hacluster.acl', {}))
corosync_key = config('corosync_key')
if corosync_key:
write_file(path=COROSYNC_AUTHKEY,
content=b64decode(corosync_key),
perms=0o400)
pcmkr_key = get_pcmkr_key()
write_file(path=PCMKR_AUTHKEY,
owner='root',
group='haclient',
content=b64decode(pcmkr_key),
perms=0o440)
return True
return False
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
templates = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir)
)
template = templates.get_template(template_name)
return template.render(context)
def assert_charm_supports_ipv6():
"""Check whether we are able to support charms ipv6."""
_release = lsb_release()['DISTRIB_CODENAME'].lower()
if CompareHostReleases(_release) < "trusty":
msg = "IPv6 is not supported in the charms for Ubuntu " \
"versions less than Trusty 14.04"
status_set('blocked', msg)
raise Exception(msg)
def get_transport():
transport = config('corosync_transport')
_deprecated_transport_values = {"multicast": "udp", "unicast": "udpu"}
val = _deprecated_transport_values.get(transport, transport)
if val not in ['udp', 'udpu']:
msg = ("Unsupported corosync_transport type '%s' - supported "
"types are: %s" % (transport, ', '.join(SUPPORTED_TRANSPORTS)))
status_set('blocked', msg)
raise ValueError(msg)
return val
def get_ipv6_addr():
"""Exclude any ip addresses configured or managed by corosync."""
excludes = []
for rid in relation_ids('ha'):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
for res in resources.values():
if 'ocf:heartbeat:IPv6addr' in res:
res_params = parse_data(rid, unit, 'resource_params')
res_p = res_params.get(res)
if res_p:
for k, v in res_p.values():
if utils.is_ipv6(v):
log("Excluding '%s' from address list" % v,
level=DEBUG)
excludes.append(v)
return utils.get_ipv6_addr(exc_list=excludes)[0]
def get_ha_nodes():
ha_units = peer_ips(peer_relation='hanode')
ha_nodes = {}
for unit in ha_units:
corosync_id = get_corosync_id(unit)
addr = ha_units[unit]
if config('prefer-ipv6'):
if not utils.is_ipv6(addr):
# Not an error since cluster may still be forming/updating
log("Expected an ipv6 address but got %s" % (addr),
level=WARNING)
ha_nodes[corosync_id] = addr
else:
ha_nodes[corosync_id] = get_host_ip(addr)
corosync_id = get_corosync_id(local_unit())
if config('prefer-ipv6'):
addr = get_ipv6_addr()
else:
addr = get_host_ip(unit_get('private-address'))
ha_nodes[corosync_id] = addr
return ha_nodes
def get_node_flags(flag):
"""Nodes which have advertised the given flag.
:param flag: Flag to check peers relation data for.
:type flag: str
    :returns: List of IPs of nodes that have advertised the given flag
:rtype: List
"""
hosts = []
if config('prefer-ipv6'):
hosts.append(get_ipv6_addr())
else:
hosts.append(unit_get('private-address'))
for relid in relation_ids('hanode'):
for unit in related_units(relid):
if relation_get(flag, rid=relid, unit=unit):
hosts.append(relation_get('private-address',
rid=relid,
unit=unit))
hosts.sort()
return hosts
def get_cluster_nodes():
"""Nodes which have advertised that they are ready to join the cluster.
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
return get_node_flags('ready')
def get_member_ready_nodes():
"""List of nodes which have advertised that they have joined the cluster.
    :returns: List of IPs of nodes that have joined the cluster.
:rtype: List
"""
return get_node_flags('member_ready')
def parse_data(relid, unit, key):
"""Helper to detect and parse json or ast based relation data"""
_key = 'json_{}'.format(key)
data = relation_get(_key, unit, relid) or relation_get(key, unit, relid)
if data:
try:
return json.loads(data)
except (TypeError, ValueError):
return ast.literal_eval(data)
return {}
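# NOTE (illustrative sketch, not part of the charm): both encodings seen on
# the relation decode to the same structure, so callers always get a dict.
def _example_parse_data_formats():
    assert json.loads('{"res_ping": "ocf:pacemaker:ping"}') == \
        ast.literal_eval("{'res_ping': 'ocf:pacemaker:ping'}")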
def configure_stonith():
if configure_pacemaker_remote_stonith_resource():
configure_peer_stonith_resource()
enable_stonith()
set_stonith_configured(True)
else:
# NOTE(lourot): We enter here when no MAAS STONITH resource could be
# created. Disabling STONITH for now. We're not calling
# set_stonith_configured(), so that enabling STONITH will be retried
# later. (STONITH is now always enabled in this charm.)
# Without MAAS, we keep entering here, which isn't really an issue,
# except that this fails in rare cases, thus failure_is_fatal=False.
disable_stonith(failure_is_fatal=False)
def configure_monitor_host():
"""Configure extra monitor host for better network failure detection"""
log('Checking monitor host configuration', level=DEBUG)
monitor_host = config('monitor_host')
if monitor_host:
if not pcmk.crm_opt_exists('ping'):
log('Implementing monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
monitor_interval = config('monitor_interval')
cmd = ('crm -w -F configure primitive ping '
'ocf:pacemaker:ping params host_list="%s" '
'multiplier="100" op monitor interval="%s" ' %
(monitor_host, monitor_interval))
pcmk.commit(cmd)
cmd = ('crm -w -F configure clone cl_ping ping '
'meta interleave="true"')
pcmk.commit(cmd)
else:
log('Reconfiguring monitor host configuration (host: %s)' %
monitor_host, level=DEBUG)
            cmd = ('crm -w -F resource param ping set host_list="%s"' %
                   monitor_host)
            # Apply the updated host_list to the existing ping resource.
            pcmk.commit(cmd)
else:
if pcmk.crm_opt_exists('ping'):
log('Disabling monitor host configuration', level=DEBUG)
pcmk.commit('crm -w -F resource stop ping')
pcmk.commit('crm -w -F configure delete ping')
def configure_cluster_global(failure_timeout, cluster_recheck_interval=60):
"""Configure global cluster options
:param failure_timeout: Duration in seconds (measured from the most recent
failure) to wait before resetting failcount to 0.
:type failure_timeout: int
:param cluster_recheck_interval: Duration in seconds for the polling
interval at which the cluster checks for
changes in the resource parameters,
constraints or other cluster options.
:type cluster_recheck_interval: int
"""
log('Applying global cluster configuration', level=DEBUG)
# NOTE(lathiat) quorum in a two-node scenario is handled by
# corosync two_node=1. In this case quorum is required for
# initial cluster startup but not if a node was previously in
# contact with the full cluster.
log('Configuring no-quorum-policy to stop', level=DEBUG)
cmd = "crm configure property no-quorum-policy=stop"
pcmk.commit(cmd)
cmd = ('crm configure rsc_defaults $id="rsc-options" '
'resource-stickiness="100" '
'failure-timeout={}'.format(failure_timeout))
pcmk.commit(cmd)
log('Configuring cluster-recheck-interval to {} seconds'.format(
cluster_recheck_interval), level=DEBUG)
cmd = "crm configure property cluster-recheck-interval={}".format(
cluster_recheck_interval)
pcmk.commit(cmd)
def remove_legacy_maas_stonith_resources():
"""Remove maas stoniths resources using the old name."""
stonith_resources = pcmk.crm_maas_stonith_resource_list()
for resource_name in stonith_resources:
pcmk.commit(
'crm -w -F resource stop {}'.format(resource_name))
pcmk.commit(
'crm -w -F configure delete {}'.format(resource_name))
def _configure_stonith_resource(ctxt):
hostnames = []
for host in ctxt['stonith_hostnames']:
hostnames.append(host)
if '.' in host:
hostnames.append(host.split('.')[0])
ctxt['hostnames'] = ' '.join(sorted(list(set(hostnames))))
if all(ctxt.values()):
ctxt['resource_params'] = ctxt['resource_params'].format(**ctxt)
if pcmk.is_resource_present(ctxt['stonith_resource_name']):
pcmk.crm_update_resource(
ctxt['stonith_resource_name'],
ctxt['stonith_plugin'],
ctxt['resource_params'])
else:
cmd = (
"crm configure primitive {stonith_resource_name} "
"{stonith_plugin} {resource_params}").format(**ctxt)
pcmk.commit(cmd, failure_is_fatal=True)
else:
raise ValueError("Missing configuration: {}".format(ctxt))
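# NOTE (illustrative sketch, not part of the charm): FQDNs are additionally
# registered under their short name and duplicates collapse, e.g. with
# hypothetical hosts:
def _example_stonith_hostnames():
    hostnames = []
    for host in ['node-1.maas', 'node-2']:
        hostnames.append(host)
        if '.' in host:
            hostnames.append(host.split('.')[0])
    assert ' '.join(sorted(set(hostnames))) == 'node-1 node-1.maas node-2'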
def configure_null_stonith_resource(stonith_hostnames):
"""Create null stonith resource for the given hostname.
    :param stonith_hostnames: The hostnames by which the stonith management
                              system refers to the remote node.
    :type stonith_hostnames: List
"""
ctxt = {
'stonith_plugin': 'stonith:null',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-null',
'resource_params': (
"params hostlist='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
# NOTE (gnuoy): Not enabling the global stonith-enabled setting as it
# does not make sense to have stonith-enabled when the only resources
# are null resources, so defer enabling stonith-enabled to the 'real'
# stonith resources.
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def configure_maas_stonith_resource(stonith_hostnames):
"""Create maas stonith resource for the given hostname.
    :param stonith_hostnames: The hostnames by which the stonith management
                              system refers to the remote node.
    :type stonith_hostnames: List
"""
ctxt = {
'stonith_plugin': 'stonith:external/maas',
'stonith_hostnames': stonith_hostnames,
'stonith_resource_name': 'st-maas',
'url': config('maas_url'),
'apikey': config('maas_credentials'),
'resource_params': (
"params url='{url}' apikey='{apikey}' hostnames='{hostnames}' "
"op monitor interval=25 start-delay=25 "
"timeout=25")}
_configure_stonith_resource(ctxt)
return {ctxt['stonith_resource_name']: ctxt['stonith_plugin']}
def enable_stonith():
"""Enable stonith via the global property stonith-enabled.
:raises: EnableStonithFailed
"""
log('Enabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=true",
failure_is_fatal=True)
except subprocess.CalledProcessError as e:
raise EnableStonithFailed(e)
def disable_stonith(failure_is_fatal=True):
"""Disable stonith via the global property stonith-enabled.
:param failure_is_fatal: Whether to raise exception if command fails.
:type failure_is_fatal: bool
:raises: DisableStonithFailed
"""
log('Disabling STONITH', level=INFO)
try:
pcmk.commit(
"crm configure property stonith-enabled=false",
failure_is_fatal=failure_is_fatal)
except subprocess.CalledProcessError as e:
raise DisableStonithFailed(e)
def get_ip_addr_from_resource_params(params):
"""Returns the IP address in the resource params provided
:return: the IP address in the params or None if not found
"""
reg_ex = r'.* ip_address="([a-fA-F\d\:\.]+)".*'
res = re.search(reg_ex, params)
return res.group(1) if res else None
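# NOTE (illustrative sketch, not part of the charm): a hypothetical resource
# parameter string, showing what the regular expression above extracts.
def _example_ip_addr_from_params():
    params = 'params fqdn="ha.example.com" ip_address="10.0.0.5" maas_url="x"'
    assert get_ip_addr_from_resource_params(params) == '10.0.0.5'
    assert get_ip_addr_from_resource_params('params foo="bar"') is None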
def need_resources_on_remotes():
"""Whether to run resources on remote nodes.
Check the 'enable-resources' setting across the remote units. If it is
absent or inconsistent then raise a ValueError.
:returns: Whether to run resources on remote nodes
:rtype: bool
:raises: ValueError
"""
responses = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
data = parse_data(relid, unit, 'enable-resources')
# parse_data returns {} if key is absent.
if type(data) is bool:
responses.append(data)
if len(set(responses)) == 1:
run_resources_on_remotes = responses[0]
else:
msg = "Inconsistent or absent enable-resources setting {}".format(
responses)
log(msg, level=WARNING)
raise ValueError(msg)
return run_resources_on_remotes
def set_cluster_symmetry():
"""Set the cluster symmetry.
By default the cluster is an Opt-out cluster (equivalent to
    symmetric-cluster=true); this means that any resource can run anywhere
unless a node explicitly Opts-out. When using pacemaker-remotes there may
be hundreds of nodes and if they are not prepared to run resources the
cluster should be switched to an Opt-in cluster.
"""
try:
symmetric = need_resources_on_remotes()
except ValueError:
        msg = 'Unable to calculate the desired symmetric-cluster setting'
log(msg, level=WARNING)
return
log('Configuring symmetric-cluster: {}'.format(symmetric), level=DEBUG)
cmd = "crm configure property symmetric-cluster={}".format(
str(symmetric).lower())
pcmk.commit(cmd, failure_is_fatal=True)
def add_score_location_rule(res_name, node, location_score):
"""Add or update a location rule that uses a score.
:param res_name: Resource that this location rule controls.
:type res_name: str
:param node: Node that this location rule relates to.
:type node: str
:param location_score: The score to give this location.
:type location_score: int
"""
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
pcmk.crm_update_location(
loc_constraint_name,
res_name,
location_score,
node)
def add_location_rules_for_local_nodes(res_name):
"""Add location rules for running resource on local nodes.
Add location rules allowing the given resource to run on local nodes (eg
not remote nodes).
:param res_name: Resource name to create location rules for.
:type res_name: str
"""
for node in pcmk.list_nodes():
loc_constraint_name = 'loc-{}-{}'.format(res_name, node)
if not pcmk.crm_opt_exists(loc_constraint_name):
cmd = 'crm -w -F configure location {} {} 0: {}'.format(
loc_constraint_name,
res_name,
node)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
def add_location_rules_for_pacemaker_remotes(res_names):
"""Add location rules for pacemaker remote resources on local nodes.
Add location rules allowing the pacemaker remote resource to run on a local
node. Use location score rules to spread resources out.
:param res_names: Pacemaker remote resource names.
:type res_names: List[str]
"""
res_names = sorted(res_names)
nodes = sorted(pcmk.list_nodes())
    preferred_nodes = list(zip(res_names, itertools.cycle(nodes)))
for res_name in res_names:
for node in nodes:
location_score = 0
            if (res_name, node) in preferred_nodes:
location_score = 200
add_score_location_rule(
res_name,
node,
location_score)
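# NOTE (illustrative sketch, not part of the charm): zipping the sorted
# resource names against a cycle of the sorted nodes is what spreads the
# score-200 preferences round-robin, e.g. with hypothetical names:
def _example_remote_spread():
    remotes = sorted(['remote-a', 'remote-b', 'remote-c'])
    nodes = sorted(['node-1', 'node-2'])
    assert list(zip(remotes, itertools.cycle(nodes))) == [
        ('remote-a', 'node-1'),
        ('remote-b', 'node-2'),
        ('remote-c', 'node-1'),
    ]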
def configure_pacemaker_remote(remote_hostname, remote_ip):
"""Create a resource corresponding to the pacemaker remote node.
:param remote_hostname: Remote hostname used for registering remote node.
:type remote_hostname: str
:param remote_ip: Remote IP used for registering remote node.
:type remote_ip: str
:returns: Name of resource for pacemaker remote node.
:rtype: str
"""
resource_name = remote_hostname
if not pcmk.is_resource_present(resource_name):
cmd = (
"crm configure primitive {} ocf:pacemaker:remote "
"params server={} reconnect_interval=60 "
"op monitor interval=30s").format(resource_name,
remote_ip)
pcmk.commit(cmd, failure_is_fatal=True)
return resource_name
def cleanup_remote_nodes(remote_nodes):
"""Cleanup pacemaker remote resources
Remove all status records of the resource and
probe the node afterwards.
:param remote_nodes: List of resource names associated with remote nodes
:type remote_nodes: list
"""
for res_name in remote_nodes:
cmd = 'crm resource cleanup {}'.format(res_name)
# Resource cleanups seem to fail occasionally even on healthy nodes
        # Bug #1822962. Given this cleanup task is just housekeeping, log
# the message if a failure occurs and move on.
if pcmk.commit(cmd, failure_is_fatal=False) == 0:
log(
'Cleanup of resource {} succeeded'.format(res_name),
level=DEBUG)
else:
log(
'Cleanup of resource {} failed'.format(res_name),
level=WARNING)
def configure_pacemaker_remote_stonith_resource():
"""Create a maas stonith resource for the pacemaker-remotes.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = []
stonith_resource = {}
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
stonith_hostname = parse_data(relid, unit, 'stonith-hostname')
if stonith_hostname:
hostnames.append(stonith_hostname)
if hostnames:
stonith_resource = configure_maas_stonith_resource(hostnames)
return stonith_resource
def configure_peer_stonith_resource():
"""Create a null stonith resource for lxd containers.
:returns: Stonith resource dict {res_name: res_type}
:rtype: dict
"""
hostnames = [get_hostname()]
stonith_resource = {}
for relid in relation_ids('hanode'):
for unit in related_units(relid):
stonith_hostname = relation_get('hostname', unit, relid)
if stonith_hostname:
hostnames.append(stonith_hostname)
stonith_resource = configure_null_stonith_resource(hostnames)
return stonith_resource
def configure_pacemaker_remote_resources():
"""Create resources corresponding to the pacemaker remote nodes.
Create resources, location constraints and stonith resources for pacemaker
remote node.
:returns: resource dict {res_name: res_type, ...}
:rtype: dict
"""
log('Checking for pacemaker-remote nodes', level=DEBUG)
resources = []
for relid in relation_ids('pacemaker-remote'):
for unit in related_units(relid):
remote_hostname = parse_data(relid, unit, 'remote-hostname')
remote_ip = parse_data(relid, unit, 'remote-ip')
if remote_hostname:
resource_name = configure_pacemaker_remote(
remote_hostname,
remote_ip)
resources.append(resource_name)
cleanup_remote_nodes(resources)
return {name: 'ocf:pacemaker:remote' for name in resources}
def configure_resources_on_remotes(resources=None, clones=None, groups=None):
"""Add location rules as needed for resources, clones and groups
    If remote nodes should not run resources then add location rules to
    enable them on local nodes.
:param resources: Resource definitions
:type resources: dict
:param clones: Clone definitions
:type clones: dict
:param groups: Group definitions
:type groups: dict
"""
clones = clones or {}
groups = groups or {}
try:
resources_on_remote = need_resources_on_remotes()
except ValueError:
msg = 'Unable to calculate whether resources should run on remotes'
log(msg, level=WARNING)
return
if resources_on_remote:
msg = ('Resources are permitted to run on remotes, no need to create '
'location constraints')
log(msg, level=WARNING)
return
pacemaker_remotes = []
for res_name, res_type in resources.items():
if res_name not in list(clones.values()) + list(groups.values()):
if res_type == 'ocf:pacemaker:remote':
pacemaker_remotes.append(res_name)
else:
add_location_rules_for_local_nodes(res_name)
add_location_rules_for_pacemaker_remotes(pacemaker_remotes)
for cl_name in clones:
add_location_rules_for_local_nodes(cl_name)
# Limit clone resources to only running on X number of nodes where X
# is the number of local nodes. Otherwise they will show as offline
# on the remote nodes.
node_count = len(pcmk.list_nodes())
cmd = ('crm_resource --resource {} --set-parameter clone-max '
'--meta --parameter-value {}').format(cl_name, node_count)
pcmk.commit(cmd, failure_is_fatal=True)
log('%s' % cmd, level=DEBUG)
for grp_name in groups:
add_location_rules_for_local_nodes(grp_name)
def restart_corosync_on_change():
"""Simple decorator to restart corosync if any of its config changes"""
def wrap(f):
def wrapped_f(*args, **kwargs):
checksums = {}
if not is_unit_paused_set():
for path in COROSYNC_CONF_FILES:
checksums[path] = file_hash(path)
return_data = f(*args, **kwargs)
# NOTE: this assumes that this call is always done around
# configure_corosync, which returns true if configuration
# files where actually generated
if return_data and not is_unit_paused_set():
for path in COROSYNC_CONF_FILES:
if checksums[path] != file_hash(path):
validated_restart_corosync()
break
return return_data
return wrapped_f
return wrap
def try_pcmk_wait():
"""Try pcmk.wait_for_pcmk()
Log results and set status message
"""
try:
pcmk.wait_for_pcmk()
log("Pacemaker is ready", level=TRACE)
except pcmk.ServicesNotUp as e:
status_msg = "Pacemaker is down. Please manually start it."
status_set('blocked', status_msg)
full_msg = "{} {}".format(status_msg, e)
log(full_msg, ERROR)
raise pcmk.ServicesNotUp(full_msg)
@restart_corosync_on_change()
def configure_corosync():
log('Configuring and (maybe) restarting corosync', level=DEBUG)
# David Ames Bug#1654403 Work around
# May be removed if bug is resolved
emit_systemd_overrides_file()
return emit_base_conf() and emit_corosync_conf()
def services_running():
"""Determine if both Corosync and Pacemaker are running
Both from the operating system perspective and with a functional test
@returns boolean
"""
pacemaker_status = service_running("pacemaker")
corosync_status = service_running("corosync")
log("Pacemaker status: {}, Corosync status: {}"
"".format(pacemaker_status, corosync_status),
level=DEBUG)
if not (pacemaker_status and corosync_status):
# OS perspective
return False
# Functional test of pacemaker. This will raise if pacemaker doesn't get
# fully ready in time:
pcmk.wait_for_pcmk()
return True
def validated_restart_corosync(retries=10):
"""Restart and validate Corosync and Pacemaker are in fact up and running.
@param retries: number of attempts to restart the services before giving up
@raises pcmk.ServicesNotUp if after retries services are still not up
"""
for restart in range(retries):
try:
if restart_corosync():
log("Corosync and Pacemaker are validated as up and running.",
INFO)
return
else:
log("Corosync or Pacemaker not validated as up yet, retrying",
WARNING)
except pcmk.ServicesNotUp:
log("Pacemaker failed to start, retrying", WARNING)
continue
msg = ("Corosync and/or Pacemaker failed to restart after {} retries"
"".format(retries))
log(msg, ERROR)
status_set('blocked', msg)
raise pcmk.ServicesNotUp(msg)
def restart_corosync():
if service_running("pacemaker"):
log("Stopping pacemaker", DEBUG)
service_stop("pacemaker")
if not is_unit_paused_set():
log("Stopping corosync", DEBUG)
service_stop("corosync")
log("Starting corosync", DEBUG)
service_start("corosync")
log("Starting pacemaker", DEBUG)
service_start("pacemaker")
return services_running()
def validate_dns_ha():
"""Validate the DNS HA
Assert the charm will support DNS HA
Check MAAS related configuration options are properly set
:raises MAASConfigIncomplete: if maas_url and maas_credentials are not set
"""
# Will raise an exception if unable to continue
assert_charm_supports_dns_ha()
if config('maas_url') and config('maas_credentials'):
return True
else:
msg = ("DNS HA is requested but the maas_url or maas_credentials "
"settings are not set")
raise MAASConfigIncomplete(msg)
def setup_maas_api():
"""Install MAAS PPA and packages for accessing the MAAS API.
"""
add_source(config('maas_source'), config('maas_source_key'))
apt_update(fatal=True)
apt_install('python3-maas-client', fatal=True)
def setup_ocf_files():
    """Set up OCF resource agent files
"""
# TODO (thedac) Eventually we want to package the OCF files.
# Bundle with the charm until then.
mkdir('/usr/lib/ocf/resource.d/ceph')
mkdir('/usr/lib/ocf/resource.d/maas')
# Xenial corosync is not creating this directory
mkdir('/etc/corosync/uidgid.d')
rsync('files/ocf/ceph/rbd', '/usr/lib/ocf/resource.d/ceph/rbd')
rsync('files/ocf/maas/dns', '/usr/lib/ocf/resource.d/maas/dns')
rsync('files/ocf/maas/maas_dns.py', '/usr/lib/heartbeat/maas_dns.py')
rsync('files/ocf/maas/maasclient/', '/usr/lib/heartbeat/maasclient/')
rsync(
'files/ocf/maas/maas_stonith_plugin.py',
'/usr/lib/stonith/plugins/external/maas')
def write_maas_dns_address(resource_name, resource_addr):
"""Writes the specified IP address to the resource file for MAAS dns.
:param resource_name: the name of the resource the address belongs to.
This is the name of the file that will be written in /etc/maas_dns.
:param resource_addr: the IP address for the resource. This will be
written to the resource_name file.
"""
mkdir(MAAS_DNS_CONF_DIR)
write_file(os.path.join(MAAS_DNS_CONF_DIR, resource_name),
content=resource_addr)
def needs_maas_dns_migration():
"""Determines if the MAAS DNS ocf resources need migration.
:return: True if migration is necessary, False otherwise.
"""
try:
subprocess.check_call(['grep', 'OCF_RESOURCE_INSTANCE',
'/usr/lib/ocf/resource.d/maas/dns'])
return True
except subprocess.CalledProcessError:
# check_call will raise an exception if grep doesn't find the string
return False
def is_in_standby_mode(node_name):
"""Check if node is in standby mode in pacemaker
@param node_name: The name of the node to check
@returns boolean - True if node_name is in standby mode
"""
out = (subprocess
.check_output(['crm', 'node', 'status', node_name])
.decode('utf-8'))
root = ET.fromstring(out)
standby_mode = False
for nvpair in root.iter('nvpair'):
if (nvpair.attrib.get('name') == 'standby' and
nvpair.attrib.get('value') == 'on'):
standby_mode = True
return standby_mode
def get_hostname():
"""Return the hostname of this unit
@returns hostname
"""
return socket.gethostname()
def enter_standby_mode(node_name, duration='forever'):
"""Put this node into standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'standby', node_name, duration])
def leave_standby_mode(node_name):
"""Take this node out of standby mode in pacemaker
@returns None
"""
subprocess.check_call(['crm', 'node', 'online', node_name])
def node_has_resources(node_name):
"""Check if this node is running resources
@param node_name: The name of the node to check
@returns boolean - True if node_name has resources
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
has_resources = False
for resource in root.iter('resource'):
for child in resource:
if child.tag == 'node' and child.attrib.get('name') == node_name:
has_resources = True
return has_resources
def node_is_dc(node_name):
"""Check if this node is the designated controller.
@param node_name: The name of the node to check
@returns boolean - True if node_name is the DC
"""
out = subprocess.check_output(['crm_mon', '-X']).decode('utf-8')
root = ET.fromstring(out)
for current_dc in root.iter("current_dc"):
if current_dc.attrib.get('name') == node_name:
return True
return False
def set_unit_status():
"""Set the workload status for this unit
@returns None
"""
status_set(*assess_status_helper())
def resume_unit():
"""Resume services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
leave_standby_mode(node_name)
if is_in_standby_mode(node_name):
messages.append("Node still in standby mode")
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
else:
clear_unit_paused()
set_unit_status()
def pause_unit():
"""Pause services on this unit and update the units status
@returns None
"""
node_name = get_hostname()
messages = []
enter_standby_mode(node_name)
if not is_in_standby_mode(node_name):
messages.append("Node not in standby mode")
# some resources may take some time to be migrated out from the node. So 3
    # retries are made with a 5-second wait between each one.
i = 0
ready = False
has_resources = False
while i < PCMKR_MAX_RETRIES and not ready:
if node_has_resources(node_name):
has_resources = True
i += 1
time.sleep(PCMKR_SLEEP_SECS)
else:
ready = True
has_resources = False
if has_resources:
messages.append("Resources still running on unit")
status, message = assess_status_helper()
# New status message will indicate the resource is not running
if status != 'active' and 'not running' not in message:
messages.append(message)
if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
else:
set_unit_paused()
status_set("maintenance",
"Paused. Use 'resume' action to resume normal service.")
def assess_status_helper():
"""Assess status of unit
@returns status, message - status is workload status and message is any
corresponding messages
"""
if config('stonith_enabled') in ['true', 'True', True]:
return(
'blocked',
'stonith_enabled config option is no longer supported')
if config('no_quorum_policy'):
if config('no_quorum_policy').lower() not in ['ignore', 'freeze',
'stop', 'suicide']:
return(
'blocked',
'Invalid no_quorum_policy specified')
if is_unit_upgrading_set():
return ("blocked",
"Ready for do-release-upgrade. Set complete when finished")
if is_waiting_unit_series_upgrade_set():
return ("blocked",
"HA services shutdown, peers are ready for series upgrade")
if is_unit_paused_set():
return ("maintenance",
"Paused. Use 'resume' action to resume normal service.")
node_count = int(config('cluster_count'))
status = 'active'
message = 'Unit is ready and clustered'
try:
try_pcmk_wait()
except pcmk.ServicesNotUp:
message = 'Pacemaker is down'
status = 'blocked'
for relid in relation_ids('hanode'):
if len(related_units(relid)) + 1 < node_count:
status = 'blocked'
message = ("Insufficient peer units for ha cluster "
"(require {})".format(node_count))
# if the status was not changed earlier, we verify the maintenance status
try:
if status == 'active':
prop = pcmk.get_property('maintenance-mode').strip()
except pcmk.PropertyNotFound:
        # the property is not in the output of 'crm configure show xml', so we use
# the default value for this property. For crmsh>=2.2.0 the default
# value is automatically provided by show-property or get-property.
prop = 'false'
if (status == 'active' and prop == 'true'):
# maintenance mode enabled in pacemaker
status = 'maintenance'
message = 'Pacemaker in maintenance mode'
for resource in get_resources().keys():
if not pcmk.is_resource_present(resource):
return ("waiting",
"Resource: {} not yet configured".format(resource))
if not pcmk.crm_res_running_on_node(resource, get_hostname()):
return ("blocked",
"Resource: {} not running".format(resource))
return status, message
def ocf_file_exists(res_name, resources,
RES_ROOT='/usr/lib/ocf/resource.d'):
"""To determine whether the ocf file exists, allow multiple ocf
files with the same name in different directories
@param res_name: The name of the ocf resource to check
@param resources: ocf resources
@return: boolean - True if the ocf resource exists
"""
res_type = None
for key, val in resources.items():
if res_name == key:
if len(val.split(':')) > 2:
res_type = val.split(':')[1]
ocf_name = res_name.replace('res_', '').replace('_', '-')
ocf_file = os.path.join(RES_ROOT, res_type, ocf_name)
if os.path.isfile(ocf_file):
return True
return False
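# Illustrative sketch (not part of the charm): how ocf_file_exists() reads a resources
# mapping. The resource names and values below are hypothetical, assuming the usual
# "ocf:<provider>:<agent>" notation used for pacemaker resources.
def _example_ocf_file_exists():
    resources = {
        'res_ks_vip': 'ocf:heartbeat:IPaddr2',  # three fields -> provider 'heartbeat'
        'res_ks_haproxy': 'lsb:haproxy',        # two fields -> no provider derived
    }
    # Would look for /usr/lib/ocf/resource.d/heartbeat/ks-vip on a real node.
    return ocf_file_exists('res_ks_vip', resources)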
def kill_legacy_ocf_daemon_process(res_name):
"""Kill legacy ocf daemon process
@param res_name: The name of the ocf process to kill
"""
ocf_name = res_name.replace('res_', '').replace('_', '-')
reg_expr = r'([0-9]+)\s+[^0-9]+{}'.format(ocf_name)
cmd = ['ps', '-eo', 'pid,cmd']
ps = subprocess.check_output(cmd).decode('utf-8')
res = re.search(reg_expr, ps, re.MULTILINE)
if res:
pid = res.group(1)
subprocess.call(['sudo', 'kill', '-9', pid])
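# Illustrative sketch (not part of the charm): the regex used above extracts the PID
# from 'ps -eo pid,cmd' style output. The sample process line is hypothetical.
def _example_legacy_ocf_pid_match():
    import re
    sample = " 1234 /usr/bin/python /usr/local/bin/my-agent --daemon"
    match = re.search(r'([0-9]+)\s+[^0-9]+{}'.format('my-agent'), sample, re.MULTILINE)
    return match.group(1) if match else None  # -> '1234'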
def maintenance_mode(enable):
"""Enable/disable pacemaker's maintenance mode"""
log('Setting maintenance-mode to %s' % enable, level=INFO)
try:
current_state = pcmk.get_property('maintenance-mode').strip().lower()
except pcmk.PropertyNotFound:
current_state = 'false'
current_state = True if current_state == 'true' else False
log('Is maintenance-mode currently enabled? %s' % current_state,
level=DEBUG)
if current_state != enable:
pcmk.set_property('maintenance-mode', str(enable).lower())
else:
log('Desired value for maintenance-mode is already set', level=DEBUG)
def get_resources():
"""Get resources from the HA relation
:returns: dict of resources
"""
resources = {}
for rid in relation_ids("ha"):
for unit in related_units(rid):
resources = parse_data(rid, unit, 'resources')
return resources
def set_waiting_unit_series_upgrade():
"""Set the unit to a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=true in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', True)
def clear_waiting_unit_series_upgrade():
"""Clear the unit from a waiting upgrade state in the local kv() store.
"""
log("Setting waiting-unit-series-upgrade=false in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
kv.set('waiting-unit-series-upgrade', False)
def is_waiting_unit_series_upgrade_set():
"""Return the state of the kv().get('waiting-unit-series-upgrade').
To help with units that don't have HookData() (testing)
if it excepts, return False
"""
with unitdata.HookData()() as t:
kv = t[0]
if not kv.get('waiting-unit-series-upgrade'):
return False
return kv.get('waiting-unit-series-upgrade')
def get_series_upgrade_notifications(relid):
"""Check peers for notifications that they are upgrading their series.
Returns a dict of the form {unit_name: target_series, ...}
:param relid: Relation id to check for notifications.
:type relid: str
:returns: dict
"""
notifications = {}
for unit in related_units(relid):
relation_data = relation_get(rid=relid, unit=unit)
for key, value in relation_data.items():
if key.startswith('series_upgrade_of_'):
notifications[unit] = value
log("Found series upgrade notifications: {}".format(notifications), DEBUG)
return notifications
def disable_ha_services():
"""Shutdown and disable HA services."""
log("Disabling HA services", INFO)
for svc in ['corosync', 'pacemaker']:
disable_lsb_services(svc)
if service_running(svc):
service_stop(svc)
def enable_ha_services():
"""Startup and enable HA services."""
log("Enabling HA services", INFO)
for svc in ['pacemaker', 'corosync']:
enable_lsb_services(svc)
if not service_running(svc):
service_start(svc)
def get_series_upgrade_key():
series_upgrade_key = 'series_upgrade_of_{}'.format(
local_unit().replace('/', '_'))
return series_upgrade_key.replace('-', '_')
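# Illustrative sketch (not part of the charm): the relation key is derived from the
# local unit name, so a hypothetical unit 'hacluster/0' yields
# 'series_upgrade_of_hacluster_0'.
def _example_series_upgrade_key(unit_name='hacluster/0'):
    key = 'series_upgrade_of_{}'.format(unit_name.replace('/', '_'))
    return key.replace('-', '_')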
def notify_peers_of_series_upgrade():
"""Notify peers which release this unit is upgrading from."""
ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: ubuntu_rel}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def clear_series_upgrade_notification():
"""Remove from series upgrade notification from peers."""
log("Removing upgrade notification from peers")
series_upgrade_key = get_series_upgrade_key()
relation_data = {
series_upgrade_key: None}
for rel_id in relation_ids('hanode'):
relation_set(
relation_id=rel_id,
relation_settings=relation_data)
def set_stonith_configured(is_configured):
"""Set the STONITH_CONFIGURED state.
:param is_configured: Flag to check peers relation data for.
:type is_configured: bool
:returns: List of IPs of nodes that are ready to join the cluster
:rtype: List
"""
leader_set({STONITH_CONFIGURED: is_configured})
def is_stonith_configured():
"""Get the STONITH_CONFIGURED state.
:returns: State of STONITH_CONFIGURED state.
:rtype: bool
"""
configured = leader_get(STONITH_CONFIGURED) or 'False'
return bool_from_string(configured)
def get_hanode_hostnames():
"""Hostnames of nodes in the hanode relation.
:returns: List of hostnames of nodes in the hanode relation.
:rtype: List
"""
hanode_hostnames = [get_hostname()]
for relid in relation_ids('hanode'):
for unit in related_units(relid):
hostname = relation_get('hostname', rid=relid, unit=unit)
if hostname:
hanode_hostnames.append(hostname)
hanode_hostnames.sort()
return hanode_hostnames
def update_node_list():
"""Determine and delete unexpected nodes from the corosync ring.
:returns: Set of pcmk nodes not part of Juju hanode relation
:rtype: Set[str]
:raises: RemoveCorosyncNodeFailed
"""
pcmk_nodes = set(pcmk.list_nodes())
juju_nodes = set(get_hanode_hostnames())
diff_nodes = pcmk_nodes.difference(juju_nodes)
log("pcmk_nodes[{}], juju_nodes[{}], diff[{}]"
"".format(pcmk_nodes, juju_nodes, diff_nodes),
DEBUG)
for old_node in diff_nodes:
try:
pcmk.set_node_status_to_maintenance(old_node)
pcmk.delete_node(old_node)
except subprocess.CalledProcessError as e:
raise RemoveCorosyncNodeFailed(old_node, e)
return diff_nodes
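# Illustrative sketch (not part of the charm): update_node_list() boils down to a set
# difference between the nodes pacemaker knows about and the nodes present on the
# hanode relation. The hostnames below are hypothetical.
def _example_stale_node_detection():
    pcmk_nodes = {'node1', 'node2', 'node3'}
    juju_nodes = {'node1', 'node2'}
    return pcmk_nodes.difference(juju_nodes)  # -> {'node3'}, which would be deleted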
def is_update_ring_requested(corosync_update_uuid):
    """Return True if this corosync update request has not been handled yet.
    The request UUID is stored in the local kv() store so that each request
    triggers at most one ring update on this unit.
    """
    log("Setting corosync-update-uuid=<uuid> in local kv", DEBUG)
with unitdata.HookData()() as t:
kv = t[0]
stored_value = kv.get('corosync-update-uuid')
if not stored_value or stored_value != corosync_update_uuid:
kv.set('corosync-update-uuid', corosync_update_uuid)
return True
return False
def trigger_corosync_update_from_leader(unit, rid):
    """Reload the corosync ring when the leader has requested an update.
    Returns True when corosync.conf was re-emitted and 'corosync-cfgtool -R' run.
    """
    corosync_update_uuid = relation_get(
        attribute='trigger-corosync-update',
        unit=unit, rid=rid,
    )
if (corosync_update_uuid and
is_update_ring_requested(corosync_update_uuid) and
emit_corosync_conf()):
cmd = 'corosync-cfgtool -R'
pcmk.commit(cmd)
return True
return False
| 0
| 270
| 0
| 799
| 0
| 11,160
| 0
| 229
| 803
|
dc846139a64da96893d81c6eddfca55ea20f7f1e
| 70,433
|
py
|
Python
|
autots/evaluator/auto_model.py
|
nsankar/AutoTS
|
b4167e1506e1ccb41a85dad1be481a646d808583
|
[
"MIT"
] | null | null | null |
autots/evaluator/auto_model.py
|
nsankar/AutoTS
|
b4167e1506e1ccb41a85dad1be481a646d808583
|
[
"MIT"
] | null | null | null |
autots/evaluator/auto_model.py
|
nsankar/AutoTS
|
b4167e1506e1ccb41a85dad1be481a646d808583
|
[
"MIT"
] | null | null | null |
"""Mid-level helper functions for AutoTS."""
import numpy as np
import pandas as pd
import datetime
import json
from hashlib import md5
from autots.evaluator.metrics import PredictionEval
from autots.tools.transform import RandomTransform
def seasonal_int(include_one: bool = False):
"""Generate a random integer of typical seasonalities."""
if include_one:
lag = np.random.choice(
a=[
'random_int',
1,
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.10,
0.05,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
else:
lag = np.random.choice(
a=[
'random_int',
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.15,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
if lag == 'random_int':
lag = np.random.randint(2, 100, size=1).item()
return int(lag)
def create_model_id(
model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {}
):
"""Create a hash ID which should be unique to the model parameters."""
str_repr = (
str(model_str) + json.dumps(parameter_dict) + json.dumps(transformation_dict)
)
str_repr = ''.join(str_repr.split())
hashed = md5(str_repr.encode('utf-8')).hexdigest()
return hashed
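# Illustrative sketch (not part of the library): the ID is an md5 of the model name
# plus its parameter JSON with whitespace removed, so identical inputs always map to
# the same ID. The parameter values below are hypothetical.
def _example_model_id_is_deterministic():
    a = create_model_id('ETS', {'trend': 'additive'}, {'fillna': 'ffill'})
    b = create_model_id('ETS', {'trend': 'additive'}, {'fillna': 'ffill'})
    return a == b  # -> True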
def ModelMonster(
model: str,
parameters: dict = {},
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
startTimeStamps=None,
forecast_length: int = 14,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Directs strings and parameters to appropriate model objects.
Args:
model (str): Name of Model Function
parameters (dict): Dictionary of parameters to pass through to model
"""
model = str(model)
if model == 'ZeroesNaive':
from autots.models.basics import ZeroesNaive
return ZeroesNaive(frequency=frequency, prediction_interval=prediction_interval)
if model == 'LastValueNaive':
from autots.models.basics import LastValueNaive
return LastValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
if model == 'AverageValueNaive':
from autots.models.basics import AverageValueNaive
if parameters == {}:
return AverageValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return AverageValueNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
)
if model == 'SeasonalNaive':
from autots.models.basics import SeasonalNaive
if parameters == {}:
return SeasonalNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return SeasonalNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
lag_1=parameters['lag_1'],
lag_2=parameters['lag_2'],
)
if model == 'GLS':
from autots.models.statsmodels import GLS
return GLS(frequency=frequency, prediction_interval=prediction_interval)
if model == 'GLM':
from autots.models.statsmodels import GLM
if parameters == {}:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
family=parameters['family'],
constant=parameters['constant'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ETS':
from autots.models.statsmodels import ETS
if parameters == {}:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
damped=parameters['damped'],
trend=parameters['trend'],
seasonal=parameters['seasonal'],
seasonal_periods=parameters['seasonal_periods'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'ARIMA':
from autots.models.statsmodels import ARIMA
if parameters == {}:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
p=parameters['p'],
d=parameters['d'],
q=parameters['q'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'FBProphet':
from autots.models.prophet import FBProphet
if parameters == {}:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'RollingRegression':
from autots.models.sklearn import RollingRegression
if parameters == {}:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
regression_model=parameters['regression_model'],
mean_rolling_periods=parameters['mean_rolling_periods'],
std_rolling_periods=parameters['std_rolling_periods'],
macd_periods=parameters['macd_periods'],
max_rolling_periods=parameters['max_rolling_periods'],
min_rolling_periods=parameters['min_rolling_periods'],
ewm_alpha=parameters['ewm_alpha'],
additional_lag_periods=parameters['additional_lag_periods'],
x_transform=parameters['x_transform'],
rolling_autocorr_periods=parameters['rolling_autocorr_periods'],
abs_energy=parameters['abs_energy'],
add_date_part=parameters['add_date_part'],
polynomial_degree=parameters['polynomial_degree'],
n_jobs=n_jobs,
)
return model
if model == 'UnobservedComponents':
from autots.models.statsmodels import UnobservedComponents
if parameters == {}:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
level=parameters['level'],
trend=parameters['trend'],
cycle=parameters['cycle'],
damped_cycle=parameters['damped_cycle'],
irregular=parameters['irregular'],
stochastic_trend=parameters['stochastic_trend'],
stochastic_level=parameters['stochastic_level'],
stochastic_cycle=parameters['stochastic_cycle'],
)
return model
if model == 'DynamicFactor':
from autots.models.statsmodels import DynamicFactor
if parameters == {}:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
k_factors=parameters['k_factors'],
factor_order=parameters['factor_order'],
)
return model
if model == 'VAR':
from autots.models.statsmodels import VAR
if parameters == {}:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
maxlags=parameters['maxlags'],
ic=parameters['ic'],
random_seed=random_seed,
verbose=verbose,
)
return model
if model == 'VECM':
from autots.models.statsmodels import VECM
if parameters == {}:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
deterministic=parameters['deterministic'],
k_ar_diff=parameters['k_ar_diff'],
)
return model
if model == 'VARMAX':
from autots.models.statsmodels import VARMAX
if parameters == {}:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
order=parameters['order'],
trend=parameters['trend'],
)
return model
if model == 'GluonTS':
from autots.models.gluonts import GluonTS
if parameters == {}:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
gluon_model=parameters['gluon_model'],
epochs=parameters['epochs'],
learning_rate=parameters['learning_rate'],
forecast_length=forecast_length,
)
return model
if model == 'TSFreshRegressor':
from autots.models.tsfresh import TSFreshRegressor
if parameters == {}:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
max_timeshift=parameters['max_timeshift'],
regression_model=parameters['regression_model'],
feature_selection=parameters['feature_selection'],
)
return model
if model == 'MotifSimulation':
from autots.models.basics import MotifSimulation
if parameters == {}:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
phrase_len=parameters['phrase_len'],
comparison=parameters['comparison'],
shared=parameters['shared'],
distance_metric=parameters['distance_metric'],
max_motifs=parameters['max_motifs'],
recency_weighting=parameters['recency_weighting'],
cutoff_threshold=parameters['cutoff_threshold'],
cutoff_minimum=parameters['cutoff_minimum'],
point_method=parameters['point_method'],
)
return model
if model == 'WindowRegression':
from autots.models.sklearn import WindowRegression
if parameters == {}:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
else:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
window_size=parameters['window_size'],
regression_model=parameters['regression_model'],
input_dim=parameters['input_dim'],
output_dim=parameters['output_dim'],
normalize_window=parameters['normalize_window'],
shuffle=parameters['shuffle'],
max_windows=parameters['max_windows'],
forecast_length=forecast_length,
n_jobs=n_jobs,
)
return model
if model == 'TensorflowSTS':
from autots.models.tfp import TensorflowSTS
if parameters == {}:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
seasonal_periods=parameters['seasonal_periods'],
ar_order=parameters['ar_order'],
trend=parameters['trend'],
fit_method=parameters['fit_method'],
num_steps=parameters['num_steps'],
)
return model
if model == 'TFPRegression':
from autots.models.tfp import TFPRegression
if parameters == {}:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
kernel_initializer=parameters['kernel_initializer'],
epochs=parameters['epochs'],
batch_size=parameters['batch_size'],
optimizer=parameters['optimizer'],
loss=parameters['loss'],
dist=parameters['dist'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ComponentAnalysis':
from autots.models.sklearn import ComponentAnalysis
if parameters == {}:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
model=parameters['model'],
model_parameters=parameters['model_parameters'],
decomposition=parameters['decomposition'],
n_components=parameters['n_components'],
forecast_length=forecast_length,
)
return model
else:
raise AttributeError(
("Model String '{}' not a recognized model type").format(model)
)
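# Illustrative sketch (not part of the library): ModelMonster is a plain dispatcher,
# so a call like the one below returns an unfitted model object; parameters={} falls
# back to the model's defaults. The argument values are hypothetical.
def _example_model_monster_dispatch():
    model = ModelMonster(
        'LastValueNaive', parameters={}, frequency='D', prediction_interval=0.9
    )
    return model  # .fit(df) and .predict(forecast_length) would follow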
def ModelPrediction(
df_train,
forecast_length: int,
transformation_dict: dict,
model_str: str,
parameter_dict: dict,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Feed parameters into modeling pipeline
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
n_jobs (int): number of processes
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
transformationStartTime = datetime.datetime.now()
from autots.tools.transform import GeneralTransformer
try:
coerce_integer = transformation_dict['coerce_integer']
grouping = transformation_dict['grouping']
if grouping == 'user' and grouping_ids is None:
grouping = 'kmeans5'
transformation_dict['grouping'] = 'kmeans5'
reconciliation = transformation_dict['reconciliation']
except Exception:
coerce_integer = False
grouping = None
grouping_ids = None
reconciliation = None
transformer_object = GeneralTransformer(
outlier_method=transformation_dict['outlier_method'],
outlier_threshold=transformation_dict['outlier_threshold'],
outlier_position=transformation_dict['outlier_position'],
fillna=transformation_dict['fillna'],
transformation=transformation_dict['transformation'],
detrend=transformation_dict['detrend'],
second_transformation=transformation_dict['second_transformation'],
transformation_param=transformation_dict['transformation_param'],
third_transformation=transformation_dict['third_transformation'],
transformation_param2=transformation_dict['transformation_param2'],
fourth_transformation=transformation_dict['fourth_transformation'],
discretization=transformation_dict['discretization'],
n_bins=transformation_dict['n_bins'],
grouping=grouping,
grouping_ids=grouping_ids,
reconciliation=reconciliation,
coerce_integer=coerce_integer,
).fit(df_train)
df_train_transformed = transformer_object.transform(df_train)
    # slice the context, i.e. shorten the amount of data available.
if transformation_dict['context_slicer'] not in [None, 'None']:
from autots.tools.transform import simple_context_slicer
df_train_transformed = simple_context_slicer(
df_train_transformed,
method=transformation_dict['context_slicer'],
forecast_length=forecast_length,
)
    # make sure the regressor has the same length; a wrong-size regressor could be a problem here.
if len(future_regressor_train) > 0:
future_regressor_train = future_regressor_train.tail(
df_train_transformed.shape[0]
)
transformation_runtime = datetime.datetime.now() - transformationStartTime
# from autots.evaluator.auto_model import ModelMonster
model = ModelMonster(
model_str,
parameters=parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
df_forecast = model.predict(
forecast_length=forecast_length, future_regressor=future_regressor_forecast
)
if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
raise ValueError(
"Model {} returned NaN for one or more series".format(model_str)
)
transformationStartTime = datetime.datetime.now()
# Inverse the transformations
df_forecast.forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.lower_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.lower_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.upper_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.upper_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.transformation_parameters = transformation_dict
# Remove negatives if desired
    # There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower=0); not sure which is faster
if no_negatives:
df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
df_forecast.forecast = df_forecast.forecast.clip(lower=0)
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
if constraint is not None:
if verbose > 2:
print("Using constraint.")
constraint = float(constraint)
train_std = df_train.std(axis=0)
train_min = df_train.min(axis=0) - (constraint * train_std)
train_max = df_train.max(axis=0) + (constraint * train_std)
df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
transformation_runtime = transformation_runtime + (
datetime.datetime.now() - transformationStartTime
)
df_forecast.transformation_runtime = transformation_runtime
return df_forecast
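# Illustrative sketch (not part of the library): the 'constraint' argument above clips
# the point forecast to [train_min - c * std, train_max + c * std] per series. The
# numbers below are hypothetical.
def _example_constraint_bounds():
    train = pd.DataFrame({'series_a': [10.0, 12.0, 14.0]})
    c = 2.0
    lower = train.min(axis=0) - (c * train.std(axis=0))  # -> 6.0
    upper = train.max(axis=0) + (c * train.std(axis=0))  # -> 18.0
    return lower, upper  # forecasts outside these bounds would be clipped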
def unpack_ensemble_models(
template,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
keep_ensemble: bool = True,
recursive: bool = False,
):
"""Take ensemble models from template and add as new rows."""
ensemble_template = pd.DataFrame()
template['Ensemble'] = np.where(
((template['Model'] == 'Ensemble') & (template['Ensemble'] < 1)),
1,
template['Ensemble'],
)
for index, value in template[template['Ensemble'] != 0][
'ModelParameters'
].iteritems():
model_dict = json.loads(value)['models']
model_df = pd.DataFrame.from_dict(model_dict, orient='index')
model_df = model_df.rename_axis('ID').reset_index(drop=False)
model_df['Ensemble'] = 0
# unpack nested ensembles, if recursive specified
if recursive and 'Ensemble' in model_df['Model'].tolist():
model_df = pd.concat(
[
unpack_ensemble_models(
model_df, recursive=True, template_cols=template_cols
),
model_df,
],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
ensemble_template = pd.concat(
[ensemble_template, model_df], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
if not keep_ensemble:
template = template[template['Ensemble'] == 0]
template = pd.concat(
[template, ensemble_template], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
template = template.drop_duplicates(subset=template_cols)
return template
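# Illustrative sketch (not part of the library): an Ensemble row stores its component
# models as JSON under a 'models' key; unpacking adds one template row per component
# with Ensemble=0. The parameter strings below are hypothetical.
def _example_unpack_ensemble():
    ens_params = json.dumps(
        {
            'models': {
                'abc123': {
                    'Model': 'ETS',
                    'ModelParameters': '{}',
                    'TransformationParameters': '{}',
                }
            }
        }
    )
    template = pd.DataFrame(
        {
            'Model': ['Ensemble'],
            'ModelParameters': [ens_params],
            'TransformationParameters': ['{}'],
            'Ensemble': [1],
        }
    )
    return unpack_ensemble_models(template)  # -> ensemble row plus the ETS member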
def PredictWitch(
template,
df_train,
forecast_length: int,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""Takes numeric data, returns numeric forecasts.
Only one model (albeit potentially an ensemble)!
Well, she turned me into a newt.
A newt?
I got better. -Python
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
template_cols (list): column names of columns used as model template
Returns:
        PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
template = template.head(1)
for index_upper, row_upper in template.iterrows():
# if an ensemble
if row_upper['Model'] == 'Ensemble':
from autots.models.ensemble import EnsembleForecast
forecasts_list = []
forecasts_runtime = []
forecasts = []
upper_forecasts = []
lower_forecasts = []
ens_model_str = row_upper['Model']
ens_params = json.loads(row_upper['ModelParameters'])
ens_template = unpack_ensemble_models(
template, template_cols, keep_ensemble=False
)
for index, row in ens_template.iterrows():
# recursive recursion!
if verbose > 2:
total_ens = ens_template.shape[0]
print(
"Ensemble component {} of {} ".format(
                            str(index), str(total_ens)
)
)
df_forecast = PredictWitch(
row,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
forecasts_list.extend([model_id])
forecasts_runtime.extend([total_runtime])
forecasts.extend([df_forecast.forecast])
upper_forecasts.extend([df_forecast.upper_forecast])
lower_forecasts.extend([df_forecast.lower_forecast])
ens_forecast = EnsembleForecast(
ens_model_str,
ens_params,
forecasts_list=forecasts_list,
forecasts=forecasts,
lower_forecasts=lower_forecasts,
upper_forecasts=upper_forecasts,
forecasts_runtime=forecasts_runtime,
prediction_interval=prediction_interval,
)
return ens_forecast
# if not an ensemble
else:
model_str = row_upper['Model']
parameter_dict = json.loads(row_upper['ModelParameters'])
transformation_dict = json.loads(row_upper['TransformationParameters'])
df_forecast = ModelPrediction(
df_train,
forecast_length,
transformation_dict,
model_str,
parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
grouping_ids=grouping_ids,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
startTimeStamps=startTimeStamps,
n_jobs=n_jobs,
)
return df_forecast
def TemplateWizard(
template,
df_train,
df_test,
weights,
model_count: int = 0,
ensemble: str = True,
forecast_length: int = 14,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
validation_round: int = 0,
model_interrupt: bool = False,
grouping_ids=None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Take Template, returns Results.
There are some who call me... Tim. - Python
Args:
        template (pandas.DataFrame): containing model str, and json of transformations and hyperparameters
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
df_test (pandas.DataFrame): dataframe of actual values of (forecast length * n series)
weights (dict): key = column/series_id, value = weight
ensemble (str): desc of ensemble types to prepare metric collection
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
validation_round (int): int passed to record current validation.
model_interrupt (bool): if True, keyboard interrupts are caught and only break current model eval.
template_cols (list): column names of columns used as model template
Returns:
TemplateEvalObject
"""
ensemble = str(ensemble)
template_result = TemplateEvalObject()
template_result.model_count = model_count
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
# template = unpack_ensemble_models(template, template_cols, keep_ensemble = False)
for index, row in template.iterrows():
try:
model_str = row['Model']
parameter_dict = json.loads(row['ModelParameters'])
transformation_dict = json.loads(row['TransformationParameters'])
ensemble_input = row['Ensemble']
current_template = pd.DataFrame(row).transpose()
template_result.model_count += 1
if verbose > 0:
if verbose > 1:
print(
"Model Number: {} with model {} in Validation {} with params {} and transformations {}".format(
str(template_result.model_count),
model_str,
str(validation_round),
json.dumps(parameter_dict),
json.dumps(transformation_dict),
)
)
else:
print(
"Model Number: {} with model {} in Validation {} ".format(
str(template_result.model_count),
model_str,
str(validation_round),
)
)
df_forecast = PredictWitch(
current_template,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
per_ts = True if 'distance' in ensemble else False
if 'hdist' in ensemble:
dist_n = int(np.ceil(0.3 * forecast_length))
else:
dist_n = None
model_error = PredictionEval(
df_forecast,
df_test,
series_weights=weights,
df_train=df_train,
per_timestamp_errors=per_ts,
dist_n=dist_n,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
result = pd.DataFrame(
{
'ID': model_id,
'Model': df_forecast.model_name,
'ModelParameters': json.dumps(df_forecast.model_parameters),
'TransformationParameters': json.dumps(
df_forecast.transformation_parameters
),
'TransformationRuntime': df_forecast.transformation_runtime,
'FitRuntime': df_forecast.fit_runtime,
'PredictRuntime': df_forecast.predict_runtime,
'TotalRuntime': total_runtime,
'Ensemble': ensemble_input,
'Exceptions': np.nan,
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
a = pd.DataFrame(
model_error.avg_metrics_weighted.rename(lambda x: x + '_weighted')
).transpose()
result = pd.concat(
[result, pd.DataFrame(model_error.avg_metrics).transpose(), a], axis=1
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
if 'horizontal' in ensemble:
cur_mae = model_error.per_series_metrics.loc['mae']
cur_mae = pd.DataFrame(cur_mae).transpose()
cur_mae.index = [model_id]
template_result.per_series_mae = pd.concat(
[template_result.per_series_mae, cur_mae], axis=0
)
if 'probabilistic' in ensemble:
cur_spl = model_error.per_series_metrics.loc['spl']
cur_spl = pd.DataFrame(cur_spl).transpose()
cur_spl.index = [model_id]
template_result.per_series_spl = pd.concat(
[template_result.per_series_spl, cur_spl], axis=0
)
if 'distance' in ensemble:
cur_smape = model_error.per_timestamp.loc['weighted_smape']
cur_smape = pd.DataFrame(cur_smape).transpose()
cur_smape.index = [model_id]
template_result.per_timestamp_smape = pd.concat(
[template_result.per_timestamp_smape, cur_smape], axis=0
)
if 'hdist' in ensemble:
cur_rmse1 = model_error.per_series_metrics.loc['rmse1']
cur_rmse2 = model_error.per_series_metrics.loc['rmse2']
cur_rmse1 = pd.DataFrame(cur_rmse1).transpose()
cur_rmse2 = pd.DataFrame(cur_rmse2).transpose()
cur_rmse1.index = [model_id]
cur_rmse2.index = [model_id]
template_result.per_series_rmse1 = pd.concat(
[template_result.per_series_rmse1, cur_rmse1], axis=0
)
template_result.per_series_rmse2 = pd.concat(
[template_result.per_series_rmse2, cur_rmse2], axis=0
)
except KeyboardInterrupt:
if model_interrupt:
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': "KeyboardInterrupt by user",
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
else:
raise KeyboardInterrupt
except Exception as e:
if verbose >= 0:
print(
'Template Eval Error: {} in model {}: {}'.format(
(repr(e)), template_result.model_count, model_str
)
)
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': repr(e),
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
return template_result
def RandomTemplate(
n: int = 10,
model_list: list = [
'ZeroesNaive',
'LastValueNaive',
'AverageValueNaive',
'GLS',
'GLM',
'ETS',
'ARIMA',
'FBProphet',
'RollingRegression',
'GluonTS',
'UnobservedComponents',
'VARMAX',
'VECM',
'DynamicFactor',
],
):
"""
Returns a template dataframe of randomly generated transformations, models, and hyperparameters.
Args:
n (int): number of random models to return
"""
n = abs(int(n))
template = pd.DataFrame()
counter = 0
while len(template.index) < n:
model_str = np.random.choice(model_list)
param_dict = ModelMonster(model_str).get_new_params()
trans_dict = RandomTransform()
row = pd.DataFrame(
{
'Model': model_str,
'ModelParameters': json.dumps(param_dict),
'TransformationParameters': json.dumps(trans_dict),
'Ensemble': 0,
},
index=[0],
)
template = pd.concat([template, row], axis=0, ignore_index=True)
template.drop_duplicates(inplace=True)
counter += 1
if counter > (n * 3):
break
return template
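# Illustrative sketch (not part of the library): generate a handful of random model
# candidates restricted to cheap naive models; the model list here is just an example.
def _example_random_template():
    return RandomTemplate(
        n=5, model_list=['ZeroesNaive', 'LastValueNaive', 'AverageValueNaive']
    )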
def UniqueTemplates(
existing_templates,
new_possibilities,
selection_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
    Returns unique dataframe rows from new_possibilities not in existing_templates.
    Args:
        selection_cols (list): list of column names to use to judge uniqueness/match on
"""
keys = list(new_possibilities[selection_cols].columns.values)
idx1 = existing_templates.copy().set_index(keys).index
idx2 = new_possibilities.set_index(keys).index
new_template = new_possibilities[~idx2.isin(idx1)]
return new_template
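# Illustrative sketch (not part of the library): UniqueTemplates() is an index-based
# anti-join -- rows of the new frame whose key columns already exist in the old frame
# are dropped. The rows below are hypothetical.
def _example_anti_join():
    old = pd.DataFrame(
        {'Model': ['ETS'], 'ModelParameters': ['{}'],
         'TransformationParameters': ['{}'], 'Ensemble': [0]}
    )
    new = pd.DataFrame(
        {'Model': ['ETS', 'GLS'], 'ModelParameters': ['{}', '{}'],
         'TransformationParameters': ['{}', '{}'], 'Ensemble': [0, 0]}
    )
    return UniqueTemplates(old, new)  # -> only the GLS row survives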
def dict_recombination(a: dict, b: dict):
"""Recombine two dictionaries with identical keys. Return new dict."""
b_keys = [*b]
key_size = int(len(b_keys) / 2) if len(b_keys) > 1 else 1
bs_keys = np.random.choice(b_keys, size=key_size)
b_prime = {k: b[k] for k in bs_keys}
c = {**a, **b_prime} # overwrites with B
return c
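# Illustrative sketch (not part of the library): roughly half of b's keys, chosen at
# random, overwrite the matching keys in a. The parameter values are hypothetical.
def _example_dict_recombination():
    a = {'trend': 'add', 'seasonal': 'mul', 'damped': False, 'periods': 7}
    b = {'trend': 'mul', 'seasonal': None, 'damped': True, 'periods': 12}
    return dict_recombination(a, b)  # same keys as a, ~half the values taken from b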
def trans_dict_recomb(dict_array):
"""Recombine two transformation param dictionaries from array of dicts."""
r_sel = np.random.choice(dict_array, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
out_keys = ['outlier_method', 'outlier_threshold', 'outlier_position']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in out_keys}}
mid_trans_keys = ['second_transformation', 'transformation_param']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
mid_trans_keys = ['third_transformation', 'transformation_param2']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
disc_keys = ['discretization', 'n_bins']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in disc_keys}}
disc_keys = ['grouping', 'reconciliation']
current_dict = np.random.choice([a, b], size=1).item()
if all([x in current_dict.keys() for x in disc_keys]):
c = {**c, **{k: current_dict[k] for k in disc_keys}}
return c
def NewGeneticTemplate(
model_results,
submitted_parameters,
sort_column: str = "smape_weighted",
sort_ascending: bool = True,
max_results: int = 50,
max_per_model_class: int = 5,
top_n: int = 50,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Return new template given old template with model accuracies.
Args:
model_results (pandas.DataFrame): models that have actually been run
        submitted_parameters (pandas.DataFrame): models tried (may have returned different parameters than results)
"""
new_template = pd.DataFrame()
# filter existing templates
sorted_results = model_results[model_results['Ensemble'] == 0].copy()
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
)
sorted_results = sorted_results.drop_duplicates(subset=template_cols, keep='first')
if str(max_per_model_class).isdigit():
sorted_results = (
sorted_results.sort_values(sort_column, ascending=sort_ascending)
.groupby('Model')
.head(max_per_model_class)
.reset_index()
)
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
).head(top_n)
no_params = ['ZeroesNaive', 'LastValueNaive', 'GLS']
recombination_approved = [
'SeasonalNaive',
'MotifSimulation',
"ETS",
'DynamicFactor',
'VECM',
'VARMAX',
'GLM',
'ARIMA',
'FBProphet',
'GluonTS',
'RollingRegression',
'VAR',
'WindowRegression',
'TensorflowSTS',
'TFPRegression',
]
borrow = ['ComponentAnalysis']
best = json.loads(sorted_results.iloc[0, :]['TransformationParameters'])
for model_type in sorted_results['Model'].unique():
if model_type in no_params:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_param = current_ops.iloc[0, :]['ModelParameters']
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_param,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
elif model_type in recombination_approved:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 4
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
# select the best model of this type
fir = json.loads(current_ops.iloc[0, :]['ModelParameters'])
cur_len = current_ops.shape[0]
if cur_len > 1:
# select randomly from best of data, doesn't handle lengths < 2
top_r = np.floor((cur_len / 5) + 2)
r_id = np.random.randint(1, top_r)
sec = json.loads(current_ops.iloc[r_id, :]['ModelParameters'])
else:
sec = ModelMonster(model_type).get_new_params()
# generate new random parameters ('mutations')
r = ModelMonster(model_type).get_new_params()
r2 = ModelMonster(model_type).get_new_params()
arr = [fir, sec, r2, r]
model_dicts = list()
# recombine best and random to create new generation
for _ in range(n):
r_sel = np.random.choice(arr, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
else:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_dicts = list()
for _ in range(n):
c = ModelMonster(model_type).get_new_params()
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
new_template = pd.concat(
[new_template, new_row], axis=0, ignore_index=True, sort=False
)
"""
# recombination of transforms across models by shifting transforms
recombination = sorted_results.tail(len(sorted_results.index) - 1).copy()
recombination['TransformationParameters'] = sorted_results['TransformationParameters'].shift(1).tail(len(sorted_results.index) - 1)
new_template = pd.concat([new_template,
recombination.head(top_n)[template_cols]],
axis=0, ignore_index=True, sort=False)
"""
# remove generated models which have already been tried
sorted_results = pd.concat(
[submitted_parameters, sorted_results], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
new_template = UniqueTemplates(
sorted_results, new_template, selection_cols=template_cols
).head(max_results)
return new_template
def validation_aggregation(validation_results):
"""Aggregate a TemplateEvalObject."""
groupby_cols = [
'ID',
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
]
col_aggs = {
'Runs': 'sum',
'smape': 'mean',
'mae': 'mean',
'rmse': 'mean',
'containment': 'mean',
'spl': 'mean',
'contour': 'mean',
'smape_weighted': 'mean',
'mae_weighted': 'mean',
'rmse_weighted': 'mean',
'containment_weighted': 'mean',
'contour_weighted': 'mean',
'spl_weighted': 'mean',
'TotalRuntimeSeconds': 'mean',
'Score': 'mean',
}
validation_results.model_results['TotalRuntimeSeconds'] = (
validation_results.model_results['TotalRuntime'].dt.seconds + 1
)
validation_results.model_results = validation_results.model_results[
pd.isnull(validation_results.model_results['Exceptions'])
]
validation_results.model_results = validation_results.model_results.replace(
[np.inf, -np.inf], np.nan
)
validation_results.model_results = validation_results.model_results.groupby(
groupby_cols
).agg(col_aggs)
validation_results.model_results = validation_results.model_results.reset_index(
drop=False
)
return validation_results
def generate_score(
model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9
):
"""Generate score based on relative accuracies."""
try:
smape_weighting = metric_weighting['smape_weighting']
except KeyError:
smape_weighting = 1
try:
mae_weighting = metric_weighting['mae_weighting']
except KeyError:
mae_weighting = 0
try:
rmse_weighting = metric_weighting['rmse_weighting']
except KeyError:
rmse_weighting = 0
try:
containment_weighting = metric_weighting['containment_weighting']
except KeyError:
containment_weighting = 0
try:
runtime_weighting = metric_weighting['runtime_weighting'] * 0.1
except KeyError:
runtime_weighting = 0
try:
spl_weighting = metric_weighting['spl_weighting']
except KeyError:
spl_weighting = 0
try:
contour_weighting = metric_weighting['contour_weighting']
except KeyError:
contour_weighting = 0
# handle various runtime information records
if 'TotalRuntimeSeconds' in model_results.columns:
if 'TotalRuntime' in model_results.columns:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntime'].dt.seconds,
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntimeSeconds'].max(),
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = model_results['TotalRuntime'].dt.seconds
# generate minimizing scores, where smaller = better accuracy
try:
model_results = model_results.replace([np.inf, -np.inf], np.nan)
# model_results = model_results.fillna(value=model_results.max(axis=0))
smape_score = model_results['smape_weighted'] / (
model_results['smape_weighted'].min(skipna=True) + 1
) # smaller better
rmse_scaler = model_results['rmse_weighted'].median(skipna=True)
rmse_scaler = 1 if rmse_scaler == 0 else rmse_scaler
rmse_score = model_results['rmse_weighted'] / rmse_scaler
mae_scaler = model_results['mae_weighted'].median(skipna=True)
mae_scaler = 1 if mae_scaler == 0 else mae_scaler
mae_score = model_results['mae_weighted'] / mae_scaler
containment_score = (
abs(prediction_interval - model_results['containment'])
) + 1 # from 1 to 2, smaller better
runtime = model_results['TotalRuntimeSeconds'] + 120
runtime_score = runtime / (runtime.min(skipna=True)) # smaller better
spl_score = model_results['spl_weighted'] / (
model_results['spl_weighted'].min(skipna=True) + 1
) # smaller better
contour_score = (
(1 / (model_results['contour_weighted']))
.replace([np.inf, -np.inf, np.nan], 10)
.clip(upper=10)
)
except KeyError:
raise KeyError(
"Inconceivable! Evaluation Metrics are missing and all models have failed, by an error in TemplateWizard or metrics. A new template may help, or an adjusted model_list."
)
return (
(smape_score * smape_weighting)
+ (mae_score * mae_weighting)
+ (rmse_score * rmse_weighting)
+ (containment_score * containment_weighting)
+ (runtime_score * runtime_weighting)
+ (spl_score * spl_weighting)
+ (contour_score * contour_weighting)
)
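# Illustrative sketch (not part of the library): with only smape and runtime weighted,
# the score is a weighted sum of ratios against the best observed values, so smaller
# is better. The numbers and weights below are hypothetical.
def _example_score_arithmetic():
    smape_weighted = pd.Series([10.0, 20.0])
    runtime = pd.Series([5.0, 50.0]) + 120
    smape_score = smape_weighted / (smape_weighted.min() + 1)  # [0.909, 1.818]
    runtime_score = runtime / runtime.min()                    # [1.0, 1.36]
    return (smape_score * 1.0) + (runtime_score * 0.1)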
| 37.907966
| 181
| 0.582397
|
"""Mid-level helper functions for AutoTS."""
import numpy as np
import pandas as pd
import datetime
import json
from hashlib import md5
from autots.evaluator.metrics import PredictionEval
from autots.tools.transform import RandomTransform
def seasonal_int(include_one: bool = False):
"""Generate a random integer of typical seasonalities."""
if include_one:
lag = np.random.choice(
a=[
'random_int',
1,
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.10,
0.05,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
else:
lag = np.random.choice(
a=[
'random_int',
2,
4,
7,
10,
12,
24,
28,
60,
96,
168,
364,
1440,
420,
52,
84,
],
size=1,
p=[
0.15,
0.05,
0.05,
0.15,
0.01,
0.1,
0.1,
0.1,
0.1,
0.04,
0.01,
0.1,
0.01,
0.01,
0.01,
0.01,
],
).item()
if lag == 'random_int':
lag = np.random.randint(2, 100, size=1).item()
return int(lag)
def create_model_id(
model_str: str, parameter_dict: dict = {}, transformation_dict: dict = {}
):
"""Create a hash ID which should be unique to the model parameters."""
str_repr = (
str(model_str) + json.dumps(parameter_dict) + json.dumps(transformation_dict)
)
str_repr = ''.join(str_repr.split())
hashed = md5(str_repr.encode('utf-8')).hexdigest()
return hashed
class ModelObject(object):
"""Generic class for holding forecasting models.
Models should all have methods:
.fit(df, future_regressor = []) (taking a DataFrame with DatetimeIndex and n columns of n timeseries)
.predict(forecast_length = int, future_regressor = [], just_point_forecast = False)
.get_new_params() - return a dictionary of weighted random selected parameters
Args:
name (str): Model Name
frequency (str): String alias of datetime index frequency or else 'infer'
prediction_interval (float): Confidence interval for probabilistic forecast
n_jobs (int): used by some models that parallelize to multiple cores
"""
def __init__(
self,
name: str = "Uninitiated Model Name",
frequency: str = 'infer',
prediction_interval: float = 0.9,
regression_type: str = None,
fit_runtime=datetime.timedelta(0),
holiday_country: str = 'US',
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = -1,
):
self.name = name
self.frequency = frequency
self.prediction_interval = prediction_interval
self.regression_type = regression_type
self.fit_runtime = fit_runtime
self.holiday_country = holiday_country
self.random_seed = random_seed
self.verbose = verbose
self.verbose_bool = True if self.verbose > 1 else False
self.n_jobs = n_jobs
def __repr__(self):
"""Print."""
return 'ModelObject of ' + self.name + ' uses standard .fit/.predict'
def basic_profile(self, df):
"""Capture basic training details."""
self.startTime = datetime.datetime.now()
self.train_shape = df.shape
self.column_names = df.columns
self.train_last_date = df.index[-1]
if self.frequency == 'infer':
self.frequency = pd.infer_freq(df.index, warn=False)
return df
def create_forecast_index(self, forecast_length: int):
"""Generate a pd.DatetimeIndex appropriate for a new forecast.
Warnings:
            Requires ModelObject.basic_profile() to have been called as part of .fit()
"""
forecast_index = pd.date_range(
freq=self.frequency, start=self.train_last_date, periods=forecast_length + 1
)
forecast_index = forecast_index[1:]
self.forecast_index = forecast_index
return forecast_index
def get_params(self):
"""Return dict of current parameters."""
return {}
def get_new_params(self, method: str = 'random'):
"""Return dict of new parameters for parameter tuning."""
return {}
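# Illustrative sketch (not part of the library): create_forecast_index() starts a
# date_range at the last training date and drops that first element, so the forecast
# index begins one frequency step after training ends. Dates are hypothetical.
def _example_forecast_index(last_train_date='2020-01-31', forecast_length=3):
    idx = pd.date_range(freq='D', start=last_train_date, periods=forecast_length + 1)
    return idx[1:]  # -> 2020-02-01 through 2020-02-03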
class PredictionObject(object):
"""Generic class for holding forecast information."""
def __init__(
self,
model_name: str = 'Uninitiated',
forecast_length: int = 0,
forecast_index=np.nan,
forecast_columns=np.nan,
lower_forecast=np.nan,
forecast=np.nan,
upper_forecast=np.nan,
prediction_interval: float = 0.9,
predict_runtime=datetime.timedelta(0),
fit_runtime=datetime.timedelta(0),
model_parameters={},
transformation_parameters={},
transformation_runtime=datetime.timedelta(0),
):
self.model_name = model_name
self.model_parameters = model_parameters
self.transformation_parameters = transformation_parameters
self.forecast_length = forecast_length
self.forecast_index = forecast_index
self.forecast_columns = forecast_columns
self.lower_forecast = lower_forecast
self.forecast = forecast
self.upper_forecast = upper_forecast
self.prediction_interval = prediction_interval
self.predict_runtime = predict_runtime
self.fit_runtime = fit_runtime
self.transformation_runtime = transformation_runtime
def __repr__(self):
"""Print."""
if isinstance(self.forecast, pd.DataFrame):
return "Prediction object: \nReturn .forecast, \n .upper_forecast, \n .lower_forecast \n .model_parameters \n .transformation_parameters"
else:
return "Empty prediction object."
def __bool__(self):
"""bool version of class."""
if isinstance(self.forecast, pd.DataFrame):
return True
else:
return False
def total_runtime(self):
"""Combine runtimes."""
return self.fit_runtime + self.predict_runtime + self.transformation_runtime
def ModelMonster(
model: str,
parameters: dict = {},
frequency: str = 'infer',
prediction_interval: float = 0.9,
holiday_country: str = 'US',
startTimeStamps=None,
forecast_length: int = 14,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Directs strings and parameters to appropriate model objects.
Args:
model (str): Name of Model Function
parameters (dict): Dictionary of parameters to pass through to model
"""
model = str(model)
if model == 'ZeroesNaive':
from autots.models.basics import ZeroesNaive
return ZeroesNaive(frequency=frequency, prediction_interval=prediction_interval)
if model == 'LastValueNaive':
from autots.models.basics import LastValueNaive
return LastValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
if model == 'AverageValueNaive':
from autots.models.basics import AverageValueNaive
if parameters == {}:
return AverageValueNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return AverageValueNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
)
if model == 'SeasonalNaive':
from autots.models.basics import SeasonalNaive
if parameters == {}:
return SeasonalNaive(
frequency=frequency, prediction_interval=prediction_interval
)
else:
return SeasonalNaive(
frequency=frequency,
prediction_interval=prediction_interval,
method=parameters['method'],
lag_1=parameters['lag_1'],
lag_2=parameters['lag_2'],
)
if model == 'GLS':
from autots.models.statsmodels import GLS
return GLS(frequency=frequency, prediction_interval=prediction_interval)
if model == 'GLM':
from autots.models.statsmodels import GLM
if parameters == {}:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = GLM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
family=parameters['family'],
constant=parameters['constant'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ETS':
from autots.models.statsmodels import ETS
if parameters == {}:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ETS(
frequency=frequency,
prediction_interval=prediction_interval,
damped=parameters['damped'],
trend=parameters['trend'],
seasonal=parameters['seasonal'],
seasonal_periods=parameters['seasonal_periods'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'ARIMA':
from autots.models.statsmodels import ARIMA
if parameters == {}:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = ARIMA(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
p=parameters['p'],
d=parameters['d'],
q=parameters['q'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'FBProphet':
from autots.models.prophet import FBProphet
if parameters == {}:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = FBProphet(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
return model
if model == 'RollingRegression':
from autots.models.sklearn import RollingRegression
if parameters == {}:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
)
else:
model = RollingRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
holiday=parameters['holiday'],
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
regression_model=parameters['regression_model'],
mean_rolling_periods=parameters['mean_rolling_periods'],
std_rolling_periods=parameters['std_rolling_periods'],
macd_periods=parameters['macd_periods'],
max_rolling_periods=parameters['max_rolling_periods'],
min_rolling_periods=parameters['min_rolling_periods'],
ewm_alpha=parameters['ewm_alpha'],
additional_lag_periods=parameters['additional_lag_periods'],
x_transform=parameters['x_transform'],
rolling_autocorr_periods=parameters['rolling_autocorr_periods'],
abs_energy=parameters['abs_energy'],
add_date_part=parameters['add_date_part'],
polynomial_degree=parameters['polynomial_degree'],
n_jobs=n_jobs,
)
return model
if model == 'UnobservedComponents':
from autots.models.statsmodels import UnobservedComponents
if parameters == {}:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = UnobservedComponents(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
level=parameters['level'],
trend=parameters['trend'],
cycle=parameters['cycle'],
damped_cycle=parameters['damped_cycle'],
irregular=parameters['irregular'],
stochastic_trend=parameters['stochastic_trend'],
stochastic_level=parameters['stochastic_level'],
stochastic_cycle=parameters['stochastic_cycle'],
)
return model
if model == 'DynamicFactor':
from autots.models.statsmodels import DynamicFactor
if parameters == {}:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = DynamicFactor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
k_factors=parameters['k_factors'],
factor_order=parameters['factor_order'],
)
return model
if model == 'VAR':
from autots.models.statsmodels import VAR
if parameters == {}:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VAR(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
maxlags=parameters['maxlags'],
ic=parameters['ic'],
random_seed=random_seed,
verbose=verbose,
)
return model
if model == 'VECM':
from autots.models.statsmodels import VECM
if parameters == {}:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VECM(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
regression_type=parameters['regression_type'],
random_seed=random_seed,
verbose=verbose,
deterministic=parameters['deterministic'],
k_ar_diff=parameters['k_ar_diff'],
)
return model
if model == 'VARMAX':
from autots.models.statsmodels import VARMAX
if parameters == {}:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = VARMAX(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
order=parameters['order'],
trend=parameters['trend'],
)
return model
if model == 'GluonTS':
from autots.models.gluonts import GluonTS
if parameters == {}:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = GluonTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
gluon_model=parameters['gluon_model'],
epochs=parameters['epochs'],
learning_rate=parameters['learning_rate'],
forecast_length=forecast_length,
)
return model
if model == 'TSFreshRegressor':
from autots.models.tsfresh import TSFreshRegressor
if parameters == {}:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TSFreshRegressor(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
max_timeshift=parameters['max_timeshift'],
regression_model=parameters['regression_model'],
feature_selection=parameters['feature_selection'],
)
return model
if model == 'MotifSimulation':
from autots.models.basics import MotifSimulation
if parameters == {}:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = MotifSimulation(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
phrase_len=parameters['phrase_len'],
comparison=parameters['comparison'],
shared=parameters['shared'],
distance_metric=parameters['distance_metric'],
max_motifs=parameters['max_motifs'],
recency_weighting=parameters['recency_weighting'],
cutoff_threshold=parameters['cutoff_threshold'],
cutoff_minimum=parameters['cutoff_minimum'],
point_method=parameters['point_method'],
)
return model
if model == 'WindowRegression':
from autots.models.sklearn import WindowRegression
if parameters == {}:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
else:
model = WindowRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
window_size=parameters['window_size'],
regression_model=parameters['regression_model'],
input_dim=parameters['input_dim'],
output_dim=parameters['output_dim'],
normalize_window=parameters['normalize_window'],
shuffle=parameters['shuffle'],
max_windows=parameters['max_windows'],
forecast_length=forecast_length,
n_jobs=n_jobs,
)
return model
if model == 'TensorflowSTS':
from autots.models.tfp import TensorflowSTS
if parameters == {}:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TensorflowSTS(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
seasonal_periods=parameters['seasonal_periods'],
ar_order=parameters['ar_order'],
trend=parameters['trend'],
fit_method=parameters['fit_method'],
num_steps=parameters['num_steps'],
)
return model
if model == 'TFPRegression':
from autots.models.tfp import TFPRegression
if parameters == {}:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
)
else:
model = TFPRegression(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
kernel_initializer=parameters['kernel_initializer'],
epochs=parameters['epochs'],
batch_size=parameters['batch_size'],
optimizer=parameters['optimizer'],
loss=parameters['loss'],
dist=parameters['dist'],
regression_type=parameters['regression_type'],
)
return model
if model == 'ComponentAnalysis':
from autots.models.sklearn import ComponentAnalysis
if parameters == {}:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
)
else:
model = ComponentAnalysis(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
model=parameters['model'],
model_parameters=parameters['model_parameters'],
decomposition=parameters['decomposition'],
n_components=parameters['n_components'],
forecast_length=forecast_length,
)
return model
else:
raise AttributeError(
("Model String '{}' not a recognized model type").format(model)
)
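# Minimal usage sketch for the dispatcher above (assumes AutoTS and its basics
# module are importable); the daily frequency alias is an assumption for the example.
def _example_model_monster():
    """Instantiate a naive model through ModelMonster (illustration only)."""
    return ModelMonster(
        'LastValueNaive', parameters={}, frequency='D', prediction_interval=0.9
    )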
def ModelPrediction(
df_train,
forecast_length: int,
transformation_dict: dict,
model_str: str,
parameter_dict: dict,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Feed parameters into modeling pipeline
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
n_jobs (int): number of processes
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
transformationStartTime = datetime.datetime.now()
from autots.tools.transform import GeneralTransformer
try:
coerce_integer = transformation_dict['coerce_integer']
grouping = transformation_dict['grouping']
if grouping == 'user' and grouping_ids is None:
grouping = 'kmeans5'
transformation_dict['grouping'] = 'kmeans5'
reconciliation = transformation_dict['reconciliation']
except Exception:
coerce_integer = False
grouping = None
grouping_ids = None
reconciliation = None
transformer_object = GeneralTransformer(
outlier_method=transformation_dict['outlier_method'],
outlier_threshold=transformation_dict['outlier_threshold'],
outlier_position=transformation_dict['outlier_position'],
fillna=transformation_dict['fillna'],
transformation=transformation_dict['transformation'],
detrend=transformation_dict['detrend'],
second_transformation=transformation_dict['second_transformation'],
transformation_param=transformation_dict['transformation_param'],
third_transformation=transformation_dict['third_transformation'],
transformation_param2=transformation_dict['transformation_param2'],
fourth_transformation=transformation_dict['fourth_transformation'],
discretization=transformation_dict['discretization'],
n_bins=transformation_dict['n_bins'],
grouping=grouping,
grouping_ids=grouping_ids,
reconciliation=reconciliation,
coerce_integer=coerce_integer,
).fit(df_train)
df_train_transformed = transformer_object.transform(df_train)
# slice the context, ie shorten the amount of data available.
if transformation_dict['context_slicer'] not in [None, 'None']:
from autots.tools.transform import simple_context_slicer
df_train_transformed = simple_context_slicer(
df_train_transformed,
method=transformation_dict['context_slicer'],
forecast_length=forecast_length,
)
# make sure regressor has same length. This could be a problem if wrong size regressor is passed.
if len(future_regressor_train) > 0:
future_regressor_train = future_regressor_train.tail(
df_train_transformed.shape[0]
)
transformation_runtime = datetime.datetime.now() - transformationStartTime
# from autots.evaluator.auto_model import ModelMonster
model = ModelMonster(
model_str,
parameters=parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
df_forecast = model.predict(
forecast_length=forecast_length, future_regressor=future_regressor_forecast
)
if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
raise ValueError(
"Model {} returned NaN for one or more series".format(model_str)
)
transformationStartTime = datetime.datetime.now()
# Inverse the transformations
df_forecast.forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.lower_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.lower_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.upper_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.upper_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.transformation_parameters = transformation_dict
# Remove negatives if desired
    # There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower = 0), not sure which is faster
if no_negatives:
df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
df_forecast.forecast = df_forecast.forecast.clip(lower=0)
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
if constraint is not None:
if verbose > 2:
print("Using constraint.")
constraint = float(constraint)
train_std = df_train.std(axis=0)
train_min = df_train.min(axis=0) - (constraint * train_std)
train_max = df_train.max(axis=0) + (constraint * train_std)
df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
transformation_runtime = transformation_runtime + (
datetime.datetime.now() - transformationStartTime
)
df_forecast.transformation_runtime = transformation_runtime
return df_forecast
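# Hedged end-to-end sketch for a single model: RandomTransform (imported above)
# supplies a complete transformation dict with every key ModelPrediction expects.
# The helper name and argument values are made up for the example; df_train is
# assumed to be a wide numeric DataFrame with a DatetimeIndex.
def _example_model_prediction(df_train):
    """Run one naive model through the transform/fit/inverse pipeline (illustration only)."""
    return ModelPrediction(
        df_train,
        forecast_length=14,
        transformation_dict=RandomTransform(),
        model_str='LastValueNaive',
        parameter_dict={},
        frequency='infer',
        no_negatives=True,
    )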
class TemplateEvalObject(object):
"""Object to contain all your failures!."""
def __init__(
self,
model_results=pd.DataFrame(),
per_timestamp_smape=pd.DataFrame(),
per_series_mae=pd.DataFrame(),
per_series_spl=pd.DataFrame(),
per_series_rmse1=pd.DataFrame(),
per_series_rmse2=pd.DataFrame(),
model_count: int = 0,
):
self.model_results = model_results
self.model_count = model_count
self.per_series_mae = per_series_mae
self.per_series_spl = per_series_spl
self.per_series_rmse1 = per_series_rmse1
self.per_series_rmse2 = per_series_rmse2
self.per_timestamp_smape = per_timestamp_smape
def __repr__(self):
"""Print."""
return 'Results objects, result table at self.model_results (pd.df)'
def concat(self, another_eval):
"""Merge another TemplateEvalObject onto this one."""
self.model_results = pd.concat(
[self.model_results, another_eval.model_results],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
self.per_series_mae = pd.concat(
[self.per_series_mae, another_eval.per_series_mae], axis=0, sort=False
)
self.per_series_spl = pd.concat(
[self.per_series_spl, another_eval.per_series_spl], axis=0, sort=False
)
self.per_series_rmse1 = pd.concat(
[self.per_series_rmse1, another_eval.per_series_rmse1], axis=0, sort=False
)
self.per_series_rmse2 = pd.concat(
[self.per_series_rmse2, another_eval.per_series_rmse2], axis=0, sort=False
)
self.per_timestamp_smape = pd.concat(
[self.per_timestamp_smape, another_eval.per_timestamp_smape],
axis=0,
sort=False,
)
self.model_count = self.model_count + another_eval.model_count
return self
def save(self, filename):
"""Save results to a file."""
if '.csv' in filename:
self.model_results.to_csv(filename, index=False)
elif '.pickle' in filename:
import pickle
with open(filename, "wb") as f:
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
else:
raise ValueError("filename not .csv or .pickle")
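# Illustration of how evaluation objects from successive validation rounds are
# typically merged and persisted; the file name below is hypothetical.
def _example_merge_results(eval_a, eval_b):
    """Concatenate two TemplateEvalObject instances and save the result table (illustration only)."""
    merged = eval_a.concat(eval_b)
    merged.save("example_model_results.csv")
    return merged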
def unpack_ensemble_models(
template,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
keep_ensemble: bool = True,
recursive: bool = False,
):
"""Take ensemble models from template and add as new rows."""
ensemble_template = pd.DataFrame()
template['Ensemble'] = np.where(
((template['Model'] == 'Ensemble') & (template['Ensemble'] < 1)),
1,
template['Ensemble'],
)
for index, value in template[template['Ensemble'] != 0][
'ModelParameters'
].iteritems():
model_dict = json.loads(value)['models']
model_df = pd.DataFrame.from_dict(model_dict, orient='index')
model_df = model_df.rename_axis('ID').reset_index(drop=False)
model_df['Ensemble'] = 0
# unpack nested ensembles, if recursive specified
if recursive and 'Ensemble' in model_df['Model'].tolist():
model_df = pd.concat(
[
unpack_ensemble_models(
model_df, recursive=True, template_cols=template_cols
),
model_df,
],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
ensemble_template = pd.concat(
[ensemble_template, model_df], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
if not keep_ensemble:
template = template[template['Ensemble'] == 0]
template = pd.concat(
[template, ensemble_template], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
template = template.drop_duplicates(subset=template_cols)
return template
def PredictWitch(
template,
df_train,
forecast_length: int,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""Takes numeric data, returns numeric forecasts.
Only one model (albeit potentially an ensemble)!
Well, she turned me into a newt.
A newt?
I got better. -Python
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
template_cols (list): column names of columns used as model template
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object):
"""
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
template = template.head(1)
for index_upper, row_upper in template.iterrows():
# if an ensemble
if row_upper['Model'] == 'Ensemble':
from autots.models.ensemble import EnsembleForecast
forecasts_list = []
forecasts_runtime = []
forecasts = []
upper_forecasts = []
lower_forecasts = []
ens_model_str = row_upper['Model']
ens_params = json.loads(row_upper['ModelParameters'])
ens_template = unpack_ensemble_models(
template, template_cols, keep_ensemble=False
)
for index, row in ens_template.iterrows():
# recursive recursion!
if verbose > 2:
total_ens = ens_template.shape[0]
print(
"Ensemble component {} of {} ".format(
                            str(index), str(total_ens)
)
)
df_forecast = PredictWitch(
row,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
forecasts_list.extend([model_id])
forecasts_runtime.extend([total_runtime])
forecasts.extend([df_forecast.forecast])
upper_forecasts.extend([df_forecast.upper_forecast])
lower_forecasts.extend([df_forecast.lower_forecast])
ens_forecast = EnsembleForecast(
ens_model_str,
ens_params,
forecasts_list=forecasts_list,
forecasts=forecasts,
lower_forecasts=lower_forecasts,
upper_forecasts=upper_forecasts,
forecasts_runtime=forecasts_runtime,
prediction_interval=prediction_interval,
)
return ens_forecast
# if not an ensemble
else:
model_str = row_upper['Model']
parameter_dict = json.loads(row_upper['ModelParameters'])
transformation_dict = json.loads(row_upper['TransformationParameters'])
df_forecast = ModelPrediction(
df_train,
forecast_length,
transformation_dict,
model_str,
parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
grouping_ids=grouping_ids,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
startTimeStamps=startTimeStamps,
n_jobs=n_jobs,
)
return df_forecast
def TemplateWizard(
template,
df_train,
df_test,
weights,
model_count: int = 0,
ensemble: str = True,
forecast_length: int = 14,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
validation_round: int = 0,
model_interrupt: bool = False,
grouping_ids=None,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Take Template, returns Results.
There are some who call me... Tim. - Python
Args:
        template (pandas.DataFrame): containing model str, and json of transformations and hyperparameters
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
df_test (pandas.DataFrame): dataframe of actual values of (forecast length * n series)
weights (dict): key = column/series_id, value = weight
ensemble (str): desc of ensemble types to prepare metric collection
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
validation_round (int): int passed to record current validation.
model_interrupt (bool): if True, keyboard interrupts are caught and only break current model eval.
template_cols (list): column names of columns used as model template
Returns:
TemplateEvalObject
"""
ensemble = str(ensemble)
template_result = TemplateEvalObject()
template_result.model_count = model_count
if isinstance(template, pd.Series):
template = pd.DataFrame(template).transpose()
# template = unpack_ensemble_models(template, template_cols, keep_ensemble = False)
for index, row in template.iterrows():
try:
model_str = row['Model']
parameter_dict = json.loads(row['ModelParameters'])
transformation_dict = json.loads(row['TransformationParameters'])
ensemble_input = row['Ensemble']
current_template = pd.DataFrame(row).transpose()
template_result.model_count += 1
if verbose > 0:
if verbose > 1:
print(
"Model Number: {} with model {} in Validation {} with params {} and transformations {}".format(
str(template_result.model_count),
model_str,
str(validation_round),
json.dumps(parameter_dict),
json.dumps(transformation_dict),
)
)
else:
print(
"Model Number: {} with model {} in Validation {} ".format(
str(template_result.model_count),
model_str,
str(validation_round),
)
)
df_forecast = PredictWitch(
current_template,
df_train=df_train,
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
no_negatives=no_negatives,
constraint=constraint,
future_regressor_train=future_regressor_train,
future_regressor_forecast=future_regressor_forecast,
holiday_country=holiday_country,
startTimeStamps=startTimeStamps,
grouping_ids=grouping_ids,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
template_cols=template_cols,
)
per_ts = True if 'distance' in ensemble else False
if 'hdist' in ensemble:
dist_n = int(np.ceil(0.3 * forecast_length))
else:
dist_n = None
model_error = PredictionEval(
df_forecast,
df_test,
series_weights=weights,
df_train=df_train,
per_timestamp_errors=per_ts,
dist_n=dist_n,
)
model_id = create_model_id(
df_forecast.model_name,
df_forecast.model_parameters,
df_forecast.transformation_parameters,
)
total_runtime = (
df_forecast.fit_runtime
+ df_forecast.predict_runtime
+ df_forecast.transformation_runtime
)
result = pd.DataFrame(
{
'ID': model_id,
'Model': df_forecast.model_name,
'ModelParameters': json.dumps(df_forecast.model_parameters),
'TransformationParameters': json.dumps(
df_forecast.transformation_parameters
),
'TransformationRuntime': df_forecast.transformation_runtime,
'FitRuntime': df_forecast.fit_runtime,
'PredictRuntime': df_forecast.predict_runtime,
'TotalRuntime': total_runtime,
'Ensemble': ensemble_input,
'Exceptions': np.nan,
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
a = pd.DataFrame(
model_error.avg_metrics_weighted.rename(lambda x: x + '_weighted')
).transpose()
result = pd.concat(
[result, pd.DataFrame(model_error.avg_metrics).transpose(), a], axis=1
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
if 'horizontal' in ensemble:
cur_mae = model_error.per_series_metrics.loc['mae']
cur_mae = pd.DataFrame(cur_mae).transpose()
cur_mae.index = [model_id]
template_result.per_series_mae = pd.concat(
[template_result.per_series_mae, cur_mae], axis=0
)
if 'probabilistic' in ensemble:
cur_spl = model_error.per_series_metrics.loc['spl']
cur_spl = pd.DataFrame(cur_spl).transpose()
cur_spl.index = [model_id]
template_result.per_series_spl = pd.concat(
[template_result.per_series_spl, cur_spl], axis=0
)
if 'distance' in ensemble:
cur_smape = model_error.per_timestamp.loc['weighted_smape']
cur_smape = pd.DataFrame(cur_smape).transpose()
cur_smape.index = [model_id]
template_result.per_timestamp_smape = pd.concat(
[template_result.per_timestamp_smape, cur_smape], axis=0
)
if 'hdist' in ensemble:
cur_rmse1 = model_error.per_series_metrics.loc['rmse1']
cur_rmse2 = model_error.per_series_metrics.loc['rmse2']
cur_rmse1 = pd.DataFrame(cur_rmse1).transpose()
cur_rmse2 = pd.DataFrame(cur_rmse2).transpose()
cur_rmse1.index = [model_id]
cur_rmse2.index = [model_id]
template_result.per_series_rmse1 = pd.concat(
[template_result.per_series_rmse1, cur_rmse1], axis=0
)
template_result.per_series_rmse2 = pd.concat(
[template_result.per_series_rmse2, cur_rmse2], axis=0
)
except KeyboardInterrupt:
if model_interrupt:
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': "KeyboardInterrupt by user",
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
else:
raise KeyboardInterrupt
except Exception as e:
if verbose >= 0:
print(
'Template Eval Error: {} in model {}: {}'.format(
(repr(e)), template_result.model_count, model_str
)
)
result = pd.DataFrame(
{
'ID': create_model_id(
model_str, parameter_dict, transformation_dict
),
'Model': model_str,
'ModelParameters': json.dumps(parameter_dict),
'TransformationParameters': json.dumps(transformation_dict),
'Ensemble': ensemble_input,
'TransformationRuntime': datetime.timedelta(0),
'FitRuntime': datetime.timedelta(0),
'PredictRuntime': datetime.timedelta(0),
'TotalRuntime': datetime.timedelta(0),
'Exceptions': repr(e),
'Runs': 1,
'ValidationRound': validation_round,
},
index=[0],
)
template_result.model_results = pd.concat(
[template_result.model_results, result],
axis=0,
ignore_index=True,
sort=False,
).reset_index(drop=True)
return template_result
def RandomTemplate(
n: int = 10,
model_list: list = [
'ZeroesNaive',
'LastValueNaive',
'AverageValueNaive',
'GLS',
'GLM',
'ETS',
'ARIMA',
'FBProphet',
'RollingRegression',
'GluonTS',
'UnobservedComponents',
'VARMAX',
'VECM',
'DynamicFactor',
],
):
"""
Returns a template dataframe of randomly generated transformations, models, and hyperparameters.
Args:
n (int): number of random models to return
"""
n = abs(int(n))
template = pd.DataFrame()
counter = 0
while len(template.index) < n:
model_str = np.random.choice(model_list)
param_dict = ModelMonster(model_str).get_new_params()
trans_dict = RandomTransform()
row = pd.DataFrame(
{
'Model': model_str,
'ModelParameters': json.dumps(param_dict),
'TransformationParameters': json.dumps(trans_dict),
'Ensemble': 0,
},
index=[0],
)
template = pd.concat([template, row], axis=0, ignore_index=True)
template.drop_duplicates(inplace=True)
counter += 1
if counter > (n * 3):
break
return template
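# Hedged sketch tying RandomTemplate to TemplateWizard: df is assumed to be a wide
# numeric DataFrame with a DatetimeIndex, weights are equal, and the 'simple'
# ensemble string is only an example value.
def _example_template_run(df):
    """Evaluate a small random template on a naive train/test split (illustration only)."""
    template = RandomTemplate(n=5, model_list=['LastValueNaive', 'AverageValueNaive'])
    df_train, df_test = df.iloc[:-14], df.iloc[-14:]
    weights = {col: 1 for col in df.columns}
    return TemplateWizard(
        template,
        df_train,
        df_test,
        weights,
        forecast_length=14,
        ensemble='simple',
    )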
def UniqueTemplates(
existing_templates,
new_possibilities,
selection_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
    Returns unique dataframe rows from new_possibilities not in existing_templates.
    Args:
        selection_cols (list): list of column names to use to judge uniqueness/match on
"""
keys = list(new_possibilities[selection_cols].columns.values)
idx1 = existing_templates.copy().set_index(keys).index
idx2 = new_possibilities.set_index(keys).index
new_template = new_possibilities[~idx2.isin(idx1)]
return new_template
def dict_recombination(a: dict, b: dict):
"""Recombine two dictionaries with identical keys. Return new dict."""
b_keys = [*b]
key_size = int(len(b_keys) / 2) if len(b_keys) > 1 else 1
bs_keys = np.random.choice(b_keys, size=key_size)
b_prime = {k: b[k] for k in bs_keys}
c = {**a, **b_prime} # overwrites with B
return c
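# Tiny illustration: recombination keeps all keys of `a` and overwrites a randomly
# chosen half of them with values from `b`, so the exact result varies run to run.
def _example_dict_recombination():
    """Recombine two parameter dicts with identical keys (illustration only)."""
    a = {"trend": "add", "damped": True}
    b = {"trend": None, "damped": False}
    return dict_recombination(a, b)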
def trans_dict_recomb(dict_array):
"""Recombine two transformation param dictionaries from array of dicts."""
r_sel = np.random.choice(dict_array, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
out_keys = ['outlier_method', 'outlier_threshold', 'outlier_position']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in out_keys}}
mid_trans_keys = ['second_transformation', 'transformation_param']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
mid_trans_keys = ['third_transformation', 'transformation_param2']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in mid_trans_keys}}
disc_keys = ['discretization', 'n_bins']
current_dict = np.random.choice([a, b], size=1).item()
c = {**c, **{k: current_dict[k] for k in disc_keys}}
disc_keys = ['grouping', 'reconciliation']
current_dict = np.random.choice([a, b], size=1).item()
if all([x in current_dict.keys() for x in disc_keys]):
c = {**c, **{k: current_dict[k] for k in disc_keys}}
return c
def _trans_dicts(current_ops, best=None, n: int = 5):
fir = json.loads(current_ops.iloc[0, :]['TransformationParameters'])
cur_len = current_ops.shape[0]
if cur_len > 1:
# select randomly from best of data, doesn't handle lengths < 2
top_r = np.floor((cur_len / 5) + 2)
r_id = np.random.randint(1, top_r)
sec = json.loads(current_ops.iloc[r_id, :]['TransformationParameters'])
else:
sec = RandomTransform()
r = RandomTransform()
if best is None:
best = RandomTransform()
arr = [fir, sec, best, r]
trans_dicts = [json.dumps(trans_dict_recomb(arr)) for _ in range(n)]
return trans_dicts
def NewGeneticTemplate(
model_results,
submitted_parameters,
sort_column: str = "smape_weighted",
sort_ascending: bool = True,
max_results: int = 50,
max_per_model_class: int = 5,
top_n: int = 50,
template_cols: list = [
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
],
):
"""
Return new template given old template with model accuracies.
Args:
model_results (pandas.DataFrame): models that have actually been run
        submitted_parameters (pandas.DataFrame): models tried (may have returned different parameters to results)
"""
new_template = pd.DataFrame()
# filter existing templates
sorted_results = model_results[model_results['Ensemble'] == 0].copy()
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
)
sorted_results = sorted_results.drop_duplicates(subset=template_cols, keep='first')
if str(max_per_model_class).isdigit():
sorted_results = (
sorted_results.sort_values(sort_column, ascending=sort_ascending)
.groupby('Model')
.head(max_per_model_class)
.reset_index()
)
sorted_results = sorted_results.sort_values(
by=sort_column, ascending=sort_ascending, na_position='last'
).head(top_n)
no_params = ['ZeroesNaive', 'LastValueNaive', 'GLS']
recombination_approved = [
'SeasonalNaive',
'MotifSimulation',
"ETS",
'DynamicFactor',
'VECM',
'VARMAX',
'GLM',
'ARIMA',
'FBProphet',
'GluonTS',
'RollingRegression',
'VAR',
'WindowRegression',
'TensorflowSTS',
'TFPRegression',
]
borrow = ['ComponentAnalysis']
best = json.loads(sorted_results.iloc[0, :]['TransformationParameters'])
for model_type in sorted_results['Model'].unique():
if model_type in no_params:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_param = current_ops.iloc[0, :]['ModelParameters']
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_param,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
elif model_type in recombination_approved:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 4
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
# select the best model of this type
fir = json.loads(current_ops.iloc[0, :]['ModelParameters'])
cur_len = current_ops.shape[0]
if cur_len > 1:
# select randomly from best of data, doesn't handle lengths < 2
top_r = np.floor((cur_len / 5) + 2)
r_id = np.random.randint(1, top_r)
sec = json.loads(current_ops.iloc[r_id, :]['ModelParameters'])
else:
sec = ModelMonster(model_type).get_new_params()
# generate new random parameters ('mutations')
r = ModelMonster(model_type).get_new_params()
r2 = ModelMonster(model_type).get_new_params()
arr = [fir, sec, r2, r]
model_dicts = list()
# recombine best and random to create new generation
for _ in range(n):
r_sel = np.random.choice(arr, size=2, replace=False)
a = r_sel[0]
b = r_sel[1]
c = dict_recombination(a, b)
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
else:
current_ops = sorted_results[sorted_results['Model'] == model_type]
n = 3
trans_dicts = _trans_dicts(current_ops, best=best, n=n)
model_dicts = list()
for _ in range(n):
c = ModelMonster(model_type).get_new_params()
model_dicts.append(json.dumps(c))
new_row = pd.DataFrame(
{
'Model': model_type,
'ModelParameters': model_dicts,
'TransformationParameters': trans_dicts,
'Ensemble': 0,
},
index=list(range(n)),
)
new_template = pd.concat(
[new_template, new_row], axis=0, ignore_index=True, sort=False
)
"""
# recombination of transforms across models by shifting transforms
recombination = sorted_results.tail(len(sorted_results.index) - 1).copy()
recombination['TransformationParameters'] = sorted_results['TransformationParameters'].shift(1).tail(len(sorted_results.index) - 1)
new_template = pd.concat([new_template,
recombination.head(top_n)[template_cols]],
axis=0, ignore_index=True, sort=False)
"""
# remove generated models which have already been tried
sorted_results = pd.concat(
[submitted_parameters, sorted_results], axis=0, ignore_index=True, sort=False
).reset_index(drop=True)
new_template = UniqueTemplates(
sorted_results, new_template, selection_cols=template_cols
).head(max_results)
return new_template
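# Hedged sketch of the genetic step: results_df is assumed to be a model_results
# table carrying the metric columns produced by TemplateWizard/validation_aggregation,
# and tried_df the templates already submitted; both names are hypothetical.
def _example_next_generation(results_df, tried_df):
    """Build the next-generation template from prior results (illustration only)."""
    return NewGeneticTemplate(
        results_df,
        submitted_parameters=tried_df,
        sort_column="smape_weighted",
        sort_ascending=True,
        max_results=20,
        top_n=20,
    )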
def validation_aggregation(validation_results):
"""Aggregate a TemplateEvalObject."""
groupby_cols = [
'ID',
'Model',
'ModelParameters',
'TransformationParameters',
'Ensemble',
]
col_aggs = {
'Runs': 'sum',
'smape': 'mean',
'mae': 'mean',
'rmse': 'mean',
'containment': 'mean',
'spl': 'mean',
'contour': 'mean',
'smape_weighted': 'mean',
'mae_weighted': 'mean',
'rmse_weighted': 'mean',
'containment_weighted': 'mean',
'contour_weighted': 'mean',
'spl_weighted': 'mean',
'TotalRuntimeSeconds': 'mean',
'Score': 'mean',
}
validation_results.model_results['TotalRuntimeSeconds'] = (
validation_results.model_results['TotalRuntime'].dt.seconds + 1
)
validation_results.model_results = validation_results.model_results[
pd.isnull(validation_results.model_results['Exceptions'])
]
validation_results.model_results = validation_results.model_results.replace(
[np.inf, -np.inf], np.nan
)
validation_results.model_results = validation_results.model_results.groupby(
groupby_cols
).agg(col_aggs)
validation_results.model_results = validation_results.model_results.reset_index(
drop=False
)
return validation_results
def generate_score(
model_results, metric_weighting: dict = {}, prediction_interval: float = 0.9
):
"""Generate score based on relative accuracies."""
try:
smape_weighting = metric_weighting['smape_weighting']
except KeyError:
smape_weighting = 1
try:
mae_weighting = metric_weighting['mae_weighting']
except KeyError:
mae_weighting = 0
try:
rmse_weighting = metric_weighting['rmse_weighting']
except KeyError:
rmse_weighting = 0
try:
containment_weighting = metric_weighting['containment_weighting']
except KeyError:
containment_weighting = 0
try:
runtime_weighting = metric_weighting['runtime_weighting'] * 0.1
except KeyError:
runtime_weighting = 0
try:
spl_weighting = metric_weighting['spl_weighting']
except KeyError:
spl_weighting = 0
try:
contour_weighting = metric_weighting['contour_weighting']
except KeyError:
contour_weighting = 0
# handle various runtime information records
if 'TotalRuntimeSeconds' in model_results.columns:
if 'TotalRuntime' in model_results.columns:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntime'].dt.seconds,
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = np.where(
model_results['TotalRuntimeSeconds'].isna(),
model_results['TotalRuntimeSeconds'].max(),
model_results['TotalRuntimeSeconds'],
)
else:
model_results['TotalRuntimeSeconds'] = model_results['TotalRuntime'].dt.seconds
# generate minimizing scores, where smaller = better accuracy
try:
model_results = model_results.replace([np.inf, -np.inf], np.nan)
# model_results = model_results.fillna(value=model_results.max(axis=0))
smape_score = model_results['smape_weighted'] / (
model_results['smape_weighted'].min(skipna=True) + 1
) # smaller better
rmse_scaler = model_results['rmse_weighted'].median(skipna=True)
rmse_scaler = 1 if rmse_scaler == 0 else rmse_scaler
rmse_score = model_results['rmse_weighted'] / rmse_scaler
mae_scaler = model_results['mae_weighted'].median(skipna=True)
mae_scaler = 1 if mae_scaler == 0 else mae_scaler
mae_score = model_results['mae_weighted'] / mae_scaler
containment_score = (
abs(prediction_interval - model_results['containment'])
) + 1 # from 1 to 2, smaller better
runtime = model_results['TotalRuntimeSeconds'] + 120
runtime_score = runtime / (runtime.min(skipna=True)) # smaller better
spl_score = model_results['spl_weighted'] / (
model_results['spl_weighted'].min(skipna=True) + 1
) # smaller better
contour_score = (
(1 / (model_results['contour_weighted']))
.replace([np.inf, -np.inf, np.nan], 10)
.clip(upper=10)
)
except KeyError:
raise KeyError(
"Inconceivable! Evaluation Metrics are missing and all models have failed, by an error in TemplateWizard or metrics. A new template may help, or an adjusted model_list."
)
return (
(smape_score * smape_weighting)
+ (mae_score * mae_weighting)
+ (rmse_score * rmse_weighting)
+ (containment_score * containment_weighting)
+ (runtime_score * runtime_weighting)
+ (spl_score * spl_weighting)
+ (contour_score * contour_weighting)
)
| 0
| 0
| 0
| 6,864
| 0
| 647
| 0
| 0
| 92
|
b6fd8e198c7dfa420a6d4c45a470a2d144bb9ee4
| 1,025
|
py
|
Python
|
aoc2020/day12/day12_part2.py
|
GetPastTheMonkey/advent-of-code
|
db80be6d87baba4d5315cc69276905c55762da86
|
[
"MIT"
] | 1
|
2019-09-15T16:37:24.000Z
|
2019-09-15T16:37:24.000Z
|
aoc2020/day12/day12_part2.py
|
GetPastTheMonkey/advent-of-code
|
db80be6d87baba4d5315cc69276905c55762da86
|
[
"MIT"
] | null | null | null |
aoc2020/day12/day12_part2.py
|
GetPastTheMonkey/advent-of-code
|
db80be6d87baba4d5315cc69276905c55762da86
|
[
"MIT"
] | null | null | null |
from utils import get_input_lines
pos_x = 0
pos_y = 0
waypoint_x = 10
waypoint_y = 1
for line in get_input_lines(__file__):
action = line[0]
n = int(line[1:])
# Handle actions
if action == "N":
waypoint_y += n
elif action == "S":
waypoint_y -= n
elif action == "E":
waypoint_x += n
elif action == "W":
waypoint_x -= n
elif action == "L":
# Rotate (n//90) times CCW: (new_x, new_y) = (-old_y, old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = -waypoint_y
waypoint_y = tmp_x
elif action == "R":
# Rotate (n//90) times CW: (new_x, new_y) = (old_y, -old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = waypoint_y
waypoint_y = -tmp_x
elif action == "F":
pos_x += n * waypoint_x
pos_y += n * waypoint_y
else:
raise NotImplementedError(f"Unknown action '{action}'")
print(abs(pos_x) + abs(pos_y))
| 25
| 68
| 0.537561
|
from utils import get_input_lines
pos_x = 0
pos_y = 0
waypoint_x = 10
waypoint_y = 1
for line in get_input_lines(__file__):
action = line[0]
n = int(line[1:])
# Handle actions
if action == "N":
waypoint_y += n
elif action == "S":
waypoint_y -= n
elif action == "E":
waypoint_x += n
elif action == "W":
waypoint_x -= n
elif action == "L":
# Rotate (n//90) times CCW: (new_x, new_y) = (-old_y, old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = -waypoint_y
waypoint_y = tmp_x
elif action == "R":
# Rotate (n//90) times CW: (new_x, new_y) = (old_y, -old_x)
for i in range(n // 90):
tmp_x = waypoint_x
waypoint_x = waypoint_y
waypoint_y = -tmp_x
elif action == "F":
pos_x += n * waypoint_x
pos_y += n * waypoint_y
else:
raise NotImplementedError(f"Unknown action '{action}'")
print(abs(pos_x) + abs(pos_y))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2326b0fce7d21d579893e74d3e91c5354e98cf2f
| 282
|
py
|
Python
|
src/year2019/day13a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 10
|
2017-12-11T17:54:52.000Z
|
2021-12-09T20:16:30.000Z
|
src/year2019/day13a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | 260
|
2015-12-09T11:03:03.000Z
|
2021-12-12T14:32:23.000Z
|
src/year2019/day13a.py
|
lancelote/advent_of_code
|
06dda6ca034bc1e86addee7798bb9b2a34ff565b
|
[
"Unlicense"
] | null | null | null |
"""2019 - Day 13 Part 1: Care Package."""
from src.year2019.intcode import Computer
def solve(task: str) -> int:
"""Count the number of blocks."""
computer = Computer()
computer.load_program(task)
computer.execute()
return list(computer.stdout)[2::3].count(2)
| 25.636364
| 47
| 0.666667
|
"""2019 - Day 13 Part 1: Care Package."""
from src.year2019.intcode import Computer
def solve(task: str) -> int:
"""Count the number of blocks."""
computer = Computer()
computer.load_program(task)
computer.execute()
return list(computer.stdout)[2::3].count(2)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6af6580ccb03ce18151efe09dbeb559263c69624
| 1,816
|
py
|
Python
|
examples/fontforge-old/demoAddToMenu.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 41
|
2015-05-21T21:12:26.000Z
|
2022-02-17T17:23:14.000Z
|
examples/fontforge-old/demoAddToMenu.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 63
|
2015-05-15T10:25:55.000Z
|
2021-02-23T04:51:17.000Z
|
examples/fontforge-old/demoAddToMenu.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 12
|
2015-06-12T11:52:08.000Z
|
2020-09-23T10:40:59.000Z
|
#!/usr/bin/env python
'FontForge: Demo script to add menu items to FF tools menu'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2014 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
import sys, os, fontforge
sys.path.append(os.path.join(os.environ['HOME'], 'src/pysilfont/scripts'))
import samples.demoFunctions
from samples.demoFunctions import functionList, callFunctions
#from samples.demoCallFunctions import callFunctions
funcList=functionList()
for functionGroup in funcList :
menuType = funcList[functionGroup][0]
fontforge.registerMenuItem(toolMenuFunction,None,functionGroup,menuType,None,functionGroup);
print functionGroup, " registered"
''' This script needs to be called from one of the folders that FontForge looks in for scripts to
run when it is started. With current versions of FontForge, one is Home/.config/fontforge/python.
You may need to turn on showing hidden files (ctrl-H in Nautilus) before you can see the .config
folder. Within there create a one-line python script, say called sampledemo.py, containing a call
to this script, eg:
execfile("/home/david/src/pysilfont/scripts/samples/demoAddToMenu.py")
Due to the reload(samples.demoFunctions) line above, changes to functions defined in demoFunctions.py
are dynamic, ie FontForge does not have to be restarted (as would be the case if the functions were
called directly from the tools menu). Functions can even be added dynamically to the function groups.
If new function groups are defined, FontForge does have to be restarted to add them to the tools menu.
'''
| 46.564103
| 102
| 0.785793
|
#!/usr/bin/env python
'FontForge: Demo script to add menu items to FF tools menu'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2014 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
import sys, os, fontforge
sys.path.append(os.path.join(os.environ['HOME'], 'src/pysilfont/scripts'))
import samples.demoFunctions
from samples.demoFunctions import functionList, callFunctions
#from samples.demoCallFunctions import callFunctions
def toolMenuFunction(functionGroup,font) :
reload (samples.demoFunctions)
callFunctions(functionGroup,font)
funcList=functionList()
for functionGroup in funcList :
menuType = funcList[functionGroup][0]
fontforge.registerMenuItem(toolMenuFunction,None,functionGroup,menuType,None,functionGroup);
print functionGroup, " registered"
''' This script needs to be called from one of the folders that FontForge looks in for scripts to
run when it is started. With current versions of FontForge, one is Home/.config/fontforge/python.
You may need to turn on showing hidden files (ctrl-H in Nautilus) before you can see the .config
folder. Within there create a one-line python script, say called sampledemo.py, containing a call
to this script, eg:
execfile("/home/david/src/pysilfont/scripts/samples/demoAddToMenu.py")
Due to the reload(samples.demoFunctions) line above, changes to functions defined in demoFunctions.py
are dynamic, ie FontForge does not have to be restarted (as would be the case if the functions were
called directly from the tools menu). Functions can even be added dynamically to the function groups.
If new function groups are defined, FontForge does have to be restarted to add them to the tools menu.
'''
| 0
| 0
| 0
| 0
| 0
| 94
| 0
| 0
| 23
|
46615d419dda76960016bd1ad4896644e6b356d7
| 29,475
|
py
|
Python
|
tensor2struct/utils/tree_kernels.py
|
chenyangh/tensor2struct-public
|
d3257cba6d76d3c658a58a78f687d986bdc755cf
|
[
"MIT"
] | 69
|
2021-04-14T06:35:07.000Z
|
2022-03-31T18:35:05.000Z
|
tensor2struct/utils/tree_kernels.py
|
chenyangh/tensor2struct-public
|
d3257cba6d76d3c658a58a78f687d986bdc755cf
|
[
"MIT"
] | 11
|
2021-04-16T11:16:04.000Z
|
2022-03-22T21:21:29.000Z
|
tensor2struct/utils/tree_kernels.py
|
chenyangh/tensor2struct-public
|
d3257cba6d76d3c658a58a78f687d986bdc755cf
|
[
"MIT"
] | 18
|
2021-04-14T07:19:56.000Z
|
2022-03-23T19:26:18.000Z
|
####
| 41.927454
| 145
| 0.397116
|
import math
from copy import deepcopy
from tensor2struct.utils import tree
class Kernel:
# Common routines for kernel functions
def kernel(self, a, b):
# compute the tree kernel on the trees a and b
if not isinstance(a, tree.Tree):
print("ERROR: first parameter has to be a Tree Object")
return ""
if not isinstance(b, tree.Tree):
print("ERROR: second parameter has to be a Tree Object")
return ""
self.preProcess(a)
self.preProcess(b)
return self.evaluate(a, b)
def preProcess(self, a):
# Create any data structure useful for computing the kernel
# To be instantiated in subclasses
print("ERROR: prepProcess() must be executed in subclasses")
pass
def evaluate(self, a, b):
# To be instantiated in subclasses
print("ERROR: evaluated() must be executed in subclasses")
pass
def printKernelMatrix(self, dataset):
if not isinstance(dataset, tree.Dataset):
print("ERROR: the first Parameter must be a Dataset object")
return
ne = len(dataset)
for i in range(ne):
for j in range(i, ne):
print(
"%d %d %.2f"
% (i, j, self.kernel(dataset.getExample(i), dataset.getExample(j)),)
)
class KernelST(Kernel):
def __init__(self, l, savememory=1, hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.savememory = savememory
def preProcess(self, a):
if hasattr(a, "kernelstrepr"): # already preprocessed
return
if not hasattr(a.root, "stsize"):
a.root.setSubtreeSize()
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelstrepr = tree.SubtreeIDSubtreeSizeList(a.root)
a.kernelstrepr.sort()
if self.savememory == 1:
a.deleteRootTreeNode()
def evaluate(self, a, b):
ha, hb = (a.kernelstrepr, b.kernelstrepr)
# Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize)
# a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i, j, k, toti, totj = (0, 0, 0, len(ha), len(hb))
while i < toti and j < totj:
if ha.getSubtreeID(i) == hb.getSubtreeID(j):
ci, cj = (i, j)
while i < toti and ha.getSubtreeID(i) == ha.getSubtreeID(ci):
i += 1
while j < totj and hb.getSubtreeID(j) == hb.getSubtreeID(cj):
j += 1
k += (i - ci) * (j - cj) * (self.l ** ha.getSubtreeSize(ci))
elif ha.getSubtreeID(i) < hb.getSubtreeID(j):
i += 1
else:
j += 1
return k
class KernelSST(Kernel):
def __init__(self, l, hashsep="#"):
self.l = float(l)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self, a):
if hasattr(a, "kernelsstrepr"): # already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelsstrepr = tree.ProdSubtreeList(a.root)
a.kernelsstrepr.sort()
def CSST(self, c, d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
else:
prod = self.l
nc = c.getOutdegree()
if nc == d.getOutdegree():
for ci in range(nc):
if c.getChild(ci).getProduction() == d.getChild(ci).getProduction():
prod *= 1 + self.CSST(c.getChild(ci), d.getChild(ci))
else:
cid, did = (
c.getChild(ci).getSubtreeID(),
d.getChild(ci).getSubtreeID(),
)
if cid < did:
self.cache.insert(str(cid) + str(did), 0)
else:
self.cache.insert(str(did) + str(cid), 0)
self.cache.insert(tmpkey, prod)
return float(prod)
def evaluate(self, a, b):
pa, pb = (a.kernelsstrepr, b.kernelsstrepr)
self.cache.removeAll()
i, j, k, toti, totj = (0, 0, 0, len(pa), len(pb))
while i < toti and j < totj:
if pa.getProduction(i) == pb.getProduction(j):
ci, cj = (i, j)
while i < toti and pa.getProduction(i) == pa.getProduction(ci):
j = cj
while j < totj and pb.getProduction(j) == pb.getProduction(cj):
k += self.CSST(pa.getTree(i), pb.getTree(j))
j += 1
i += 1
elif len(pa.getProduction(i)) < len(pb.getProduction(j)) or (
len(pa.getProduction(i)) == len(pb.getProduction(j))
and pa.getProduction(i) < pb.getProduction(j)
):
i += 1
else:
j += 1
return k
class KernelPT(Kernel):
def __init__(self, l, m, hashsep="#"):
self.l = float(l)
self.m = float(m)
self.hashsep = hashsep
self.cache = Cache()
def preProcess(self, a):
if hasattr(a, "kernelptrepr"): # already preprocessed
return
a.root.setHashSubtreeIdentifier(self.hashsep)
a.kernelptrepr = tree.LabelSubtreeList(a.root)
a.kernelptrepr.sort()
def DeltaSk(self, a, b, nca, ncb):
DPS = [[0 for i in range(ncb + 1)] for j in range(nca + 1)]
DP = [[0 for i in range(ncb + 1)] for j in range(nca + 1)]
kmat = [0] * (nca + 1)
for i in range(1, nca + 1):
for j in range(1, ncb + 1):
if a.getChild(i - 1).getLabel() == b.getChild(j - 1).getLabel():
DPS[i][j] = self.CPT(a.getChild(i - 1), b.getChild(j - 1))
kmat[0] += DPS[i][j]
else:
DPS[i][j] = 0
for s in range(1, min(nca, ncb)):
for i in range(nca + 1):
DP[i][s - 1] = 0
for j in range(ncb + 1):
DP[s - 1][j] = 0
for i in range(s, nca + 1):
for j in range(s, ncb + 1):
DP[i][j] = (
DPS[i][j]
+ self.l * DP[i - 1][j]
+ self.l * DP[i][j - 1]
- self.l ** 2 * DP[i - 1][j - 1]
)
if a.getChild(i - 1).getLabel() == b.getChild(j - 1).getLabel():
DPS[i][j] = (
self.CPT(a.getChild(i - 1), b.getChild(j - 1))
* DP[i - 1][j - 1]
)
kmat[s] += DPS[i][j]
return sum(kmat)
def CPT(self, c, d):
if c.getSubtreeID() < d.getSubtreeID():
tmpkey = str(c.getSubtreeID()) + "#" + str(d.getSubtreeID())
else:
tmpkey = str(d.getSubtreeID()) + "#" + str(c.getSubtreeID())
if self.cache.exists(tmpkey):
return self.cache.read(tmpkey)
else:
if c.getOutdegree() == 0 or d.getOutdegree() == 0:
prod = self.m * self.l ** 2
else:
prod = self.m * (
self.l ** 2 + self.DeltaSk(c, d, c.getOutdegree(), d.getOutdegree())
)
self.cache.insert(tmpkey, prod)
return prod
def evaluate(self, a, b):
self.cache.removeAll()
la, lb = (a.kernelptrepr, b.kernelptrepr)
i, j, k, toti, totj = (0, 0, 0, len(la), len(lb))
while i < toti and j < totj:
if la.getLabel(i) == lb.getLabel(j):
ci, cj = (i, j)
while i < toti and la.getLabel(i) == la.getLabel(ci):
j = cj
while j < totj and lb.getLabel(j) == lb.getLabel(cj):
k += self.CPT(la.getTree(i), lb.getTree(j))
j += 1
i += 1
elif la.getLabel(i) <= lb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdak(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
def preProcess(self, t):
if hasattr(t, "kernelpdakrepr"): # already preprocessed
return
if not hasattr(t.root, "stsize"):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.kernelpdakrepr = tree.SubtreePositionIDLabelSubtreeSizeList(t.root)
def mergetrees_with_depth(self, tree1, tree2):
merge = {}
for key in tree1:
if key in tree2:
merge[key] = (
{(tree1[key][0], tree1[key][2]): {tree1[key][1]: 1}},
{(tree2[key][0], tree2[key][2]): {tree2[key][1]: 1}},
)
del tree2[key]
else:
merge[key] = (
{(tree1[key][0], tree1[key][2]): {tree1[key][1]: 1}},
None,
)
for key in tree2:
merge[key] = (None, {(tree2[key][0], tree2[key][2]): {tree2[key][1]: 1}})
return merge
def visit_with_depth(self, jtree, node, depth, param, lambda_par, gamma_par):
kvalue = 0
if node is not None:
child = 0
key = str(hash(node + "#" + str(child)))
while key in jtree:
kvalue = kvalue + self.visit_with_depth(
jtree, key, depth + 1, param, lambda_par, gamma_par
)
if jtree[key][0] is not None:
if jtree[node][0] is None:
# jtree[node][0] = jtree[key][0]
jtree[node] = (jtree[key][0], jtree[node][1])
else:
for tmpkey in jtree[key][0]:
if tmpkey in jtree[node][0]:
for tmpkey2 in jtree[key][0][tmpkey]:
if tmpkey2 in jtree[node][0][tmpkey]:
jtree[node][0][tmpkey][tmpkey2] = (
jtree[node][0][tmpkey][tmpkey2]
+ jtree[key][0][tmpkey][tmpkey2]
)
else:
jtree[node][0][tmpkey][tmpkey2] = jtree[key][0][
tmpkey
][tmpkey2]
else:
jtree[node][0][tmpkey] = jtree[key][0][tmpkey]
if jtree[key][1] is not None:
if jtree[node][1] is None:
# jtree[node][1]=jtree[key][1]
jtree[node] = (jtree[node][0], jtree[key][1])
else:
for tmpkey in jtree[key][1]:
if tmpkey in jtree[node][1]:
for tmpkey2 in jtree[key][1][tmpkey]:
if tmpkey2 in jtree[node][1][tmpkey]:
jtree[node][1][tmpkey][tmpkey2] = (
jtree[node][1][tmpkey][tmpkey2]
+ jtree[key][1][tmpkey][tmpkey2]
)
else:
jtree[node][1][tmpkey][tmpkey2] = jtree[key][1][
tmpkey
][tmpkey2]
else:
jtree[node][1][tmpkey] = jtree[key][1][tmpkey]
child = child + 1
key = str(hash(node + "#" + str(child)))
# print jtree[node]
if (jtree[node][0] is not None) and (jtree[node][1] is not None):
for lkey in jtree[node][0]:
if lkey in jtree[node][1]:
tmpk = 0
for fkey1 in jtree[node][0][lkey]:
for fkey2 in jtree[node][1][lkey]:
tmpk = tmpk + lambda_par ** lkey[1] * jtree[node][0][
lkey
][fkey1] * jtree[node][1][lkey][fkey2] * math.exp(
-param * (fkey1 + fkey2)
)
kvalue = kvalue + (gamma_par ** depth) * tmpk * math.exp(
2 * param * depth
)
return kvalue
def evaluate(self, a, b):
tree1 = deepcopy(a.kernelpdakrepr.sids)
tree2 = deepcopy(b.kernelpdakrepr.sids)
m = self.mergetrees_with_depth(tree1, tree2)
kvalue = self.visit_with_depth(
m, str(hash("0")), 1, self.l, self.gamma, self.beta
)
del m, tree1, tree2
return kvalue
class KernelPdakMine(Kernel):
def __init__(self, l, gamma, beta, hashsep="#"):
self.l = float(l)
self.gamma = float(gamma)
self.beta = float(beta)
self.hashsep = hashsep
self.cache = Cache()
self.cachesize = 10000
def preProcess(self, t):
if hasattr(t, "kernelpdakrepr"): # already preprocessed
return
if not hasattr(t.root, "stsize"):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
t.computeRoutes()
t.kernelpdakrepr = tree.SubtreeIDSubtreeSizeRouteList(t.root)
t.kernelpdakrepr.sort()
# print t.kernelpdakrepr.sids
def ntk(self, ra, da, rb, db, hra, hrb):
if hra < hrb:
tmpkey = str(hra) + "#" + str(hrb)
else:
tmpkey = str(hrb) + "#" + str(hra)
if self.cache.exists(tmpkey):
return float(self.cache.read(tmpkey))
lena, lenb = len(ra), len(rb)
c, p, minlen = 0, 0, min(lena, lenb)
while c < minlen and ra[c] == rb[c]:
if ra[c] == "#":
p += 1
c += 1
# print "p = ", p, "da, db", da, db, ra, rb
if self.gamma == 1:
r = (p + 1) * (math.e ** (-self.beta * (da + db - 2 * p)))
else:
r = (
(1 - self.gamma ** (p + 1))
/ (1 - self.gamma)
* (math.e ** (-self.beta * (da + db - 2 * p)))
)
if len(self.cache) > self.cachesize:
self.cache.removeAll()
self.cache.insert(tmpkey, r)
return r
# if self.gamma == 1:
# return (p+1)*(math.e**(-self.beta*(da + db - 2*p)))
# else:
# return (1-self.gamma**(p+1))/(1-self.gamma)*(math.e**(-self.beta*(da + db - 2*p)))
def evaluate(self, a, b):
ha, hb = (a.kernelpdakrepr, b.kernelpdakrepr)
# print ha, hb
# Assumes ha and hb are ordered list of pairs (subtreeid, subtreesize, route)
# a.kernelreprst,b.kernelreprst are checked or created in preProcess()
i, j, k, toti, totj = (0, 0, 0, len(ha), len(hb))
while i < toti and j < totj:
if ha.getLabel(i) == hb.getLabel(j):
ci, cj = (i, j)
while i < toti and ha.getLabel(i) == ha.getLabel(ci):
j = cj
while j < totj and hb.getLabel(j) == hb.getLabel(cj):
cst = self.l
if ha.getSubtreeID(i) == hb.getSubtreeID(j):
cst += self.l ** ha.getSubtreeSize(i)
# print ha.getLabel(i), hb.getLabel(j), cst, self.ntk(ha.getRoute(i), ha.getDepth(i), hb.getRoute(j), hb.getDepth(j))
k += cst * self.ntk(
ha.getRoute(i),
ha.getDepth(i),
hb.getRoute(j),
hb.getDepth(j),
ha.getRouteHash(i),
hb.getRouteHash(j),
)
j += 1
i += 1
elif ha.getLabel(i) < hb.getLabel(j):
i += 1
else:
j += 1
return k
class KernelPdakFast(KernelPdak):
def preProcess(self, t):
if hasattr(t, "kernelpdakrepr"): # already preprocessed
return
if not hasattr(t.root, "stsize"):
t.root.setSubtreeSize()
t.root.setHashSubtreeIdentifier(self.hashsep)
t.computeNodesDepth()
a = tree.SubtreePositionIDSubtreeIDSubtreeSizeListLabel(t.root)
t.kernelpdakrepr = (a.sids, a.pinv)
def mergetrees_with_depth_del_labels(self, tree_labels1, tree_labels2):
        tree1, labels1 = tree_labels1
        tree2, labels2 = tree_labels2
merge = {}
match = 0
for key in tree1:
if key in tree2:
if tree1[key][0] in labels2:
match = match + 1
if tree2[key][0] in labels1:
merge[key] = (
{(tree1[key][0], tree1[key][1]): 0},
{(tree2[key][0], tree2[key][1]): 0},
)
else:
merge[key] = ({(tree1[key][0], tree1[key][1]): 0}, {})
else:
if tree2[key][0] in labels1:
merge[key] = ({}, {(tree2[key][0], tree2[key][1]): 0})
match = match + 1
else:
merge[key] = ({}, {})
del tree2[key]
else:
if tree1[key][0] in labels2:
merge[key] = ({(tree1[key][0], tree1[key][1]): 0}, {})
match = match + 1
else:
merge[key] = ({}, {})
for key in tree2:
if tree2[key][0] in labels1:
merge[key] = ({}, {(tree2[key][0], tree2[key][1]): 0})
match = match + 1
else:
merge[key] = ({}, {})
return (merge, match)
def visit_with_depth(self, jtree, node, depth, param, lambda_par, gamma_par):
kvalue = 0
tmpk = 0
if node is not None:
child = 0
key = str(hash(node + "#" + str(child)))
startkey = key
max_size = [0, None]
while key in jtree:
kvalue = kvalue + self.visit_with_depth(
jtree, key, depth + 1, param, lambda_par, gamma_par
)
if (len(jtree[key][0]) + len(jtree[key][1])) > max_size[0]:
max_size[0] = len(jtree[key][0]) + len(jtree[key][1])
max_size[1] = key
child = child + 1
key = str(hash(node + "#" + str(child)))
# print 'max_size',max_size[0]
if max_size[0] > 0:
child = 0
while startkey in jtree:
if startkey != max_size[1]:
if jtree[startkey][0] is not {}:
for tmpkey in jtree[startkey][0]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][1]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* jtree[startkey][0][tmpkey]
* jtree[max_size[1]][1][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* jtree[startkey][0][tmpkey]
* jtree[max_size[1]][1][tmpkey]
)
# fine calcolo kernel, inizio inserimento
if jtree[startkey][1] is not {}:
for tmpkey in jtree[startkey][1]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][0]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* jtree[startkey][1][tmpkey]
* jtree[max_size[1]][0][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* jtree[startkey][1][tmpkey]
* jtree[max_size[1]][0][tmpkey]
)
# fine calcolo kernel, inizio inserimento
if tmpkey in jtree[max_size[1]][1]:
jtree[max_size[1]][1][tmpkey] = (
jtree[max_size[1]][1][tmpkey]
+ jtree[startkey][1][tmpkey]
)
else:
jtree[max_size[1]][1][tmpkey] = jtree[startkey][1][
tmpkey
]
# inserisco anche hash 0
for tmpkey in jtree[startkey][0]:
if tmpkey in jtree[max_size[1]][0]:
jtree[max_size[1]][0][tmpkey] = (
jtree[max_size[1]][0][tmpkey]
+ jtree[startkey][0][tmpkey]
)
else:
jtree[max_size[1]][0][tmpkey] = jtree[startkey][0][
tmpkey
]
# next child
child = child + 1
startkey = str(hash(node + "#" + str(child)))
# fine while figli
if jtree[node][0] is not {}:
for tmpkey in jtree[node][0]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][1]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][1][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][1][tmpkey]
)
# fine calcolo kernel, inizio inserimento
if tmpkey in jtree[max_size[1]][0]:
jtree[max_size[1]][0][tmpkey] = jtree[max_size[1]][0][
tmpkey
] + math.exp(-param * depth)
else:
jtree[max_size[1]][0][tmpkey] = math.exp(-param * depth)
if jtree[node][1] is not {}:
for tmpkey in jtree[node][1]:
# calcolo kernel
if tmpkey in jtree[max_size[1]][0]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][0][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* math.exp(-param * depth)
* jtree[max_size[1]][0][tmpkey]
)
# fine calcolo kernel, inizio inserimento
if tmpkey in jtree[max_size[1]][1]:
jtree[max_size[1]][1][tmpkey] = jtree[max_size[1]][1][
tmpkey
] + math.exp(-param * depth)
else:
jtree[max_size[1]][1][tmpkey] = math.exp(-param * depth)
jtree[node] = (jtree[max_size[1]][0], jtree[max_size[1]][1])
else:
for tmpkey in jtree[node][0]:
jtree[node][0][tmpkey] = math.exp(-param * depth)
for tmpkey in jtree[node][1]:
jtree[node][1][tmpkey] = math.exp(-param * depth)
if jtree[node][0] is not {} and jtree[node][1] is not {}:
for tmpkey in jtree[node][0]:
if tmpkey in jtree[node][1]:
if gamma_par != 1.0:
tmpk = (
tmpk
+ (gamma_par ** (depth + 1) - gamma_par)
/ (gamma_par - 1)
* lambda_par ** tmpkey[1]
* jtree[node][0][tmpkey]
* jtree[node][1][tmpkey]
)
else:
tmpk = (
tmpk
+ depth
* lambda_par ** tmpkey[1]
* jtree[node][0][tmpkey]
* jtree[node][1][tmpkey]
)
return kvalue + tmpk * math.exp(2 * param * depth)
def evaluate(self, a, b):
tree1 = deepcopy(a.kernelpdakrepr)
tree2 = deepcopy(b.kernelpdakrepr)
(m, match) = self.mergetrees_with_depth_del_labels(tree1, tree2)
kvalue = 0
if match > 0:
kvalue = self.visit_with_depth(
m, str(hash("0")), 1, self.l, self.gamma, self.beta
)
del m, tree1, tree2
return kvalue
####
class Cache:
# An extremely simple cache
def __init__(self):
self.cache = {}
self.size = 0
def exists(self, key):
return key in self.cache
def existsPair(self, keya, keyb):
if keya < keyb:
tmpkey = str(keya) + "#" + str(keyb)
else:
tmpkey = str(keyb) + "#" + str(keya)
return tmpkey in self.cache
def insert(self, key, value):
self.cache[key] = value
self.size += 1
def insertPairIfNew(self, keya, keyb):
if keya < keyb:
tmpkey = str(keya) + "#" + str(keyb)
else:
tmpkey = str(keyb) + "#" + str(keya)
        if tmpkey not in self.cache:
            self.insert(tmpkey, 0)  # insert() requires a value; 0 just marks the pair as present
def remove(self, key):
del self.cache[key]
self.size -= 1
def removeAll(self):
self.cache = {}
self.size = 0
def read(self, key):
return self.cache[key]
def __len__(self):
return self.size
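A hedged usage sketch (not part of the original module): the constructors and methods referenced below are the ones defined above, but how the tree.Tree inputs are built is left as an assumption here.

# from tensor2struct.utils import tree, tree_kernels
# t1, t2 = ...                              # assumed: tree.Tree instances built elsewhere
# k_st = tree_kernels.KernelST(l=0.5)       # subtree kernel with decay factor l
# similarity = k_st.kernel(t1, t2)          # scalar kernel value between the two trees
# k_sst = tree_kernels.KernelSST(l=0.5)     # subset-tree variant with a production cache
# k_sst.printKernelMatrix(dataset)          # pairwise kernel values over a tree.Dataset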
| 0
| 0
| 0
| 29,200
| 0
| 0
| 0
| 9
| 251
|
162b83394afde2c91cf06578b6a90603be379765
| 469
|
py
|
Python
|
Question_nlp/answers/onehot.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 10
|
2021-12-17T06:07:25.000Z
|
2022-03-25T13:50:05.000Z
|
Question_nlp/answers/onehot.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | null | null | null |
Question_nlp/answers/onehot.py
|
skn047/DeepLearningMugenKnock
|
73d2b903816b380d56020c8336041883bc0d131c
|
[
"MIT"
] | 2
|
2022-03-15T02:42:09.000Z
|
2022-03-30T23:19:55.000Z
|
_chars = ""
chars = [c for c in _chars]
print(data_load())
| 23.45
| 106
| 0.556503
|
_chars = "あいうおえかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわをんがぎぐげござじずぜぞだぢづでどばびぶべぼぱぴぷぺぽぁぃぅぇぉゃゅょっー1234567890!?、。"
chars = [c for c in _chars]
def data_load():
fname = 'sandwitchman.txt'
xs = []
with open(fname, 'r') as f:
for l in f.readlines():
l = l.strip()
for c in l:
x = [0 for _ in range(len(chars))]
x[chars.index(c)] = 1
xs.append(x)
return xs
print(data_load())
| 285
| 0
| 0
| 0
| 0
| 291
| 0
| 0
| 23
|
12774b82ae587f55749e4d546d6743b03cdf3463
| 590
|
py
|
Python
|
problems/593.Valid_Square/li.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/593.Valid_Square/li.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/593.Valid_Square/li.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
# coding=utf-8
# Author: Jianghan LI
# Question: 593.Valid_Square
# Date: 2017-05-22
| 26.818182
| 137
| 0.477966
|
# coding=utf-8
# Author: Jianghan LI
# Question: 593.Valid_Square
# Date: 2017-05-22
class Solution(object):
def validSquare(self, p1, p2, p3, p4):
"""
:type p1: List[int]
:type p2: List[int]
:type p3: List[int]
:type p4: List[int]
:rtype: bool
"""
p1, p2, p3, p4 = sorted([p1, p2, p3, p4])
def isRight(a, b, c):
return (a[1] - b[1]) * (c[1] - b[1]) + (a[0] - b[0]) * (c[0] - b[0]) == 0 and abs(a[1] - b[1]) == abs(c[0] - b[0]) and a != b
return isRight(p2, p1, p3) and isRight(p2, p4, p3)
| 0
| 0
| 0
| 481
| 0
| 0
| 0
| 0
| 23
|
f1f4c0e148288296136b5caf6748c31645cc02a9
| 769
|
py
|
Python
|
Tuples and Sets - Exercise/07. Battle of Names.py
|
B3WD/python_advanced
|
477b2eac41f1ec5a172d612afda1096a9d7fb2f5
|
[
"MIT"
] | 1
|
2020-10-28T07:52:17.000Z
|
2020-10-28T07:52:17.000Z
|
Tuples and Sets - Exercise/07. Battle of Names.py
|
B3WD/python_advanced_softuni
|
477b2eac41f1ec5a172d612afda1096a9d7fb2f5
|
[
"MIT"
] | null | null | null |
Tuples and Sets - Exercise/07. Battle of Names.py
|
B3WD/python_advanced_softuni
|
477b2eac41f1ec5a172d612afda1096a9d7fb2f5
|
[
"MIT"
] | null | null | null |
lines_count = int(input())
lines = [input() for _ in range(lines_count)]
solve(lines)
| 27.464286
| 62
| 0.587776
|
def solve(lines):
results_odd = set()
results_even = set()
for i, name in enumerate(lines):
ascii_sum = 0
for char in name:
ascii_sum += ord(char)
result = int(ascii_sum / (i + 1))
if result % 2 != 0:
results_odd.add(result)
else:
results_even.add(result)
if sum(results_odd) == sum(results_even):
print(", ".join(map(str, results_odd | results_even)))
elif sum(results_odd) > sum(results_even):
print(", ".join(map(str, results_odd - results_even)))
elif sum(results_odd) < sum(results_even):
print(", ".join(map(str, results_odd ^ results_even)))
lines_count = int(input())
lines = [input() for _ in range(lines_count)]
solve(lines)
| 0
| 0
| 0
| 0
| 0
| 658
| 0
| 0
| 22
|
bc177fd76c0ff5e5ec9943fd6750ff2dfe5cb200
| 3,707
|
py
|
Python
|
sdk/python/touca/_printer.py
|
trytouca/trytouca
|
eae38a96407d1ecac543c5a5fb05cbbe632ddfca
|
[
"Apache-2.0"
] | 6
|
2022-03-19T02:57:11.000Z
|
2022-03-31T16:34:34.000Z
|
sdk/python/touca/_printer.py
|
trytouca/trytouca
|
eae38a96407d1ecac543c5a5fb05cbbe632ddfca
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/touca/_printer.py
|
trytouca/trytouca
|
eae38a96407d1ecac543c5a5fb05cbbe632ddfca
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Touca, Inc. Subject to Apache-2.0 License.
from colorama import init
init()
| 34.971698
| 88
| 0.534394
|
# Copyright 2021 Touca, Inc. Subject to Apache-2.0 License.
import math
from pathlib import Path
from colorama import Style, Fore, Back, init
init()
class Printer:
def print_warning(fmt: str, *args, **kwargs):
print(f"{Fore.YELLOW}{fmt.format(*args, **kwargs)}{Fore.RESET}")
def print_error(fmt: str, *args, **kwargs):
import sys
print(f"{Fore.RED}{fmt.format(*args, **kwargs)}{Fore.RESET}", file=sys.stderr)
def print_app_header():
print("\nTouca Test Framework")
def print_app_footer():
print("\n✨ Ran all test suites.\n")
def __init__(self, options):
self.options = options
self.testcase_width = max(len(k) for k in options.get("testcases"))
self.testcase_count = len(options.get("testcases"))
def print_line(self, fmt: str, *args, **kwargs):
msg = fmt.format(*args, **kwargs) if args or kwargs else fmt
if self.options.get("colored-output"):
print(msg)
return
import re
line = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])").sub("", msg)
print(line)
def print_header(self):
revision = "/".join([self.options.get(k) for k in ["suite", "version"]])
self.print_line("\nSuite: {:s}\n", revision)
def print_progress(self, timer, testcase, idx, status, errors=[]):
states = {
"pass": ("PASS", Back.GREEN),
"skip": ("SKIP", Back.YELLOW),
"fail": ("FAIL", Back.RED),
}
performance = (
""
if status == "skip"
else " {dim}({timer:d} ms){reset}".format(
dim=Style.DIM,
reset=Style.NORMAL,
timer=timer.count(testcase),
)
)
progress = " {number:>{width}d}{dim}.{reset}".format(
dim=Style.DIM,
reset=Style.NORMAL,
number=idx + 1,
count=self.testcase_count,
width=int(math.log10(self.testcase_count)) + 1,
)
badge = "{bg_color} {text} {bg_reset}".format(
bg_color=states.get(status)[1],
bg_reset=Back.RESET,
text=states.get(status)[0],
)
self.print_line(
"{progress} {badge} {testcase:<{testcase_width}s}{performance}",
badge=badge,
progress=progress,
testcase=testcase,
testcase_width=self.testcase_width + 3,
performance=performance,
)
if errors:
self.print_line("\n {}Exception Raised:{}", Style.DIM, Style.NORMAL)
self.print_line("\n".join(f" - {error}\n" for error in errors))
def print_footer(self, stats, timer, options):
states = [
("pass", "passed", Fore.GREEN),
("skip", "skipped", Fore.YELLOW),
("fail", "failed", Fore.RED),
]
messages = []
for state in states:
if not stats.count(state[0]):
continue
messages.append(f"{state[2]}{stats.count(state[0])} {state[1]}{Fore.RESET}")
messages.append(f"{self.testcase_count} total")
left_pad = int(math.log10(self.testcase_count)) + 11
self.print_line("\n{:s} {:s}", "Tests:".ljust(left_pad), ", ".join(messages))
self.print_line(
"{:s} {:.2f} s", "Time:".ljust(left_pad), timer.count("__workflow__") / 1000
)
if any(map(options.get, ["save-as-binary", "save-as-json"])):
results_dir = Path(
*map(options.get, ["output-directory", "suite", "version"])
)
self.print_line("{:s} {}", "Results:".ljust(left_pad), results_dir)
| 3
| 0
| 0
| 3,531
| 0
| 0
| 0
| 12
| 68
|
55dadc90c2f9fdbe258867ab069d2c73b2196055
| 394
|
py
|
Python
|
siphon/__init__.py
|
DanielWatkins/siphon
|
4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2
|
[
"BSD-3-Clause"
] | 1
|
2019-05-31T14:02:08.000Z
|
2019-05-31T14:02:08.000Z
|
siphon/__init__.py
|
DanielWatkins/siphon
|
4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2
|
[
"BSD-3-Clause"
] | 147
|
2021-03-06T01:01:13.000Z
|
2022-03-30T22:18:18.000Z
|
siphon/__init__.py
|
DanielWatkins/siphon
|
4c6740c2f8030ec1a23cafd8b8b9713dcd382cb2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013-2015 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for accessing atmospheric and oceanic science data on remote servers."""
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 39.4
| 81
| 0.786802
|
# Copyright (c) 2013-2015 Siphon Contributors.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tools for accessing atmospheric and oceanic science data on remote servers."""
# Version import needs to come first so everyone else can pull on import
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d33d5c69746ae0b6fef212b68e6a477d56434fb5
| 88
|
py
|
Python
|
Python/B2025.py
|
Epoch1017/LintCode-Shared-Solutions
|
d1559ef96917c4255e1ce2cf25ef17edec596ac3
|
[
"CC0-1.0"
] | null | null | null |
Python/B2025.py
|
Epoch1017/LintCode-Shared-Solutions
|
d1559ef96917c4255e1ce2cf25ef17edec596ac3
|
[
"CC0-1.0"
] | null | null | null |
Python/B2025.py
|
Epoch1017/LintCode-Shared-Solutions
|
d1559ef96917c4255e1ce2cf25ef17edec596ac3
|
[
"CC0-1.0"
] | null | null | null |
# B2025-
print(" *")
print(" ***")
print("*****")
print(" ***")
print(" *")
| 14.666667
| 15
| 0.409091
|
# B2025 - print a character diamond
print(" *")
print(" ***")
print("*****")
print(" ***")
print(" *")
| 18
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6adbb154ad2791f017881d82ab4f304608aa3b72
| 21,792
|
py
|
Python
|
rave/filesystem.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | 5
|
2015-03-18T01:19:56.000Z
|
2020-10-23T12:44:47.000Z
|
rave/filesystem.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | null | null | null |
rave/filesystem.py
|
rave-engine/rave
|
0eeb956363f4d7eda92350775d7d386550361273
|
[
"BSD-2-Clause"
] | null | null | null |
# rave's virtual file system.
import re
import rave.common
import rave.log
_log = rave.log.get(__name__)
# Canonical path separator.
PATH_SEPARATOR = '/'
# Root.
ROOT = '/'
# Unnormalized path pattern.
BAD_PATH_PATTERN = re.compile(r'(?:{0}{{2,}}|(?:{0}|^)\.+(?:{0}|$))'.format(PATH_SEPARATOR))
# Various standard mount points.
ENGINE_MOUNT = '/.rave'
MODULE_MOUNT = '/.modules'
GAME_MOUNT = '/'
COMMON_MOUNT = '/.common'
## Stateful API.
| 36.686869
| 149
| 0.62321
|
# rave's virtual file system.
import os
import io
import re
import threading
import collections
import rave.common
import rave.log
_log = rave.log.get(__name__)
# Canonical path separator.
PATH_SEPARATOR = '/'
# Root.
ROOT = '/'
# Unnormalized path pattern.
BAD_PATH_PATTERN = re.compile(r'(?:{0}{{2,}}|(?:{0}|^)\.+(?:{0}|$))'.format(PATH_SEPARATOR))
# Various standard mount points.
ENGINE_MOUNT = '/.rave'
MODULE_MOUNT = '/.modules'
GAME_MOUNT = '/'
COMMON_MOUNT = '/.common'
class FileSystemError(rave.common.raveError, IOError):
def __init__(self, filename, message=None):
super().__init__(message or filename)
self.filename = filename
class NativeError(FileSystemError):
def __init__(self, filename, parent):
super().__init__(filename, message=repr(parent))
self.native_error = parent
class FileNotFound(FileSystemError, FileNotFoundError):
pass
class AccessDenied(FileSystemError, PermissionError):
pass
class FileNotReadable(FileSystemError, PermissionError, io.UnsupportedOperation):
pass
class FileNotWritable(FileSystemError, PermissionError, io.UnsupportedOperation):
pass
class FileNotSeekable(FileSystemError, PermissionError, io.UnsupportedOperation):
pass
class FileClosed(FileSystemError, BrokenPipeError):
pass
class NotAFile(FileSystemError, IsADirectoryError):
pass
class NotADirectory(FileSystemError, NotADirectoryError):
pass
class FileSystem:
def __init__(self):
# Lock when rebuilding cache or modifying the file system.
self._lock = threading.RLock()
# Clear the file system.
self.clear()
def __repr__(self):
return '<{}>'.format(self.__class__.__name__)
def clear(self):
if hasattr(self, '_roots'):
_log.trace('Clearing file system...')
with self._lock:
# File system roots. A mapping of path -> [ list of providers ].
self._roots = {}
# Transforming providers. A mapping of extension -> [ list of providers ].
self._transformers = {}
# File/directory list cache. A mapping of filename -> [ list of providers ].
self._file_cache = None
# Directory content cache. A mapping of directory -> { set of direct contents }.
self._listing_cache = None
## Building file cache.
def _build_cache(self):
""" Rebuild internal file cache. This will make looking up files, errors notwithstanding, an O(1) lookup operation. """
_log.trace('Building cache...')
with self._lock:
self._file_cache = { ROOT: [] }
self._listing_cache = { ROOT: set() }
for root, providers in self._roots.items():
for provider in providers:
self._build_provider_cache(provider, root)
def _build_provider_cache(self, provider, root):
"""
Add provider to file cache. This will traverse the providers file and iteratively add them to the file cache.
This function will check if transformers exist for the file in the process, which might indirectly trigger recursion,
since a transformed file acts as a new provider.
"""
_log.trace('Caching mount point {root} <- {prov}...', prov=provider, root=root)
# Add root to cache.
self._cache_directory(provider, root, root)
# Traverse provider and add files and directories on the go.
for subpath in provider.list():
path = self.join(root, subpath)
if provider.isdir(subpath):
self._cache_directory(provider, root, path)
else:
self._cache_file(provider, root, path)
def _build_transformer_cache(self, transformer, pattern):
"""
Add `transformer` to file cache. This will search all existing files to look for files that match the `pattern`, and if so,
adds the transformer as a new provider for that file and optionally removes it if the transformer consumes the file.
"""
_log.trace('Caching {trans} for {pattern}...', trans=transformer, pattern=pattern.pattern)
# Traverse paths to find matching files.
for file in self._file_cache.copy():
if not pattern.search(file):
continue
# Gotcha.
try:
handle = self.open(file)
except:
_log.warn('Couldn\'t open {path} for transformer {transformer}. Moving on...'.format(path=file, transformer=transformer))
continue
self._cache_transformed_file(transformer, file, handle)
def _cache_directory(self, provider, root, path):
""" Add `path`, provided by `provider`, as a directory to the file cache. """
_log.trace('Caching directory: {path} <- {provider}...', path=path, provider=provider)
with self._lock:
self._listing_cache.setdefault(path, set())
self._cache_entry(provider, root, path)
def _cache_file(self, provider, root, path):
""" Add `path`, provided by `provider`, as a file to the file cache. """
_log.trace('Caching file: {path} <- {provider}...', path=path, provider=provider)
localpath = self._local_file(root, path)
for pattern, transformers in self._transformers.items():
if not pattern.search(path):
continue
consumed = False
for transformer in transformers:
try:
handle = provider.open(localpath)
except Exception as e:
_log.warn('Couldn\'t open {provider}:{path} for transformer {transformer}. Error: {err}',
provider=provider, path=localpath, transformer=transformer, err=e)
continue
consumed = self._cache_transformed_file(transformer, path, handle)
if consumed:
break
# Stop processing entirely if we have consumed the file.
if consumed:
_log.debug('Cached file {path} consumed by transformer.', path=path)
break
else:
# No transformers found for file, or file wasn't consumed. Add it to cache.
self._cache_entry(provider, root, path)
def _cache_entry(self, provider, root, path):
""" Add an entry at `path`, provided by `provider`, to the file cache. """
with self._lock:
self._file_cache.setdefault(path, [])
if provider and provider not in self._file_cache[path]:
self._file_cache[path].append((provider, root))
if path != ROOT:
parent = self.dirname(path)
if not self.exists(parent):
self._cache_directory(None, None, parent)
basename = self.basename(path)
self._listing_cache.setdefault(parent, set())
self._listing_cache[parent].add(basename)
def _cache_transformed_file(self, transformer, path, handle):
"""
Add a transformed file at `path`, transformed by `transformer`, to the file cache.
This will return whether or not the original file was consumed by `transformer`.
It might fail to add the transformed file to the file cache if the transformers raises an error.
If the transformer consumes the original file, this function will remove the original file from the file system,
if it exists on it.
"""
try:
instance = transformer(path, handle)
except Exception as e:
_log.warn('Error while transforming {path} with {transformer}: {err}', path=path, transformer=transformer, err=e)
return False
if not instance.valid():
return False
_log.trace('Caching transformed file: {path} <- {trans}...', path=path, trans=transformer)
# Determine root directory of files.
if instance.relative():
parentdir = self.dirname(path)
else:
parentdir = ROOT
# Mount as provider.
self._build_provider_cache(instance, parentdir)
if instance.consumes():
# Remove file cache for now-consumed file.
with self._lock:
if path in self._file_cache:
del self._file_cache[path]
return True
else:
return False
def _providers_for_file(self, path):
"""
Return a generator yielding (provider, mountpoint) tuples for all providers that provide given `path`.
Priority is done on a last-come last-serve basis: the last provider added that provides `path` is yielded first.
"""
if self._file_cache is None:
self._build_cache()
if path not in self._file_cache:
raise FileNotFound(path)
for provider, mountpoint in reversed(self._file_cache[path]):
yield provider, self._local_file(mountpoint, path)
def _local_file(self, root, path):
return path[len(root.rstrip(PATH_SEPARATOR)):]
## API.
def list(self, subdir=None):
""" List all files and directories in the root file system, or `subdir` if given, recursively. """
if self._file_cache is None:
self._build_cache()
if subdir is not None:
subdir = self.normalize(subdir)
if not self.isdir(subdir):
if not self.exists(subdir):
raise FileNotFound(subdir)
else:
raise NotADirectory(subdir)
files = { '/' }
to_process = collections.deque()
to_process.append(subdir)
while to_process:
target = to_process.popleft()
for entry in self._listing_cache[target]:
path = self.join(target, entry)
if self.isdir(path):
to_process.append(path)
files.add(path.replace(subdir, ''))
return files
else:
return set(self._file_cache)
def listdir(self, subdir=None):
""" List all files and directories in the root file system, or `subdir` is given. """
if self._file_cache is None:
self._build_cache()
if not subdir:
subdir = ROOT
else:
subdir = self.normalize(subdir)
if not self.isdir(subdir):
if not self.exists(subdir):
raise FileNotFound(subdir)
else:
raise NotADirectory(subdir)
return self._listing_cache[subdir]
def mount(self, path, provider):
"""
Mount `provider` at `path` in the virtual file system.
`provider` must be an object satisfying the following API:
- list(): return a list of all file names (including folders) this provider can provide.
- has(filename): return whether this provider can open given file.
- open(filename, **kwargs): open a file, has to raise one of the subclasses of `FileSystemError` on error, else return a subclass of `File`.
- isfile(filename): check if the given file is a file, should raise applicable `FileSystemError` subclass if applicable,
except for NotAFile/NotADirectory, or return a boolean.
- isdir(filename): check if the given file is a directory, should raise applicable `FileSystemError` subclass if applicable,
except for NotAFile/NotADirectory, or return a boolean.
A path or file can be provided by different providers. Their file lists will be merged.
Conflicting files will be handled as such:
- The last provider that has been mounted will serve the file first.
- If an error occurs while serving the file, the next provider according to these rules will serve it.
"""
path = self.normalize(path)
with self._lock:
self._roots.setdefault(path, [])
self._roots[path].append(provider)
_log.debug('Mounted {provider} on {path}.', provider=provider, path=path)
if self._file_cache is None:
self._build_cache()
else:
self._build_provider_cache(provider, path)
def unmount(self, path, provider):
""" Unmount `provider` from `path` in the virtual file system. Will trigger a full cache rebuild. """
path = self.normalize(path)
with self._lock:
self._roots[path].remove(provider)
_log.debug('Unmounted {provider} from {path}.', provider=provider, path=path)
self._build_cache()
def transform(self, pattern, transformer):
"""
TRANSFORMERS! TRANSFORMERS! MORE THAN MEETS THE EYE! TRANSFORMERS!
Add `transformer` as a transformer for files matching `pattern`.
`transformer` has to be a class(!) satisfying the provider API (see `mount`), plus the following API:
- __init__(filename, handle): initialize object, can raise any kind of error if the file is invalid.
`handle` is a `File` object pointing to the opened file.
- valid(): return whether the file is valid according to the format this transformer parses.
- consumes(): return whether the source file should be retained in the file system.
- relative(): return whether files exposed by this transformer should be relative to the path of the source file or absolute.
"""
pattern = re.compile(pattern, re.UNICODE)
with self._lock:
self._transformers.setdefault(pattern, [])
self._transformers[pattern].append(transformer)
_log.debug('Added transformer {transformer} for pattern {pattern}.', transformer=transformer, pattern=pattern.pattern)
if self._file_cache is None:
self._build_cache()
else:
self._build_transformer_cache(transformer, pattern)
def untransform(self, pattern, transformer):
""" Remove a transformer from the virtual file system. Will trigger a full cache rebuild. """
pattern = re.compile(pattern, re.UNICODE)
with self._lock:
self._transformers[pattern].remove(transformer)
_log.debug('Removed transformer {transformer} for pattern {pattern}.', transformer=transformer, pattern=pattern.pattern)
self._build_cache()
def open(self, filename, *args, **kwargs):
"""
Open `filename` and return a corresponding `File` object. Will raise `FileNotFound` if the file was not found.
Will only raise the error from the last attempted provider if multiple providers raise an error.
"""
error = None
filename = self.normalize(filename)
if self.isdir(filename):
raise NotAFile(filename)
for provider, localfile in self._providers_for_file(filename):
try:
_log.trace('Opening {filename} from {provider}...', filename=filename, provider=provider)
return provider.open(localfile, *args, **kwargs)
except FileNotFound:
continue
except FileSystemError as e:
error = e
if error:
raise error
else:
raise FileNotFound(filename)
def exists(self, filename):
""" Return whether or not `filename` exists. """
if self._file_cache is None:
self._build_cache()
filename = self.normalize(filename)
return filename in self._file_cache
def isdir(self, filename):
""" Return whether or not `filename` exists and is a directory. """
if self._file_cache is None:
self._build_cache()
filename = self.normalize(filename)
return filename in self._listing_cache
def isfile(self, filename):
""" Return whether or not `filename` exists and is a file. """
if self._file_cache is None:
self._build_cache()
filename = self.normalize(filename)
return filename in self._file_cache and filename not in self._listing_cache
def dirname(self, path):
""" Return the directory part of the given `path`. """
path = self.normalize(path)
return path.rsplit(PATH_SEPARATOR, 1)[0] or ROOT
def basename(self, path):
""" Return the filename part of the given `path`. """
if path == ROOT:
return ''
path = self.normalize(path)
return path.rsplit(PATH_SEPARATOR, 1)[1]
def join(self, *paths, normalized=True):
""" Join path components into a file system path. Optionally normalize the result. """
if normalized:
return self.normalize(PATH_SEPARATOR.join(paths))
return PATH_SEPARATOR.join(paths)
def split(self, path, *args, **kwargs):
""" Split path by path separator. """
return path.split(PATH_SEPARATOR, *args, **kwargs)
def normalize(self, path):
""" Normalize path to canonical path. """
# Quick check to see if we need to normalize at all.
if path.startswith(ROOT) and not BAD_PATH_PATTERN.search(path):
if path.endswith(PATH_SEPARATOR) and path != ROOT:
return path[:-len(PATH_SEPARATOR)]
return path
# Remove root.
if path.startswith(ROOT):
path = path[len(ROOT):]
# Split path into directory pieces and remove empty or redundant directories.
pieces = [ piece for piece in self.split(path) if piece and piece != '.' ]
# Remove parent directory entries.
while '..' in pieces:
i = pieces.index('..')
del pieces[i]
# The preceding directory too, of course.
if i > 0:
del pieces[i - 1]
return ROOT + self.join(*pieces, normalized=False)
class File(io.IOBase):
"""
An open file in the virtual file system.
Subclasses are expected to at least override the following:
- opened()
- readable() (if readable, returns False by default)
- writable() (if writable, returns False by default)
- seekable() (if seekable, returns False by default)
- close()
- read(amount=None) (if readable, raises FileNotReadable by default)
- write(data) (if writable, raises FileNotWritable by default)
- seek(position, mode) (if seekable, raises FileNotSeekable by default)
- tell() (if seekable, raises FileNotSeekable by default)
"""
def __del__(self):
try:
self.close()
except:
# Nothing we can do about it now, anyway.
pass
def close(self):
""" Close file. Any operation on the file after calling this method will fail with `FileClosed` raised. """
raise NotImplementedError
def opened(self):
""" Return whether this file is open. """
raise NotImplementedError
def readable(self):
""" Return whether this file is readable. """
return False
def writable(self):
""" Return whether this file is writable. """
return False
def seekable(self):
""" Return whether this file is seeekable. """
return False
def read(self, amount=None):
""" Read `amount` bytes from file. Will read full contents if `amount` is not given. """
raise FileNotReadable(self)
def write(self, data):
""" Write `data` to file. """
raise FileNotWritable(self)
def seek(self, position, mode=os.SEEK_CUR):
""" Seek in file. May raise `FileNotSeekable` if this file can't be seeked in. """
raise FileNotSeekable(self)
def tell(self):
""" Tell current file position. May raise `FileNotSeekable` if this file can't be seeked in. """
raise FileNotSeekable(self)
class FileSystemProvider:
""" A provider to mount a filesystem within another filesystem. """
def __init__(self, fs):
self.fs = fs
def __repr__(self):
return '<FileSystemProvider: {}>'.format(self.fs)
def list(self):
return self.fs.list()
def open(self, filename, *args, **kwargs):
return self.fs.open(filename, *args, **kwargs)
def has(self, filename):
return self.fs.isfile(filename)
def isfile(self, filename):
return self.fs.isfile(filename)
def isdir(self, filename):
return self.fs.isdir(filename)
## Stateful API.
def current():
import rave.game, rave.engine
game = rave.game.current()
if not game:
return rave.engine.engine.fs
return game.fs
def list(subdir=None):
return current().list(subdir)
def listdir(subdir=None):
return current().listdir(subdir)
def mount(path, provider):
return current().mount(path, provider)
def unmount(path, provider):
return current().unmount(path, provider)
def transform(pattern, transformer):
return current().transform(pattern, transformer)
def untransform(pattern, transformer):
return current().untransform(pattern, transformer)
def open(filename, *args, **kwargs):
return current().open(filename, *args, **kwargs)
def exists(filename):
return current().exists(filename)
def isfile(filename):
return current().isfile(filename)
def isdir(filename):
return current().isdir(filename)
def dirname(path):
return current().dirname(path)
def basename(path):
return current().basename(path)
def join(*paths, normalized=True):
return current().join(*paths, normalized=normalized)
def split(path, *args, **kwargs):
return current().split(path, *args, **kwargs)
def normalize(path):
return current().normalize(path)
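A hedged sketch (not part of the module) of a minimal in-memory provider that satisfies the API documented in FileSystem.mount(); every name introduced below is illustrative only.

# class BytesFile(File):
#     def __init__(self, data):
#         self._data, self._pos, self._open = data, 0, True
#     def opened(self): return self._open
#     def readable(self): return True
#     def close(self): self._open = False
#     def read(self, amount=None):
#         if not self._open: raise FileClosed(self)
#         chunk = self._data[self._pos:] if amount is None else self._data[self._pos:self._pos + amount]
#         self._pos += len(chunk)
#         return chunk
#
# class DictProvider:
#     def __init__(self, files): self.files = dict(files)   # provider-local path -> bytes
#     def list(self): return list(self.files)
#     def has(self, fn): return fn in self.files
#     def isfile(self, fn): return fn in self.files
#     def isdir(self, fn): return False
#     def open(self, fn, **kwargs):
#         if fn not in self.files: raise FileNotFound(fn)
#         return BytesFile(self.files[fn])
#
# fs = FileSystem()
# fs.mount('/assets', DictProvider({'/readme.txt': b'hello'}))
# print(fs.open('/assets/readme.txt').read())    # b'hello'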
| 0
| 0
| 0
| 19,766
| 0
| 861
| 0
| -32
| 748
|
dbf0337be9f2b428ff1ae5f70ee820bc2aaa584f
| 802
|
py
|
Python
|
geogebra_applet/views.py
|
Stasianna/geogebra-project
|
33ddf30ec8b001f86fb35d336b8d53bcdf69231b
|
[
"MIT"
] | null | null | null |
geogebra_applet/views.py
|
Stasianna/geogebra-project
|
33ddf30ec8b001f86fb35d336b8d53bcdf69231b
|
[
"MIT"
] | null | null | null |
geogebra_applet/views.py
|
Stasianna/geogebra-project
|
33ddf30ec8b001f86fb35d336b8d53bcdf69231b
|
[
"MIT"
] | null | null | null |
#from django.http import HttpResponse
#from django.http import response_redirect
| 32.08
| 70
| 0.77182
|
#from django.http import HttpResponse
#from django.http import response_redirect
from django.shortcuts import render_to_response
from django.views.generic import DetailView, CreateView, TemplateView
from geogebra_applet.models import GeogebraApplet
class GeogebraAppletDetailView(DetailView):
model = GeogebraApplet
class MainPageView(TemplateView):
template_name = 'geogebra_applet/main_str.html'
def get_context_data(self, **kwargs):
context = super(MainPageView, self).get_context_data(**kwargs)
context['arhives'] = GeogebraApplet.objects.all()
return context
def ViewHtml(request, file):
copymodel = GeogebraApplet.objects.filter(id = file).first()
f = copymodel.index_file.open(mode="rb")
return render_to_response(copymodel.index_file.url)
| 0
| 0
| 0
| 308
| 0
| 173
| 0
| 102
| 136
|
637cd14234add4d98a4167907d1629a8dc3593e3
| 6,036
|
py
|
Python
|
test/test_diff_tar.py
|
gmertes/conda-mirror
|
34b206e19d8c858676ce2b707da15165578e6f79
|
[
"BSD-3-Clause"
] | 6
|
2020-10-09T15:55:57.000Z
|
2021-07-29T11:08:10.000Z
|
test/test_diff_tar.py
|
gmertes/conda-mirror
|
34b206e19d8c858676ce2b707da15165578e6f79
|
[
"BSD-3-Clause"
] | 34
|
2020-09-05T05:08:16.000Z
|
2022-03-09T15:13:55.000Z
|
test/test_diff_tar.py
|
gmertes/conda-mirror
|
34b206e19d8c858676ce2b707da15165578e6f79
|
[
"BSD-3-Clause"
] | 7
|
2020-09-07T09:45:59.000Z
|
2022-01-20T20:16:38.000Z
|
EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"
| 29.588235
| 88
| 0.680583
|
import os
import sys
import json
import shutil
import tempfile
from os.path import isfile, join
import pathlib
import pytest
import conda_mirror.diff_tar as dt
EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"
@pytest.fixture
def tmpdir():
tmpdir = tempfile.mkdtemp()
dt.mirror_dir = join(tmpdir, "repo")
dt.DEFAULT_REFERENCE_PATH = join(tmpdir, "reference.json")
dt.DEFAULT_UPDATE_PATH = join(tmpdir, "updates.tar")
yield tmpdir
shutil.rmtree(tmpdir)
def test_md5_file(tmpdir):
tmpfile = join(tmpdir, "testfile")
with open(tmpfile, "wb") as fo:
fo.write(b"A\n")
assert dt.md5_file(tmpfile) == "bf072e9119077b4e76437a93986787ef"
def create_test_repo(subdirname="linux-64"):
subdir = join(dt.mirror_dir, subdirname)
os.makedirs(subdir)
with open(join(subdir, "repodata.json"), "w") as fo:
fo.write(json.dumps({"packages": {"a-1.0-0.tar.bz2": {"md5": EMPTY_MD5}}}))
for fn in "repodata.json.bz2", "a-1.0-0.tar.bz2":
with open(join(subdir, fn), "wb") as fo:
pass
def test_find_repos(tmpdir):
create_test_repo()
assert list(dt.find_repos(dt.mirror_dir)) == [join(dt.mirror_dir, "linux-64")]
def test_all_repodata_repos(tmpdir):
create_test_repo()
d = dt.all_repodata(dt.mirror_dir)
assert d[join(dt.mirror_dir, "linux-64")]["a-1.0-0.tar.bz2"]["md5"] == EMPTY_MD5
def test_verify_all_repos(tmpdir):
create_test_repo()
dt.verify_all_repos(dt.mirror_dir)
def test_read_no_reference(tmpdir):
# tmpdir is empty - join(tmpdir, 'reference.json') does not exist
with pytest.raises(dt.NoReferenceError):
dt.read_reference()
def test_write_and_read_reference(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"))
ref = dt.read_reference()
assert ref[join(dt.mirror_dir, "linux-64")]["a-1.0-0.tar.bz2"]["md5"] == EMPTY_MD5
def test_write_and_read_reference_with_target(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"), join(tmpdir, "reference_target.json"))
ref = dt.read_reference(join(tmpdir, "reference_target.json"))
assert ref[join(dt.mirror_dir, "linux-64")]["a-1.0-0.tar.bz2"]["md5"] == EMPTY_MD5
def test_get_updates(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"))
assert list(dt.get_updates(dt.mirror_dir)) == []
create_test_repo("win-32")
lst = sorted(pathlib.Path(f) for f in dt.get_updates(dt.mirror_dir))
assert lst == [
pathlib.Path("win-32/a-1.0-0.tar.bz2"),
pathlib.Path("win-32/repodata.json"),
pathlib.Path("win-32/repodata.json.bz2"),
]
def test_get_updates_with_target(tmpdir):
create_test_repo()
dt.write_reference(join(tmpdir, "repo"), join(tmpdir, "reference_target.json"))
assert (
list(dt.get_updates(dt.mirror_dir, join(tmpdir, "reference_target.json"))) == []
)
create_test_repo("win-32")
lst = sorted(
pathlib.Path(f)
for f in dt.get_updates(dt.mirror_dir, join(tmpdir, "reference_target.json"))
)
assert lst == [
pathlib.Path("win-32/a-1.0-0.tar.bz2"),
pathlib.Path("win-32/repodata.json"),
pathlib.Path("win-32/repodata.json.bz2"),
]
def test_tar_repo(tmpdir):
create_test_repo()
dt.write_reference(dt.mirror_dir)
create_test_repo("win-32")
dt.tar_repo(dt.mirror_dir)
assert isfile(dt.DEFAULT_UPDATE_PATH)
def test_tar_repo_with_target(tmpdir):
create_test_repo()
tarball = join(tmpdir, "updates_target.tar")
reference = join(tmpdir, "reference_target.json")
dt.write_reference(dt.mirror_dir, reference)
create_test_repo("win-32")
dt.tar_repo(dt.mirror_dir, reference, tarball)
assert isfile(tarball)
def run_with_args(args):
old_args = list(sys.argv)
sys.argv = ["conda-diff-tar"] + args
dt.main()
sys.argv = old_args
def test_version():
run_with_args(["--version"])
def test_cli_reference(tmpdir):
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
assert isfile(dt.DEFAULT_REFERENCE_PATH)
def test_cli_reference_outfile(tmpdir):
target_path = join(tmpdir, "ref_target.json")
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
assert isfile(dt.DEFAULT_REFERENCE_PATH)
run_with_args(["--reference", "--outfile", target_path, dt.mirror_dir])
assert isfile(target_path)
with open(dt.DEFAULT_REFERENCE_PATH, "r") as ref1:
with open(target_path, "r") as ref2:
assert ref1.readlines() == ref2.readlines()
def test_cli_create_outfile(tmpdir):
target_path = join(tmpdir, "tar_target.tar")
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
run_with_args(["--create", "--outfile", target_path, dt.mirror_dir])
assert isfile(target_path)
def test_cli_create_infile(tmpdir):
target_ref_path = join(tmpdir, "ref_target.json")
create_test_repo()
run_with_args(["--reference", "--outfile", target_ref_path, dt.mirror_dir])
assert isfile(target_ref_path)
run_with_args(["--create", "--infile", target_ref_path, dt.mirror_dir])
assert isfile(dt.DEFAULT_UPDATE_PATH)
def test_cli_create_infile_outfile(tmpdir):
target_tar_path = join(tmpdir, "tar_target.tar")
target_ref_path = join(tmpdir, "ref_target.json")
create_test_repo()
run_with_args(["--reference", "--outfile", target_ref_path, dt.mirror_dir])
assert isfile(target_ref_path)
run_with_args(
[
"--create",
"--outfile",
target_tar_path,
"--infile",
target_ref_path,
dt.mirror_dir,
]
)
assert isfile(target_tar_path)
def test_misc(tmpdir):
create_test_repo()
run_with_args(["--reference", dt.mirror_dir])
create_test_repo("win-32")
run_with_args(["--show", dt.mirror_dir])
run_with_args(["--create", "--verbose", dt.mirror_dir])
run_with_args(["--verify", dt.mirror_dir])
run_with_args([dt.mirror_dir]) # do nothing
| 0
| 244
| 0
| 0
| 0
| 5,077
| 0
| -38
| 683
|
926b3d6413e556587b0edd606bb4824c907485dd
| 417
|
py
|
Python
|
app/core/migrations/0005_item_slug.py
|
Andika7/microservice-django
|
7c25635d7fe371a62f14d2e3b6685678354a0568
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_item_slug.py
|
Andika7/microservice-django
|
7c25635d7fe371a62f14d2e3b6685678354a0568
|
[
"MIT"
] | null | null | null |
app/core/migrations/0005_item_slug.py
|
Andika7/microservice-django
|
7c25635d7fe371a62f14d2e3b6685678354a0568
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-05 10:54
| 20.85
| 54
| 0.58753
|
# Generated by Django 3.0.2 on 2020-02-05 10:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20200205_1029'),
]
operations = [
migrations.AddField(
model_name='item',
name='slug',
field=models.SlugField(default='product'),
preserve_default=False,
),
]
| 0
| 0
| 0
| 303
| 0
| 0
| 0
| 19
| 46
|
be8b6d00d6603fd9d2bfbc19d2ac5292cd519ac9
| 1,268
|
py
|
Python
|
src/views/CmdView/deactivate.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 1
|
2019-06-17T17:01:17.000Z
|
2019-06-17T17:01:17.000Z
|
src/views/CmdView/deactivate.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 7
|
2021-02-08T20:46:15.000Z
|
2021-09-08T02:12:59.000Z
|
src/views/CmdView/deactivate.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
| 30.190476
| 77
| 0.576498
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from views.CmdView.tokenify import Tokenify
from views.CmdView.command import Command
class Deactivate(Command):
def __init__(self, object, obj, id=None):
super(Deactivate, self).__init__(obj)
self.id = id
self.object = object
def execute(self, token, username):
body = {'status': 'DEACTIVATED'}
cookies = dict(username=username, token=token)
if self.object not in ['worker', 'master']:
print("Object " + self.object + " not known")
return
elif self.object == 'worker':
self.check_id(self.id)
url = os.path.join(Tokenify.get_url(), 'workers', str(self.id))
elif self.object == 'master':
url = os.path.join(Tokenify.get_url(), 'master')
r, text = self.send_request('patch', url, cookies=cookies, json=body)
if r.status_code == 200 or r.status_code == 204:
print(self.object + ' ' + str(self.id) + ' deactivated!')
else:
print(str(r.status_code) + "---- HTTP REQUEST GET'S ERROR")
print(text)
def check_id(self,id):
if id is None:
print("Id for Storage or Worker needed")
exit(0)
| 0
| 0
| 0
| 1,105
| 0
| 0
| 0
| 30
| 90
|
277cfc2eaf7d209975206be666ec892e87746c43
| 4,457
|
py
|
Python
|
datasets/utils.py
|
xdr940/cc
|
a98fe9b6c33c332a4c399f968032a90989c55672
|
[
"MIT"
] | null | null | null |
datasets/utils.py
|
xdr940/cc
|
a98fe9b6c33c332a4c399f968032a90989c55672
|
[
"MIT"
] | 1
|
2019-08-16T07:09:22.000Z
|
2019-09-04T04:59:51.000Z
|
datasets/utils.py
|
xdr940/cc
|
a98fe9b6c33c332a4c399f968032a90989c55672
|
[
"MIT"
] | 1
|
2020-01-13T04:51:22.000Z
|
2020-01-13T04:51:22.000Z
|
#
#
#gt
| 30.319728
| 118
| 0.572582
|
import numpy as np
from scipy.misc import imread
import torch
import random
def load_depth(path,format='png'):
if format=='npy':
tgt_depth = np.expand_dims(np.load(path), axis=0)
elif format=='png':
tgt_depth =np.expand_dims( imread(path), axis=0)
return torch.from_numpy(tgt_depth).float() / 255
def load_as_float(path):
return imread(path).astype(np.float32)
# frame skipping happens here
def crawl_folders(folders_list, sequence_length,shuffle = False):
sequence_set = []
demi_length = (sequence_length-1)//2
for folder in folders_list:
        intrinsics = np.genfromtxt(folder/'cam.txt', delimiter=',')  # field delimiter
intrinsics = intrinsics.astype(np.float32).reshape((3, 3))
imgs = sorted(folder.files('*.jpg'))
if len(imgs) < sequence_length:
continue
for i in range(demi_length, len(imgs)-demi_length):
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': []}
for j in range(-demi_length, demi_length + 1):
if j != 0:
sample['ref_imgs'].append(imgs[i+j])
sequence_set.append(sample)
if shuffle:
random.shuffle(sequence_set)
else:
pass
return sequence_set
# adds frame-skipping support
def crawl_folders2(folders_list, sequence_length,interval_frame=0,sample_gap = 0, shuffle=False):
sequence_set = []
demi_length = (sequence_length - 1) // 2
for folder in folders_list:
        intrinsics = np.genfromtxt(folder / 'cam.txt', delimiter=',')  # field delimiter
intrinsics = intrinsics.astype(np.float32).reshape((3, 3))
imgs = sorted(folder.files('*.jpg'))
        if len(imgs) < sequence_length:  # too few frames, skip this folder
continue
        # drop frames according to the skip interval
for i in range(len(imgs)):
if i % (interval_frame+1) != 0 :
imgs[i]=None
while None in imgs:
imgs.remove(None)
        for i in range(demi_length, len(imgs) - demi_length):  # within a single folder
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': []}
for j in range(-demi_length, demi_length + 1):
if j != 0:
sample['ref_imgs'].append(imgs[i + j])
sequence_set.append(sample)
if shuffle:
random.shuffle(sequence_set)
else:
pass
    # thin out samples to speed up training
for i in range(len(sequence_set)):
if i % (sample_gap+1) != 0:
sequence_set[i] = None
while None in sequence_set:
sequence_set.remove(None)
return sequence_set
# skip frames and load ground truth (gt)
def crawl_folders_gt(folders_list, sequence_length,interval_frame=0,sample_gap = 0,depth_format='png', shuffle=False):
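    """Like crawl_folders2, but also attaches the ground-truth depth path of each target frame."""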
sequence_set = []
demi_length = (sequence_length - 1) // 2
for folder in folders_list:
        intrinsics = np.genfromtxt(folder / 'cam.txt', delimiter=',')  # field separator
intrinsics = intrinsics.astype(np.float32).reshape((3, 3))
depths_folder = folder / 'depths'
imgs_folder = folder/'imgs'
# all paths
imgs = sorted(imgs_folder.files('*.png'))
if depth_format=='npy':
depths = sorted(depths_folder.files('*.npy'))
elif depth_format=='png':
depths = sorted(depths_folder.files('*.png'))
        if len(imgs) < sequence_length:  # too few frames, skip this folder
continue
        # subsample: drop in-between frames at the given interval
for i in range(len(imgs)):
if i % (interval_frame+1) != 0 :
imgs[i]=None
depths[i]=None
#pose[i]=None
#flow[i]=None
while None in imgs:
imgs.remove(None)
depths.remove(None)
        for i in range(demi_length, len(imgs) - demi_length):  # within a single folder
sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': [],'tgt_depth':depths[i]}
            # ref imgs processing
for j in range(-demi_length, demi_length + 1):
if j != 0:
sample['ref_imgs'].append(imgs[i + j])
            # flow processing
            # pose processing
sequence_set.append(sample)
if shuffle:
random.shuffle(sequence_set)
else:
pass
    # thin out samples to speed up training
for i in range(len(sequence_set)):
if i % (sample_gap+1) != 0:
sequence_set[i] = None
while None in sequence_set:
sequence_set.remove(None)
return sequence_set
| 252
| 0
| 0
| 0
| 0
| 4,171
| 0
| -12
| 200
|
6377ca21c65dacd8a4f10dd4484517d8cf1d99aa
| 6,665
|
py
|
Python
|
modules/datastructures/TrainData_deepCSV.py
|
dntaylor/DeepJet
|
249610b3b80543c8c84f5ba795bbb07c097f8150
|
[
"Apache-2.0"
] | 1
|
2018-02-16T13:13:09.000Z
|
2018-02-16T13:13:09.000Z
|
modules/datastructures/TrainData_deepCSV.py
|
dntaylor/DeepJet
|
249610b3b80543c8c84f5ba795bbb07c097f8150
|
[
"Apache-2.0"
] | null | null | null |
modules/datastructures/TrainData_deepCSV.py
|
dntaylor/DeepJet
|
249610b3b80543c8c84f5ba795bbb07c097f8150
|
[
"Apache-2.0"
] | 5
|
2017-11-03T15:51:27.000Z
|
2019-05-29T14:45:23.000Z
|
'''
Created on 21 Feb 2017
@author: jkiesele
'''
| 35.452128
| 103
| 0.580045
|
'''
Created on 21 Feb 2017
@author: jkiesele
'''
from TrainDataDeepJet import TrainData_Flavour, TrainData_simpleTruth, TrainData_fullTruth, fileTimeOut
import numpy as np
class TrainData_deepCSV(TrainData_Flavour, TrainData_simpleTruth):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
TrainData_Flavour.__init__(self)
self.addBranches(['jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel'])
self.addBranches(['TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'],
6)
self.addBranches(['TagVarCSV_trackEtaRel'],4)
self.addBranches(['TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig'],
1)
def readFromRootFile(self,filename,TupleMeanStd, weighter):
super(TrainData_deepCSV, self).readFromRootFile(filename, TupleMeanStd, weighter)
ys = self.y[0]
flav_sum = ys.sum(axis=1)
if (flav_sum > 1).any():
raise ValueError('In file: %s I get a jet with multiple flavours assigned!' % filename)
mask = (flav_sum == 1) if self.remove else (np.ones(flav_sum.shape[0]) == 1)
self.x = [self.x[0][mask]]
self.y = [self.y[0][mask]]
self.w = [self.w[0][mask]]
class TrainData_deepCSV_RNN(TrainData_fullTruth):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
super(TrainData_deepCSV_RNN, self).__init__()
self.addBranches([
'jet_pt', 'jet_eta',
'TagVarCSV_jetNSecondaryVertices',
'TagVarCSV_trackSumJetEtRatio', 'TagVarCSV_trackSumJetDeltaR',
'TagVarCSV_vertexCategory', 'TagVarCSV_trackSip2dValAboveCharm',
'TagVarCSV_trackSip2dSigAboveCharm', 'TagVarCSV_trackSip3dValAboveCharm',
'TagVarCSV_trackSip3dSigAboveCharm', 'TagVarCSV_jetNSelectedTracks',
'TagVarCSV_jetNTracksEtaRel'])
self.addBranches([
'TagVarCSVTrk_trackJetDistVal',
'TagVarCSVTrk_trackPtRel',
'TagVarCSVTrk_trackDeltaR',
'TagVarCSVTrk_trackPtRatio',
'TagVarCSVTrk_trackSip3dSig',
'TagVarCSVTrk_trackSip2dSig',
'TagVarCSVTrk_trackDecayLenVal'
], 6)
self.addBranches(['TagVarCSV_trackEtaRel'],4)
self.addBranches([
'TagVarCSV_vertexMass',
'TagVarCSV_vertexNTracks',
'TagVarCSV_vertexEnergyRatio',
'TagVarCSV_vertexJetDeltaR',
'TagVarCSV_flightDistance2dVal',
'TagVarCSV_flightDistance2dSig',
'TagVarCSV_flightDistance3dVal',
'TagVarCSV_flightDistance3dSig'
], 1)
self.addBranches(['jet_corr_pt'])
self.registerBranches(['gen_pt_WithNu'])
self.regressiontargetclasses=['uncPt','Pt']
def readFromRootFile(self,filename,TupleMeanStd, weighter):
from DeepJetCore.preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
import numpy
from DeepJetCore.stopwatch import stopwatch
sw=stopwatch()
swall=stopwatch()
import ROOT
fileTimeOut(filename,120) #give eos a minute to recover
rfile = ROOT.TFile(filename)
tree = rfile.Get("deepntuplizer/tree")
self.nsamples=tree.GetEntries()
print('took ', sw.getAndReset(), ' seconds for getting tree entries')
# split for convolutional network
x_global = MeanNormZeroPad(
filename,None,
[self.branches[0]],
[self.branchcutoffs[0]],self.nsamples
)
x_cpf = MeanNormZeroPadParticles(
filename,None,
self.branches[1],
self.branchcutoffs[1],self.nsamples
)
x_etarel = MeanNormZeroPadParticles(
filename,None,
self.branches[2],
self.branchcutoffs[2],self.nsamples
)
x_sv = MeanNormZeroPadParticles(
filename,None,
self.branches[3],
self.branchcutoffs[3],self.nsamples
)
print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
npy_array = self.readTreeFromRootToTuple(filename)
reg_truth=npy_array['gen_pt_WithNu'].view(numpy.ndarray)
reco_pt=npy_array['jet_corr_pt'].view(numpy.ndarray)
correctionfactor=numpy.zeros(self.nsamples)
for i in range(self.nsamples):
correctionfactor[i]=reg_truth[i]/reco_pt[i]
truthtuple = npy_array[self.truthclasses]
alltruth=self.reduceTruth(truthtuple)
self.x=[x_global, x_cpf, x_etarel, x_sv, reco_pt]
self.y=[alltruth,correctionfactor]
self._normalize_input_(weighter, npy_array)
class TrainData_deepCSV_RNN_Deeper(TrainData_deepCSV_RNN):
'''
same as TrainData_deepCSV but with 4 truth labels: B BB C UDSG
'''
def __init__(self):
'''
Constructor
'''
super(TrainData_deepCSV_RNN_Deeper, self).__init__()
self.branchcutoffs = [1, 20, 13, 4, 1]
| 0
| 0
| 0
| 6,401
| 0
| 0
| 0
| 79
| 113
|
589972506c18ceae6aaf9215a98021b547e17043
| 603
|
py
|
Python
|
src/clover/demo.py
|
gregjhansell97/leprechaun
|
d31e8d1a4b0a91aee2902602224c924b0b89fa06
|
[
"MIT"
] | null | null | null |
src/clover/demo.py
|
gregjhansell97/leprechaun
|
d31e8d1a4b0a91aee2902602224c924b0b89fa06
|
[
"MIT"
] | null | null | null |
src/clover/demo.py
|
gregjhansell97/leprechaun
|
d31e8d1a4b0a91aee2902602224c924b0b89fa06
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from clover.gui import CloverApp
# may want to consider bounding speed
app = CloverApp(title="leo-demo")
| 20.793103
| 55
| 0.633499
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from clover.gui import CloverApp, Clock
# may want to consider bounding speed
app = CloverApp(title="leo-demo")
def tick(time_interval):
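    # Each tick: steer leo toward the gold at a speed proportional to the remaining distance.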
if app.world.gold is None or app.world.leo is None:
return
try:
gold_x, gold_y = app.world.gold.loc
leo_x, leo_y = app.world.leo.loc
except AttributeError:
return
scale_factor = 0.01
dx = scale_factor * (gold_x - leo_x)
dy = scale_factor * (gold_y - leo_y)
app.world.leo.vel = (dx, dy)
def main():
Clock.schedule_interval(tick, 0.01)
app.run()
| 0
| 0
| 0
| 0
| 0
| 393
| 0
| 7
| 46
|
7c1efb98e5fd682a9bcfc00cec5517743f22f8a6
| 1,197
|
py
|
Python
|
flatiron-notebook/test/test_languages.py
|
IllumiDesk/flatiron-stacks
|
51ec24fefc35ccca0a1667ae20438db26a901d22
|
[
"MIT"
] | null | null | null |
flatiron-notebook/test/test_languages.py
|
IllumiDesk/flatiron-stacks
|
51ec24fefc35ccca0a1667ae20438db26a901d22
|
[
"MIT"
] | 2
|
2021-08-02T02:53:14.000Z
|
2021-11-05T18:08:18.000Z
|
flatiron-notebook/test/test_languages.py
|
IllumiDesk/flatiron-stacks
|
51ec24fefc35ccca0a1667ae20438db26a901d22
|
[
"MIT"
] | 1
|
2020-10-21T16:08:46.000Z
|
2020-10-21T16:08:46.000Z
|
import docker
from docker.errors import ContainerError
import logging
import pytest
LOGGER = logging.getLogger(__name__)
PYTHON_VERSION='3.9.5'
NOTEBOOK_IMAGE_TAG=f'python-{PYTHON_VERSION}'
def test_invalid_cmd():
"""Ensure that an invalid command returns a docker.errors.ContainerError
"""
with pytest.raises(ContainerError):
LOGGER.info('Test an invalid command ...')
client = docker.from_env()
client.containers.run('illumidesk/flatiron-notebook', 'foo --version')
| 29.925
| 113
| 0.70259
|
import docker
from docker.errors import ContainerError
import logging
import pytest
LOGGER = logging.getLogger(__name__)
PYTHON_VERSION='3.9.5'
NOTEBOOK_IMAGE_TAG=f'python-{PYTHON_VERSION}'
@pytest.mark.parametrize(
'language,version_output',
[
('python', ['Python', '3.9.5\n']),
],
)
def test_languages(language, version_output):
"""Ensure that the language is available in the container's PATH and that
it has the correct version
"""
LOGGER.info(f'Test that language {language} {PYTHON_VERSION} is correctly installed ...')
client = docker.from_env()
output = client.containers.run(f'illumidesk/flatiron-notebook:{NOTEBOOK_IMAGE_TAG}', f'{language} --version')
output_decoded = output.decode('utf-8').split(' ')
assert output_decoded[0:3] == version_output
LOGGER.info(f'Output from command: {output_decoded[0:3]}')
def test_invalid_cmd():
"""Ensure that an invalid command returns a docker.errors.ContainerError
"""
with pytest.raises(ContainerError):
LOGGER.info('Test an invalid command ...')
client = docker.from_env()
client.containers.run('illumidesk/flatiron-notebook', 'foo --version')
| 0
| 662
| 0
| 0
| 0
| 0
| 0
| 0
| 23
|
39b66c96fbaf8cecf65a0efb079529756c9fb5ba
| 5,846
|
py
|
Python
|
bot.py
|
rivermont/orka
|
4719c3b758d85f9b340698b9b637af196a50cec2
|
[
"MIT"
] | 1
|
2017-10-08T17:18:44.000Z
|
2017-10-08T17:18:44.000Z
|
bot.py
|
rivermont/orka
|
4719c3b758d85f9b340698b9b637af196a50cec2
|
[
"MIT"
] | null | null | null |
bot.py
|
rivermont/orka
|
4719c3b758d85f9b340698b9b637af196a50cec2
|
[
"MIT"
] | null | null | null |
"""
Orka Discord Bot
Copyright (c) 2017 William Bennett
"""
###########
# IMPORTS #
###########
from os import path, makedirs
###################
# OTHER FUNCTIONS #
###################
def add_msg(channel, text, mode='a+'):
"""
Appends a message to the end of a file.
"""
with open('channels/{0}.txt'.format(channel), '{0}'.format(mode), encoding="utf_8") as file:
file.write('{0}\n'.format(text))
#######
# BOT #
#######
#######
# RUN #
#######
client = Orka()
read = []
if __name__ == '__main__':
if not path.exists('channels\\'):
makedirs('channels\\')
client.run()
| 34.591716
| 128
| 0.561923
|
"""
Orka Discord Bot
Copyright (c) 2017 William Bennett
"""
###########
# IMPORTS #
###########
import discord
import random
import markovify
from os import path, makedirs
from scripts import *
###################
# OTHER FUNCTIONS #
###################
def add_msg(channel, text, mode='a+'):
"""
Appends a message to the end of a file.
"""
with open('channels/{0}.txt'.format(channel), '{0}'.format(mode), encoding="utf_8") as file:
file.write('{0}\n'.format(text))
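# Rebuild the module-level markov model from a channel's logged messages.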
def make_markov_model(channel):
    global model
    with open('channels/{0}.txt'.format(channel), 'r', encoding="utf_8") as file:
        model = markovify.NewlineText(file.read())
#######
# BOT #
#######
class Orka(discord.Client):
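    """Discord client that logs readable channels and answers !flip, !roll, !convert and markov-based commands."""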
async def on_ready(self):
print('Logging in...')
print('Logged in as {0}; ID #{1}'.format(client.user.name, client.user.id))
print('Setting status...')
await client.change_presence(game=discord.Game(name='https://github.com/rivermont/orka'))
print('Gathering available text channels...')
for server in client.servers:
for channel in server.channels:
if channel.type == discord.ChannelType.text:
if channel.permissions_for(server.me).read_messages:
print('Read access in: ' + server.name + '/' + channel.name)
read.append(channel)
print('Downloading logs from readable text channels...')
for channel in read:
add_msg(channel, '', mode='w+')
async for message in client.logs_from(channel, limit=1000):
add_msg(channel, message.content, mode='a')
print('Ready.')
async def on_member_join(self, member):
general = self.get_server("256600580837998592").get_channel("256600580837998592")
await client.send_message(
general,
'Welcome, @{0}! Please familiarize yourself with our #rules, then go wild!'.format(member.name)
)
async def on_message(self, message):
print('Received message..')
content = message.content
channel = message.channel
add_msg(channel, content)
# General commands
if message.content.startswith('!flip'):
# Flips a coin on two choices. Defaults to Heads or Tails.
print('Flipping coin...')
if len(content.split()) == 1:
choice_ = random.choice(['Heads', 'Tails'])
await client.send_message(channel, choice_)
elif len(content.split()) == 2:
await client.send_message(channel, 'Only one option supplied. Must be two or none.')
elif len(content.split()) == 3:
options = content.split()[1:]
flip = random.choice(options)
await client.send_message(channel, flip)
elif len(content.split()) > 3:
await client.send_message(channel, 'Too many options supplied. Must be two or none.')
elif content.startswith('!roll'):
# Rolls a dice. Defaults to a d6.
print('Rolling die...')
if len(content.split()) == 1:
roll = random.randint(1, 6)
await client.send_message(channel, 'You rolled a {0}.'.format(roll))
if len(content.split()) == 2:
input_ = content.split()[1]
roll = random.randint(1, int(input_))
await client.send_message(channel, 'You rolled a {0}.'.format(roll))
elif content.startswith('!convert'):
# Converts Kelvin/Celsius/Fahrenheit
input_ = content.split()
try:
amount = int(input_[1][:-1])
unit_from = input_[1][-1]
unit_to = input_[2]
result = convert(amount, unit_from, unit_to)
if result == "Error":
raise IndexError
else:
await client.send_message(channel, 'Converted {0}{1} to {2}{3}.'.format(amount, unit_from, result, unit_to))
except IndexError:
print('Invalid input.')
await client.send_message(channel, 'Invalid input. Must be in format `!convert 23U U`.')
# Moderation commands
elif content.startswith('@stop'):
print('Stopping bot...')
await client.logout()
elif content.startswith('@logs'):
async for m in client.logs_from(channel):
add_msg(channel, m.content)
elif content.startswith('@generate'):
print('Generating markov model for channel {0}'.format(channel))
make_markov_model(channel)
await client.send_message(channel, 'Successfully generated markov model.')
elif content.startswith('!sentence'):
# Generates a single line from the current markov model
# Under moderation b/c that's where @generate is
sentence = ''
try:
sentence = model.make_sentence(tries=1000)
except NameError:
print('No available markov model.')
await client.send_message(channel, 'No available markov model.')
if not bool(sentence):
await client.send_message(channel, 'No sentence generated.')
else:
await client.send_message(channel, sentence)
elif content.startswith('@save'):
with open('model.json', 'w+') as f:
f.write(model.to_json())
elif content.startswith('@test'):
# Generic testing function
pass
#######
# RUN #
#######
client = Orka()
read = []
if __name__ == '__main__':
if not path.exists('channels\\'):
makedirs('channels\\')
client.run()
| 0
| 0
| 4,865
| 6
| 0
| 153
| 0
| -20
| 217
|
5dfc4ead3ddd6b27d4c94803fa33ddb4e209c9c4
| 20,054
|
py
|
Python
|
composer/callbacks/checkpoint_saver.py
|
hanlint/composer
|
83d96b7efde533cbc2fff7dd7e0769da2b177807
|
[
"Apache-2.0"
] | null | null | null |
composer/callbacks/checkpoint_saver.py
|
hanlint/composer
|
83d96b7efde533cbc2fff7dd7e0769da2b177807
|
[
"Apache-2.0"
] | null | null | null |
composer/callbacks/checkpoint_saver.py
|
hanlint/composer
|
83d96b7efde533cbc2fff7dd7e0769da2b177807
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 MosaicML. All Rights Reserved.
"""Callback to save checkpoints during training."""
from __future__ import annotations
import logging
from typing import Callable, Union
from composer.core import Event, State
from composer.core.time import Time, TimeUnit
log = logging.getLogger(__name__)
__all__ = ["CheckpointSaver", "checkpoint_periodically"]
def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, Time]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`~TimeUnit.EPOCH`\\s.
Otherwise, the unit must be either :attr:`TimeUnit.EPOCH` or :attr:`TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
and at the end of training.
Returns:
Callable[[State, Event], bool]: A function that can be passed as the ``save_interval``
argument into the :class:`CheckpointSaver`.
"""
if isinstance(interval, str):
interval = Time.from_timestring(interval)
if isinstance(interval, int):
interval = Time(interval, TimeUnit.EPOCH)
if interval.unit == TimeUnit.EPOCH:
save_event = Event.EPOCH_CHECKPOINT
elif interval.unit == TimeUnit.BATCH:
save_event = Event.BATCH_CHECKPOINT
else:
raise NotImplementedError(
f"Unknown checkpointing interval: {interval.unit}. Must be TimeUnit.EPOCH or TimeUnit.BATCH.")
last_checkpoint_batch = None
return save_interval
| 49.761787
| 154
| 0.621472
|
# Copyright 2021 MosaicML. All Rights Reserved.
"""Callback to save checkpoints during training."""
from __future__ import annotations
import logging
import os
import pathlib
import textwrap
from typing import Callable, List, Optional, Tuple, Union
from composer.core import Event, State
from composer.core.callback import Callback
from composer.core.time import Time, Timestamp, TimeUnit
from composer.loggers import Logger
from composer.loggers.logger import LogLevel
from composer.utils import checkpoint, dist
from composer.utils.file_helpers import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, FORMAT_NAME_WITH_DIST_TABLE,
ensure_folder_is_empty, format_name_with_dist, format_name_with_dist_and_time,
is_tar)
log = logging.getLogger(__name__)
__all__ = ["CheckpointSaver", "checkpoint_periodically"]
def checkpoint_periodically(interval: Union[str, int, Time]) -> Callable[[State, Event], bool]:
"""Helper function to create a checkpoint scheduler according to a specified interval.
Args:
interval (Union[str, int, Time]): The interval describing how often checkpoints should be
saved. If an integer, it will be assumed to be in :attr:`~TimeUnit.EPOCH`\\s.
Otherwise, the unit must be either :attr:`TimeUnit.EPOCH` or :attr:`TimeUnit.BATCH`.
Checkpoints will be saved every ``n`` batches or epochs (depending on the unit),
and at the end of training.
Returns:
Callable[[State, Event], bool]: A function that can be passed as the ``save_interval``
argument into the :class:`CheckpointSaver`.
"""
if isinstance(interval, str):
interval = Time.from_timestring(interval)
if isinstance(interval, int):
interval = Time(interval, TimeUnit.EPOCH)
if interval.unit == TimeUnit.EPOCH:
save_event = Event.EPOCH_CHECKPOINT
elif interval.unit == TimeUnit.BATCH:
save_event = Event.BATCH_CHECKPOINT
else:
raise NotImplementedError(
f"Unknown checkpointing interval: {interval.unit}. Must be TimeUnit.EPOCH or TimeUnit.BATCH.")
last_checkpoint_batch = None
def save_interval(state: State, event: Event):
nonlocal last_checkpoint_batch
if state.get_elapsed_duration() >= 1.0:
# if doing batch-wise checkpointing, and we saved a checkpoint at the batch_checkpoint event
# right before the epoch_checkpoint event, do not save another checkpoint at the epoch_checkpoint
# event if the batch count didn't increase.
if state.timer.batch != last_checkpoint_batch:
last_checkpoint_batch = state.timer.batch
return True
if save_event == Event.EPOCH_CHECKPOINT:
count = state.timer.epoch
elif save_event == Event.BATCH_CHECKPOINT:
count = state.timer.batch
else:
raise RuntimeError(f"Invalid save_event: {save_event}")
if event == save_event and int(count) % int(interval) == 0:
last_checkpoint_batch = state.timer.batch
return True
return False
return save_interval
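# Example (sketch): the schedule above can also be passed explicitly to the callback below, e.g.
#   CheckpointSaver(save_interval=checkpoint_periodically("1ep"))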
class CheckpointSaver(Callback):
__doc__ = f"""Callback to save checkpoints.
.. note::
If the ``folder`` argument is specified constructing the :class:`~composer.trainer.trainer.Trainer`,
then the :class:`.CheckpointSaver` callback need not be constructed manually. However, for advanced
checkpointing use cases (such as saving a weights-only checkpoint at one interval and the full training state
at another interval), instance(s) of this :class:`.CheckpointSaver` callback can be specified in the
``callbacks`` argument of the :class:`~composer.trainer.trainer.Trainer`, as shown in the example below.
Example
.. testsetup::
from composer.callbacks.checkpoint_saver import CheckpointSaver
.. doctest::
>>> trainer = Trainer(..., callbacks=[
... CheckpointSaver(
... folder='{{run_name}}/checkpoints',
... filename="ep{{epoch}}-ba{{batch}}-rank{{rank}}",
... latest_filename="latest-rank{{rank}}",
... save_interval="1ep",
... weights_only=False,
... )
... ])
.. testcleanup::
trainer.engine.close()
Args:
folder (str, optional): Format string for the folder where checkpoints will be saved.
(default: ``'{{run_name}}/checkpoints'``)
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_TABLE, prefix=' ')}
.. note::
When training with multiple devices (i.e. GPUs), ensure that ``'{{rank}}'`` appears in the format.
Otherwise, multiple processes may attempt to write to the same file.
filename (str, optional): A format string describing how to name checkpoints.
(default: ``'ep{{epoch}}-ba{{batch}}-rank{{rank}}'``)
Checkpoints will be saved approximately to ``{{folder}}/{{filename.format(...)}}``.
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
.. note::
* By default, only the rank zero process will save a checkpoint file.
* When using DeepSpeed, each rank will save a checkpoint file in tarball format. DeepSpeed
requires tarball format, as it saves model and optimizer states in separate files.
Ensure that ``'{{rank}}'`` appears within the ``filename``. Otherwise, multiple ranks
may attempt to write to the same file(s), leading to corrupted checkpoints. If no tarball file
extension is specified, ``'.tar'`` will be used.
* To use compression (regardless of whether DeepSpeed is enabled), set the file extension
to ``'.tar.gz'``, ``'.tgz'``, ``'.tar.bzip'``, or ``'.tar.lzma'`` (depending on the desired
compression algorithm).
.. warning::
Using compression will block the training loop while checkpoints are being compressed. As such, we
recommend saving checkpoints without compression.
Consider the following scenario, where:
* The :attr:`~.Logger.run_name` is ``'awesome-training-run'``
* The default ``folder='{{run_name}}/checkpoints'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to ``"awesome-training-run/checkpoints/ep1-ba42-rank0"``.
When DeepSpeed is being used, each rank (process) will save checkpoints to::
awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
artifact_name (str, optional): Format string for the checkpoint's artifact name.
(default: ``'{{run_name}}/checkpoints/ep{{epoch}}-ba{{batch}}-rank{{rank}}"``)
After the checkpoint is saved, it will be periodically logged as a file artifact.
The artifact name will be determined by this format string.
.. seealso:: :meth:`~composer.loggers.logger.Logger.log_file_artifact` for file artifact logging.
The same format variables for ``filename`` are available.
Leading slashes (``'/'``) will be stripped.
To disable logging trace files as file artifacts, set this parameter to ``None``.
latest_filename (str, optional): A format string for a symlink which points to the last saved checkpoint.
(default: ``'latest-rank{{rank}}'``)
Symlinks will be created approximately at ``{{folder}}/{{latest_filename.format(...)}}``.
The same format variables as for ``name`` are available.
To disable symlinks, set this parameter to ``None``.
Consider the following scenario, where:
* The :attr:`~.Logger.run_name` is 'awesome-training-run'
* The default ``folder='{{run_name}}/checkpoints'`` is used.
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The default ``latest_filename='latest-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to
``'awesome-training-run/checkpoints/ep1-ba42-rank0'``,
and a symlink will be created at
``'awesome-training-run/checkpoints/latest-rank0' -> 'awesome-training-run/checkpoints/ep1-ba42-rank0'``
When DeepSpeed is being used, each rank (process) will save checkpoints to::
awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
Corresponding symlinks will be created at::
awesome-training-run/checkpoints/latest-rank0.tar -> awesome-training-run/checkpoints/ep1-ba42-rank0.tar
awesome-training-run/checkpoints/latest-rank1.tar -> awesome-training-run/checkpoints/ep1-ba42-rank1.tar
awesome-training-run/checkpoints/latest-rank2.tar -> awesome-training-run/checkpoints/ep1-ba42-rank2.tar
...
latest_artifact_name (str, optional): Format string for the checkpoint's latest symlink artifact name.
(default: ``'{{run_name}}/checkpoints/latest-rank{{rank}}"``)
Whenever a new checkpoint is saved, a symlink artifact is created or updated to point to the latest checkpoint's ``artifact_name``.
            The artifact name will be determined by this format string. This parameter has no effect if ``latest_filename`` or ``artifact_name`` is None.
.. seealso:: :meth:`~composer.loggers.logger.Logger.log_symlink_artifact` for symlink artifact logging.
The same format variables for ``filename`` are available.
Leading slashes (``'/'``) will be stripped.
To disable symlinks in logger, set this parameter to ``None``.
overwrite (bool, optional): Whether existing checkpoints should be overridden.
If ``False`` (the default), then the ``folder`` must not exist or be empty.
(default: ``False``)
save_interval (Time | str | int | (State, Event) -> bool): A :class:`Time`, time-string, integer (in epochs),
or a function that takes (state, event) and returns a boolean whether a checkpoint should be saved.
If an integer, checkpoints will be saved every n epochs.
If :class:`Time` or a time-string, checkpoints will be saved according to this interval.
.. seealso:: :func:`.checkpoint_periodically`
If a function, then this function should take two arguments (:class:`State`, :class:`Event`).
The first argument will be the current state of the trainer, and the second argument will be
be :attr:`.Event.BATCH_CHECKPOINT` or :attr:`.EPOCH_CHECKPOINT` (depending on the current training
progress). It should return ``True`` if a checkpoint should be saved given the current state and
event.
weights_only (bool): If ``True``, save only the model weights instead of the entire training state.
            This parameter must be ``False`` when using DeepSpeed. (default: ``False``)
num_checkpoints_to_keep (int, optional): The number of checkpoints to keep locally. The oldest checkpoints
are removed first. Set to ``-1`` to keep all checkpoints locally. (default: ``-1``)
Checkpoints will be removed after they have been logged as a file artifact. For example, when this callback
is used in conjunction with the :class:`~composer.loggers.object_store_logger.ObjectStoreLogger`, set this
parameter to ``0`` to immediately delete checkpoints from the local disk after they have been uploaded to
the object store.
This parameter only controls how many checkpoints are kept locally; checkpoints are not deleted from
artifact stores.
Attributes:
saved_checkpoints (List[Tuple[Timestamp, List[pathlib.Path]]]): The checkpoint timestamps and filepaths.
This list contains tuples of the save timestamp and the checkpoint filepaths.
This list will have at most ``num_checkpoints_to_keep`` entries. The latest checkpoint
will be at the end.
.. note::
When using DeepSpeed, the index of a filepath in each list corresponds to the global rank of
the process that wrote that file. Each filepath is valid only on the process's (rank's) node.
Otherwise, when not using DeepSpeed, each sub-list will contain only one filepath since only rank zero
saves checkpoints.
"""
def __init__(
self,
folder: str = "{run_name}/checkpoints",
filename: str = "ep{epoch}-ba{batch}-rank{rank}",
artifact_name: Optional[str] = "{run_name}/checkpoints/ep{epoch}-ba{batch}-rank{rank}",
latest_filename: Optional[str] = "latest-rank{rank}",
latest_artifact_name: Optional[str] = "{run_name}/checkpoints/latest-rank{rank}",
save_interval: Union[Time, str, int, Callable[[State, Event], bool]] = "1ep",
*,
overwrite: bool = False,
num_checkpoints_to_keep: int = -1,
weights_only: bool = False,
):
if not callable(save_interval):
save_interval = checkpoint_periodically(save_interval)
self.folder = folder
self.filename = filename
self.artifact_name = artifact_name
self.latest_filename = latest_filename
self.latest_artifact_name = latest_artifact_name
self.overwrite = overwrite
self.save_interval = save_interval
self.saved_checkpoints: List[Tuple[Timestamp, List[pathlib.Path]]] = []
self.num_checkpoints_to_keep = num_checkpoints_to_keep
self.weights_only = weights_only
def init(self, state: State, logger: Logger) -> None:
del state # unused
folder = format_name_with_dist(self.folder, logger.run_name)
os.makedirs(folder, exist_ok=True)
if not self.overwrite:
ensure_folder_is_empty(folder)
# Ensure no rank proceeds (and potentially attempts to write to the folder), until all ranks have validated that the folder is empty.
dist.barrier()
def fit_start(self, state: State, logger: Logger) -> None:
if state.is_model_deepspeed:
if self.weights_only:
                raise NotImplementedError(
("Saving checkpoints with `weights_only=True` is not currently supported when using DeepSpeed. "
"See https://github.com/mosaicml/composer/issues/685."))
def batch_checkpoint(self, state: State, logger: Logger):
if self.save_interval(state, Event.BATCH_CHECKPOINT):
# If training is finished, log at the FIT loglevel
log_level = LogLevel.BATCH if state.get_elapsed_duration() < 1.0 else LogLevel.FIT
self._save_checkpoint(state, logger, log_level)
def epoch_checkpoint(self, state: State, logger: Logger):
if self.save_interval(state, Event.EPOCH_CHECKPOINT):
log_level = LogLevel.EPOCH if state.get_elapsed_duration() < 1.0 else LogLevel.FIT
self._save_checkpoint(state, logger, log_level)
def _save_checkpoint(self, state: State, logger: Logger, log_level: LogLevel):
checkpoint_filepath = os.path.join(format_name_with_dist(self.folder, logger.run_name), self.filename)
checkpoint_filepaths = checkpoint.save_checkpoint(state,
logger,
checkpoint_filepath,
weights_only=self.weights_only)
if dist.get_global_rank() < len(checkpoint_filepaths):
# Log the checkpoint as an artifact
checkpoint_filepath = checkpoint_filepaths[dist.get_global_rank()]
if self.artifact_name is not None:
artifact_name = format_name_with_dist_and_time(self.artifact_name, logger.run_name,
state.timer.get_timestamp()).lstrip("/")
if state.is_model_deepspeed and not is_tar(artifact_name):
# Deepspeed requires tarballs; appending `.tar`
artifact_name += ".tar"
logger.file_artifact(log_level=log_level,
artifact_name=artifact_name,
file_path=checkpoint_filepath,
overwrite=self.overwrite)
if self.latest_filename is not None:
symlink_name = os.path.join(
format_name_with_dist(self.folder, logger.run_name),
format_name_with_dist_and_time(self.latest_filename, logger.run_name,
state.timer.get_timestamp()).lstrip("/"),
)
if state.is_model_deepspeed and not is_tar(symlink_name):
# Deepspeed requires tarballs; appending `.tar`
symlink_name += ".tar"
symlink_dirname = os.path.dirname(symlink_name)
if symlink_dirname:
os.makedirs(symlink_dirname, exist_ok=True)
try:
os.remove(symlink_name)
except FileNotFoundError:
pass
os.symlink(checkpoint_filepath, symlink_name)
if self.artifact_name is not None and self.latest_artifact_name is not None:
symlink_artifact_name = format_name_with_dist_and_time(self.latest_artifact_name, logger.run_name,
state.timer.get_timestamp()).lstrip("/")
artifact_name = format_name_with_dist_and_time(self.artifact_name, logger.run_name,
state.timer.get_timestamp()).lstrip("/")
# Always overwrite for symlinks since we use the same filename for latest
logger.symlink_artifact(log_level=log_level,
existing_artifact_name=artifact_name,
symlink_artifact_name=symlink_artifact_name,
overwrite=True)
timestamp = state.timer.get_timestamp()
self.saved_checkpoints.append((timestamp, checkpoint_filepaths))
if self.num_checkpoints_to_keep >= 0:
while len(self.saved_checkpoints) > self.num_checkpoints_to_keep:
timestamp, checkpoint_filepaths = self.saved_checkpoints[0]
if dist.get_global_rank() < len(checkpoint_filepaths):
# Remove this rank's checkpoint
os.remove(checkpoint_filepaths[dist.get_global_rank()])
del self.saved_checkpoints[0]
| 0
| 0
| 0
| 16,816
| 0
| 956
| 0
| 345
| 226
|
9df712a61c104af137c3836ef28840763dfb0311
| 5,919
|
py
|
Python
|
wrapperNYU.py
|
Z7Gao/InverseRenderingOfIndoorScene
|
f245d20dcbe05b1de766c2e53af79fd489f58d74
|
[
"MIT"
] | 171
|
2020-06-28T04:03:23.000Z
|
2022-03-30T08:50:20.000Z
|
wrapperNYU.py
|
Z7Gao/InverseRenderingOfIndoorScene
|
f245d20dcbe05b1de766c2e53af79fd489f58d74
|
[
"MIT"
] | 9
|
2020-08-20T08:56:38.000Z
|
2022-01-19T19:53:51.000Z
|
wrapperNYU.py
|
Z7Gao/InverseRenderingOfIndoorScene
|
f245d20dcbe05b1de766c2e53af79fd489f58d74
|
[
"MIT"
] | 19
|
2020-06-23T11:49:03.000Z
|
2022-01-22T01:49:26.000Z
|
# Return triplet of predictions, ground-truth and error
| 48.516393
| 167
| 0.66937
|
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import models
# Return triplet of predictions, ground-truth and error
def wrapperNYU(dataBatch, opt,
encoder, albedoDecoder, normalDecoder, roughDecoder, depthDecoder ):
# Load data from cpu to gpu
normal_cpu = dataBatch['normal']
normalBatch = Variable(normal_cpu ).cuda()
depth_cpu = dataBatch['depth']
depthBatch = Variable(depth_cpu ).cuda()
seg_cpu = dataBatch['segNormal']
segNormalBatch = Variable( seg_cpu ).cuda()
seg_cpu = dataBatch['segDepth']
segDepthBatch = Variable(seg_cpu ).cuda()
# Load the image from cpu to gpu
im_cpu = (dataBatch['im'] )
imBatch = Variable(im_cpu ).cuda()
if opt.cascadeLevel > 0:
albedoPre_cpu = dataBatch['albedoPre']
albedoPreBatch = Variable(albedoPre_cpu ).cuda()
normalPre_cpu = dataBatch['normalPre']
normalPreBatch = Variable(normalPre_cpu ).cuda()
roughPre_cpu = dataBatch['roughPre']
roughPreBatch = Variable(roughPre_cpu ).cuda()
depthPre_cpu = dataBatch['depthPre']
depthPreBatch = Variable(depthPre_cpu ).cuda()
diffusePre_cpu = dataBatch['diffusePre']
diffusePreBatch = Variable(diffusePre_cpu ).cuda()
specularPre_cpu = dataBatch['specularPre']
specularPreBatch = Variable(specularPre_cpu ).cuda()
if albedoPreBatch.size(2) < opt.imHeight or albedoPreBatch.size(3) < opt.imWidth:
albedoPreBatch = F.interpolate(albedoPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if normalPreBatch.size(2) < opt.imHeight or normalPreBatch.size(3) < opt.imWidth :
normalPreBatch = F.interpolate(normalPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if roughPreBatch.size(2) < opt.imHeight or roughPreBatch.size(3) < opt.imWidth :
roughPreBatch = F.interpolate(roughPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if depthPreBatch.size(2) < opt.imHeight or depthPreBatch.size(3) < opt.imWidth :
depthPreBatch = F.interpolate(depthPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
# Regress the diffusePred and specular Pred
envRow, envCol = diffusePreBatch.size(2), diffusePreBatch.size(3)
imBatchSmall = F.adaptive_avg_pool2d(imBatch, (envRow, envCol) )
diffusePreBatch, specularPreBatch = models.LSregressDiffSpec(
diffusePreBatch.detach(),
specularPreBatch.detach(),
imBatchSmall,
diffusePreBatch, specularPreBatch )
if diffusePreBatch.size(2) < opt.imHeight or diffusePreBatch.size(3) < opt.imWidth:
diffusePreBatch = F.interpolate(diffusePreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
if specularPreBatch.size(2) < opt.imHeight or specularPreBatch.size(3) < opt.imWidth:
specularPreBatch = F.interpolate(specularPreBatch, [opt.imHeight, opt.imWidth ], mode='bilinear')
# Normalize Albedo and depth
bn, ch, nrow, ncol = albedoPreBatch.size()
albedoPreBatch = albedoPreBatch.view(bn, -1)
albedoPreBatch = albedoPreBatch / torch.clamp(torch.mean(albedoPreBatch, dim=1), min=1e-10).unsqueeze(1) / 3.0
albedoPreBatch = albedoPreBatch.view(bn, ch, nrow, ncol)
bn, ch, nrow, ncol = depthPreBatch.size()
depthPreBatch = depthPreBatch.view(bn, -1)
depthPreBatch = depthPreBatch / torch.clamp(torch.mean(depthPreBatch, dim=1), min=1e-10).unsqueeze(1) / 3.0
depthPreBatch = depthPreBatch.view(bn, ch, nrow, ncol)
########################################################
# Build the cascade network architecture #
if opt.cascadeLevel == 0:
inputBatch = imBatch
elif opt.cascadeLevel > 0:
inputBatch = torch.cat([imBatch, albedoPreBatch,
normalPreBatch, roughPreBatch, depthPreBatch,
diffusePreBatch, specularPreBatch ], dim=1)
# Initial Prediction
x1, x2, x3, x4, x5, x6 = encoder(inputBatch )
albedoPred = 0.5 * (albedoDecoder(imBatch, x1, x2, x3, x4, x5, x6) + 1)
normalPred = normalDecoder(imBatch, x1, x2, x3, x4, x5, x6)
roughPred = roughDecoder(imBatch, x1, x2, x3, x4, x5, x6)
depthPred = 0.5 * (depthDecoder(imBatch, x1, x2, x3, x4, x5, x6) + 1)
normalPred = F.interpolate(normalPred, [normalBatch.size(2), normalBatch.size(3)], mode='bilinear')
depthPred = F.interpolate(depthPred, [depthBatch.size(2), depthBatch.size(3)], mode='bilinear')
depthPred = models.LSregress(depthPred.detach() * segDepthBatch.expand_as(depthPred),
depthBatch * segDepthBatch.expand_as(depthBatch), depthPred)
## Compute Errors
pixelAllNumNormal = (torch.sum(segNormalBatch ).cpu().data).item()
normalErr = torch.sum( (normalPred - normalBatch)
* (normalPred - normalBatch) * segNormalBatch.expand_as(normalBatch) ) / pixelAllNumNormal / 3.0
pixelAllNumDepth = (torch.sum(segDepthBatch ).cpu().data).item()
depthErr = torch.sum( (torch.log(depthPred + 0.1) - torch.log(depthBatch + 0.1 ) )
* ( torch.log(depthPred + 0.1) - torch.log(depthBatch + 0.1) ) * segDepthBatch.expand_as(depthBatch ) ) / pixelAllNumDepth
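    # Mean angular error (in degrees) between predicted and ground-truth normals over valid pixels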
angleMean = torch.sum(torch.acos( torch.clamp(torch.sum(normalPred * normalBatch, dim=1).unsqueeze(1), -1, 1) ) / np.pi * 180 * segNormalBatch) / pixelAllNumNormal
normalPred_np = normalPred.data.cpu().numpy()
normalBatch_np = normalBatch.data.cpu().numpy()
segNormalBatch_np = segNormalBatch.cpu().numpy()
theta = np.arccos( np.clip(np.sum(normalPred_np * normalBatch_np, axis=1)[:, np.newaxis, :, :], -1, 1) ) / np.pi * 180
angleMean_np = (theta * segNormalBatch_np ) / pixelAllNumNormal
return [albedoPred, None], [normalPred, normalErr, angleMean], \
            [roughPred, None ], [depthPred, depthErr],
| 0
| 0
| 0
| 0
| 0
| 5,724
| 0
| 4
| 134
|