Dataset schema (one row per source file; columns marked nullable contained ⌀ in the source):

| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 6–1.04M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4–247 |
| max_stars_repo_name | stringlengths | 4–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count | int64 | 1–368k (nullable) |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 (nullable) |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 (nullable) |
| max_issues_repo_path | stringlengths | 4–247 |
| max_issues_repo_name | stringlengths | 4–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count | int64 | 1–116k (nullable) |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 (nullable) |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 (nullable) |
| max_forks_repo_path | stringlengths | 4–247 |
| max_forks_repo_name | stringlengths | 4–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count | int64 | 1–105k (nullable) |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 (nullable) |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 (nullable) |
| content | stringlengths | 1–1.04M |
| avg_line_length | float64 | 1.53–618k |
| max_line_length | int64 | 1–1.02M |
| alphanum_fraction | float64 | 0–1 |
| original_content | stringlengths | 6–1.04M |
| filtered:remove_non_ascii | int64 | 0–538k |
| filtered:remove_decorators | int64 | 0–917k |
| filtered:remove_async | int64 | 0–722k |
| filtered:remove_classes | int64 | -45–1M |
| filtered:remove_generators | int64 | 0–814k |
| filtered:remove_function_no_docstring | int64 | -102–850k |
| filtered:remove_class_no_docstring | int64 | -3–5.46k |
| filtered:remove_unused_imports | int64 | -1,350–52.4k |
| filtered:remove_delete_markers | int64 | 0–59.6k |
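Each row below pairs one source file (`content` holds the post-filter text, `original_content` the unfiltered text) with repository metadata and per-filter character deltas. A minimal sketch of consuming a schema like this with the Hugging Face `datasets` library; the dataset id used here is a placeholder, not the real identifier:

```python
from datasets import load_dataset

# Placeholder dataset id; substitute the actual path of this dataset.
ds = load_dataset("org/python-filtered-code", split="train", streaming=True)

for row in ds.take(3):
    # Per-row quality signals from the schema above.
    print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
    # Character deltas of each filtering pass (the schema shows some can be negative).
    print({k: v for k, v in row.items() if k.startswith("filtered:")})
```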
hexsha: 60d2134f1b978a5ccd35690d147a761894f25efe | size: 19,494 | ext: py | lang: Python
max_stars_repo: easyocr/easyocr.py in ghandic/EasyOCR @ f96bea526e7208e4630a18698c18d0223e2a1168, licenses ["Apache-2.0"], max_stars_count: 1, events 2021-07-19T03:17:50.000Z to 2021-07-19T03:17:50.000Z
max_issues_repo: easyocr/easyocr.py in ghandic/EasyOCR @ f96bea526e7208e4630a18698c18d0223e2a1168, licenses ["Apache-2.0"], max_issues_count: null, events null to null
max_forks_repo: easyocr/easyocr.py in ghandic/EasyOCR @ f96bea526e7208e4630a18698c18d0223e2a1168, licenses ["Apache-2.0"], max_forks_count: 1, events 2020-10-24T11:40:29.000Z to 2020-10-24T11:40:29.000Z
content:
# -*- coding: utf-8 -*-
import sys
from logging import getLogger
if sys.version_info[0] == 2:
from six.moves.urllib.request import urlretrieve
from pathlib2 import Path
else:
LOGGER = getLogger(__name__)
avg_line_length: 46.194313 | max_line_length: 123 | alphanum_fraction: 0.601005
original_content:
# -*- coding: utf-8 -*-
import os
import sys
from logging import getLogger
from typing import Any, List, Tuple
import cv2
import numpy as np
import torch
from bidi.algorithm import get_display
from .detection import get_detector, get_textbox
from .imgproc import loadImage
from .recognition import get_recognizer, get_text
from .settings import *
from .utils import calculate_md5, download_and_unzip, get_image_list, get_paragraph, group_text_box
if sys.version_info[0] == 2:
from io import open
from six.moves.urllib.request import urlretrieve
from pathlib2 import Path
else:
from urllib.request import urlretrieve
from pathlib import Path
LOGGER = getLogger(__name__)
class Reader(object):
def __init__(
self, lang_list: List[str], gpu: bool = True, model_storage_directory: str = None, download_enabled: bool = True
):
"""Create an EasyOCR Reader.
Args:
lang_list (List[str]): Language codes (ISO 639) for languages to be recognized during analysis.
gpu (bool, optional): Enable GPU support. Defaults to True.
model_storage_directory (str, optional): Path to directory for model data. If not specified,
models will be read from a directory as defined by the environment variable
EASYOCR_MODULE_PATH (preferred), MODULE_PATH (if defined), or ~/.EasyOCR/. Defaults to None.
            download_enabled (bool, optional): Enable downloading of model data via HTTP. Defaults to True.
"""
self._set_device(gpu)
self._set_model_lang(lang_list)
self._set_character_choices()
self._set_lang_char(lang_list) # self.lang_list doesn't seem to be used
self._set_model_paths(model_storage_directory)
self._download_models(download_enabled)
self.detector = get_detector(self._detector_path, self.device)
self.recognizer, self.converter = get_recognizer(
input_channel,
output_channel,
hidden_size,
self.character,
self.separator_list,
self.dict_list,
self._recognition_model_path,
device=self.device,
)
def readtext(
self,
image: Any,
decoder: str = "greedy",
beamWidth: int = 5,
batch_size: int = 1,
workers: int = 0,
allowlist: List[str] = None,
blocklist: List[str] = None,
detail: int = 1,
paragraph: bool = False,
contrast_ths: float = 0.1,
adjust_contrast: float = 0.5,
filter_ths: float = 0.003,
text_threshold: float = 0.7,
low_text: float = 0.4,
link_threshold: float = 0.4,
canvas_size: int = 2560,
mag_ratio: float = 1.0,
slope_ths: float = 0.1,
ycenter_ths: float = 0.5,
height_ths: float = 0.5,
width_ths: float = 0.5,
add_margin: float = 0.1,
) -> List: # TODO: ghandic - unsure on output shape
"""[summary] # TODO
Args:
image (Any): [description]
decoder (str, optional): [description]. Defaults to "greedy".
beamWidth (int, optional): [description]. Defaults to 5.
batch_size (int, optional): [description]. Defaults to 1.
workers (int, optional): [description]. Defaults to 0.
allowlist (List[str], optional): [description]. Defaults to None.
blocklist (List[str], optional): [description]. Defaults to None.
detail (int, optional): [description]. Defaults to 1.
paragraph (bool, optional): [description]. Defaults to False.
contrast_ths (float, optional): [description]. Defaults to 0.1.
adjust_contrast (float, optional): [description]. Defaults to 0.5.
filter_ths (float, optional): [description]. Defaults to 0.003.
text_threshold (float, optional): [description]. Defaults to 0.7.
low_text (float, optional): [description]. Defaults to 0.4.
link_threshold (float, optional): [description]. Defaults to 0.4.
canvas_size (int, optional): [description]. Defaults to 2560.
mag_ratio (float, optional): [description]. Defaults to 1.0.
slope_ths (float, optional): [description]. Defaults to 0.1.
ycenter_ths (float, optional): [description]. Defaults to 0.5.
height_ths (float, optional): [description]. Defaults to 0.5.
width_ths (float, optional): [description]. Defaults to 0.5.
add_margin (float, optional): [description]. Defaults to 0.1.
Returns:
List: [description]
"""
img, img_cv_grey = self._load_image(image)
text_box = get_textbox(
self.detector, img, canvas_size, mag_ratio, text_threshold, link_threshold, low_text, False, self.device
)
horizontal_list, free_list = group_text_box(text_box, slope_ths, ycenter_ths, height_ths, width_ths, add_margin)
        # TODO: add a filter to screen out small boxes
image_list, max_width = get_image_list(horizontal_list, free_list, img_cv_grey, model_height=imgH)
if allowlist:
ignore_char = "".join(set(self.character) - set(allowlist))
elif blocklist:
ignore_char = "".join(set(blocklist))
else:
ignore_char = "".join(set(self.character) - set(self.lang_char))
if self.model_lang in ["chinese_tra", "chinese_sim", "japanese", "korean"]:
decoder = "greedy"
result = get_text(
self.character,
imgH,
int(max_width),
self.recognizer,
self.converter,
image_list,
ignore_char,
decoder,
beamWidth,
batch_size,
contrast_ths,
adjust_contrast,
filter_ths,
workers,
self.device,
)
if self.model_lang == "arabic":
direction_mode = "rtl"
result = [list(item) for item in result]
for item in result:
item[1] = get_display(item[1])
else:
direction_mode = "ltr"
if paragraph:
result = get_paragraph(result, mode=direction_mode)
if detail == 0:
return [item[1] for item in result]
else:
return result
def _load_image(self, image: Any) -> Tuple[np.ndarray, np.ndarray]:
        if isinstance(image, str):
if image.startswith("http://") or image.startswith("https://"):
tmp, _ = urlretrieve(image)
img_cv_grey = cv2.imread(tmp, cv2.IMREAD_GRAYSCALE)
os.remove(tmp)
else:
img_cv_grey = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
image = os.path.expanduser(image)
img = loadImage(image) # can accept URL
        elif isinstance(image, bytes):
nparr = np.frombuffer(image, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_cv_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif isinstance(image, np.ndarray):
if len(image.shape) == 2: # grayscale
img_cv_grey = image
img = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
elif len(image.shape) == 3: # BGRscale
img = image
img_cv_grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
else:
raise TypeError("Could not load image")
return img, img_cv_grey
def _download_models(self, download_enabled):
corrupt_msg = "MD5 hash mismatch, possible file corruption"
        if not os.path.isfile(self._detector_path):
if not download_enabled:
raise FileNotFoundError("Missing %s and downloads disabled" % self._detector_path)
LOGGER.warning(
"Downloading detection model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(model_url["detector"][0], DETECTOR_FILENAME, self.model_storage_directory)
assert calculate_md5(self._detector_path) == model_url["detector"][1], corrupt_msg
LOGGER.info("Download complete")
elif calculate_md5(self._detector_path) != model_url["detector"][1]:
if not download_enabled:
raise FileNotFoundError("MD5 mismatch for %s and downloads disabled" % self._detector_path)
LOGGER.warning(corrupt_msg)
os.remove(self._detector_path)
LOGGER.warning(
"Re-downloading the detection model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(model_url["detector"][0], DETECTOR_FILENAME, self.model_storage_directory)
assert calculate_md5(self._detector_path) == model_url["detector"][1], corrupt_msg
# check model file
        if not os.path.isfile(self._recognition_model_path):
if not download_enabled:
raise FileNotFoundError("Missing %s and downloads disabled" % self._recognition_model_path)
LOGGER.warning(
"Downloading recognition model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(
model_url[self._recognition_model_file][0], self._recognition_model_file, self.model_storage_directory
)
assert (
calculate_md5(self._recognition_model_path) == model_url[self._recognition_model_file][1]
), corrupt_msg
LOGGER.info("Download complete.")
elif calculate_md5(self._recognition_model_path) != model_url[self._recognition_model_file][1]:
if not download_enabled:
raise FileNotFoundError("MD5 mismatch for %s and downloads disabled" % self._recognition_model_path)
LOGGER.warning(corrupt_msg)
os.remove(self._recognition_model_path)
LOGGER.warning(
"Re-downloading the recognition model, please wait. "
"This may take several minutes depending upon your network connection."
)
download_and_unzip(
model_url[self._recognition_model_file][0], self._recognition_model_file, self.model_storage_directory
)
assert (
calculate_md5(self._recognition_model_path) == model_url[self._recognition_model_file][1]
), corrupt_msg
LOGGER.info("Download complete")
def _set_lang_char(self, lang_list: List[str]):
self.dict_list = {}
for lang in lang_list:
self.dict_list[lang] = os.path.join(BASE_PATH, "dict", lang + ".txt")
self.lang_char = []
for lang in lang_list:
char_file = os.path.join(BASE_PATH, "character", lang + "_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
char_list = input_file.read().splitlines()
self.lang_char += char_list
self.lang_char = set(self.lang_char).union(set(number + symbol))
self.lang_char = "".join(self.lang_char)
def _set_model_lang(self, lang_list: List[str]):
# check available languages
unknown_lang = set(lang_list) - set(all_lang_list)
if unknown_lang != set():
            raise ValueError("%s is not supported" % unknown_lang)
# choose model
if "th" in lang_list:
self.model_lang = "thai"
if set(lang_list) - set(["th", "en"]) != set():
raise ValueError('Thai is only compatible with English, try lang_list=["th","en"]')
elif "ch_tra" in lang_list:
self.model_lang = "chinese_tra"
if set(lang_list) - set(["ch_tra", "en"]) != set():
raise ValueError('Chinese is only compatible with English, try lang_list=["ch_tra","en"]')
elif "ch_sim" in lang_list:
self.model_lang = "chinese_sim"
if set(lang_list) - set(["ch_sim", "en"]) != set():
raise ValueError('Chinese is only compatible with English, try lang_list=["ch_sim","en"]')
elif "ja" in lang_list:
self.model_lang = "japanese"
if set(lang_list) - set(["ja", "en"]) != set():
raise ValueError('Japanese is only compatible with English, try lang_list=["ja","en"]')
elif "ko" in lang_list:
self.model_lang = "korean"
if set(lang_list) - set(["ko", "en"]) != set():
raise ValueError('Korean is only compatible with English, try lang_list=["ko","en"]')
elif "ta" in lang_list:
self.model_lang = "tamil"
if set(lang_list) - set(["ta", "en"]) != set():
raise ValueError('Tamil is only compatible with English, try lang_list=["ta","en"]')
elif set(lang_list) & set(arabic_lang_list):
self.model_lang = "arabic"
if set(lang_list) - set(arabic_lang_list + ["en"]) != set():
raise ValueError('Arabic is only compatible with English, try lang_list=["ar","fa","ur","ug","en"]')
elif set(lang_list) & set(devanagari_lang_list):
self.model_lang = "devanagari"
if set(lang_list) - set(devanagari_lang_list + ["en"]) != set():
raise ValueError('Devanagari is only compatible with English, try lang_list=["hi","mr","ne","en"]')
elif set(lang_list) & set(cyrillic_lang_list):
self.model_lang = "cyrillic"
if set(lang_list) - set(cyrillic_lang_list + ["en"]) != set():
raise ValueError(
'Cyrillic is only compatible with English, try lang_list=["ru","rs_cyrillic","be","bg","uk","mn","en"]'
)
else:
self.model_lang = "latin"
def _set_character_choices(self):
self.separator_list = {}
if self.model_lang == "latin":
all_char = (
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "ÀÁÂÃÄÅÆÇÈÉÊËÍÎÑÒÓÔÕÖØÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿąęĮįıŁłŒœŠšųŽž"
)
self.character = number + symbol + all_char
self._recognition_model_file = "latin.pth"
elif self.model_lang == "arabic":
ar_number = "٠١٢٣٤٥٦٧٨٩"
ar_symbol = "«»؟،؛"
ar_char = "ءآأؤإئااًبةتثجحخدذرزسشصضطظعغفقكلمنهوىيًٌٍَُِّْٰٓٔٱٹپچڈڑژکڭگںھۀہۂۃۆۇۈۋیېےۓە"
self.character = number + symbol + en_char + ar_number + ar_symbol + ar_char
self._recognition_model_file = "arabic.pth"
elif self.model_lang == "cyrillic":
cyrillic_char = (
"ЁЂЄІЇЈЉЊЋЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяёђєіїјљњћўџҐґҮүө"
)
self.character = number + symbol + en_char + cyrillic_char
self._recognition_model_file = "cyrillic.pth"
elif self.model_lang == "devanagari":
devanagari_char = (
".ँंःअअंअःआइईउऊऋएऐऑओऔकखगघङचछजझञटठडढणतथदधनऩपफबभमयरऱलळवशषसह़ािीुूृॅेैॉोौ्ॐ॒क़ख़ग़ज़ड़ढ़फ़ॠ।०१२३४५६७८९॰"
)
self.character = number + symbol + en_char + devanagari_char
self._recognition_model_file = "devanagari.pth"
elif self.model_lang == "chinese_tra":
char_file = os.path.join(BASE_PATH, "character", "ch_tra_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ch_tra_list = input_file.read().splitlines()
ch_tra_char = "".join(ch_tra_list)
self.character = number + symbol + en_char + ch_tra_char
self._recognition_model_file = "chinese.pth"
elif self.model_lang == "chinese_sim":
char_file = os.path.join(BASE_PATH, "character", "ch_sim_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ch_sim_list = input_file.read().splitlines()
ch_sim_char = "".join(ch_sim_list)
self.character = number + symbol + en_char + ch_sim_char
self._recognition_model_file = "chinese_sim.pth"
elif self.model_lang == "japanese":
char_file = os.path.join(BASE_PATH, "character", "ja_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ja_list = input_file.read().splitlines()
ja_char = "".join(ja_list)
self.character = number + symbol + en_char + ja_char
self._recognition_model_file = "japanese.pth"
elif self.model_lang == "korean":
char_file = os.path.join(BASE_PATH, "character", "ko_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ko_list = input_file.read().splitlines()
ko_char = "".join(ko_list)
self.character = number + symbol + en_char + ko_char
self._recognition_model_file = "korean.pth"
elif self.model_lang == "tamil":
char_file = os.path.join(BASE_PATH, "character", "ta_char.txt")
with open(char_file, "r", encoding="utf-8-sig") as input_file:
ta_list = input_file.read().splitlines()
ta_char = "".join(ta_list)
self.character = number + symbol + en_char + ta_char
self._recognition_model_file = "tamil.pth"
elif self.model_lang == "thai":
self.separator_list = {"th": ["\xa2", "\xa3"], "en": ["\xa4", "\xa5"]}
separator_char = []
for lang, sep in self.separator_list.items():
separator_char += sep
special_c0 = "ุู"
special_c1 = "ิีืึ" + "ั"
special_c2 = "่้๊๋"
special_c3 = "็์"
special_c = special_c0 + special_c1 + special_c2 + special_c3 + "ำ"
th_char = "กขคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรลวศษสหฬอฮฤ" + "เแโใไะา" + special_c + "ํฺ" + "ฯๆ"
th_number = "0123456789๑๒๓๔๕๖๗๘๙"
self.character = "".join(separator_char) + symbol + en_char + th_char + th_number
self._recognition_model_file = "thai.pth"
else:
LOGGER.error("invalid language")
raise NotImplementedError("invalid language")
    def _set_model_paths(self, directory: str):
        # renamed parameter to avoid shadowing the built-in dir()
        self.model_storage_directory = MODULE_PATH + "/model"
        if directory:
            self.model_storage_directory = directory
Path(self.model_storage_directory).mkdir(parents=True, exist_ok=True)
self._recognition_model_path = os.path.join(self.model_storage_directory, self._recognition_model_file)
self._detector_path = os.path.join(self.model_storage_directory, DETECTOR_FILENAME)
def _set_device(self, gpu: bool):
if gpu is False:
self.device = "cpu"
LOGGER.warning("Using CPU. Note: This module is much faster with a GPU.")
elif not torch.cuda.is_available():
self.device = "cpu"
LOGGER.warning("CUDA not available - defaulting to CPU. Note: This module is much faster with a GPU.")
elif gpu is True:
self.device = "cuda"
else:
self.device = gpu
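The original_content above is the full EasyOCR Reader from ghandic/EasyOCR. A minimal usage sketch, assuming the detection and recognition model files are already present or downloadable; the image path is a placeholder:

```python
from easyocr import Reader

# gpu=False keeps the sketch portable; CUDA is used only when requested and available.
reader = Reader(["en"], gpu=False)

# _load_image accepts a file path, URL, raw bytes, or numpy array.
# With detail=0, readtext returns just the recognized strings (see the detail == 0 branch).
for text in reader.readtext("page.png", detail=0):
    print(text)
```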
filtered: remove_non_ascii 1,008 | remove_decorators 0 | remove_async 0 | remove_classes 18,354 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 158 | remove_delete_markers 346
hexsha: b9ade0befeaaf199c9e1afc1d7f76c7fb111996b | size: 740 | ext: py | lang: Python
max_stars_repo: src/proxies/images.py in otanadzetsotne/nn-image-similarity @ 8a00c30359e56c4a229942b4b2df6265fa2856a7, licenses ["MIT"], max_stars_count: null, events null to null
max_issues_repo: src/proxies/images.py in otanadzetsotne/nn-image-similarity @ 8a00c30359e56c4a229942b4b2df6265fa2856a7, licenses ["MIT"], max_issues_count: null, events null to null
max_forks_repo: src/proxies/images.py in otanadzetsotne/nn-image-similarity @ 8a00c30359e56c4a229942b4b2df6265fa2856a7, licenses ["MIT"], max_forks_count: null, events null to null
content:
# local
avg_line_length: 20.555556 | max_line_length: 50 | alphanum_fraction: 0.601351
original_content:
# local
from src.utils.images import ImagesHelper
from src.dtypes import ImagesInner
class ProxyImages:
@staticmethod
def filter_correct(
images: ImagesInner,
) -> ImagesInner:
"""
        Filter images and return only the correct ones
"""
return ImagesHelper.filter_correct(images)
@staticmethod
def filter_error(
images: ImagesInner,
) -> ImagesInner:
"""
        Filter images and return only those with errors
"""
return ImagesHelper.filter_error(images)
@staticmethod
def has_correct(
images: ImagesInner,
) -> bool:
"""
        Check whether the ImagesInner object contains any correct images
"""
return ImagesHelper.has_correct(images)
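A minimal usage sketch of the proxy; the `images` variable stands for an ImagesInner collection built elsewhere in the pipeline (hypothetical here, since src.dtypes is not included in this row):

```python
from src.proxies.images import ProxyImages

# images: an ImagesInner instance produced by an earlier loading stage (not shown).
if ProxyImages.has_correct(images):
    correct = ProxyImages.filter_correct(images)   # records that validated
    errors = ProxyImages.filter_error(images)      # records that failed
```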
filtered: remove_non_ascii 0 | remove_decorators 554 | remove_async 0 | remove_classes -3 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 33 | remove_delete_markers 147
hexsha: 22afb31fa0ba4539038dbf716afbd984f54b90ca | size: 6,054 | ext: py | lang: Python
max_stars_repo: code/src/main/python/misconceptions/rUtils/functions.py in DynamicCodeSearch/CodeSeer @ ee985ece7691691585952eb88565f0e08bdc9113, licenses ["MIT"], max_stars_count: 5, events 2020-04-05T18:04:13.000Z to 2021-04-13T20:34:19.000Z
max_issues_repo: code/src/main/python/misconceptions/rUtils/functions.py in DynamicCodeSearch/CodeSeer @ ee985ece7691691585952eb88565f0e08bdc9113, licenses ["MIT"], max_issues_count: 1, events 2020-04-29T21:42:26.000Z to 2020-05-01T23:45:45.000Z
max_forks_repo: code/src/main/python/misconceptions/rUtils/functions.py in DynamicCodeSearch/CodeSeer @ ee985ece7691691585952eb88565f0e08bdc9113, licenses ["MIT"], max_forks_count: 3, events 2020-01-27T16:02:14.000Z to 2021-02-08T13:25:15.000Z
content:
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
import rpy2.robjects as robjects
from rpy2 import rinterface
from rpy2.robjects import pandas2ri
pandas2ri.activate()
rinterface.set_writeconsole_warnerror(None)
rinterface.set_writeconsole_regular(None)
r_source = robjects.r['source']
R_GEN_PREFIX = "gen_func_r_"
FUNC_BODY_REGEX = r'function\s*\(.*?\)\s*((.|\s)+)'
FUNCTION_STORE = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/expt/r_functions.pkl"
avg_line_length: 28.422535 | max_line_length: 107 | alphanum_fraction: 0.743971
original_content:
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
import copy
import signal
import time
import re
import rpy2
import rpy2.robjects as robjects
from rpy2 import rinterface
from rpy2.robjects import pandas2ri
from rpy2.robjects.functions import SignatureTranslatedFunction
from collections import OrderedDict
from analysis.helpers import constants as a_consts
from analysis import execute
from misconceptions.common import datatypes
from misconceptions.rUtils import generator, dataframer
from utils import cache
pandas2ri.activate()
rinterface.set_writeconsole_warnerror(None)
rinterface.set_writeconsole_regular(None)
r_source = robjects.r['source']
R_GEN_PREFIX = "gen_func_r_"
FUNC_BODY_REGEX = r'function\s*\(.*?\)\s*((.|\s)+)'
FUNCTION_STORE = "/Users/panzer/Raise/ProgramRepair/CodeSeer/code/src/main/python/expt/r_functions.pkl"
def get_R_error_message(exception):
return exception.message.strip()
def get_env_variables(r_file_path):
try:
robjects.r('''
source('%s')
''' % r_file_path)
return robjects.globalenv
except rinterface.RRuntimeError as e:
print("Error while fetching environment variables.\n%s" % get_R_error_message(e))
return None
def r_compile(r_file_path, del_compiled=True):
try:
robjects.r('''
library(compiler)
cmpfile('%s')
''' % r_file_path)
if del_compiled:
compiled_file = r_file_path.rsplit(".", 1)[0] + ".Rc"
cache.delete_file(compiled_file)
return True
except Exception as e:
# print("Error while compilation.\n%s" % get_R_error_message(e))
# error_message = get_R_error_message(e)
# return error_message and "import pandas" in error_message
pass
return False
def get_r_function(r_file_path, func_name):
env_variables = get_env_variables(r_file_path)
if not env_variables:
return None
for name in env_variables.keys():
if name == func_name and isinstance(env_variables[name], SignatureTranslatedFunction):
return env_variables[name]
return None
def get_r_functions(r_file_path):
r_functions = {}
env_variables = get_env_variables(r_file_path)
if not env_variables:
return None
for name in env_variables.keys():
if isinstance(env_variables[name], SignatureTranslatedFunction):
r_functions[name] = env_variables[name]
return r_functions
def get_function_arg_names(r_func):
return list(r_func.formals().names)
def get_function_body(r_func):
func_str = str(r_func).strip()
return re.match(FUNC_BODY_REGEX, func_str).group(1)
def get_r_types(r_func):
formal_args = r_func.formals()
arg_names = get_function_arg_names(r_func)
if formal_args is None or type(formal_args) == rpy2.rinterface.RNULLType:
return None
r_types = OrderedDict()
for arg_name, formal_arg in zip(arg_names, formal_args):
r_types[arg_name] = {"type": rpy2.robjects.vectors.DataFrame}
return r_types
def get_function_as_str(func_name, func):
return ("%s <- %s" % (func_name, str(func))).strip()
def convert_to_R_args(py_args):
r_args = []
for py_arg in py_args:
r_arg = datatypes.convert_py_object_to_r(py_arg)
r_args.append(r_arg)
return r_args
def execute_R_function(r_func, arg):
cloned = convert_to_R_args([copy.deepcopy(x) for x in arg])
prev_signal = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, execute.timeout_handler)
signal.alarm(a_consts.METHOD_WAIT_TIMEOUT)
duration = a_consts.METHOD_WAIT_TIMEOUT * 1000
ret_obj = {"return": None, "errorMessage": None}
try:
start = time.time()
ret = r_func(*cloned)
duration = (time.time() - start) * 1000
ret_obj["return"] = datatypes.convert_r_object_to_py(ret)
except execute.TimeoutException:
ret_obj["errorMessage"] = "Method timed out after %d seconds" % a_consts.METHOD_WAIT_TIMEOUT
except rinterface.RRuntimeError as e:
# print("Error while executing rUtils function %s. Error: %s" % (func_name, e.message))
ret_obj["errorMessage"] = e.message
except Exception as e:
ret_obj["errorMessage"] = e.message
ret_obj["duration"] = duration
signal.alarm(0)
signal.signal(signal.SIGALRM, prev_signal)
return ret_obj
def process_R_function(file_path, func_name, r_func):
print("Processing %s ... " % func_name)
r_types = get_r_types(r_func)
if r_types is None:
return None
args = generator.load_args(r_types)
func_key = generator.make_key(r_types)
results = execute_R_function_on_args(r_func, args)
function_data = {
"name": func_name,
"filePath": file_path,
"inputKey": func_key,
"body": get_function_as_str(func_name, r_func)
}
if results:
function_data["outputs"] = results
return function_data
def execute_R_function_on_args(r_func, args_set):
results = []
is_valid = False
for args in args_set:
result = execute_R_function(r_func, args)
if not is_valid and result.get("return", None) is not None:
is_valid = True
results.append(result)
if not is_valid:
print("Function is invalid")
return None
return results
def save_function(func_data):
saved_funcs = cache.load_pickle(FUNCTION_STORE)
if not saved_funcs:
saved_funcs = {}
saved_funcs[func_data["name"]] = func_data
cache.save_pickle(FUNCTION_STORE, saved_funcs)
def extract_col_names(r_func):
arg_names = get_function_arg_names(r_func)
func_body = get_function_body(r_func)
arg_cols = {}
for arg_name in arg_names:
df = dataframer.extract_col_names(arg_name, func_body)
if df:
arg_cols[arg_name] = df
return arg_cols
def parse_function_for_col_names(func_name, source_file):
all_funcs = get_r_functions(source_file)
r_func = all_funcs[func_name]
return extract_col_names(r_func)
def test_function():
file_path = '/Users/panzer/Raise/ProgramRepair/CodeSeer/projects/src/main/R/Example/PandasR/r_snippets.R'
func_name = 'gen_func_r_drop'
r_functions = get_r_functions(file_path)
r_func = r_functions[func_name]
process_R_function(file_path, func_name, r_func)
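FUNC_BODY_REGEX above peels the formal-argument list off an R function's deparsed source so that only the body remains. A self-contained check of what the first capture group holds (pure Python, no rpy2 required):

```python
import re

FUNC_BODY_REGEX = r'function\s*\(.*?\)\s*((.|\s)+)'

# A deparsed R function as rpy2 would render it (illustrative string).
deparsed = "function (df, n) {\n  head(df, n)\n}"
body = re.match(FUNC_BODY_REGEX, deparsed).group(1)
print(body)  # -> "{\n  head(df, n)\n}"
```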
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 4,737 | remove_class_no_docstring 0 | remove_unused_imports 100 | remove_delete_markers 657
hexsha: a65d3f0e19e9c311490bb7bc77d8eea9559cd262 | size: 339 | ext: py | lang: Python
max_stars_repo: bot/plugins/joke.py in Preocts/twitch-chat-bot @ 50341c30d8eada4b50634c8f25a9eb0eed681735, licenses ["MIT"], max_stars_count: 62, events 2019-11-16T22:07:42.000Z to 2022-03-08T20:50:01.000Z
max_issues_repo: bot/plugins/joke.py in Preocts/twitch-chat-bot @ 50341c30d8eada4b50634c8f25a9eb0eed681735, licenses ["MIT"], max_issues_count: 30, events 2019-03-19T15:05:55.000Z to 2022-03-24T05:00:53.000Z
max_forks_repo: bot/plugins/joke.py in Preocts/twitch-chat-bot @ 50341c30d8eada4b50634c8f25a9eb0eed681735, licenses ["MIT"], max_forks_count: 56, events 2019-06-08T20:34:31.000Z to 2022-02-21T20:10:38.000Z
content:
from __future__ import annotations
avg_line_length: 21.1875 | max_line_length: 61 | alphanum_fraction: 0.764012
original_content:
from __future__ import annotations
from typing import Match
import pyjokes
from bot.config import Config
from bot.data import command
from bot.data import esc
from bot.data import format_msg
@command('!joke', '!yoke')
async def cmd_joke(config: Config, match: Match[str]) -> str:
return format_msg(match, esc(pyjokes.get_joke()))
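The @command decorator and the format_msg/esc helpers come from the bot's own bot.data module, which is not included in this row; a hypothetical sketch of the trigger-registry pattern such a decorator usually implements:

```python
from typing import Awaitable, Callable, Dict

# Hypothetical registry; the real bot.data implementation may differ.
HANDLERS: Dict[str, Callable[..., Awaitable[str]]] = {}

def command(*triggers: str) -> Callable:
    """Register one coroutine under several chat triggers (e.g. '!joke', '!yoke')."""
    def decorator(func: Callable[..., Awaitable[str]]) -> Callable[..., Awaitable[str]]:
        for trigger in triggers:
            HANDLERS[trigger] = func
        return func
    return decorator
```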
filtered: remove_non_ascii 0 | remove_decorators 121 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 24 | remove_delete_markers 158
hexsha: b10aa05fe838d0b0b31227f058840a4db0cf7599 | size: 11,594 | ext: py | lang: Python
max_stars_repo: ngskit/trim_reads.py in kim-lab/NGSKit @ 62f609111ba59b9d7d87dc9979a9a2c57959e297, licenses ["MIT"], max_stars_count: 1, events 2021-12-10T22:23:50.000Z to 2021-12-10T22:23:50.000Z
max_issues_repo: ngskit/trim_reads.py in kimlaborg/NGSKit @ 62f609111ba59b9d7d87dc9979a9a2c57959e297, licenses ["MIT"], max_issues_count: null, events null to null
max_forks_repo: ngskit/trim_reads.py in kimlaborg/NGSKit @ 62f609111ba59b9d7d87dc9979a9a2c57959e297, licenses ["MIT"], max_forks_count: null, events null to null
content:
#!/usr/bin/env python
import os
import sys
import logging
import argparse
import time
import ngskit.barcodes as barcodes
from ngskit.utils import fasta_tools, fastq_tools
#import barcodes
#from utils import fasta_tools, fastq_tools
def trimming(demultiplexed_fastq, barcode, quality_threshold,
             trgt_len, output_fmt, output_folder):
    """Extract sequences from the demultiplexed FASTQ files, trimming barcode + constant region.

    Parameters
    ----------
    demultiplexed_fastq : str
        Path of the demultiplexed fastq file
    barcode : barcode.object
        Barcode object with info about the barcode and constant regions
    quality_threshold : int
        Read quality threshold; any sequence under this level is discarded
    trgt_len : int
        Length in bases of the target sequences.
    output_fmt : str
        Output format, fasta by default
    output_folder : str
        Output folder where files with trimmed sequences are saved

    Returns
    -------
    None; trimmed sequences are written in fasta or fastq format.

    Notes
    -----
    Result, in Fasta format:
        >FASTQ_ID + length + Quality
        ATGATGGTAGTAGTAGAAAGATAGATGATGATGAT
    will be stored as:
        /data_path/Sequences/Sample_id.fasta
    """
    # Init the output format; returns a writer function
logger = logging.getLogger(__name__)
create_folder(output_folder)
#
if output_fmt == 'fasta':
save_seq = fasta_tools.write_fasta_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fasta','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fasta'))
if output_fmt == 'fastq':
save_seq = fastq_tools.write_fastq_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fastq','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fastq'))
# check barcodes integrity, peplength, fastq
# barcodes_list = barcodes.read(barcode_file)
# Stats
nseqs = 0
ntrimed = 0
# Open Fastq file
with open(demultiplexed_fastq, 'r') as read1:
for read1_id in read1:
# Read 4 by 4
# ID lane info, seq info etc
# Read seq and Quality info
read1_seq, read1_strand, read1_qual = [next(read1) for _ in range(3)]
#Translate the Quality to a list of Integers
qual = [ord(c)-33 for c in read1_qual.strip()]
target_sequence = read1_seq[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
#remove the quality of the barcode and the constant region
target_qual = qual[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
nseqs += 1
# Control
try:
avg_quality = sum(target_qual)/float(len(target_qual))
except ZeroDivisionError:
                logger.error('Sequence with no length or no score', exc_info=True)
                logger.error('seq=%s qual=%s target_qual=%s trgt_len=%s',
                             read1_seq, read1_qual, target_qual, trgt_len)
sys.exit()
if len(target_sequence) == trgt_len and avg_quality >= quality_threshold:
ntrimed += 1
# save output format
# attach Qavgm and length origin to the id
seq_id = '{}_Q:{:.2f}_F:{}'.format(read1_id.strip(), avg_quality, trgt_len)
save_seq([seq_id, target_sequence, target_qual],
file_output=filehdl_output)
# save
else:
# Stats
pass
logger.info('Read %i Sequences' % (nseqs))
logger.info('Trimmed %i Sequences' % (ntrimed))
filehdl_output.close()
def get_options():
"""Get arguments from command line.
Parameters
----------
Returns
-------
"""
parser = argparse.ArgumentParser(description="""
Trimming Fastq sequences tool
Usage Trimming:
    %prog -d [demultiplexed Folder] -b [BarCode_file.inp] -q [Quality threshold]\
-m [method] --output_fmt fasta
""")
parser.add_argument('-d', '--input_folder', action="store",
dest="input_folder", default=False, help='Folder \
contains demultiplexed folders and files', required=True)
parser.add_argument('-b', '--barcode_file', action="store",
dest="barcode_file", default=False, help='File that \
                        contains barcodes and constant regions', required=True)
parser.add_argument('-o', '--out_folder', action="store", dest="out_folder",
default='Sequences', help='Output folder, called \
Sequences by default')
# optional Arguments
    parser.add_argument('-m', '--trimming_method', action="store",
                        dest="trimming_method", default='standard', type=str,
                        choices=['standard',
                                 'dynamic'],
                        help="""standard: trim sequences according to the barcode file configuration; ignores float-window output files\n
                        dynamic: trim sequences using the file length label, i.e. the output of float-window demultiplexing""")
# Default 1
parser.add_argument('-q', '--quality', action="store",
dest="quality", default=30, type=int,
help='Quality reading threshold \
(default 30)')
parser.add_argument('--output_fmt', help='Output format, default fasta',
dest='output_fmt', default='fasta', action='store')
    # note: the flag spelling '--force-lenght' is left unchanged to preserve the CLI
    parser.add_argument('--force-lenght', help='force a length and ignore the file label; overrides the dynamic option',
dest='force_lenght', default=False, action='store')
options = parser.parse_args()
return options
def main():
"""Pipeline Control.
Parameters
----------
opts
"""
opts = get_options()
# init logging
time_stamp = time.ctime()
seconds_time = int(time.time())
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M',
filename= opts.input_folder+ '/Logs/Trimming_'+opts.input_folder.rpartition('/')[-1]+'_'+opts.barcode_file+'_{}.log'.format(seconds_time),
filemode='w')
logger = logging.getLogger(__name__)
logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# DEMULTIPLEX
# Check inputs
# Load Barcodes info
# check barcodes integrity, peplength, fastq
barcodes_list = barcodes.read(opts.barcode_file)
# make output folder
# Init Logging
logger.info('#### TRIMMING ####')
# incompatible
logger.info('Method: {}'.format(opts.trimming_method))
logger.info('Quality threshold: {}'.format(opts.quality))
logger.info('Output format: {}'.format(opts.output_fmt))
#
logger.info('Barcode file: {}'.format(opts.barcode_file))
logger.info('Input folder: {}'.format(opts.input_folder))
output_folder = opts.input_folder+'/'+opts.out_folder
logger.info('Output folder: {}'.format(output_folder))
logger.info('Force target lenght: %s', opts.force_lenght)
# foreach sample in barcodes
for barcode in barcodes_list:
        logger.info('Trimming sample: {}'.format(barcode.id))
# folder must == sample id in the barcode
# TODO: need to improve this line, it can be problematic
working_folder = './'+opts.input_folder+'/'+barcode.id+'/'
# get all fastq under the folder
for demultiplexed_fastq in os.listdir(working_folder):
            # TODO: only pick up fastq files
            # TODO: only those we want (target length)
            # if method is dynamic, get all the files in the folder
            if opts.trimming_method == 'dynamic':
                # read the length from the filename
seq_length = get_length_label(demultiplexed_fastq)
# modifiy target size
# Skip empty vectors
if seq_length:
# modify output folder
dir_emultiplexed_fastq = working_folder+demultiplexed_fastq
# trim!
trimming(dir_emultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= seq_length,
output_fmt= opts.output_fmt,
output_folder=output_folder+'_'+str(seq_length))
# raw_name = demultiplexed_file.replace('_F.fastq','')
# read the length from the file
elif opts.trimming_method == 'standard':
# Trim time
dir_emultiplexed_fastq = working_folder+demultiplexed_fastq
# ignore files from dynamic target
seq_length = get_length_label(demultiplexed_fastq)
if seq_length != barcode.trgt_len:
                    logger.info("file label and barcode length differ: %s SKIPPING FILE", demultiplexed_fastq)
continue
else:
                    logger.info('Trimming file: {}'.format(demultiplexed_fastq))
trimming(dir_emultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= barcode.trgt_len,
output_fmt= opts.output_fmt,
output_folder=output_folder)
            # add multi-length trimming here
elif opts.trimming_method == 'force':
# Todo: this option can be useful in the future
continue
else:
                # unknown method
pass
# DONE
time_stamp = time.ctime()
logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
return
# def main():
# # Read argtments
# opts = get_options()
# # init logging
# time_stamp = time.ctime()
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# datefmt='%m-%d %H:%M',
# filename= 'Trimming_'+opts.input_folder+'_'+opts.barcode_file+'_{4}_{1}_{2}_{0}_{3}.log'.format(*time_stamp.split()),
# filemode='w')
# logger = logging.getLogger(__name__)
# logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# # DEMULTIPLEX
# workflow(opts)
# # DONE
# time_stamp = time.ctime()
# logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
if __name__ == '__main__':
main()
avg_line_length: 35.894737 | max_line_length: 163 | alphanum_fraction: 0.582456
original_content:
#!/usr/bin/env python
import os
import sys
import logging
import argparse
import time
import ngskit.barcodes as barcodes
from ngskit.utils import fasta_tools, fastq_tools
#import barcodes
#from utils import fasta_tools, fastq_tools
def create_folder(output_folder):
# Create output folder
logger = logging.getLogger(__name__)
logger.info('Open folder %s', output_folder)
try:
# by default Sequences
os.makedirs(output_folder)
except OSError:
_ = sys.exc_info()
        logger.warning('Warning, folder %s already exists', output_folder)
return
def trimming(demultiplexed_fastq, barcode, quality_threshold,
             trgt_len, output_fmt, output_folder):
    """Extract sequences from the demultiplexed FASTQ files, trimming barcode + constant region.

    Parameters
    ----------
    demultiplexed_fastq : str
        Path of the demultiplexed fastq file
    barcode : barcode.object
        Barcode object with info about the barcode and constant regions
    quality_threshold : int
        Read quality threshold; any sequence under this level is discarded
    trgt_len : int
        Length in bases of the target sequences.
    output_fmt : str
        Output format, fasta by default
    output_folder : str
        Output folder where files with trimmed sequences are saved

    Returns
    -------
    None; trimmed sequences are written in fasta or fastq format.

    Notes
    -----
    Result, in Fasta format:
        >FASTQ_ID + length + Quality
        ATGATGGTAGTAGTAGAAAGATAGATGATGATGAT
    will be stored as:
        /data_path/Sequences/Sample_id.fasta
    """
    # Init the output format; returns a writer function
logger = logging.getLogger(__name__)
create_folder(output_folder)
#
if output_fmt == 'fasta':
save_seq = fasta_tools.write_fasta_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fasta','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fasta'))
if output_fmt == 'fastq':
save_seq = fastq_tools.write_fastq_sequence
filehdl_output = open(output_folder+'/'+barcode.id+'.fastq','a')
logger.info('Output file: %s' % (output_folder+'/'+barcode.id+'.fastq'))
# check barcodes integrity, peplength, fastq
# barcodes_list = barcodes.read(barcode_file)
# Stats
nseqs = 0
ntrimed = 0
# Open Fastq file
with open(demultiplexed_fastq, 'r') as read1:
for read1_id in read1:
# Read 4 by 4
# ID lane info, seq info etc
# Read seq and Quality info
read1_seq, read1_strand, read1_qual = [next(read1) for _ in range(3)]
#Translate the Quality to a list of Integers
qual = [ord(c)-33 for c in read1_qual.strip()]
target_sequence = read1_seq[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
#remove the quality of the barcode and the constant region
target_qual = qual[barcode.b1_len+barcode.c1_len:
barcode.b1_len+barcode.c1_len+trgt_len]
nseqs += 1
# Control
try:
avg_quality = sum(target_qual)/float(len(target_qual))
except ZeroDivisionError:
                logger.error('Sequence with no length or no score', exc_info=True)
                logger.error('seq=%s qual=%s target_qual=%s trgt_len=%s',
                             read1_seq, read1_qual, target_qual, trgt_len)
sys.exit()
if len(target_sequence) == trgt_len and avg_quality >= quality_threshold:
ntrimed += 1
# save output format
# attach Qavgm and length origin to the id
seq_id = '{}_Q:{:.2f}_F:{}'.format(read1_id.strip(), avg_quality, trgt_len)
save_seq([seq_id, target_sequence, target_qual],
file_output=filehdl_output)
# save
else:
# Stats
pass
logger.info('Read %i Sequences' % (nseqs))
logger.info('Trimmed %i Sequences' % (ntrimed))
filehdl_output.close()
def get_length_label(demultiplexed_fastq_file):
    logger = logging.getLogger(__name__)
    filename, _ = os.path.splitext(demultiplexed_fastq_file)
    # the demultiplexer encodes the target length as the second-to-last '_' field
    seq_length = filename.split('_')[-2:-1]
    logger.info("Label length: %s", seq_length[0])
    return int(seq_length[0])
def get_options():
"""Get arguments from command line.
Parameters
----------
Returns
-------
"""
parser = argparse.ArgumentParser(description="""
Trimming Fastq sequences tool
Usage Trimming:
    %prog -d [demultiplexed Folder] -b [BarCode_file.inp] -q [Quality threshold]\
-m [method] --output_fmt fasta
""")
parser.add_argument('-d', '--input_folder', action="store",
dest="input_folder", default=False, help='Folder \
contains demultiplexed folders and files', required=True)
parser.add_argument('-b', '--barcode_file', action="store",
dest="barcode_file", default=False, help='File that \
                        contains barcodes and constant regions', required=True)
parser.add_argument('-o', '--out_folder', action="store", dest="out_folder",
default='Sequences', help='Output folder, called \
Sequences by default')
# optional Arguments
    parser.add_argument('-m', '--trimming_method', action="store",
                        dest="trimming_method", default='standard', type=str,
                        choices=['standard',
                                 'dynamic'],
                        help="""standard: trim sequences according to the barcode file configuration; ignores float-window output files\n
                        dynamic: trim sequences using the file length label, i.e. the output of float-window demultiplexing""")
# Default 1
parser.add_argument('-q', '--quality', action="store",
dest="quality", default=30, type=int,
help='Quality reading threshold \
(default 30)')
parser.add_argument('--output_fmt', help='Output format, default fasta',
dest='output_fmt', default='fasta', action='store')
    # note: the flag spelling '--force-lenght' is left unchanged to preserve the CLI
    parser.add_argument('--force-lenght', help='force a length and ignore the file label; overrides the dynamic option',
dest='force_lenght', default=False, action='store')
options = parser.parse_args()
return options
def main():
"""Pipeline Control.
Parameters
----------
opts
"""
opts = get_options()
# init logging
time_stamp = time.ctime()
seconds_time = int(time.time())
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%m-%d %H:%M',
filename= opts.input_folder+ '/Logs/Trimming_'+opts.input_folder.rpartition('/')[-1]+'_'+opts.barcode_file+'_{}.log'.format(seconds_time),
filemode='w')
logger = logging.getLogger(__name__)
logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# DEMULTIPLEX
# Check inputs
# Load Barcodes info
# check barcodes integrity, peplength, fastq
barcodes_list = barcodes.read(opts.barcode_file)
# make output folder
# Init Logging
logger.info('#### TRIMMING ####')
# incompatible
logger.info('Method: {}'.format(opts.trimming_method))
logger.info('Quality threshold: {}'.format(opts.quality))
logger.info('Output format: {}'.format(opts.output_fmt))
#
logger.info('Barcode file: {}'.format(opts.barcode_file))
logger.info('Input folder: {}'.format(opts.input_folder))
output_folder = opts.input_folder+'/'+opts.out_folder
logger.info('Output folder: {}'.format(output_folder))
logger.info('Force target lenght: %s', opts.force_lenght)
# foreach sample in barcodes
for barcode in barcodes_list:
        logger.info('Trimming sample: {}'.format(barcode.id))
# folder must == sample id in the barcode
# TODO: need to improve this line, it can be problematic
working_folder = './'+opts.input_folder+'/'+barcode.id+'/'
# get all fastq under the folder
for demultiplexed_fastq in os.listdir(working_folder):
            # TODO: only pick up fastq files
            # TODO: only those we want (target length)
            # if method is dynamic, get all the files in the folder
            if opts.trimming_method == 'dynamic':
                # read the length from the filename
seq_length = get_length_label(demultiplexed_fastq)
# modifiy target size
# Skip empty vectors
if seq_length:
# modify output folder
dir_emultiplexed_fastq = working_folder+demultiplexed_fastq
# trim!
trimming(dir_emultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= seq_length,
output_fmt= opts.output_fmt,
output_folder=output_folder+'_'+str(seq_length))
# raw_name = demultiplexed_file.replace('_F.fastq','')
# read the length from the file
elif opts.trimming_method == 'standard':
# Trim time
dir_emultiplexed_fastq = working_folder+demultiplexed_fastq
# ignore files from dynamic target
seq_length = get_length_label(demultiplexed_fastq)
if seq_length != barcode.trgt_len:
                    logger.info("file label and barcode length differ: %s SKIPPING FILE", demultiplexed_fastq)
continue
else:
                    logger.info('Trimming file: {}'.format(demultiplexed_fastq))
trimming(dir_emultiplexed_fastq,
barcode,
quality_threshold= opts.quality,
trgt_len= barcode.trgt_len,
output_fmt= opts.output_fmt,
output_folder=output_folder)
            # add multi-length trimming here
elif opts.trimming_method == 'force':
# Todo: this option can be useful in the future
continue
else:
                # unknown method
pass
# DONE
time_stamp = time.ctime()
logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
return
# def main():
# # Read argtments
# opts = get_options()
# # init logging
# time_stamp = time.ctime()
# logging.basicConfig(level=logging.INFO,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
# datefmt='%m-%d %H:%M',
# filename= 'Trimming_'+opts.input_folder+'_'+opts.barcode_file+'_{4}_{1}_{2}_{0}_{3}.log'.format(*time_stamp.split()),
# filemode='w')
# logger = logging.getLogger(__name__)
# logger.info('JOB START {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
# # DEMULTIPLEX
# workflow(opts)
# # DONE
# time_stamp = time.ctime()
# logger.info('JOB ENDS {4} {1} {2} {0} {3}'.format(*time_stamp.split()))
if __name__ == '__main__':
main()
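get_length_label (present only in original_content; the filtered copy above drops it) recovers the target length that the demultiplexer encodes in the filename, consistent with the `_F.fastq` naming visible in the comments. A small illustration with a made-up filename:

```python
import os

# Hypothetical demultiplexer output name: <sample>_<length>_F.fastq
fname = "SampleA_42_F.fastq"
stem, _ = os.path.splitext(fname)   # "SampleA_42_F"
label = stem.split('_')[-2:-1]      # ['42']
print(int(label[0]))                # 42
```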
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 592 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 46
hexsha: feade5496f17453a160a194c258f2778a56f8b61 | size: 75 | ext: py | lang: Python
max_stars_repo: checkov/yaml_doc/registry.py in pmalkki/checkov @ b6cdf386dd976fe27c16fed6d550756a678a5d7b, licenses ["Apache-2.0"], max_stars_count: null, events null to null
max_issues_repo: checkov/yaml_doc/registry.py in pmalkki/checkov @ b6cdf386dd976fe27c16fed6d550756a678a5d7b, licenses ["Apache-2.0"], max_issues_count: null, events null to null
max_forks_repo: checkov/yaml_doc/registry.py in pmalkki/checkov @ b6cdf386dd976fe27c16fed6d550756a678a5d7b, licenses ["Apache-2.0"], max_forks_count: null, events null to null
content:
from checkov.yaml_doc.base_registry import Registry
registry = Registry()
avg_line_length: 18.75 | max_line_length: 51 | alphanum_fraction: 0.826667
original_content:
from checkov.yaml_doc.base_registry import Registry
registry = Registry()
filtered: remove_non_ascii 0 | remove_decorators 0 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 0 | remove_delete_markers 0
hexsha: 223e2dd85d17fc5cef76030696a77e0b1f297257 | size: 497 | ext: py | lang: Python
max_stars_repo: api/v1/internal.py in anthill-gaming/game_controller @ 849ea700263d7724d7a66907e0961956940e6c64, licenses ["MIT"], max_stars_count: null, events null to null
max_issues_repo: api/v1/internal.py in anthill-gaming/game_controller @ 849ea700263d7724d7a66907e0961956940e6c64, licenses ["MIT"], max_issues_count: null, events null to null
max_forks_repo: api/v1/internal.py in anthill-gaming/game_controller @ 849ea700263d7724d7a66907e0961956940e6c64, licenses ["MIT"], max_forks_count: null, events null to null
content:
"""
Internal API methods for the current service.
Example:
from anthill.platform.api.internal import as_internal, InternalAPI
@as_internal()
async def your_internal_api_method(api: InternalAPI, *params, **options):
# current_service = api.service
...
"""
avg_line_length: 20.708333 | max_line_length: 77 | alphanum_fraction: 0.714286
original_content:
"""
Internal API methods for the current service.
Example:
from anthill.platform.api.internal import as_internal, InternalAPI
@as_internal()
async def your_internal_api_method(api: InternalAPI, *params, **options):
# current_service = api.service
...
"""
from anthill.platform.api.internal import as_internal, InternalAPI
@as_internal()
async def spawn(api: InternalAPI, **options):
pass
@as_internal()
async def terminate(api: InternalAPI, **options):
pass
filtered: remove_non_ascii 0 | remove_decorators 100 | remove_async 0 | remove_classes 0 | remove_generators 0 | remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_unused_imports 45 | remove_delete_markers 68
hexsha: 4842a357559df39c5885b5a0a2d27b724cb94ce7 | size: 12,748 | ext: py | lang: Python
max_stars_repo: tf-model-manip.py in PeiqinSun/tf-tutorials @ 4d3a9560bce018989e62e9146d63e8fe16eaed91, licenses ["Apache-2.0"], max_stars_count: 184, events 2019-02-25T09:03:30.000Z to 2020-05-20T12:30:00.000Z
max_issues_repo: tf-model-manip.py in megvii-research/tf-tutorials @ 4d3a9560bce018989e62e9146d63e8fe16eaed91, licenses ["Apache-2.0"], max_issues_count: 73, events 2019-02-28T02:51:14.000Z to 2020-04-08T10:48:07.000Z
max_forks_repo: tf-model-manip.py in PeiqinSun/tf-tutorials @ 4d3a9560bce018989e62e9146d63e8fe16eaed91, licenses ["Apache-2.0"], max_forks_count: 103, events 2019-02-28T09:05:21.000Z to 2020-05-18T13:22:10.000Z
content:
#!/usr/bin/env mdl
# -*- coding: utf-8 -*-
# =======================================
# File Name :
# Purpose :
# Creation Date :
# Last Modified :
# Created By : sunpeiqin
# =======================================
import os
import sys
import argparse
import magic
import keyword
import importlib
def import_python_source_as_module(fpath, mod_name=None):
""" import a python source as a module; its directory is added to
``sys.path`` during importing, and ``sys.path`` would be restored
afterwards.
Modules newly loaded in the same directory as *fpath* would have an
attribute `__dynamic_loaded_by_spq__` set to 1, and fpath itself would
have that value set to 2.
:type fpath: str
:param fpath: python source file path
:type mod_name: str or None
:param mod_name: target module name; if it exists in `sys.modules`, the
corresponding module would be directly returned; otherwise it is added
to ``sys.modules`` afterward. If it is None, module name would be
derived from *fpath* by replacing '/' to '.' and special chars to '_'
"""
fpath = os.path.realpath(fpath)
if mod_name is None:
# automatically generate mod_name
mod_name = []
for i in fpath.split(os.path.sep):
v = ''
for j in i:
if not j.isidentifier() and not j.isdigit():
j = '_'
v += j
if not v.isidentifier() or keyword.iskeyword(v):
v = '_' + v
assert v.isidentifier() and not keyword.iskeyword(v), (
'failed to convert to python identifier: in={} out={}'.format(
i, v))
mod_name.append(v)
mod_name = '_'.join(mod_name)
if mod_name in sys.modules:
return sys.modules[mod_name]
old_path = sys.path[:]
mod_dir = os.path.dirname(fpath)
sys.path.append(mod_dir)
old_mod_names = set(sys.modules.keys())
try:
final_mod = importlib.machinery.SourceFileLoader(
mod_name, fpath).load_module()
finally:
sys.path.remove(mod_dir)
sys.modules[mod_name] = final_mod
for name, mod in list(sys.modules.items()):
if name in old_mod_names:
continue
try:
fpath = getattr(mod, '__file__', None)
except Exception as exc:
            print('caught exception {} while trying to '
                  'read __file__ attr from {}'.format(repr(exc), name))
continue
if fpath is not None and (
os.path.dirname(os.path.realpath(fpath)).startswith(mod_dir)):
try:
mod.__dynamic_loaded_by_spq__ = 1
except Exception:
pass
try:
final_mod.__dynamic_loaded_by_spq__ = 2
except Exception:
pass
return final_mod
def load_network(network, get_kwargs={}):
'''load a model defined by model.py'''
network = os.path.realpath(network)
mf = magic.from_file(network, mime=True)
mf = mf.decode('utf-8') if isinstance(mf, bytes) else mf
if mf.startswith('text'):
return import_python_source_as_module(network).Model().build()
else:
print('Only supports a model.py which defines a network')
exit(0)
if __name__ == "__main__":
actions = [InfoAction,]
parser = argparse.ArgumentParser()
parser.add_argument('network')
subparsers = parser.add_subparsers(help='action')
for i in actions:
i.add_subparser(subparsers)
args = parser.parse_args()
# load network
load_network(args.network)
if hasattr(args, 'func'):
args.func(args)
else:
print('no action given')
avg_line_length: 34.361186 | max_line_length: 122 | alphanum_fraction: 0.547145
original_content:
#!/usr/bin/env mdl
# -*- coding: utf-8 -*-
# =======================================
# File Name :
# Purpose :
# Creation Date :
# Last Modified :
# Created By : sunpeiqin
# =======================================
import os
import sys
import argparse
import magic
import keyword
import importlib
import collections
import re
import tabulate
import numpy as np
import tensorflow as tf
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "{:3.3f} {}{}".format(num, unit, suffix)
num /= 1024.0
sign_str = '-' if num < 0 else ''
return "{}{:.1f} {}{}".format(sign_str, num, 'Yi', suffix)
def import_python_source_as_module(fpath, mod_name=None):
""" import a python source as a module; its directory is added to
``sys.path`` during importing, and ``sys.path`` would be restored
afterwards.
Modules newly loaded in the same directory as *fpath* would have an
attribute `__dynamic_loaded_by_spq__` set to 1, and fpath itself would
have that value set to 2.
:type fpath: str
:param fpath: python source file path
:type mod_name: str or None
:param mod_name: target module name; if it exists in `sys.modules`, the
corresponding module would be directly returned; otherwise it is added
to ``sys.modules`` afterward. If it is None, module name would be
derived from *fpath* by replacing '/' to '.' and special chars to '_'
"""
fpath = os.path.realpath(fpath)
if mod_name is None:
# automatically generate mod_name
mod_name = []
for i in fpath.split(os.path.sep):
v = ''
for j in i:
if not j.isidentifier() and not j.isdigit():
j = '_'
v += j
if not v.isidentifier() or keyword.iskeyword(v):
v = '_' + v
assert v.isidentifier() and not keyword.iskeyword(v), (
'failed to convert to python identifier: in={} out={}'.format(
i, v))
mod_name.append(v)
mod_name = '_'.join(mod_name)
if mod_name in sys.modules:
return sys.modules[mod_name]
old_path = sys.path[:]
mod_dir = os.path.dirname(fpath)
sys.path.append(mod_dir)
old_mod_names = set(sys.modules.keys())
try:
final_mod = importlib.machinery.SourceFileLoader(
mod_name, fpath).load_module()
finally:
sys.path.remove(mod_dir)
sys.modules[mod_name] = final_mod
for name, mod in list(sys.modules.items()):
if name in old_mod_names:
continue
try:
fpath = getattr(mod, '__file__', None)
except Exception as exc:
            print('caught exception {} while trying to '
                  'read __file__ attr from {}'.format(repr(exc), name))
continue
if fpath is not None and (
os.path.dirname(os.path.realpath(fpath)).startswith(mod_dir)):
try:
mod.__dynamic_loaded_by_spq__ = 1
except Exception:
pass
try:
final_mod.__dynamic_loaded_by_spq__ = 2
except Exception:
pass
return final_mod
def load_network(network, get_kwargs={}):
'''load a model defined by model.py'''
network = os.path.realpath(network)
mf = magic.from_file(network, mime=True)
mf = mf.decode('utf-8') if isinstance(mf, bytes) else mf
if mf.startswith('text'):
return import_python_source_as_module(network).Model().build()
else:
print('Only supports a model.py which defines a network')
exit(0)
def compute_receptiveField_and_stride(nodes):
stride_list = []
receptive_field_list = []
new_nodes = collections.OrderedDict()
for k, v_dict in nodes.items():
data_format = v_dict.get('data_format', None)
ksize = v_dict.get('ksize', [])
shape = v_dict.get('shape', [])
strides = v_dict.get('strides', [])
if data_format == 'NHWC':
h_stride, w_stride = strides[1], strides[2]
if ksize:
h_size, w_size = ksize[1], ksize[2]
else:
h_size, w_size = shape[0], shape[1]
elif data_format == 'NCHW':
h_stride, w_stride = strides[2], strides[3]
if ksize:
h_size, w_size = ksize[2], ksize[3]
else:
h_size, w_size = shape[0], shape[1]
else:
continue
if not stride_list:
receptive_field_list.append((h_size, w_size))
stride_list.append((h_stride, w_stride))
else:
pre_s = stride_list[-1]
pre_rf = receptive_field_list[-1]
stride_list.append((h_stride * pre_s[0], w_stride * pre_s[1]))
receptive_field_list.append((h_size * pre_s[0] + pre_rf[0] - pre_s[0],
w_size * pre_s[1] + pre_rf[1] - pre_s[1]))
nodes[k].update({
'receptive_field': receptive_field_list[-1],
'g_stride': stride_list[-1],
})
new_nodes.update({k:nodes[k]})
return new_nodes
class InfoAction:
@classmethod
def add_subparser(cls, subparsers):
parser = subparsers.add_parser(
            'info', help='view some summary information in text')
parser.set_defaults(func=cls.run)
@classmethod
def run(cls, args):
sess = tf.Session()
sess.run(tf.global_variables_initializer()) # must init graph
cls._cache = collections.OrderedDict()
cls.param_stats(sess)
cls.flops_stats(sess)
cls.summary(sess)
@classmethod
def summary(cls, sess):
data = [['item', 'value']]
data.extend(list(cls._cache.items()))
print('\n'*2)
print('summary\n' + tabulate.tabulate(data))
@classmethod
def param_stats(cls, sess, bar_length_max=20):
tot_param_dim, param_size_bit = 0, 0
data = []
for param in tf.trainable_variables():
value = sess.run(param)
param_dim = np.prod(value.shape)
tot_param_dim += int(param_dim)
nbits = int(re.findall(r"\d+", str(param.dtype))[0])
param_size_bit += param_dim * nbits
# fill data
data.append(dict(
name=param.name,
shape=param.get_shape(),
param_dim=param_dim,
param_type=param.dtype,
size=sizeof_fmt(param_dim * nbits / 8),
size_cum=sizeof_fmt(tot_param_dim * nbits / 8),
mean='{:.2g}'.format(value.mean()),
std='{:.2g}'.format(value.std()),
))
for d in data:
ratio = d['param_dim'] / tot_param_dim
d['ratio'] = ratio
d['percentage'] = '{:.2f}%'.format(ratio * 100)
# construct bar
max_ratio = max([d['ratio'] for d in data])
for d in data:
bar_length = int(d['ratio'] / max_ratio * bar_length_max)
d['size_bar'] = '#' * bar_length
param_size = sizeof_fmt(param_size_bit / 8)
data.append(dict(
name='total',
param_dim=tot_param_dim,
size=param_size,
))
cls._cache['#params'] = len(data)
cls._cache['tot_param_dim'] = tot_param_dim
cls._cache['param_size'] = param_size
cls._param_size = param_size_bit / 8
header = [
'name', 'shape', 'mean', 'std', 'param_dim', 'size', 'size_cum',
'percentage', 'size_bar'
]
# make a table
print('\n'*2)
print('param stats: \n' + tabulate.tabulate(
cls._dict2table(data, header=header)))
@classmethod
def _dict2table(self, list_of_dict, header):
table_data = [header]
for d in list_of_dict:
row = []
for h in header:
v = ''
if h in d:
v = d[h]
row.append(v)
table_data.append(row)
return table_data
@classmethod
def flops_stats(cls, sess, bar_length_max=20):
nodes = [n for n in tf.get_default_graph().as_graph_def(add_shapes=True).node]
cls._cache['#nodes'] = len(nodes)
# get nodes which can affect recept filed and stride
rf_nodes = collections.OrderedDict()
for n in nodes:
if n.op in ['Conv2D', 'VariableV2']:
name_scope = '/'.join(n.name.split('/')[:-1])
if name_scope not in rf_nodes.keys():
rf_nodes[name_scope] = {}
if 'shape' in n.attr.keys() and not rf_nodes[name_scope].get('shape', []):
rf_nodes[name_scope].update(shape=[i.size for i in n.attr['shape'].shape.dim])
if 'strides' in n.attr.keys():
rf_nodes[name_scope].update(strides=list(n.attr['strides'].list.i))
rf_nodes[name_scope].update(data_format=n.attr['data_format'].s.decode('utf-8'))
rf_nodes[name_scope].update(operator=n)
if n.op in ['MaxPool', 'AvgPool']:
rf_nodes[n.name] = {
'ksize': list(n.attr['ksize'].list.i),
                    'strides': list(n.attr['strides'].list.i),
'data_format': n.attr['data_format'].s.decode('utf-8'),
'operator': n,
}
rf_nodes = compute_receptiveField_and_stride(rf_nodes)
# find the input node (only data)
for n in nodes:
if n.op == 'Placeholder':
input_shape = [i.size for i in n.attr['shape'].shape.dim][1:]
break
for k, v_dict in rf_nodes.items():
if v_dict['data_format'] == 'NHWC':
v_dict['input_shape'] = input_shape
v_dict['output_shape'] = [i.size for i in v_dict['operator'].attr['_output_shapes'].list.shape[0].dim][1:]
elif v_dict['data_format'] == 'NCHW':
pass
if v_dict['operator'].op in ['Conv2D']:
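                # Conv MAC count: H_out * W_out * C_out (prod of output_shape)
                # times C_in times the kernel area; pooling is counted as 0 FLOPs.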
ic = v_dict['input_shape'][-1]
v_dict['flops'] = np.prod(v_dict['output_shape']) * ic * np.prod(v_dict['shape'][:2])
elif v_dict['operator'].op in ['MaxPool', 'AvgPool']:
v_dict['flops'] = 0
input_shape = v_dict['output_shape']
opr_info = []
total_flops = 0
for k, v_dict in rf_nodes.items():
total_flops += v_dict['flops']
opr_info.append({
'opr_name': v_dict['operator'].name,
'opr_class': v_dict['operator'].op,
'input_shapes': v_dict['input_shape'],
'output_shapes': v_dict['output_shape'],
'flops_num': v_dict['flops'],
'flops_cum': total_flops,
'receptive_field': v_dict['receptive_field'],
'stride': v_dict['g_stride']
})
flops = [i['flops_num'] for i in opr_info]
max_flops = max(flops + [0])
for i in opr_info:
f = i['flops_num']
i['flops'] = sizeof_fmt(f, suffix='OPs')
fc = i['flops_cum']
i['flops_cum'] = sizeof_fmt(fc, suffix='OPs')
r = i['ratio'] = f / total_flops
i['percentage'] = '{:.2f}%'.format(r * 100)
bar_length = int(f / max_flops * bar_length_max)
i['bar'] = '#' * bar_length
header = ['opr_name', 'opr_class', 'input_shapes', 'output_shapes', 'receptive_field',
'stride', 'flops', 'flops_cum', 'percentage', 'bar']
total_flops_str = sizeof_fmt(total_flops, suffix='OPs')
#total_var_size = sum(sum(s[1] for s in i['output_shapes']) for i in opr_info)
opr_info.append(dict(
opr_name='total',
flops=total_flops_str,
#output_shapes=total_var_size
))
cls._cache['total_flops'] = total_flops_str
cls._cache['flops/param_size'] = '{:.3g}'.format(
total_flops / cls._param_size)
print('\n'*2)
print('flops stats: \n' + tabulate.tabulate(
cls._dict2table(opr_info, header=header)))
if __name__ == "__main__":
actions = [InfoAction,]
parser = argparse.ArgumentParser()
parser.add_argument('network')
subparsers = parser.add_subparsers(help='action')
for i in actions:
i.add_subparser(subparsers)
args = parser.parse_args()
# load network
load_network(args.network)
if hasattr(args, 'func'):
args.func(args)
else:
print('no action given')
| 0
| 6,922
| 0
| -4
| 0
| 1,792
| 0
| -22
| 341
|
d607417a565fc6e36134e72eef7edfbbfe35876d
| 3,985
|
py
|
Python
|
preprocessing.py
|
pedrada88/rwe
|
a3462556a70bd4a51d2978cadc6101e22723356a
|
[
"BSD-Source-Code"
] | 15
|
2019-06-05T21:24:42.000Z
|
2021-01-04T00:30:29.000Z
|
preprocessing.py
|
pedrada88/rwe
|
a3462556a70bd4a51d2978cadc6101e22723356a
|
[
"BSD-Source-Code"
] | null | null | null |
preprocessing.py
|
pedrada88/rwe
|
a3462556a70bd4a51d2978cadc6101e22723356a
|
[
"BSD-Source-Code"
] | 1
|
2022-01-29T16:23:03.000Z
|
2022-01-29T16:23:03.000Z
|
# -*- coding: utf-8 -*-
#Load embedding vocabulary
#Load embedding vocabulary
#Load embeddings filtered by pre-given vocabulary
#Load embedding matrices input/output
#Split training and development data
| 38.68932
| 108
| 0.673275
|
# -*- coding: utf-8 -*-
import numpy as np
import random
import sys
#Load embedding vocabulary
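# Embedding files are assumed to be in word2vec text format: a "count dim"
# header line followed by "token v_1 ... v_dim" lines.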
def load_vocab_embeddings(input_path):
first_line=True
vocab=set()
    input_file_relations=open(input_path, 'r', encoding='utf-8')
for line in input_file_relations:
if first_line==True:
first_line=False
else:
vocab.add(line.strip().split(" ")[0])
return vocab
#Load embedding vocabulary
def load_word_vocab_from_relation_vectors(input_path):
pre_word_vocab=set()
first_line=True
input_file_relations=open(input_path, 'r', encoding='utf-8')
for line in input_file_relations:
linesplit=line.strip().split(" ")
if first_line==True:
first_line=False
else:
relation=linesplit[0]
if "__" not in relation: sys.exit("ERROR: Pair '"+relation+"' does not contain underscore")
relation_split=relation.rsplit("__",1)
word1=relation_split[0]
word2=relation_split[1]
pre_word_vocab.add(word1)
pre_word_vocab.add(word2)
return pre_word_vocab
#Load embeddings filtered by pre-given vocabulary
def load_embeddings_filtered_byvocab(input_path,vocab):
word2index={}
index2word={}
matrix_word_embeddings=[]
first_line=True
input_file_relations=open(input_path, 'r', encoding='utf-8')
cont=0
for line in input_file_relations:
linesplit=line.strip().split(" ")
if first_line==True:
dimensions=int(linesplit[1])
first_line=False
else:
word=linesplit[0]
if word in vocab and word not in word2index:
word2index[word]=cont
index2word[cont]=word
cont+=1
matrix_word_embeddings.append(np.asarray([float(dim) for dim in linesplit[1:dimensions+1]]))
return matrix_word_embeddings,word2index,index2word,dimensions
#Load embedding matrices input/output
def load_training_data(input_path,matrix_word_embeddings,word2index):
matrix_input=[]
matrix_output=[]
first_line=True
input_file_relations=open(input_path, 'r', encoding='utf-8')
for line in input_file_relations:
linesplit=line.strip().split(" ")
if first_line==True:
            dimensions=int(linesplit[1])
first_line=False
else:
relation=linesplit[0]
if "__" not in relation: sys.exit("ERROR: Pair '"+relation+"' does not contain underscore")
relation_split=relation.rsplit("__",1)
word1=relation_split[0]
word2=relation_split[1]
if word1 in word2index and word2 in word2index:
matrix_input.append(np.asarray([word2index[word1],word2index[word2]]))
matrix_output.append(np.asarray([float(dim) for dim in linesplit[1:dimensions+1]]))
return matrix_input,matrix_output,dimensions
#Split training and development data
def split_training_data(matrix_input,matrix_output,devsize,batchsize):
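    # Train and dev sizes are rounded down to multiples of batchsize so that
    # both splits consist only of full batches.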
matrix_input_train=[]
matrix_output_train=[]
matrix_input_dev=[]
matrix_output_dev=[]
num_instances=int((len(matrix_input)//batchsize)*batchsize)
final_size_dev=int(((num_instances*devsize)//batchsize)*batchsize)
final_size_train=int(((num_instances-final_size_dev)//batchsize)*batchsize)
print ("Size train set: "+str(final_size_train))
print ("Size dev set: "+str(final_size_dev))
all_instances=range(num_instances)
    list_index_dev=set(random.sample(all_instances,final_size_dev))
for i in range(num_instances):
if i in list_index_dev:
matrix_input_dev.append(matrix_input[i])
matrix_output_dev.append(matrix_output[i])
else:
matrix_input_train.append(matrix_input[i])
matrix_output_train.append(matrix_output[i])
return matrix_input_train,matrix_output_train,matrix_input_dev,matrix_output_dev
| 0
| 0
| 0
| 0
| 0
| 3,633
| 0
| -11
| 154
|
89b0c8a21bc6d8dd82ceda3dad99a30e2b867960
| 4,219
|
py
|
Python
|
research/carls/candidate_sampling/candidate_sampler_config_builder_test.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 939
|
2019-08-28T06:50:30.000Z
|
2022-03-30T02:37:07.000Z
|
research/carls/candidate_sampling/candidate_sampler_config_builder_test.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 80
|
2019-09-01T19:47:30.000Z
|
2022-02-02T20:38:38.000Z
|
research/carls/candidate_sampling/candidate_sampler_config_builder_test.py
|
srihari-humbarwadi/neural-structured-learning
|
345b8d644dd7745179263bf6dc9aeb8a921528f4
|
[
"Apache-2.0"
] | 196
|
2019-09-01T19:38:53.000Z
|
2022-02-08T01:25:57.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for candidate_sampler_config_builder."""
import tensorflow as tf
if __name__ == '__main__':
tf.test.main()
| 34.024194
| 99
| 0.71178
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for candidate_sampler_config_builder."""
from research.carls.candidate_sampling import candidate_sampler_config_builder as cs_config_builder
from research.carls.candidate_sampling import candidate_sampler_config_pb2 as cs_config_pb2
import tensorflow as tf
class CandidateSamplerConfigBuilderTest(tf.test.TestCase):
def test_negative_sampler(self):
self.assertProtoEquals(
"""
unique: true
sampler: UNIFORM
""",
cs_config_builder.negative_sampler(
True, cs_config_pb2.NegativeSamplerConfig.UNIFORM))
self.assertProtoEquals(
"""
unique: false
sampler: UNIFORM
""",
cs_config_builder.negative_sampler(
False, cs_config_pb2.NegativeSamplerConfig.UNIFORM))
self.assertProtoEquals(
"""
unique: true
sampler: LOG_UNIFORM
""",
cs_config_builder.negative_sampler(
True, cs_config_pb2.NegativeSamplerConfig.LOG_UNIFORM))
self.assertProtoEquals(
"""
unique: false
sampler: LOG_UNIFORM
""",
cs_config_builder.negative_sampler(
False, cs_config_pb2.NegativeSamplerConfig.LOG_UNIFORM))
self.assertProtoEquals(
"""
unique: false
sampler: UNIFORM
""", cs_config_builder.negative_sampler(False, 'UNIFORM'))
self.assertProtoEquals(
"""
unique: true
sampler: LOG_UNIFORM
""", cs_config_builder.negative_sampler(True, 'LOG_UNIFORM'))
def test_brute_force_topk_sampler_success(self):
self.assertProtoEquals("""
similarity_type: COSINE
""", cs_config_builder.brute_force_topk_sampler('COSINE'))
self.assertProtoEquals(
"""
similarity_type: COSINE
""", cs_config_builder.brute_force_topk_sampler(cs_config_pb2.COSINE))
self.assertProtoEquals(
"""
similarity_type: DOT_PRODUCT
""", cs_config_builder.brute_force_topk_sampler('DOT_PRODUCT'))
self.assertProtoEquals(
"""
similarity_type: DOT_PRODUCT
""", cs_config_builder.brute_force_topk_sampler(cs_config_pb2.DOT_PRODUCT))
def test_brute_force_topk_sampler_failed(self):
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler(cs_config_pb2.UNKNOWN)
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler('Unknown type string')
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler(cs_config_pb2.SampleContext())
with self.assertRaises(ValueError):
cs_config_builder.brute_force_topk_sampler(999)
def test_build_candidate_sampler_config_success(self):
self.assertProtoEquals(
"""
extension {
[type.googleapis.com/carls.candidate_sampling.BruteForceTopkSamplerConfig] {
similarity_type: COSINE
}
}
""",
cs_config_builder.build_candidate_sampler_config(
cs_config_builder.brute_force_topk_sampler('COSINE')))
self.assertProtoEquals(
"""
extension {
[type.googleapis.com/carls.candidate_sampling.NegativeSamplerConfig] {
unique: true
sampler: UNIFORM
}
}
""",
cs_config_builder.build_candidate_sampler_config(
cs_config_builder.negative_sampler(True, 'UNIFORM')))
def test_build_candidate_sampler_config_failed(self):
with self.assertRaises(ValueError):
cs_config_builder.build_candidate_sampler_config(100)
with self.assertRaises(ValueError):
cs_config_builder.build_candidate_sampler_config('invalid')
if __name__ == '__main__':
tf.test.main()
| 0
| 0
| 0
| 3,306
| 0
| 0
| 0
| 148
| 68
|
3089c603282eb0dd2e940a59b5b3d380394bef44
| 4,460
|
py
|
Python
|
experiments/tuning/tune_came.py
|
antoineBarbez/Project
|
8fa42b5198d03b5b142f413e218b7d7a2d994fc9
|
[
"MIT"
] | 4
|
2019-09-30T19:47:42.000Z
|
2020-02-13T18:46:32.000Z
|
experiments/tuning/tune_came.py
|
antoineBarbez/CAME
|
8fa42b5198d03b5b142f413e218b7d7a2d994fc9
|
[
"MIT"
] | null | null | null |
experiments/tuning/tune_came.py
|
antoineBarbez/CAME
|
8fa42b5198d03b5b142f413e218b7d7a2d994fc9
|
[
"MIT"
] | null | null | null |
from context import ROOT_DIR, nnUtils, train_came, came
import tensorflow as tf
import numpy as np
import os
import progressbar
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
if __name__ == "__main__":
args = parse_args()
data_x, data_y = train_came.build_dataset(train_came.training_systems, args.antipattern, args.history_length)
data_x, data_y = nnUtils.shuffle(data_x, data_y)
bar = progressbar.ProgressBar(maxval=args.n_test, \
widgets=['Performing cross validation: ' ,progressbar.Percentage()])
bar.start()
output_file_path = os.path.join(ROOT_DIR, 'experiments', 'tuning', 'results', 'came_' + args.antipattern + '_' + str(args.history_length) + '.csv')
params = []
perfs = []
for i in range(args.n_test):
learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes = generateRandomHyperParameters(args.history_length)
params.append([learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes])
predictions = np.empty(shape=[0, 1])
for j in range(args.n_fold):
x_train, y_train, x_test, y_test = get_cross_validation_dataset(data_x, data_y, j, args.n_fold)
# New graph
tf.reset_default_graph()
# Create model
model = came.CAME(
nb_metrics=x_train.shape[-1],
history_length=args.history_length,
filters=nb_filters,
kernel_sizes=kernel_sizes,
pool_sizes=pool_sizes,
dense_sizes=dense_sizes)
with tf.Session() as session:
# Initialize the variables of the TensorFlow graph.
session.run(tf.global_variables_initializer())
train(
session=session,
model=model,
x_train=x_train,
y_train=y_train,
num_step=args.n_step,
lr=learning_rate,
beta=beta,
gamma=gamma)
predictions = np.concatenate((predictions, session.run(model.inference, feed_dict={model.input_x: x_test})), axis=0)
perfs.append(nnUtils.f_measure(predictions, data_y))
indexes = np.argsort(np.array(perfs))
with open(output_file_path, 'w') as file:
file.write("Learning rate;Beta;Gamma;Filters;Kernel;Pool;Dense;F-measure\n")
for j in reversed(indexes):
for k in range(len(params[j])):
file.write(str(params[j][k]) + ';')
file.write(str(perfs[j]) + '\n')
bar.update(i+1)
bar.finish()
| 33.533835
| 148
| 0.717937
|
from context import ROOT_DIR, nnUtils, train_came, came
import tensorflow as tf
import numpy as np
import argparse
import os
import progressbar
import random
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("antipattern", help="either 'god_class' or 'feature_envy'")
parser.add_argument("history_length", type=int)
parser.add_argument("-n_fold", type=int, default=5)
parser.add_argument("-n_step", type=int, default=100)
parser.add_argument("-n_test", type=int, default=100)
return parser.parse_args()
def generateRandomHyperParameters(history_length):
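    # Learning rate and the regularization weight beta are sampled
    # log-uniformly over [10**-2.5, 1]; conv depth scales with history length.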
learning_rate = 10**-random.uniform(0.0, 2.5)
beta = 10**-random.uniform(0.0, 2.5)
gamma = random.randint(1, 10)
nb_filters = []
kernel_sizes = []
pool_sizes = []
nb_conv_layer = 0 if history_length <= 1 else random.randint(0,1) if history_length <= 10 else random.randint(1,2) if history_length <= 100 else 2
for _ in range(nb_conv_layer):
nb_filter = random.randint(10,60)
kernel_size = random.randint(2,4)
pool_size = random.choice([2, 5, 10]) if history_length <=100 else random.choice([5, 10, 15, 20])
nb_filters.append(nb_filter)
kernel_sizes.append(kernel_size)
pool_sizes.append(pool_size)
minBound = 4
maxBound = 100
dense_sizes = []
nb_dense_layer = random.randint(1, 3)
for _ in range(nb_dense_layer):
dense_size = random.randint(minBound, maxBound)
dense_sizes.append(dense_size)
maxBound = dense_size
return learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes
def get_cross_validation_dataset(X, Y, fold_index, n_fold):
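    # Concatenate every fold except fold_index into the training set; the
    # held-out fold becomes the test split.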
folds_x, folds_y = nnUtils.split(X, Y, n_fold)
x_train = np.empty(shape=[0, X.shape[1], X.shape[2]])
y_train = np.empty(shape=[0, 1])
for i in range(n_fold):
if i != fold_index:
x_train = np.concatenate((x_train, folds_x[i]), axis=0)
y_train = np.concatenate((y_train, folds_y[i]), axis=0)
return x_train, y_train, folds_x[fold_index], folds_y[fold_index]
def train(session, model, x_train, y_train, num_step, lr, beta, gamma):
learning_rate = lr
for step in range(num_step):
feed_dict_train = {
model.input_x: x_train,
model.input_y: y_train,
model.learning_rate:learning_rate,
model.beta:beta,
model.gamma:gamma}
session.run(model.learning_step, feed_dict=feed_dict_train)
if __name__ == "__main__":
args = parse_args()
data_x, data_y = train_came.build_dataset(train_came.training_systems, args.antipattern, args.history_length)
data_x, data_y = nnUtils.shuffle(data_x, data_y)
bar = progressbar.ProgressBar(maxval=args.n_test, \
widgets=['Performing cross validation: ' ,progressbar.Percentage()])
bar.start()
output_file_path = os.path.join(ROOT_DIR, 'experiments', 'tuning', 'results', 'came_' + args.antipattern + '_' + str(args.history_length) + '.csv')
params = []
perfs = []
for i in range(args.n_test):
learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes = generateRandomHyperParameters(args.history_length)
params.append([learning_rate, beta, gamma, nb_filters, kernel_sizes, pool_sizes, dense_sizes])
predictions = np.empty(shape=[0, 1])
for j in range(args.n_fold):
x_train, y_train, x_test, y_test = get_cross_validation_dataset(data_x, data_y, j, args.n_fold)
# New graph
tf.reset_default_graph()
# Create model
model = came.CAME(
nb_metrics=x_train.shape[-1],
history_length=args.history_length,
filters=nb_filters,
kernel_sizes=kernel_sizes,
pool_sizes=pool_sizes,
dense_sizes=dense_sizes)
with tf.Session() as session:
# Initialize the variables of the TensorFlow graph.
session.run(tf.global_variables_initializer())
train(
session=session,
model=model,
x_train=x_train,
y_train=y_train,
num_step=args.n_step,
lr=learning_rate,
beta=beta,
gamma=gamma)
predictions = np.concatenate((predictions, session.run(model.inference, feed_dict={model.input_x: x_test})), axis=0)
perfs.append(nnUtils.f_measure(predictions, data_y))
indexes = np.argsort(np.array(perfs))
with open(output_file_path, 'w') as file:
file.write("Learning rate;Beta;Gamma;Filters;Kernel;Pool;Dense;F-measure\n")
for j in reversed(indexes):
for k in range(len(params[j])):
file.write(str(params[j][k]) + ';')
file.write(str(perfs[j]) + '\n')
bar.update(i+1)
bar.finish()
| 0
| 0
| 0
| 0
| 0
| 2,067
| 0
| -14
| 137
|
35765a3f52057a1d8c00d42bc632985e8ea22e07
| 4,364
|
py
|
Python
|
confluent_server/confluent/discovery/handlers/imm.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 27
|
2015-02-11T13:56:46.000Z
|
2021-12-28T14:17:20.000Z
|
confluent_server/confluent/discovery/handlers/imm.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 32
|
2015-09-23T13:19:04.000Z
|
2022-03-15T13:50:45.000Z
|
confluent_server/confluent/discovery/handlers/imm.py
|
brianfinley/confluent
|
6458eac93b1e3c6d45e26a7ddb434d692b5cdff2
|
[
"Apache-2.0"
] | 24
|
2015-07-14T20:41:55.000Z
|
2021-07-15T04:18:51.000Z
|
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import confluent.util as util
| 38.964286
| 77
| 0.552704
|
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import confluent.discovery.handlers.bmc as bmchandler
import pyghmi.exceptions as pygexc
import pyghmi.ipmi.private.util as pygutil
import confluent.util as util
import struct
class NodeHandler(bmchandler.NodeHandler):
devname = 'IMM'
@classmethod
def adequate(cls, info):
# We can sometimes receive a partially initialized SLP packet
# This is not adequate for being satisfied
return bool(info.get('attributes', {}))
def scan(self):
slpattrs = self.info.get('attributes', {})
self.isdense = False
try:
ff = slpattrs.get('enclosure-form-factor', [''])[0]
except IndexError:
return
wronguuid = slpattrs.get('node-uuid', [''])[0]
if wronguuid:
# we need to fix the first three portions of the uuid
uuidprefix = wronguuid.split('-')[:3]
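            # SLP reports the first three UUID fields byte-swapped; repack them
            # little-endian ('<IHH') and re-hex to recover the canonical order.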
uuidprefix = codecs.encode(struct.pack(
'<IHH', *[int(x, 16) for x in uuidprefix]), 'hex')
uuidprefix = util.stringify(uuidprefix)
uuidprefix = uuidprefix[:8] + '-' + uuidprefix[8:12] + '-' + \
uuidprefix[12:16]
self.info['uuid'] = uuidprefix + '-' + '-'.join(
wronguuid.split('-')[3:])
self.info['uuid'] = self.info['uuid'].lower()
room = slpattrs.get('room-id', [None])[0]
if room:
self.info['room'] = room
rack = slpattrs.get('rack-id', [None])[0]
if rack:
self.info['rack'] = rack
name = slpattrs.get('name', [None])[0]
if name:
self.info['hostname'] = name
unumber = slpattrs.get('lowest-u', [None])[0]
if unumber:
self.info['u'] = unumber
location = slpattrs.get('location', [None])[0]
if location:
self.info['location'] = location
if ff not in ('dense-computing', 'BC2'):
# do not probe unless it's a dense platform
return
self.isdense = True
encuuid = slpattrs.get('chassis-uuid', [None])[0]
if encuuid:
self.info['enclosure.uuid'] = encuuid
slot = int(slpattrs.get('slot', ['0'])[0])
if slot != 0:
self.info['enclosure.bay'] = slot
def probe(self):
if self.info.get('enclosure.bay', 0) == 0:
self.scan()
if self.info.get('enclosure.bay', 0) != 0:
# scan has already populated info
return
ff = self.info.get('attributes', {}).get('enclosure-form-factor', '')
if ff != 'dense-computing':
return
try:
# we are a dense platform, but the SLP data did not give us slot
# attempt to probe using IPMI
ipmicmd = self._get_ipmicmd()
guiddata = ipmicmd.xraw_command(netfn=6, command=8)
self.info['uuid'] = pygutil.decode_wireformat_uuid(
guiddata['data']).lower()
ipmicmd.oem_init()
bayid = ipmicmd._oem.immhandler.get_property(
'/v2/cmm/sp/7')
if not bayid:
return
self.info['enclosure.bay'] = int(bayid)
smmid = ipmicmd._oem.immhandler.get_property(
'/v2/ibmc/smm/chassis/uuid')
if not smmid:
return
smmid = smmid.lower().replace(' ', '')
smmid = '{0}-{1}-{2}-{3}-{4}'.format(smmid[:8], smmid[8:12],
smmid[12:16], smmid[16:20],
smmid[20:])
self.info['enclosure.uuid'] = smmid
self.info['enclosure.type'] = 'smm'
except pygexc.IpmiException as ie:
print(repr(ie))
raise
| 0
| 189
| 0
| 3,390
| 0
| 0
| 0
| 50
| 134
|
c2c0dc95899f6f8dad0a7096d7c04088b895f8b1
| 363
|
py
|
Python
|
alembic/versions/0367b739bb81_add_country_code_to_table.py
|
danieliheonu/bigfastapi
|
483554776195c9f38bb46ba719b613360eda1028
|
[
"MIT"
] | 1
|
2022-03-20T21:46:05.000Z
|
2022-03-20T21:46:05.000Z
|
alembic/versions/0367b739bb81_add_country_code_to_table.py
|
danieliheonu/bigfastapi
|
483554776195c9f38bb46ba719b613360eda1028
|
[
"MIT"
] | null | null | null |
alembic/versions/0367b739bb81_add_country_code_to_table.py
|
danieliheonu/bigfastapi
|
483554776195c9f38bb46ba719b613360eda1028
|
[
"MIT"
] | null | null | null |
"""add country code to table
Revision ID: 0367b739bb81
Revises: 1e09924c1938
Create Date: 2022-01-27 16:10:57.297020
"""
# revision identifiers, used by Alembic.
revision = '0367b739bb81'
down_revision = '1e09924c1938'
branch_labels = None
depends_on = None
| 14.52
| 40
| 0.741047
|
"""add country code to table
Revision ID: 0367b739bb81
Revises: 1e09924c1938
Create Date: 2022-01-27 16:10:57.297020
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0367b739bb81'
down_revision = '1e09924c1938'
branch_labels = None
depends_on = None
def upgrade():
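    # The original body was stripped from this dump. A typical upgrade for the
    # stated revision would add the column, e.g. (table/column names are
    # assumptions, not taken from the source):
    # op.add_column('some_table', sa.Column('country_code', sa.String(), nullable=True))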
pass
def downgrade():
pass
| 0
| 0
| 0
| 0
| 0
| 6
| 0
| 3
| 90
|
9c35a421d2475f3566cf629c7b74b3188447fc25
| 152
|
py
|
Python
|
scripts/visualize_dataset.py
|
birlrobotics/smach_based_introspection_framework
|
f16742339cddfc86effba4dbf6e5062304704b89
|
[
"BSD-3-Clause"
] | 7
|
2018-02-23T13:02:13.000Z
|
2020-07-28T18:27:47.000Z
|
scripts/visualize_dataset.py
|
birlrobotics/smach_based_introspection_framework
|
f16742339cddfc86effba4dbf6e5062304704b89
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/visualize_dataset.py
|
birlrobotics/smach_based_introspection_framework
|
f16742339cddfc86effba4dbf6e5062304704b89
|
[
"BSD-3-Clause"
] | 1
|
2019-06-24T09:20:06.000Z
|
2019-06-24T09:20:06.000Z
|
#!/usr/bin/env python
import smach_based_introspection_framework.offline_part.visualize_dataset as m
if __name__ == "__main__":
m.run()
| 19
| 78
| 0.776316
|
#!/usr/bin/env python
import os
import smach_based_introspection_framework.offline_part.visualize_dataset as m
if __name__ == "__main__":
m.run()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -12
| 23
|
489726ea2da03626c5a2318798d31acaac09e9b1
| 12,261
|
py
|
Python
|
packages/weevely/modules/net/proxy.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/weevely/modules/net/proxy.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
packages/weevely/modules/net/proxy.py
|
ZooAtmosphereGroup/HelloPackages
|
0ccffd33bf927b13d28c8f715ed35004c33465d9
|
[
"Apache-2.0"
] | null | null | null |
from core.config import base_path
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import threading
import re
import os
import threading
import re
from tempfile import mkdtemp
re_valid_ip = re.compile(
    r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
re_valid_hostname = re.compile(r"^(([a-zA-Z0-9\-]+)\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$")
temp_certdir = mkdtemp()
lock = threading.Lock()
# Create path for the CA certificates and keys
cert_folder = os.path.join(base_path, 'certs')
try:
os.makedirs(cert_folder)
except:
pass
#
# Most of the Proxy part has been taken from https://github.com/inaz2/proxy2
#
| 32.350923
| 119
| 0.575402
|
from core.loggers import log, dlog
from core import messages
from core.vectors import ModuleExec
from core.module import Module
from core.config import base_path
from http.server import HTTPServer, BaseHTTPRequestHandler
from tempfile import gettempdir
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, urlunparse, ParseResult
from io import StringIO
from http.client import HTTPResponse
import threading
import re
import os
import sys
import socket
import ssl
import select
import http.client
import urllib.parse
import threading
import time
import json
import re
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
from io import BytesIO
from subprocess import Popen, PIPE
from html.parser import HTMLParser
from tempfile import mkdtemp
re_valid_ip = re.compile(
    r"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$")
re_valid_hostname = re.compile(r"^(([a-zA-Z0-9\-]+)\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$")
temp_certdir = mkdtemp()
lock = threading.Lock()
class FakeSocket():
def __init__(self, response_str):
self._file = BytesIO(response_str)
def makefile(self, *args, **kwargs):
return self._file
# Create path for the CA certificates and keys
cert_folder = os.path.join(base_path, 'certs')
try:
os.makedirs(cert_folder)
except:
pass
def get_cert_path(path):
return os.path.join(cert_folder, path)
def initialize_certificates():
cakey_path = get_cert_path("ca.key")
cacrt_path = get_cert_path("ca.crt")
certkey_path = get_cert_path("cert.key")
if not os.path.isfile(cakey_path) or not os.path.isfile(cacrt_path) or not os.path.isfile(certkey_path):
# openssl genrsa -out ca.key 2048
p1 = Popen(["openssl", "genrsa", "-out", cakey_path, "2048"])
p1.communicate()
p1.wait()
# openssl req -new -x509 -days 3650 -key ca.key -out ca.crt -subj "/CN=proxy2 CA"
p2 = Popen(["openssl", "req", "-new", "-x509", "-days", "3650", "-key",
cakey_path, "-out", cacrt_path, "-subj", "/CN=proxy2 CA"])
p2.communicate()
p2.wait()
# openssl genrsa -out cert.key 2048
p3 = Popen(["openssl", "genrsa", "-out", certkey_path, "2048"])
p3.communicate()
p3.wait()
#
# Most of the Proxy part has been taken from https://github.com/inaz2/proxy2
#
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
address_family = socket.AF_INET
daemon_threads = True
def handle_error(self, request, client_address):
        # suppress socket/ssl related errors
cls, e = sys.exc_info()[:2]
if cls is socket.error or cls is ssl.SSLError:
pass
else:
return HTTPServer.handle_error(self, request, client_address)
class ProxyRequestHandler(BaseHTTPRequestHandler):
cakey = get_cert_path('ca.key')
cacert = get_cert_path('ca.crt')
certkey = get_cert_path('cert.key')
certdir = temp_certdir
timeout = 5
lock = threading.Lock()
def __init__(self, *args, **kwargs):
self.tls = threading.local()
self.tls.conns = {}
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_error(self, format, *args):
        # suppress "Request timed out: timeout('timed out',)"
if isinstance(args[0], socket.timeout):
return
def do_CONNECT(self):
self.connect_intercept()
def connect_intercept(self):
hostname = self.path.split(':')[0]
certname = "%s.crt" % (hostname)
certpath = os.path.join(self.certdir, certname)
if not (re_valid_ip.match(hostname) or re_valid_hostname.match(hostname)):
log.warning("CN name '%s' is not valid, using 'www.weevely.com'" % (hostname))
hostname = 'www.weevely.com'
with self.lock:
if not os.path.isfile(certpath):
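                # First request for this host: mint a leaf certificate signed
                # by our CA, using the ms timestamp as the serial number.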
epoch = "%d" % (time.time() * 1000)
p1 = Popen(["openssl", "req", "-new", "-key", self.certkey, "-subj", "/CN=%s" % hostname], stdout=PIPE)
p2 = Popen(["openssl", "x509", "-req", "-days", "3650", "-CA", self.cacert, "-CAkey", self.cakey,
"-set_serial", epoch, "-out", certpath], stdin=p1.stdout, stderr=PIPE)
p2.communicate()
self.send_response_only(200, 'Connection Established')
self.end_headers()
try:
self.connection = ssl.wrap_socket(self.connection, keyfile=self.certkey, certfile=certpath,
server_side=True)
self.rfile = self.connection.makefile("rb", self.rbufsize)
self.wfile = self.connection.makefile("wb", self.wbufsize)
except Exception as e:
log.debug(e)
raise
conntype = self.headers.get('Proxy-Connection', '')
if self.protocol_version == "HTTP/1.1" and conntype.lower() != 'close':
self.close_connection = 0
else:
self.close_connection = 1
def connect_relay(self):
address = self.path.split(':', 1)
address[1] = int(address[1]) or 443
try:
s = socket.create_connection(address, timeout=self.timeout)
except Exception as e:
self.send_error(502)
return
self.send_response(200, 'Connection Established')
self.end_headers()
conns = [self.connection, s]
self.close_connection = 0
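        # Blind TCP relay: shuttle bytes in both directions until either peer
        # closes or select() times out.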
while not self.close_connection:
rlist, wlist, xlist = select.select(conns, [], conns, self.timeout)
if xlist or not rlist:
break
for r in rlist:
other = conns[1] if r is conns[0] else conns[0]
data = r.recv(8192)
if not data:
self.close_connection = 1
break
other.sendall(data)
def do_GET(self):
if self.path == 'http://weevely/':
self.send_cacert()
return
req = self
content_length = int(req.headers.get('Content-Length', 0))
req_body = self.rfile.read(content_length) if content_length else ''
if req.path[0] == '/':
if isinstance(self.connection, ssl.SSLSocket):
req.path = "https://%s%s" % (req.headers['Host'], req.path)
else:
req.path = "http://%s%s" % (req.headers['Host'], req.path)
req.headers['Content-length'] = str(len(req_body))
u = urllib.parse.urlsplit(req.path)
scheme, netloc, path = u.scheme, u.netloc, (u.path + '?' + u.query if u.query else u.path)
assert scheme in ('http', 'https')
if netloc:
req.headers['Host'] = netloc
setattr(req, 'headers', self.filter_headers(req.headers))
net_curl_args = [
'-X',
self.command,
'-i'
]
net_curl_args.append(self.path)
for h in req.headers:
if h.title().lower() == 'host':
host = self.headers[h]
else:
net_curl_args += ['-H', '%s: %s' % (h.title(), self.headers[h])]
if self.command == 'POST':
content_len = int(self.headers.get('content-length', 0))
net_curl_args += ['-d', req_body]
lock.acquire()
try:
result, headers, saved = ModuleExec(
'net_curl',
net_curl_args
).run()
finally:
lock.release()
if not headers:
log.debug('Error no headers')
self.send_error(502)
return
log.debug(
'> ' + '\r\n> '.join(
['%s: %s' % (
h.title(),
self.headers[h]
) for h in self.headers
]
)
)
log.debug('< ' + '\r\n< '.join([h.decode('utf-8', 'replace') for h in headers]))
http_response_str = b'\r\n'.join(headers) + b'\r\n\r\n' + result
source = FakeSocket(http_response_str)
res = HTTPResponse(source)
res.begin()
version_table = {10: 'HTTP/1.0', 11: 'HTTP/1.1'}
setattr(res, 'headers', res.msg)
setattr(res, 'response_version', version_table[res.version])
# support streaming
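        # Heuristic: a response without Content-Length that is marked
        # Cache-Control: no-store is relayed chunk-by-chunk instead of buffered.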
if not 'Content-Length' in res.headers and 'no-store' in res.headers.get('Cache-Control', ''):
setattr(res, 'headers', self.filter_headers(res.headers))
self.relay_streaming(res)
return
try:
res_body = res.read()
except Exception as e:
log.debug(e)
self.send_error(500)
return
setattr(res, 'headers', self.filter_headers(res.headers))
respstring = "%s %d %s\r\n" % (self.protocol_version, res.status, res.reason)
self.wfile.write(respstring.encode('utf-8'))
self.wfile.write(res.headers.as_bytes())
self.wfile.write(res_body)
self.wfile.flush()
def relay_streaming(self, res):
respstring = "%s %d %s\r\n" % (self.protocol_version, res.status, res.reason)
self.wfile.write(respstring.encode('utf-8'))
self.wfile.write(res.headers.as_bytes() + b"\r\n")
try:
while True:
chunk = res.read(8192)
if not chunk:
break
self.wfile.write(chunk)
self.wfile.flush()
except socket.error:
# connection closed by client
pass
do_HEAD = do_GET
do_POST = do_GET
do_PUT = do_GET
do_DELETE = do_GET
do_OPTIONS = do_GET
def filter_headers(self, headers):
# http://tools.ietf.org/html/rfc2616#section-13.5.1
hop_by_hop = (
'connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
'upgrade')
for k in hop_by_hop:
del headers[k]
return headers
def send_cacert(self):
with open(self.cacert, 'rb') as f:
data = f.read()
self.wfile.write("%s %d %s\r\n" % (self.protocol_version, 200, 'OK'))
self.send_header('Content-Type', 'application/x-x509-ca-cert')
self.send_header('Content-Length', len(data))
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(data)
def run_proxy2(HandlerClass=ProxyRequestHandler, ServerClass=ThreadingHTTPServer, protocol="HTTP/1.1",
hostname='127.0.0.1', port='8080'):
server_address = (hostname, port)
HandlerClass.protocol_version = protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
httpd.serve_forever()
class Proxy(Module):
"""Run local proxy to pivot HTTP/HTTPS browsing through the target."""
def init(self):
self.register_info(
{
'author': [
'Emilio Pinna'
],
'license': 'GPLv3'
}
)
self.register_arguments([
{'name': '-lhost', 'default': '127.0.0.1'},
{'name': '-lport', 'default': 8080, 'type': int},
{'name': '-no-background', 'action': 'store_true', 'default': False, 'help': 'Run foreground'}
])
def run(self):
log.warning(messages.module_net_proxy.proxy_starting_s_i % (self.args['lhost'], self.args['lport']))
log.warning(messages.module_net_proxy.proxy_set_proxy)
initialize_certificates()
if self.args['no_background']:
log.warning(messages.module_net_proxy.proxy_started_foreground)
run_proxy2(
hostname=self.args['lhost'],
port=self.args['lport']
)
else:
log.warning(messages.module_net_proxy.proxy_started_background)
server_thread = threading.Thread(target=run_proxy2, kwargs={
'hostname': self.args['lhost'],
'port': self.args['lport']
})
server_thread.daemon = True
server_thread.start()
| 0
| 0
| 0
| 9,527
| 0
| 1,233
| 0
| 123
| 623
|
e5d6df24af9bac17d018ee4f885d2b0a7d316e52
| 4,965
|
py
|
Python
|
course_grader/views/api/submitted_graderoster.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 1
|
2017-01-29T09:52:06.000Z
|
2017-01-29T09:52:06.000Z
|
course_grader/views/api/submitted_graderoster.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 287
|
2017-03-09T00:17:20.000Z
|
2022-01-08T00:36:34.000Z
|
course_grader/views/api/submitted_graderoster.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from logging import getLogger
logger = getLogger(__name__)
| 35.978261
| 76
| 0.627593
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.cache import never_cache
from django.utils.decorators import method_decorator
from uw_saml.decorators import group_required
from course_grader.views.rest_dispatch import RESTDispatch
from course_grader.models import (
SubmittedGradeRoster as SubmittedGradeRosterModel)
from course_grader.dao.person import person_from_regid, person_display_name
from course_grader.dao.section import section_from_label
from course_grader.dao.term import term_from_param
from uw_sws_graderoster.models import GradeRoster
from lxml import etree
from logging import getLogger
import csv
logger = getLogger(__name__)
@method_decorator(group_required(settings.GRADEPAGE_SUPPORT_GROUP),
name='dispatch')
@method_decorator(never_cache, name='dispatch')
class SubmissionsByTerm(RESTDispatch):
def get(self, request, *args, **kwargs):
term_id = kwargs.get("term_id")
try:
selected_term = term_from_param(term_id)
except Exception as ex:
return self.error_response(400, "Invalid Term ID")
graderosters = SubmittedGradeRosterModel.objects.get_status_by_term(
selected_term)
response = self.csv_response(filename=term_id)
csv.register_dialect("unix_newline", lineterminator="\n")
writer = csv.writer(response, dialect="unix_newline")
writer.writerow([
"Section",
"Secondary section",
"Submitter",
"Submission datetime"
])
for graderoster in graderosters:
writer.writerow([
graderoster["section_id"],
graderoster["secondary_section_id"],
graderoster["submitted_by"],
graderoster["submitted_date"],
])
return response
@method_decorator(group_required(settings.GRADEPAGE_SUPPORT_GROUP),
name='dispatch')
@method_decorator(never_cache, name='dispatch')
class SubmittedGradeRoster(RESTDispatch):
def get(self, request, *args, **kwargs):
graderoster_id = kwargs.get("graderoster_id")
try:
model = SubmittedGradeRosterModel.objects.get(pk=graderoster_id)
section = section_from_label(model.section_id)
instructor = person_from_regid(model.instructor_id)
submitter = person_from_regid(model.submitted_by)
graderoster = GradeRoster.from_xhtml(
etree.fromstring(model.document.strip()),
section=section, instructor=instructor)
except SubmittedGradeRosterModel.DoesNotExist:
return self.error_response(404, "Not Found")
except Exception as ex:
logger.error(
"Download failed for graderoster model {}: {}".format(
graderoster_id, ex))
return self.error_response(500, "{}".format(ex))
if model.secondary_section_id is not None:
filename = model.secondary_section_id
else:
filename = model.section_id
response = self.csv_response(filename=filename)
csv.register_dialect("unix_newline", lineterminator="\n")
writer = csv.writer(response, dialect="unix_newline")
writer.writerow([
"Student number",
"Student name",
"Course",
"Section",
"Credits",
"Incomplete",
"Grade",
"Writing credit",
"Instructor name",
"Instructor netid",
"Submitter name",
"Submitter netid"
])
secondary_section = getattr(graderoster, "secondary_section", None)
for item in graderoster.items:
if (secondary_section is not None and
secondary_section.section_id != item.section_id):
continue
writer.writerow([
item.student_number,
"{first_name} {last_name}".format(
first_name=item.student_first_name,
last_name=item.student_surname),
"{curr_abbr} {course_num}".format(
curr_abbr=section.curriculum_abbr,
course_num=section.course_number),
item.section_id,
item.student_credits,
"I" if item.has_incomplete else "",
"X" if item.no_grade_now else str(item.grade),
"W" if item.has_writing_credit else "",
person_display_name(instructor),
instructor.uwnetid,
person_display_name(submitter),
submitter.uwnetid
])
logger.info("Graderoster downloaded: {}-{}".format(
model.section_id, model.instructor_id))
return response
| 0
| 4,129
| 0
| 0
| 0
| 0
| 0
| 354
| 333
|
72cac8fb30a2e307bde5d70d65c30b41c1787dec
| 1,235
|
py
|
Python
|
pipeline/boto_helpers.py
|
DMS-medical-informatics/beiwe-backend
|
55afe3a16e1c9b34501f3655288b5c19c663a083
|
[
"BSD-3-Clause"
] | null | null | null |
pipeline/boto_helpers.py
|
DMS-medical-informatics/beiwe-backend
|
55afe3a16e1c9b34501f3655288b5c19c663a083
|
[
"BSD-3-Clause"
] | null | null | null |
pipeline/boto_helpers.py
|
DMS-medical-informatics/beiwe-backend
|
55afe3a16e1c9b34501f3655288b5c19c663a083
|
[
"BSD-3-Clause"
] | null | null | null |
# This is all cribbed from the django branch's cluster_management/deployment_helpers folder
# TODO once the branches are merged, use that code and NOT this code
| 29.404762
| 99
| 0.728745
|
import json
import os.path
import subprocess
import boto3
# This is all cribbed from the django branch's cluster_management/deployment_helpers folder
# TODO once the branches are merged, use that code and NOT this code
def get_aws_object_names():
configs_folder = get_configs_folder()
with open(os.path.join(configs_folder, 'aws-object-names.json')) as fn:
return json.load(fn)
def get_boto_client(client_type):
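    # Settings are imported inside the function, presumably so that importing
    # this module does not require Django settings to be configured first.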
from config.settings import BEIWE_SERVER_AWS_ACCESS_KEY_ID, BEIWE_SERVER_AWS_SECRET_ACCESS_KEY
aws_object_names = get_aws_object_names()
return boto3.client(
client_type,
aws_access_key_id=BEIWE_SERVER_AWS_ACCESS_KEY_ID,
aws_secret_access_key=BEIWE_SERVER_AWS_SECRET_ACCESS_KEY,
region_name=aws_object_names['region_name'],
)
def get_pipeline_folder():
return os.path.abspath(__file__).rsplit('/', 1)[0]
def get_configs_folder():
return os.path.join(get_pipeline_folder(), 'configs')
def set_default_region():
aws_object_names = get_aws_object_names()
region_name = aws_object_names['region_name']
subprocess.check_call(['aws', 'configure', 'set', 'default.region', region_name])
| 0
| 0
| 0
| 0
| 0
| 871
| 0
| -30
| 219
|
e5e52448863aa3d2032ea0acf739006c4aeffca6
| 1,783
|
py
|
Python
|
WaltzControl/use_cases/tel_controller_boundarys.py
|
DaneSpaeth/WaltzControl_refactored
|
80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7
|
[
"MIT"
] | null | null | null |
WaltzControl/use_cases/tel_controller_boundarys.py
|
DaneSpaeth/WaltzControl_refactored
|
80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7
|
[
"MIT"
] | null | null | null |
WaltzControl/use_cases/tel_controller_boundarys.py
|
DaneSpaeth/WaltzControl_refactored
|
80aa3e28f1e0709bc7dd9472bc1d841e9b4da9e7
|
[
"MIT"
] | null | null | null |
"""Boundarys for Responses from TelescopeController (TC) and Requests to TC.
Data entry and exit point into use_cases layer.
"""
| 28.758065
| 80
| 0.574313
|
"""Boundarys for Responses from TelescopeController (TC) and Requests to TC.
Data entry and exit point into use_cases layer.
"""
class TelescopeControllerResponseBoundary:
"""Contains Responses from TelescopeController Device.
"""
def __init__(
self,
ra_response = None,
dec_response = None,
validate_response = None):
"""Store Responses of Telescope Controller as floats.
"""
self.ra_response = ra_response
self.dec_response = dec_response
self.validate_response = validate_response
def set_ra_response(self, ra):
"""Set ra response.
Input: ra as float in hours
"""
self.ra_response = ra
def set_dec_response(self, dec):
"""Set dec response.
Input: dec as float in degrees
"""
self.dec_response = dec
def set_validate_response(self, valid):
"""Set validate response.
        Input: valid as boolean (accounts for returns of the Telescope Controller
        to set_target etc.)
"""
self.validate_response = valid
def reset_responses(self):
"""Reset all responses to None.
"""
self.ra_response = None
self.dec_response = None
self.validate_response = None
def retrieve_position(self):
"""Returns ra and dec_responses.
"""
return (self.ra_response, self.dec_response)
class TelescopeControllerRequestBoundary:
"""Interface for commands to TelescopeController Device.
"""
def __init__(self):
pass
def request_position(self):
pass
| 0
| 0
| 0
| 1,601
| 0
| 0
| 0
| 0
| 50
|
2992c83e0ce52d8039899799790c8ae2a72523fc
| 3,505
|
py
|
Python
|
example_group_epochs.py
|
DraganaMana/mne_microstates
|
de3dc76e63e49fb4b61810bf737d4d5d11f5b2f0
|
[
"MIT"
] | 1
|
2021-06-02T09:14:30.000Z
|
2021-06-02T09:14:30.000Z
|
example_group_epochs.py
|
DraganaMana/mne_microstates
|
de3dc76e63e49fb4b61810bf737d4d5d11f5b2f0
|
[
"MIT"
] | null | null | null |
example_group_epochs.py
|
DraganaMana/mne_microstates
|
de3dc76e63e49fb4b61810bf737d4d5d11f5b2f0
|
[
"MIT"
] | 1
|
2020-06-15T13:59:07.000Z
|
2020-06-15T13:59:07.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 18:04:32 2020
@author: Dragana
"""
import mne
import microstates as mst
import numpy as np
HC_RS_path = 'C:/Users/.../Documents/RS_EEG/'
subj_folder = ['subj01', 'subj02', 'subj03', 'subj04', 'subj05']
# Parameteres setting up
chan_to_drop = ['E67', 'E73', 'E247', 'E251', 'E256', 'E243', 'E246', 'E250',
'E255', 'E82', 'E91', 'E254', 'E249', 'E245', 'E242', 'E253',
'E252', 'E248', 'E244', 'E241', 'E92', 'E102', 'E103', 'E111',
'E112', 'E120', 'E121', 'E133', 'E134', 'E145', 'E146', 'E156',
'E165', 'E166', 'E174', 'E175', 'E187', 'E188', 'E199', 'E200',
'E208', 'E209', 'E216', 'E217', 'E228', 'E229', 'E232', 'E233',
'E236', 'E237', 'E240', 'E218', 'E227', 'E231', 'E235', 'E239',
'E219', 'E225', 'E226', 'E230', 'E234', 'E238']
pax = len(subj_folder) # number of participants
n_states = 4
n_inits = 10
EGI256 = True
if EGI256 == True:
n_channels = 256 - len(chan_to_drop)
grouped_maps = np.array([], dtype=np.int64).reshape(0, n_channels)
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
# Segment the data in microstates
maps, segmentation, gev, gfp_peaks = mst.segment(data, n_states, n_inits)
grouped_maps = np.concatenate((grouped_maps, maps), axis=0)
# Transpose the maps from maps(n_maps, n_channels) to maps(n_channels, n_maps)
# and treat the n_maps as a sample in time.
grouped_maps_T = grouped_maps.transpose()
# Find the group maps using k-means clustering
group_maps, group_gev = mst.segment(grouped_maps_T, n_states, n_inits, use_peaks=False)
# Plot the maps
mst.viz.plot_maps(group_maps, epochs.info)
# Fitting the maps back to the original epoched data by subject
grouped_segment, all_p = [], []
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
n_epochs, n_chans, n_samples = data.shape
# Make the data 2D
data = np.hstack(data)
# Compute final microstate segmentations on the original data
activation = group_maps.dot(data)
segmentation = np.argmax(np.abs(activation), axis=0)
# Add all the per subject segmentations in one array
# (n_times, subjects)
grouped_segment.append(segmentation)
# Plot the segmentation per subject
sfreq = epochs.info['sfreq']
times = np.arange(0, len(data[1])/sfreq, 1/sfreq)
mst.viz.plot_segmentation(segmentation[:500], data[:, :500], times[:500])
# p_empirical
epoched_data = True
p_hat = mst.analysis.p_empirical(segmentation, n_epochs, n_samples, n_states,
epoched_data)
all_p.append(p_hat)
# p_empirical printing
print("\n\t Empirical symbol distribution (RTT) per subject:\n")
for i in range(pax):
print("\n Subject", i)
for j in range(n_states):
print("\n\t\t p", j, " = {0:.5f}".format(all_p[i][j]))
all_p = np.vstack(all_p)
all_p /= pax
all_p_sum = np.sum(all_p, axis=0)
print("\n\t Empirical symbol distribution (RTT) for all subjects:\n")
for i in range(n_states):
print("\n\t\t p", i, " = {0:.5f}".format(all_p_sum[i]))
| 36.510417
| 87
| 0.628531
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 18:04:32 2020
@author: Dragana
"""
import mne
import microstates as mst
import numpy as np
HC_RS_path = 'C:/Users/.../Documents/RS_EEG/'
subj_folder = ['subj01', 'subj02', 'subj03', 'subj04', 'subj05']
# Parameter setup
chan_to_drop = ['E67', 'E73', 'E247', 'E251', 'E256', 'E243', 'E246', 'E250',
'E255', 'E82', 'E91', 'E254', 'E249', 'E245', 'E242', 'E253',
'E252', 'E248', 'E244', 'E241', 'E92', 'E102', 'E103', 'E111',
'E112', 'E120', 'E121', 'E133', 'E134', 'E145', 'E146', 'E156',
'E165', 'E166', 'E174', 'E175', 'E187', 'E188', 'E199', 'E200',
'E208', 'E209', 'E216', 'E217', 'E228', 'E229', 'E232', 'E233',
'E236', 'E237', 'E240', 'E218', 'E227', 'E231', 'E235', 'E239',
'E219', 'E225', 'E226', 'E230', 'E234', 'E238']
pax = len(subj_folder) # number of participants
n_states = 4
n_inits = 10
EGI256 = True
if EGI256 == True:
n_channels = 256 - len(chan_to_drop)
grouped_maps = np.array([], dtype=np.int64).reshape(0, n_channels)
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
# Segment the data in microstates
maps, segmentation, gev, gfp_peaks = mst.segment(data, n_states, n_inits)
grouped_maps = np.concatenate((grouped_maps, maps), axis=0)
# Transpose the maps from maps(n_maps, n_channels) to maps(n_channels, n_maps)
# and treat the n_maps as a sample in time.
grouped_maps_T = grouped_maps.transpose()
# Find the group maps using k-means clustering
group_maps, group_gev = mst.segment(grouped_maps_T, n_states, n_inits, use_peaks=False)
# Plot the maps
mst.viz.plot_maps(group_maps, epochs.info)
# Fitting the maps back to the original epoched data by subject
grouped_segment, all_p = [], []
for i, f in enumerate(subj_folder):
fname = HC_RS_path + f + '/' + f +'_clean-epo.fif'
epochs = mne.read_epochs(fname, preload=True)
if EGI256 == True:
epochs.drop_channels(chan_to_drop)
data = epochs.get_data()
n_epochs, n_chans, n_samples = data.shape
# Make the data 2D
data = np.hstack(data)
# Compute final microstate segmentations on the original data
activation = group_maps.dot(data)
segmentation = np.argmax(np.abs(activation), axis=0)
# Add all the per subject segmentations in one array
# (n_times, subjects)
grouped_segment.append(segmentation)
# Plot the segmentation per subject
sfreq = epochs.info['sfreq']
times = np.arange(0, len(data[1])/sfreq, 1/sfreq)
mst.viz.plot_segmentation(segmentation[:500], data[:, :500], times[:500])
# p_empirical
epoched_data = True
p_hat = mst.analysis.p_empirical(segmentation, n_epochs, n_samples, n_states,
epoched_data)
all_p.append(p_hat)
# p_empirical printing
print("\n\t Empirical symbol distribution (RTT) per subject:\n")
for i in range(pax):
print("\n Subject", i)
for j in range(n_states):
print("\n\t\t p", j, " = {0:.5f}".format(all_p[i][j]))
all_p = np.vstack(all_p)
all_p /= pax
all_p_sum = np.sum(all_p, axis=0)
print("\n\t Empirical symbol distribution (RTT) for all subjects:\n")
for i in range(n_states):
print("\n\t\t p", i, " = {0:.5f}".format(all_p_sum[i]))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d0c633f50b464b8e08988638cf34cd0815c70e55
| 1,142
|
py
|
Python
|
chunkymonkey/lib/base.py
|
shopzilla/chunky-monkey
|
2556055e87849e2a873a950a5e52429e516c8304
|
[
"Apache-2.0"
] | 1
|
2016-10-24T15:16:26.000Z
|
2016-10-24T15:16:26.000Z
|
chunkymonkey/lib/base.py
|
shopzilla/chunky-monkey
|
2556055e87849e2a873a950a5e52429e516c8304
|
[
"Apache-2.0"
] | null | null | null |
chunkymonkey/lib/base.py
|
shopzilla/chunky-monkey
|
2556055e87849e2a873a950a5e52429e516c8304
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2011 Shopzilla.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The base Controller API
Provides the BaseController class for subclassing.
"""
| 36.83871
| 74
| 0.75394
|
#
# Copyright 2011 Shopzilla.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""The base Controller API
Provides the BaseController class for subclassing.
"""
from pylons.controllers import WSGIController
from pylons.templating import render_mako as render
class BaseController(WSGIController):
def __call__(self, environ, start_response):
"""Invoke the Controller"""
# WSGIController.__call__ dispatches to the Controller method
# the request is routed to. This routing information is
# available in environ['pylons.routes_dict']
return WSGIController.__call__(self, environ, start_response)
| 0
| 0
| 0
| 359
| 0
| 0
| 0
| 54
| 67
|
7d5cf31371d57d1d5e01bffec3ad52101c96988a
| 134,221
|
py
|
Python
|
carculator/inventory.py
|
SimonVoelker/carculator
|
e40d664c9b5612250cf9ad2c6fa2a199b0bf88c5
|
[
"BSD-3-Clause"
] | null | null | null |
carculator/inventory.py
|
SimonVoelker/carculator
|
e40d664c9b5612250cf9ad2c6fa2a199b0bf88c5
|
[
"BSD-3-Clause"
] | null | null | null |
carculator/inventory.py
|
SimonVoelker/carculator
|
e40d664c9b5612250cf9ad2c6fa2a199b0bf88c5
|
[
"BSD-3-Clause"
] | null | null | null |
from . import DATA_DIR
REMIND_FILES_DIR = DATA_DIR / "IAM"
| 36.793037
| 150
| 0.42471
|
from . import DATA_DIR
import sys
import glob
from .background_systems import BackgroundSystemModel
from .export import ExportInventory
from inspect import currentframe, getframeinfo
from pathlib import Path
from scipy import sparse
import csv
import itertools
import numexpr as ne
import numpy as np
import xarray as xr
REMIND_FILES_DIR = DATA_DIR / "IAM"
class InventoryCalculation:
"""
Build and solve the inventory for results characterization and inventory export
Vehicles to be analyzed can be filtered by passing a `scope` dictionary.
Some assumptions in the background system can also be adjusted by passing a `background_configuration` dictionary.
.. code-block:: python
scope = {
'powertrain':['BEV', 'FCEV', 'ICEV-p'],
}
bc = {'country':'CH', # considers electricity network losses for Switzerland
'custom electricity mix' : [[1,0,0,0,0,0,0,0,0,0], # in this case, 100% hydropower for the first year
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
], # in this case, 100% nuclear for the second year
'fuel blend':{
'cng':{ #specify fuel blend for compressed gas
'primary fuel':{
'type':'biogas',
'share':[0.9, 0.8, 0.7, 0.6] # shares per year. Must total 1 for each year.
},
'secondary fuel':{
'type':'syngas',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'diesel':{
'primary fuel':{
'type':'synthetic diesel',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'biodiesel - cooking oil',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'petrol':{
'primary fuel':{
'type':'petrol',
'share':[0.9, 0.8, 0.7, 0.6]
},
'secondary fuel':{
'type':'bioethanol - wheat straw',
'share': [0.1, 0.2, 0.3, 0.4]
}
},
'hydrogen':{
'primary fuel':{'type':'electrolysis', 'share':[1, 0, 0, 0]},
'secondary fuel':{'type':'smr - natural gas', 'share':[0, 1, 1, 1]}
}
},
'energy storage': {
'electric': {
'type':'NMC',
'origin': 'NO'
},
'hydrogen': {
'type':'carbon fiber'
}
}
}
InventoryCalculation(CarModel.array,
background_configuration=background_configuration,
scope=scope,
scenario="RCP26")
    The `custom electricity mix` key in the background_configuration dictionary defines an electricity mix to apply,
    under the form of one or several array(s), depending on the number of years to analyze.
    Each array should total 1, and its indices correspond to:
- [0]: hydro-power
- [1]: nuclear
- [2]: natural gas
- [3]: solar power
- [4]: wind power
- [5]: biomass
- [6]: coal
- [7]: oil
- [8]: geothermal
- [9]: waste incineration
If none is given, the electricity mix corresponding to the country specified in `country` will be selected.
If no country is specified, Europe applies.
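    For example, a one-year analysis with an illustrative custom mix (values below are hypothetical, not defaults):
    .. code-block:: python
        bc = {'custom electricity mix': [
                  [0.4, 0.3, 0, 0, 0.3, 0, 0, 0, 0, 0], # 40% hydro, 30% nuclear, 30% wind
             ]}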
    The `primary fuel` and `secondary fuel` keys each contain an array with the fuel's share for each year, to create a custom blend.
If none is provided, a blend provided by the Integrated Assessment model REMIND is used, which will depend on the REMIND energy scenario selected.
Here is a list of available fuel pathways:
Hydrogen technologies
--------------------
electrolysis
smr - natural gas
smr - natural gas with CCS
smr - biogas
smr - biogas with CCS
coal gasification
wood gasification
wood gasification with CCS
Natural gas technologies
------------------------
cng
biogas
syngas
Diesel technologies
-------------------
diesel
biodiesel - algae
biodiesel - cooking oil
synthetic diesel
Petrol technologies
-------------------
petrol
bioethanol - wheat straw
bioethanol - maize starch
bioethanol - sugarbeet
bioethanol - forest residues
synthetic gasoline
:ivar array: array from the CarModel class
:vartype array: CarModel.array
:ivar scope: dictionary that contains filters for narrowing the analysis
:ivar background_configuration: dictionary that contains choices for background system
    :ivar scenario: REMIND energy scenario to use ("SSP2-Base": business-as-usual,
        "SSP2-PkBudg1100": limits cumulative GHG emissions to 1,100 gigatons by 2100,
        "static": no forward-looking modification of the background inventories).
        "SSP2-Base" is selected by default.
"""
def __init__(
self, array, scope=None, background_configuration=None, scenario="SSP2-Base", method="recipe", method_type="midpoint"
):
if scope is None:
scope = {}
scope["size"] = array.coords["size"].values.tolist()
scope["powertrain"] = array.coords["powertrain"].values.tolist()
scope["year"] = array.coords["year"].values.tolist()
else:
scope["size"] = scope.get("size", array.coords["size"].values.tolist())
scope["powertrain"] = scope.get(
"powertrain", array.coords["powertrain"].values.tolist()
)
scope["year"] = scope.get("year", array.coords["year"].values.tolist())
self.scope = scope
self.scenario = scenario
array = array.sel(
powertrain=self.scope["powertrain"],
year=self.scope["year"],
size=self.scope["size"],
)
self.array = array.stack(desired=["size", "powertrain", "year"])
self.iterations = len(array.value.values)
self.number_of_cars = (
len(self.scope["size"])
* len(self.scope["powertrain"])
* len(self.scope["year"])
)
self.array_inputs = {
x: i for i, x in enumerate(list(self.array.parameter.values), 0)
}
self.array_powertrains = {
x: i for i, x in enumerate(list(self.array.powertrain.values), 0)
}
        if background_configuration is not None:
            self.background_configuration = background_configuration
        else:
            self.background_configuration = {}
if "energy storage" not in self.background_configuration:
self.background_configuration["energy storage"] = {
"electric": {"type": "NMC", "origin": "CN"}
}
else:
if "electric" not in self.background_configuration["energy storage"]:
self.background_configuration["energy storage"]["electric"] = {
"type": "NMC",
"origin": "CN",
}
else:
if (
"origin"
not in self.background_configuration["energy storage"]["electric"]
):
self.background_configuration["energy storage"]["electric"][
"origin"
] = "CN"
if (
"type"
not in self.background_configuration["energy storage"]["electric"]
):
self.background_configuration["energy storage"]["electric"][
"type"
] = "NMC"
self.inputs = self.get_dict_input()
self.bs = BackgroundSystemModel()
self.country = self.get_country_of_use()
self.add_additional_activities()
self.rev_inputs = self.get_rev_dict_input()
self.A = self.get_A_matrix()
self.mix = self.define_electricity_mix_for_fuel_prep()
self.fuel_blends = {}
self.define_fuel_blends()
self.set_actual_range()
self.index_cng = [self.inputs[i] for i in self.inputs if "ICEV-g" in i[0]]
self.index_combustion_wo_cng = [
self.inputs[i]
for i in self.inputs
if any(
ele in i[0]
for ele in ["ICEV-p", "HEV-p", "PHEV-p", "ICEV-d", "PHEV-d", "HEV-d"]
)
]
self.index_diesel = [self.inputs[i] for i in self.inputs if "ICEV-d" in i[0]]
self.index_all_petrol = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["ICEV-p", "HEV-p", "PHEV-p"])
]
self.index_petrol = [self.inputs[i] for i in self.inputs if "ICEV-p" in i[0]]
self.index_hybrid = [
self.inputs[i]
for i in self.inputs
if any(ele in i[0] for ele in ["HEV-p", "HEV-d"])
]
self.index_plugin_hybrid = [
self.inputs[i] for i in self.inputs if "PHEV" in i[0]
]
self.index_fuel_cell = [self.inputs[i] for i in self.inputs if "FCEV" in i[0]]
self.map_non_fuel_emissions = {
(
"Methane, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Methane direct emissions, suburban",
(
"Methane, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Methane direct emissions, rural",
(
"Lead",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Lead direct emissions, suburban",
(
"Ammonia",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Ammonia direct emissions, suburban",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "urban air close to ground"),
"kilogram",
): "NMVOC direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "urban air close to ground"),
"kilogram",
): "Hydrocarbons direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "low population density, long-term"),
"kilogram",
): "Dinitrogen oxide direct emissions, rural",
(
"Nitrogen oxides",
("air", "urban air close to ground"),
"kilogram",
): "Nitrogen oxides direct emissions, urban",
(
"Ammonia",
("air", "urban air close to ground"),
"kilogram",
): "Ammonia direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Particulate matters direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Carbon monoxide direct emissions, urban",
(
"Nitrogen oxides",
("air", "low population density, long-term"),
"kilogram",
): "Nitrogen oxides direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "non-urban air or from high stacks"),
"kilogram",
): "NMVOC direct emissions, suburban",
(
"Benzene",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Benzene direct emissions, suburban",
(
"Ammonia",
("air", "low population density, long-term"),
"kilogram",
): "Ammonia direct emissions, rural",
(
"Sulfur dioxide",
("air", "low population density, long-term"),
"kilogram",
): "Sulfur dioxide direct emissions, rural",
(
"NMVOC, non-methane volatile organic compounds, unspecified origin",
("air", "low population density, long-term"),
"kilogram",
): "NMVOC direct emissions, rural",
(
"Particulates, < 2.5 um",
("air", "urban air close to ground"),
"kilogram",
): "Particulate matters direct emissions, urban",
(
"Sulfur dioxide",
("air", "urban air close to ground"),
"kilogram",
): "Sulfur dioxide direct emissions, urban",
(
"Dinitrogen monoxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Dinitrogen oxide direct emissions, suburban",
(
"Carbon monoxide, fossil",
("air", "low population density, long-term"),
"kilogram",
): "Carbon monoxide direct emissions, rural",
(
"Methane, fossil",
("air", "urban air close to ground"),
"kilogram",
): "Methane direct emissions, urban",
(
"Carbon monoxide, fossil",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Carbon monoxide direct emissions, suburban",
(
"Lead",
("air", "urban air close to ground"),
"kilogram",
): "Lead direct emissions, urban",
(
"Particulates, < 2.5 um",
("air", "low population density, long-term"),
"kilogram",
): "Particulate matters direct emissions, rural",
(
"Sulfur dioxide",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Sulfur dioxide direct emissions, suburban",
(
"Benzene",
("air", "low population density, long-term"),
"kilogram",
): "Benzene direct emissions, rural",
(
"Nitrogen oxides",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Nitrogen oxides direct emissions, suburban",
(
"Lead",
("air", "low population density, long-term"),
"kilogram",
): "Lead direct emissions, rural",
(
"Benzene",
("air", "urban air close to ground"),
"kilogram",
): "Benzene direct emissions, urban",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "low population density, long-term"),
"kilogram",
): "Hydrocarbons direct emissions, rural",
(
"PAH, polycyclic aromatic hydrocarbons",
("air", "non-urban air or from high stacks"),
"kilogram",
): "Hydrocarbons direct emissions, suburban",
(
"Dinitrogen monoxide",
("air", "urban air close to ground"),
"kilogram",
): "Dinitrogen oxide direct emissions, urban",
}
self.index_emissions = [
self.inputs[i] for i in self.map_non_fuel_emissions.keys()
]
self.map_noise_emissions = {
(
"noise, octave 1, day time, urban",
("octave 1", "day time", "urban"),
"joule",
): "noise, octave 1, day time, urban",
(
"noise, octave 2, day time, urban",
("octave 2", "day time", "urban"),
"joule",
): "noise, octave 2, day time, urban",
(
"noise, octave 3, day time, urban",
("octave 3", "day time", "urban"),
"joule",
): "noise, octave 3, day time, urban",
(
"noise, octave 4, day time, urban",
("octave 4", "day time", "urban"),
"joule",
): "noise, octave 4, day time, urban",
(
"noise, octave 5, day time, urban",
("octave 5", "day time", "urban"),
"joule",
): "noise, octave 5, day time, urban",
(
"noise, octave 6, day time, urban",
("octave 6", "day time", "urban"),
"joule",
): "noise, octave 6, day time, urban",
(
"noise, octave 7, day time, urban",
("octave 7", "day time", "urban"),
"joule",
): "noise, octave 7, day time, urban",
(
"noise, octave 8, day time, urban",
("octave 8", "day time", "urban"),
"joule",
): "noise, octave 8, day time, urban",
(
"noise, octave 1, day time, suburban",
("octave 1", "day time", "suburban"),
"joule",
): "noise, octave 1, day time, suburban",
(
"noise, octave 2, day time, suburban",
("octave 2", "day time", "suburban"),
"joule",
): "noise, octave 2, day time, suburban",
(
"noise, octave 3, day time, suburban",
("octave 3", "day time", "suburban"),
"joule",
): "noise, octave 3, day time, suburban",
(
"noise, octave 4, day time, suburban",
("octave 4", "day time", "suburban"),
"joule",
): "noise, octave 4, day time, suburban",
(
"noise, octave 5, day time, suburban",
("octave 5", "day time", "suburban"),
"joule",
): "noise, octave 5, day time, suburban",
(
"noise, octave 6, day time, suburban",
("octave 6", "day time", "suburban"),
"joule",
): "noise, octave 6, day time, suburban",
(
"noise, octave 7, day time, suburban",
("octave 7", "day time", "suburban"),
"joule",
): "noise, octave 7, day time, suburban",
(
"noise, octave 8, day time, suburban",
("octave 8", "day time", "suburban"),
"joule",
): "noise, octave 8, day time, suburban",
(
"noise, octave 1, day time, rural",
("octave 1", "day time", "rural"),
"joule",
): "noise, octave 1, day time, rural",
(
"noise, octave 2, day time, rural",
("octave 2", "day time", "rural"),
"joule",
): "noise, octave 2, day time, rural",
(
"noise, octave 3, day time, rural",
("octave 3", "day time", "rural"),
"joule",
): "noise, octave 3, day time, rural",
(
"noise, octave 4, day time, rural",
("octave 4", "day time", "rural"),
"joule",
): "noise, octave 4, day time, rural",
(
"noise, octave 5, day time, rural",
("octave 5", "day time", "rural"),
"joule",
): "noise, octave 5, day time, rural",
(
"noise, octave 6, day time, rural",
("octave 6", "day time", "rural"),
"joule",
): "noise, octave 6, day time, rural",
(
"noise, octave 7, day time, rural",
("octave 7", "day time", "rural"),
"joule",
): "noise, octave 7, day time, rural",
(
"noise, octave 8, day time, rural",
("octave 8", "day time", "rural"),
"joule",
): "noise, octave 8, day time, rural",
}
self.elec_map = {
"Hydro": (
"electricity production, hydro, run-of-river",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Nuclear": (
"electricity production, nuclear, pressure water reactor",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Gas": (
"electricity production, natural gas, conventional power plant",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Solar": (
"electricity production, photovoltaic, 3kWp slanted-roof installation, multi-Si, panel, mounted",
"DE",
"kilowatt hour",
"electricity, low voltage",
),
"Wind": (
"electricity production, wind, 1-3MW turbine, onshore",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Biomass": (
"heat and power co-generation, wood chips, 6667 kW, state-of-the-art 2014",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Coal": (
"electricity production, hard coal",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Oil": (
"electricity production, oil",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Geo": (
"electricity production, deep geothermal",
"DE",
"kilowatt hour",
"electricity, high voltage",
),
"Waste": (
"treatment of municipal solid waste, incineration",
"DE",
"kilowatt hour",
"electricity, for reuse in municipal waste incineration only",
),
}
self.index_noise = [self.inputs[i] for i in self.map_noise_emissions.keys()]
self.list_cat, self.split_indices = self.get_split_indices()
self.method = method
if self.method == "recipe":
self.method_type = method_type
else:
self.method_type = "midpoint"
self.impact_categories = self.get_dict_impact_categories()
# Load the B matrix
self.B = self.get_B_matrix()
def __getitem__(self, key):
"""
        Make `class['foo']` automatically filter for the parameter 'foo'.
        Makes the model code much cleaner.
:param key: Parameter name
:type key: str
:return: `array` filtered after the parameter selected
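        Example (illustrative)::
            ttw_energy = ic["TtW energy"]  # array filtered on the "TtW energy" parameter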
"""
return self.temp_array.sel(parameter=key)
def get_results_table(self, split, sensitivity=False):
"""
Format an xarray.DataArray array to receive the results.
        :param split: "components" or "impact categories". Splitting by impact categories is only applicable when the "endpoint" level is applied.
:return: xarrray.DataArray
"""
if split == "components":
cat = [
"direct - exhaust",
"direct - non-exhaust",
"energy chain",
"maintenance",
"glider",
"EoL",
"powertrain",
"energy storage",
"road",
]
dict_impact_cat = list(self.impact_categories.keys())
        if not sensitivity:
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
len(cat),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
cat,
np.arange(0, self.iterations),
],
dims=[
"impact_category",
"size",
"powertrain",
"year",
"impact",
"value",
],
)
else:
            params = list(self.array.value.values)
response = xr.DataArray(
np.zeros(
(
self.B.shape[1],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(self.scope["year"]),
self.iterations,
)
),
coords=[
dict_impact_cat,
self.scope["size"],
self.scope["powertrain"],
self.scope["year"],
params,
],
dims=["impact_category", "size", "powertrain", "year", "parameter"],
)
return response
def get_split_indices(self):
"""
Return list of indices to split the results into categories.
:return: list of indices
:rtype: list
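        The expected layout of ``dict_split.csv`` is (illustrative row)::
            category;sub-category;search by;search for
            components;glider;name;market for glider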
"""
filename = "dict_split.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError("The dictionary of splits could not be found.")
with open(filepath) as f:
csv_list = [[val.strip() for val in r.split(";")] for r in f.readlines()]
(_, _, *header), *data = csv_list
csv_dict = {}
for row in data:
key, sub_key, *values = row
if key in csv_dict:
if sub_key in csv_dict[key]:
csv_dict[key][sub_key].append(
{"search by": values[0], "search for": values[1]}
)
else:
csv_dict[key][sub_key] = [
{"search by": values[0], "search for": values[1]}
]
else:
csv_dict[key] = {
sub_key: [{"search by": values[0], "search for": values[1]}]
}
flatten = itertools.chain.from_iterable
        d = {}
        l = []
        # Direct exhaust emissions not already covered by `index_emissions` or `index_noise`
        direct_exhaust_flows = [
            ("Carbon dioxide, fossil", ("air",), "kilogram"),
            ("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram"),
            ("Cadmium", ("air", "urban air close to ground"), "kilogram"),
            ("Copper", ("air", "urban air close to ground"), "kilogram"),
            ("Chromium", ("air", "urban air close to ground"), "kilogram"),
            ("Nickel", ("air", "urban air close to ground"), "kilogram"),
            ("Selenium", ("air", "urban air close to ground"), "kilogram"),
            ("Zinc", ("air", "urban air close to ground"), "kilogram"),
            ("Chromium VI", ("air", "urban air close to ground"), "kilogram"),
        ]
        d["direct - exhaust"] = [self.inputs[flow] for flow in direct_exhaust_flows]
        d["direct - exhaust"].extend(self.index_emissions)
        d["direct - exhaust"].extend(self.index_noise)
        l.append(d["direct - exhaust"])
        for cat in csv_dict["components"]:
            d[cat] = list(
                flatten(
                    [
                        self.get_index_of_flows([spec["search for"]], spec["search by"])
                        for spec in csv_dict["components"][cat]
                    ]
                )
            )
            l.append(d[cat])
list_ind = [d[x] for x in d]
maxLen = max(map(len, list_ind))
for row in list_ind:
while len(row) < maxLen:
row.extend([len(self.inputs) - 1])
return list(d.keys()), list_ind
def calculate_impacts(
self, split="components", sensitivity=False
):
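        """
        Solve the inventory and characterize the results for each vehicle in scope.
        A minimal usage sketch (assuming `ic` is an InventoryCalculation instance)::
            results = ic.calculate_impacts(split="components")
        :param split: "components" or "impact categories"
        :param sensitivity: if True, return results normalized against the "reference" parameter set
        :return: characterized results
        :rtype: xarray.DataArray
        """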
# Prepare an array to store the results
results = self.get_results_table(split, sensitivity=sensitivity)
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
# Fill in the A matrix with car parameters
self.set_inputs_in_A_matrix(self.array.values)
# Collect indices of activities contributing to the first level
arr = self.A[0, : -self.number_of_cars, -self.number_of_cars :].sum(axis=1)
ind = np.nonzero(arr)[0]
new_arr = np.float32(
np.zeros((self.A.shape[1], self.B.shape[1], len(self.scope["year"])))
)
f = np.float32(np.zeros((np.shape(self.A)[1])))
for y in self.scope["year"]:
if self.scenario != "static":
B = self.B.interp(year=y, kwargs={"fill_value": "extrapolate"}).values
else:
B = self.B[0].values
for a in ind:
f[:] = 0
f[a] = 1
X = np.float32(sparse.linalg.spsolve(self.A[0], f.T))
C = X * B
new_arr[a, :, self.scope["year"].index(y)] = C.sum(axis=1)
new_arr = new_arr.T.reshape(
len(self.scope["year"]), B.shape[0], 1, 1, self.A.shape[-1]
)
a = np.float32(self.A[:, :, -self.number_of_cars :].transpose(0, 2, 1))
arr = np.float32(ne.evaluate("a * new_arr * -1"))
arr = arr.transpose(1, 3, 0, 4, 2)
arr = arr[:, :, :, self.split_indices, :].sum(axis=4)
if not sensitivity:
for y in range(0, len(self.scope["year"])):
results[:, :, :, y, :, :] = arr[
:, y :: len(self.scope["year"]), y, :, :
].reshape(
(
B.shape[0],
len(self.scope["size"]),
len(self.scope["powertrain"]),
len(results.impact.values),
self.iterations,
)
)
else:
for y in range(0, len(self.scope["year"])):
results[:, :, :, y, :] = (
arr[:, y :: len(self.scope["year"]), y, :]
.sum(axis=2)
.reshape(
(
B.shape[0],
len(self.scope["size"]),
len(self.scope["powertrain"]),
self.iterations,
)
)
)
        if sensitivity:
            results /= results.sel(parameter="reference")
        return results.astype("float32")
def add_additional_activities(self):
# Add as many rows and columns as cars to consider
# Also add additional columns and rows for electricity markets
# for fuel preparation and energy battery production
maximum = max(self.inputs.values())
for y in self.scope["year"]:
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(
set(self.scope["powertrain"])
):
maximum += 1
self.inputs[
(
"fuel supply for gasoline vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(
set(self.scope["powertrain"])
):
maximum += 1
self.inputs[
(
"fuel supply for diesel vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"ICEV-g"}.intersection(set(self.scope["powertrain"])):
maximum += 1
self.inputs[
(
"fuel supply for gas vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"FCEV"}.intersection(set(self.scope["powertrain"])):
maximum += 1
self.inputs[
(
"fuel supply for hydrogen vehicles, " + str(y),
self.country,
"kilogram",
"fuel",
)
] = maximum
if {"BEV", "PHEV-p", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
maximum += 1
self.inputs[
(
"electricity supply for electric vehicles, " + str(y),
self.country,
"kilowatt hour",
"electricity, low voltage, for battery electric vehicles",
)
] = maximum
maximum += 1
self.inputs[
(
"electricity market for fuel preparation, " + str(y),
self.country,
"kilowatt hour",
"electricity, low voltage",
)
] = maximum
maximum += 1
self.inputs[
(
"electricity market for energy storage production, " + str(y),
self.background_configuration["energy storage"]["electric"][
"origin"
],
"kilowatt hour",
"electricity, low voltage, for energy storage production",
)
] = maximum
for s in self.scope["size"]:
for pt in self.scope["powertrain"]:
for y in self.scope["year"]:
maximum += 1
                    if y < 1993:
                        euro_class = "EURO-0"
                    elif 1993 <= y < 1997:
                        euro_class = "EURO-1"
                    elif 1997 <= y < 2001:
                        euro_class = "EURO-2"
                    elif 2001 <= y < 2006:
                        euro_class = "EURO-3"
                    elif 2006 <= y < 2011:
                        euro_class = "EURO-4"
                    elif 2011 <= y < 2015:
                        euro_class = "EURO-5"
                    else:
                        euro_class = "EURO-6"
name = (
"Passenger car, "
+ pt
+ ", "
+ s
+ ", "
+ str(y)
+ ", "
+ euro_class
)
self.inputs[
(
name,
self.background_configuration["country"],
"kilometer",
"transport, passenger car, " + euro_class,
)
] = maximum
def get_A_matrix(self):
"""
Load the A matrix. The A matrix contains exchanges of products (rows) between activities (columns).
:return: A matrix with three dimensions of shape (number of values, number of products, number of activities).
:rtype: numpy.ndarray
"""
filename = "A_matrix.csv"
filepath = (
Path(getframeinfo(currentframe()).filename)
.resolve()
.parent.joinpath("data/" + filename)
)
if not filepath.is_file():
raise FileNotFoundError("The technology matrix could not be found.")
initial_A = np.genfromtxt(filepath, delimiter=";")
new_A = np.identity(len(self.inputs))
new_A[0 : np.shape(initial_A)[0], 0 : np.shape(initial_A)[0]] = initial_A
# Resize the matrix to fit the number of iterations in `array`
new_A = np.resize(new_A, (self.array.shape[1], new_A.shape[0], new_A.shape[1]))
return new_A
def get_B_matrix(self):
"""
Load the B matrix. The B matrix contains impact assessment figures for a give impact assessment method,
per unit of activity. Its length column-wise equals the length of the A matrix row-wise.
Its length row-wise equals the number of impact assessment methods.
:param method: only "recipe" and "ilcd" available at the moment.
:param level: only "midpoint" available at the moment.
:return: an array with impact values per unit of activity for each method.
:rtype: numpy.ndarray
"""
if self.method == "recipe":
if self.method_type == "midpoint":
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*recipe_midpoint*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 21, len(self.inputs)))
else:
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*recipe_endpoint*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 3, len(self.inputs)))
else:
list_file_names = glob.glob(
str(REMIND_FILES_DIR) + "/*ilcd*{}*.csv".format(self.scenario)
)
B = np.zeros((len(list_file_names), 19, len(self.inputs)))
for f in list_file_names:
initial_B = np.genfromtxt(f, delimiter=";")
new_B = np.zeros((np.shape(initial_B)[0], len(self.inputs),))
new_B[0 : np.shape(initial_B)[0], 0 : np.shape(initial_B)[1]] = initial_B
B[list_file_names.index(f), :, :] = new_B
list_impact_categories = list(self.impact_categories.keys())
if self.scenario != "static":
response = xr.DataArray(
B,
coords=[
[2005, 2010, 2020, 2030, 2040, 2050],
list_impact_categories,
list(self.inputs.keys()),
],
dims=["year", "category", "activity"],
)
else:
response = xr.DataArray(
B,
coords=[
[2020],
list_impact_categories,
list(self.inputs.keys()),
],
dims=["year", "category", "activity"],
)
return response
def get_dict_input(self):
"""
Load a dictionary with tuple ("name of activity", "location", "unit", "reference product") as key, row/column
indices as values.
:return: dictionary with `label:index` pairs.
:rtype: dict
"""
filename = "dict_inputs_A_matrix.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError(
"The dictionary of activity labels could not be found."
)
csv_dict = {}
count = 0
with open(filepath) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
if "(" in row[1]:
new_str = row[1].replace("(", "")
new_str = new_str.replace(")", "")
new_str = [s.strip() for s in new_str.split(",") if s]
t = ()
for s in new_str:
if "low population" in s:
s = "low population density, long-term"
t += (s,)
break
else:
t += (s.replace("'", ""),)
csv_dict[(row[0], t, row[2])] = count
else:
csv_dict[(row[0], row[1], row[2], row[3])] = count
count += 1
return csv_dict
def get_dict_impact_categories(self):
"""
Load a dictionary with available impact assessment methods as keys, and assessment level and categories as values.
..code-block:: python
{'recipe': {'midpoint': ['freshwater ecotoxicity',
'human toxicity',
'marine ecotoxicity',
'terrestrial ecotoxicity',
'metal depletion',
'agricultural land occupation',
'climate change',
'fossil depletion',
'freshwater eutrophication',
'ionising radiation',
'marine eutrophication',
'natural land transformation',
'ozone depletion',
'particulate matter formation',
'photochemical oxidant formation',
'terrestrial acidification',
'urban land occupation',
'water depletion',
'human noise',
'primary energy, non-renewable',
'primary energy, renewable']
}
}
:return: dictionary
:rtype: dict
"""
filename = "dict_impact_categories.csv"
filepath = DATA_DIR / filename
if not filepath.is_file():
raise FileNotFoundError(
"The dictionary of impact categories could not be found."
)
csv_dict = {}
with open(filepath) as f:
input_dict = csv.reader(f, delimiter=";")
for row in input_dict:
if row[0] == self.method and row[3] == self.method_type:
csv_dict[row[2]] = {'method':row[1],
'category':row[2],
'type':row[3],
'abbreviation':row[4],
'unit':row[5],
'source':row[6]}
return csv_dict
def get_rev_dict_input(self):
"""
Reverse the self.inputs dictionary.
:return: reversed dictionary
:rtype: dict
"""
return {v: k for k, v in self.inputs.items()}
def get_index_vehicle_from_array(
self, items_to_look_for, items_to_look_for_also=None, method="or"
):
"""
        Return list of row/column indices of self.array of labels that contain the string defined in `items_to_look_for`.
        :param items_to_look_for: string or list of strings to search for
        :param items_to_look_for_also: optional second set of items, used when `method` is "and"
        :param method: "or" (default) or "and"
        :return: list
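        Example (illustrative)::
            ic.get_index_vehicle_from_array("BEV")
            ic.get_index_vehicle_from_array(["ICEV-p"], 2020, method="and")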
"""
        if not isinstance(items_to_look_for, list):
            items_to_look_for = [items_to_look_for]
        if items_to_look_for_also is not None:
            if not isinstance(items_to_look_for_also, list):
                items_to_look_for_also = [items_to_look_for_also]
list_vehicles = self.array.desired.values.tolist()
if method == "or":
return [
list_vehicles.index(c)
for c in list_vehicles
if set(items_to_look_for).intersection(c)
]
if method == "and":
return [
list_vehicles.index(c)
for c in list_vehicles
if set(items_to_look_for).intersection(c)
and set(items_to_look_for_also).intersection(c)
]
def get_index_of_flows(self, items_to_look_for, search_by="name"):
"""
Return list of row/column indices of self.A of labels that contain the string defined in `items_to_look_for`.
:param items_to_look_for: string
:param search_by: "name" or "compartment" (for elementary flows)
:return: list of row/column indices
:rtype: list
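        Example (illustrative)::
            ic.get_index_of_flows(["glider"])  # flows whose name contains "glider"
            ic.get_index_of_flows(["air"], search_by="compartment")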
"""
if search_by == "name":
return [
int(self.inputs[c])
for c in self.inputs
if all(ele in c[0].lower() for ele in items_to_look_for)
]
if search_by == "compartment":
return [
int(self.inputs[c])
for c in self.inputs
if all(ele in c[1] for ele in items_to_look_for)
]
def export_lci(
self,
presamples=True,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
db_name="carculator db",
):
"""
Export the inventory as a dictionary. Also return a list of arrays that contain pre-sampled random values if
:meth:`stochastic` of :class:`CarModel` class has been called.
:param presamples: boolean.
:param ecoinvent_compatibility: bool. If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.
:param ecoinvent_version: str. "3.5", "3.6" or "uvek"
:return: inventory, and optionally, list of arrays containing pre-sampled values.
:rtype: list
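        Usage sketch::
            lci, arrays = ic.export_lci(presamples=True)
            lci = ic.export_lci(presamples=False)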
"""
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
        if presamples:
lci, array = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci(presamples, ecoinvent_compatibility, ecoinvent_version)
return (lci, array)
else:
lci = ExportInventory(self.A, self.rev_inputs, db_name=db_name).write_lci(
presamples, ecoinvent_compatibility, ecoinvent_version
)
return lci
def export_lci_to_bw(
self,
presamples=True,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
db_name="carculator db",
):
"""
Export the inventory as a `brightway2` bw2io.importers.base_lci.LCIImporter object
with the inventory in the `data` attribute.
.. code-block:: python
# get the inventory
i, _ = ic.export_lci_to_bw()
# import it in a Brightway2 project
i.match_database('ecoinvent 3.6 cutoff', fields=('name', 'unit', 'location', 'reference product'))
i.match_database("biosphere3", fields=('name', 'unit', 'categories'))
i.match_database(fields=('name', 'unit', 'location', 'reference product'))
i.match_database(fields=('name', 'unit', 'categories'))
# Create an additional biosphere database for the few flows that do not
# exist in "biosphere3"
i.create_new_biosphere("additional_biosphere", relink=True)
# Check if all exchanges link
i.statistics()
# Register the database
i.write_database()
        :return: LCIImporter object that can be directly registered in a `brightway2` project.
:rtype: bw2io.importers.base_lci.LCIImporter
"""
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
        if presamples:
lci, array = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci_to_bw(presamples, ecoinvent_compatibility, ecoinvent_version)
return (lci, array)
else:
lci = ExportInventory(
self.A, self.rev_inputs, db_name=db_name
).write_lci_to_bw(presamples, ecoinvent_compatibility, ecoinvent_version)
return lci
def export_lci_to_excel(
self,
directory=None,
ecoinvent_compatibility=True,
ecoinvent_version="3.6",
software_compatibility="brightway2",
filename=None,
):
"""
Export the inventory as an Excel file (if the destination software is Brightway2) or a CSV file (if the destination software is Simapro) file.
Also return the file path where the file is stored.
:param directory: directory where to save the file.
:type directory: str
:param ecoinvent_compatibility: If True, compatible with ecoinvent. If False, compatible with REMIND-ecoinvent.
:param ecoinvent_version: "3.6", "3.5" or "uvek"
:param software_compatibility: "brightway2" or "simapro"
:return: file path where the file is stored.
:rtype: str
"""
if software_compatibility not in ("brightway2", "simapro"):
raise NameError(
"The destination software argument is not valid. Choose between 'brightway2' or 'simapro'."
)
# Simapro inventory only for ecoinvent 3.5 or UVEK
if software_compatibility == "simapro":
if ecoinvent_version == "3.6":
print(
"Simapro-compatible inventory export is only available for ecoinvent 3.5 or UVEK."
)
return
ecoinvent_compatibility = True
ecoinvent_version = "3.5"
# Create electricity and fuel market datasets
self.create_electricity_market_for_fuel_prep()
# Create electricity market dataset for battery production
self.create_electricity_market_for_battery_production()
self.set_inputs_in_A_matrix(self.array.values)
fp = ExportInventory(
self.A, self.rev_inputs, db_name=filename or "carculator db"
).write_lci_to_excel(
directory,
ecoinvent_compatibility,
ecoinvent_version,
software_compatibility,
filename,
)
return fp
def get_country_of_use(self):
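        """
        Return the country/region of use, defaulting to "RER" (Europe)
        when none is given in `background_configuration`.
        """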
if "country" not in self.background_configuration:
self.background_configuration["country"] = "RER"
return self.background_configuration["country"]
def define_electricity_mix_for_fuel_prep(self):
"""
This function defines a fuel mix based either on user-defined mix, or on default mixes for a given country.
The mix is calculated as the average mix, weighted by the distribution of annually driven kilometers.
:return:
"""
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
if "custom electricity mix" in self.background_configuration:
# If a special electricity mix is specified, we use it
mix = self.background_configuration["custom electricity mix"]
else:
use_year = [
int(i)
for i in (
self.array.values[
self.array_inputs["lifetime kilometers"],
:,
self.get_index_vehicle_from_array(
[
"BEV",
"FCEV",
"PHEV-p",
"PHEV-d",
"ICEV-p",
"ICEV-d",
"HEV-p",
"HEV-d",
"ICEV-g",
]
),
]
/ self.array.values[
self.array_inputs["kilometers per year"],
:,
self.get_index_vehicle_from_array(
[
"BEV",
"FCEV",
"PHEV-p",
"PHEV-d",
"ICEV-p",
"ICEV-d",
"HEV-p",
"HEV-d",
"ICEV-g",
]
),
]
)
.mean(axis=1)
.reshape(-1, len(self.scope["year"]))
.mean(axis=0)
]
mix = [
self.bs.electricity_mix.sel(
country=self.country,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(
year=np.arange(y, y + use_year[self.scope["year"].index(y)]),
kwargs={"fill_value": "extrapolate"},
)
.mean(axis=0)
.values
if y + use_year[self.scope["year"].index(y)] <= 2050
else self.bs.electricity_mix.sel(
country=self.country,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(year=np.arange(y, 2051), kwargs={"fill_value": "extrapolate"})
.mean(axis=0)
.values
for y in self.scope["year"]
]
return mix
def define_renewable_rate_in_mix(self):
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
for y in self.scope["year"]:
if self.scenario == "static":
if self.method == "recipe":
if self.method_type == "midpoint":
co2_intensity_tech = (
self.B.sel(
category="climate change",
year=2020,
activity=list(self.elec_map.values()),
).values
* losses_to_low
) * 1000
else:
co2_intensity_tech = 0
else:
co2_intensity_tech = (
self.B.sel(
category="climate change - climate change fossil",
year=2020,
activity=list(self.elec_map.values()),
).values
* losses_to_low
) * 1000
else:
if self.method == "recipe":
if self.method_type == "midpoint":
co2_intensity_tech = (
self.B.sel(
category="climate change", activity=list(self.elec_map.values())
)
.interp(year=y, kwargs={"fill_value": "extrapolate"})
.values
* losses_to_low
) * 1000
else:
co2_intensity_tech = 0
else:
co2_intensity_tech = (
self.B.sel(
category="climate change - climate change fossil", activity=list(self.elec_map.values())
)
.interp(year=y, kwargs={"fill_value": "extrapolate"})
.values
* losses_to_low
) * 1000
sum_renew = (
self.mix[self.scope["year"].index(y)][0]
+ self.mix[self.scope["year"].index(y)][3]
+ self.mix[self.scope["year"].index(y)][4]
+ self.mix[self.scope["year"].index(y)][5]
+ self.mix[self.scope["year"].index(y)][8]
)
return sum_renew, co2_intensity_tech
def create_electricity_market_for_fuel_prep(self):
""" This function fills the electricity market that supplies battery charging operations
and hydrogen production through electrolysis.
"""
try:
losses_to_low = float(self.bs.losses[self.country]["LV"])
except KeyError:
# If losses for the country are not found, assume EU average
losses_to_low = float(self.bs.losses["RER"]["LV"])
# Fill the electricity markets for battery charging and hydrogen production
for y in self.scope["year"]:
m = np.array(self.mix[self.scope["year"].index(y)]).reshape(-1, 10, 1)
# Add electricity technology shares
self.A[
np.ix_(
np.arange(self.iterations),
[self.inputs[self.elec_map[t]] for t in self.elec_map],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
)
] = (m * -1 * losses_to_low)
# Add transmission network for high and medium voltage
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, high voltage",
"CH",
"kilometer",
"transmission network, electricity, high voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (6.58e-9 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, medium voltage",
"CH",
"kilometer",
"transmission network, electricity, medium voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (1.86e-8 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, long-distance",
"UCTE",
"kilometer",
"transmission network, long-distance",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (3.17e-10 * -1 * losses_to_low)
# Add distribution network, low voltage
self.A[
:,
self.inputs[
(
"distribution network construction, electricity, low voltage",
"CH",
"kilometer",
"distribution network, electricity, low voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = (8.74e-8 * -1 * losses_to_low)
# Add supply of sulfur hexafluoride for transformers
self.A[
:,
self.inputs[
(
"market for sulfur hexafluoride, liquid",
"RER",
"kilogram",
"sulfur hexafluoride, liquid",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
# Add SF_6 leakage
self.A[
:,
self.inputs[("Sulfur hexafluoride", ("air",), "kilogram")],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for fuel preparation" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
def create_electricity_market_for_battery_production(self):
"""
This function fills in the column in `self.A` concerned with the electricity mix used for manufacturing battery cells
:return:
"""
battery_tech = self.background_configuration["energy storage"]["electric"][
"type"
]
battery_origin = self.background_configuration["energy storage"]["electric"][
"origin"
]
try:
losses_to_low = float(self.bs.losses[battery_origin]["LV"])
except KeyError:
losses_to_low = float(self.bs.losses["CN"]["LV"])
mix_battery_manufacturing = (
self.bs.electricity_mix.sel(
country=battery_origin,
variable=[
"Hydro",
"Nuclear",
"Gas",
"Solar",
"Wind",
"Biomass",
"Coal",
"Oil",
"Geothermal",
"Waste",
],
)
.interp(year=self.scope["year"], kwargs={"fill_value": "extrapolate"})
.values
)
# Fill the electricity markets for battery production
for y in self.scope["year"]:
m = np.array(
mix_battery_manufacturing[self.scope["year"].index(y)]
).reshape(-1, 10, 1)
self.A[
np.ix_(
np.arange(self.iterations),
[self.inputs[self.elec_map[t]] for t in self.elec_map],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
)
] = (m * losses_to_low * -1)
# Add transmission network for high and medium voltage
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, high voltage",
"CH",
"kilometer",
"transmission network, electricity, high voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (6.58e-9 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, electricity, medium voltage",
"CH",
"kilometer",
"transmission network, electricity, medium voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (1.86e-8 * -1 * losses_to_low)
self.A[
:,
self.inputs[
(
"transmission network construction, long-distance",
"UCTE",
"kilometer",
"transmission network, long-distance",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (3.17e-10 * -1 * losses_to_low)
# Add distribution network, low voltage
self.A[
:,
self.inputs[
(
"distribution network construction, electricity, low voltage",
"CH",
"kilometer",
"distribution network, electricity, low voltage",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = (8.74e-8 * -1 * losses_to_low)
# Add supply of sulfur hexafluoride for transformers
self.A[
:,
self.inputs[
(
"market for sulfur hexafluoride, liquid",
"RER",
"kilogram",
"sulfur hexafluoride, liquid",
)
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
# Add SF_6 leakage
self.A[
:,
self.inputs[("Sulfur hexafluoride", ("air",), "kilogram")],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
] = ((5.4e-8 + 2.99e-9) * -1 * losses_to_low)
def get_share_biofuel(self):
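        """
        Return the share of biofuel in the fuel blend for each year in scope,
        interpolated from REMIND projections for the region of use.
        :return: biofuel share per year
        :rtype: numpy.ndarray
        """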
region = self.bs.region_map[self.country]["RegionCode"]
scenario = self.scenario if self.scenario != "static" else "SSP2-Base"
share_biofuel = (
self.bs.biofuel.sel(
region=region, value=0, fuel_type="Biomass fuel", scenario=scenario,
)
.interp(year=self.scope["year"], kwargs={"fill_value": "extrapolate"})
.values
)
return share_biofuel
def find_fuel_shares(self, fuel_type):
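        """
        Return the primary and secondary fuel types and their shares for a given
        fuel type, taken from `background_configuration` when provided, otherwise
        from default blends (with the biofuel share from REMIND projections).
        :param fuel_type: "petrol", "diesel", "cng" or "hydrogen"
        :return: (primary fuel, secondary fuel, primary share, secondary share)
        :rtype: tuple
        """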
default_fuels = {
"petrol": {"primary": "petrol", "secondary": "bioethanol - wheat straw"},
"diesel": {"primary": "diesel", "secondary": "biodiesel - cooking oil"},
"cng": {"primary": "cng", "secondary": "biogas"},
"hydrogen": {"primary": "electrolysis", "secondary": "smr - natural gas"},
}
if "fuel blend" in self.background_configuration:
if fuel_type in self.background_configuration["fuel blend"]:
primary = self.background_configuration["fuel blend"][fuel_type][
"primary fuel"
]["type"]
try:
secondary = self.background_configuration["fuel blend"][fuel_type][
"secondary fuel"
]["type"]
                except KeyError:
secondary = default_fuels[fuel_type]["secondary"]
primary_share = self.background_configuration["fuel blend"][fuel_type][
"primary fuel"
]["share"]
secondary_share = 1 - np.array(primary_share)
else:
primary = default_fuels[fuel_type]["primary"]
secondary = default_fuels[fuel_type]["secondary"]
secondary_share = self.get_share_biofuel()
primary_share = 1 - np.array(secondary_share)
else:
primary = default_fuels[fuel_type]["primary"]
secondary = default_fuels[fuel_type]["secondary"]
secondary_share = self.get_share_biofuel()
primary_share = 1 - np.array(secondary_share)
return (primary, secondary, primary_share, secondary_share)
def set_actual_range(self):
"""
Set the actual range considering the blend.
        Liquid biofuels and synthetic fuels typically have a lower calorific value,
        hence the need to recalculate the vehicle range.
Modifies parameter `range` of `array` in place
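        The relation used below is, per powertrain and year (assuming `TtW energy`
        is expressed in kJ/km and lower heating values in MJ/kg)::
            range [km] = fuel mass [kg] * (share_1 * LHV_1 + share_2 * LHV_2) * 1000
                         / TtW energy [kJ/km]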
"""
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
share_primary = self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
lhv_primary = self.fuel_blends["petrol"]["primary"]["lhv"]
share_secondary = self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
lhv_secondary = self.fuel_blends["petrol"]["secondary"]["lhv"]
index = self.get_index_vehicle_from_array(
["ICEV-p", "HEV-p", "PHEV-p"], y, method="and"
)
self.array.values[self.array_inputs["range"], :, index] = (
(
(
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_primary
* lhv_primary
)
+ (
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_secondary
* lhv_secondary
)
)
* 1000
/ self.array.values[self.array_inputs["TtW energy"], :, index]
)
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
for y in self.scope["year"]:
share_primary = self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
lhv_primary = self.fuel_blends["diesel"]["primary"]["lhv"]
share_secondary = self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
lhv_secondary = self.fuel_blends["diesel"]["secondary"]["lhv"]
index = self.get_index_vehicle_from_array(
["ICEV-d", "PHEV-d", "HEV-d"], y, method="and"
)
self.array.values[self.array_inputs["range"], :, index] = (
(
(
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_primary
* lhv_primary
)
+ (
self.array.values[self.array_inputs["fuel mass"], :, index]
* share_secondary
* lhv_secondary
)
)
* 1000
/ self.array.values[self.array_inputs["TtW energy"], :, index]
)
def define_fuel_blends(self):
"""
This function defines fuel blends from what is passed in `background_configuration`.
It populates a dictionary `self.fuel_blends` that contains the respective shares, lower heating values
and CO2 emission factors of the fuels used.
:return:
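        The resulting structure looks like (illustrative values)::
            self.fuel_blends["petrol"] = {
                "primary": {"type": "petrol", "share": [0.9, ...], "lhv": 42.4, "CO2": 3.18},
                "secondary": {"type": "bioethanol - wheat straw", "share": [0.1, ...], "lhv": 26.8, "CO2": 1.91},
            }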
"""
fuels_lhv = {
"petrol": 42.4,
"bioethanol - wheat straw": 26.8,
"bioethanol - maize starch": 26.8,
"bioethanol - sugarbeet": 26.8,
"bioethanol - forest residues": 26.8,
"synthetic gasoline": 42.4,
"diesel": 42.8,
"biodiesel - cooking oil": 31.7,
"biodiesel - algae": 31.7,
"synthetic diesel": 43.3,
"cng": 55.5,
"biogas": 55.5,
"syngas": 55.5
}
fuels_CO2 = {
"petrol": 3.18,
"bioethanol - wheat straw": 1.91,
"bioethanol - maize starch": 1.91,
"bioethanol - sugarbeet": 1.91,
"bioethanol - forest residues": 1.91,
"synthetic gasoline": 3.18,
"diesel": 3.14,
"biodiesel - cooking oil": 2.85,
"biodiesel - algae": 2.85,
"synthetic diesel": 3.16,
"cng": 2.65,
"biogas": 2.65,
"syngas": 2.65
}
if {"ICEV-p", "HEV-p", "PHEV-p"}.intersection(set(self.scope["powertrain"])):
fuel_type = "petrol"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"ICEV-d", "HEV-d", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
fuel_type = "diesel"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {
"type": primary,
"share": primary_share,
"lhv": fuels_lhv[primary],
"CO2": fuels_CO2[primary],
},
"secondary": {
"type": secondary,
"share": secondary_share,
"lhv": fuels_lhv[secondary],
"CO2": fuels_CO2[secondary],
},
}
if {"ICEV-g"}.intersection(set(self.scope["powertrain"])):
fuel_type = "cng"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
            self.fuel_blends[fuel_type] = {
                "primary": {
                    "type": primary,
                    "share": primary_share,
                    "lhv": fuels_lhv[primary],
                    "CO2": fuels_CO2[primary],
                },
                "secondary": {
                    "type": secondary,
                    "share": secondary_share,
                    "lhv": fuels_lhv[secondary],
                    "CO2": fuels_CO2[secondary],
                },
            }
if {"FCEV"}.intersection(set(self.scope["powertrain"])):
fuel_type = "hydrogen"
primary, secondary, primary_share, secondary_share = self.find_fuel_shares(
fuel_type
)
self.create_fuel_markets(
fuel_type, primary, secondary, primary_share, secondary_share
)
self.fuel_blends[fuel_type] = {
"primary": {"type": primary, "share": primary_share},
"secondary": {"type": secondary, "share": secondary_share},
}
if {"BEV", "PHEV-p", "PHEV-d"}.intersection(set(self.scope["powertrain"])):
fuel_type = "electricity"
self.create_fuel_markets(fuel_type)
def create_fuel_markets(
self,
fuel_type,
primary=None,
secondary=None,
primary_share=None,
secondary_share=None,
):
"""
This function creates markets for fuel, considering a given blend, a given fuel type and a given year.
        It also adds a separate electricity input in case hydrogen from electrolysis is needed somewhere in the fuel supply chain.
:return:
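        The `additional electricity` figures in the fuel dictionary below are kWh of
        electricity per kg of fuel; for synthetic fuels they appear to scale the
        58 kWh/kg electrolysis requirement by the hydrogen input per kg of fuel
        (e.g. 58 * 0.2875 for synthetic diesel). This reading is an interpretation,
        not a documented constant.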
"""
d_fuels = {
"electrolysis": {
"name": (
"Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from electrolysis, at H2 fuelling station",
),
"additional electricity": 58,
},
"smr - natural gas": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR NG w/o CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - natural gas with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR NG w CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - biogas": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR of biogas, at H2 fuelling station",
),
"additional electricity": 0,
},
"smr - biogas with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from SMR of biogas with CCS, at H2 fuelling station",
),
"additional electricity": 0,
},
"coal gasification": {
"name": (
"Hydrogen, gaseous, 700 bar, from coal gasification, at H2 fuelling station",
"RER",
"kilogram",
"Hydrogen, gaseous, 700 bar, from coal gasification, at H2 fuelling station",
),
"additional electricity": 0,
},
"wood gasification": {
"name": (
"Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass, at H2 fuelling station",
"CH",
"kilogram",
"Hydrogen, gaseous, 700 bar",
),
"additional electricity": 0,
},
"wood gasification with CCS": {
"name": (
"Hydrogen, gaseous, 700 bar, from dual fluidised bed gasification of woody biomass with CCS, at H2 fuelling station",
"CH",
"kilogram",
"Hydrogen, gaseous, 700 bar",
),
"additional electricity": 0,
},
"cng": {
"name": (
"market for natural gas, from high pressure network (1-5 bar), at service station",
"GLO",
"kilogram",
"natural gas, from high pressure network (1-5 bar), at service station",
),
"additional electricity": 0,
},
"biogas": {
"name": (
"biogas upgrading - sewage sludge - amine scrubbing - best",
"CH",
"kilogram",
"biogas upgrading - sewage sludge - amine scrubbing - best",
),
"additional electricity": 0,
},
"syngas": {
"name": (
"Methane production, synthetic, from electrochemical methanation",
"RER",
"kilogram",
"Methane, synthetic",
),
"additional electricity": 58 * 0.50779661,
},
"diesel": {
"name": (
"market for diesel",
"Europe without Switzerland",
"kilogram",
"diesel",
),
"additional electricity": 0,
},
"biodiesel - algae": {
"name": (
"Biodiesel from algae",
"RER",
"kilogram",
"Biodiesel from algae",
),
"additional electricity": 0,
},
"biodiesel - cooking oil": {
"name": (
"Biodiesel from cooking oil",
"RER",
"kilogram",
"Biodiesel from cooking oil",
),
"additional electricity": 0,
},
"synthetic diesel": {
"name": (
"Diesel production, synthetic, Fischer Tropsch process",
"RER",
"kilogram",
"Diesel, synthetic",
),
"additional electricity": 58 * 0.2875,
},
"petrol": {
"name": (
"market for petrol, low-sulfur",
"Europe without Switzerland",
"kilogram",
"petrol, low-sulfur",
),
"additional electricity": 0,
},
"bioethanol - wheat straw": {
"name": (
"Ethanol from wheat straw pellets",
"RER",
"kilogram",
"Ethanol from wheat straw pellets",
),
"additional electricity": 0,
},
"bioethanol - forest residues": {
"name": (
"Ethanol from forest residues",
"RER",
"kilogram",
"Ethanol from forest residues",
),
"additional electricity": 0,
},
"bioethanol - sugarbeet": {
"name": (
"Ethanol from sugarbeet",
"RER",
"kilogram",
"Ethanol from sugarbeet",
),
"additional electricity": 0,
},
"bioethanol - maize starch": {
"name": (
"Ethanol from maize starch",
"RER",
"kilogram",
"Ethanol from maize starch",
),
"additional electricity": 0,
},
"synthetic gasoline": {
"name": (
"Gasoline production, synthetic, from methanol",
"RER",
"kilogram",
"Gasoline, synthetic",
),
"additional electricity": 58 * 0.328,
},
}
d_dataset_name = {
"petrol": "fuel supply for gasoline vehicles, ",
"diesel": "fuel supply for diesel vehicles, ",
"cng": "fuel supply for gas vehicles, ",
"hydrogen": "fuel supply for hydrogen vehicles, ",
"electricity": "electricity supply for electric vehicles, ",
}
if fuel_type != "electricity":
for y in self.scope["year"]:
dataset_name = d_dataset_name[fuel_type] + str(y)
fuel_market_index = [
self.inputs[i] for i in self.inputs if i[0] == dataset_name
][0]
primary_fuel_activity_index = self.inputs[d_fuels[primary]["name"]]
secondary_fuel_activity_index = self.inputs[d_fuels[secondary]["name"]]
self.A[:, primary_fuel_activity_index, fuel_market_index] = (
-1 * primary_share[self.scope["year"].index(y)]
)
self.A[:, secondary_fuel_activity_index, fuel_market_index] = (
-1 * secondary_share[self.scope["year"].index(y)]
)
additional_electricity = (
d_fuels[primary]["additional electricity"]
* primary_share[self.scope["year"].index(y)]
) + (
d_fuels[secondary]["additional electricity"]
* secondary_share[self.scope["year"].index(y)]
)
if additional_electricity > 0:
electricity_mix_index = [
self.inputs[i]
for i in self.inputs
if i[0] == "electricity market for fuel preparation, " + str(y)
][0]
self.A[:, electricity_mix_index, fuel_market_index] = (
-1 * additional_electricity
)
else:
for y in self.scope["year"]:
dataset_name = d_dataset_name[fuel_type] + str(y)
electricity_market_index = [
self.inputs[i] for i in self.inputs if i[0] == dataset_name
][0]
electricity_mix_index = [
self.inputs[i]
for i in self.inputs
if i[0] == "electricity market for fuel preparation, " + str(y)
][0]
self.A[:, electricity_mix_index, electricity_market_index] = -1
def set_inputs_in_A_matrix(self, array):
"""
Fill-in the A matrix. Does not return anything. Modifies in place.
Shape of the A matrix (values, products, activities).
:param array: :attr:`array` from :class:`CarModel` class
"""
# Glider
self.A[
:,
self.inputs[
(
"market for glider, passenger car",
"GLO",
"kilogram",
"glider, passenger car",
)
],
-self.number_of_cars :,
] = (
(array[self.array_inputs["glider base mass"], :])
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
("Glider lightweighting", "GLO", "kilogram", "Glider lightweighting")
],
-self.number_of_cars :,
] = (
(
array[self.array_inputs["lightweighting"], :]
* array[self.array_inputs["glider base mass"], :]
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"maintenance, passenger car",
"RER",
"unit",
"passenger car maintenance",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["curb mass"], :] / 1240 / 150000 * -1)
# Glider EoL
self.A[
:,
self.inputs[
(
"market for manual dismantling of used electric passenger car",
"GLO",
"unit",
"manual dismantling of used electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["curb mass"], :]
* (1 - array[self.array_inputs["combustion power share"], :])
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for manual dismantling of used passenger car with internal combustion engine",
"GLO",
"unit",
"manual dismantling of used passenger car with internal combustion engine",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["curb mass"], :]
* array[self.array_inputs["combustion power share"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Powertrain components
self.A[
:,
self.inputs[
(
"market for charger, electric passenger car",
"GLO",
"kilogram",
"charger, electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["charger mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for converter, for electric passenger car",
"GLO",
"kilogram",
"converter, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["converter mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for electric motor, electric passenger car",
"GLO",
"kilogram",
"electric motor, electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["electric engine mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for inverter, for electric passenger car",
"GLO",
"kilogram",
"inverter, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["inverter mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[
(
"market for power distribution unit, for electric passenger car",
"GLO",
"kilogram",
"power distribution unit, for electric passenger car",
)
],
-self.number_of_cars :,
] = (
array[self.array_inputs["power distribution unit mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
l_elec_pt = [
"charger mass",
"converter mass",
"inverter mass",
"power distribution unit mass",
"electric engine mass",
"fuel cell stack mass",
"fuel cell ancillary BoP mass",
"fuel cell essential BoP mass",
"battery cell mass",
"battery BoP mass",
]
self.A[
:,
self.inputs[
(
"market for used powertrain from electric passenger car, manual dismantling",
"GLO",
"kilogram",
"used powertrain from electric passenger car, manual dismantling",
)
],
-self.number_of_cars :,
] = (
array[[self.array_inputs[l] for l in l_elec_pt], :].sum(axis=0)
/ array[self.array_inputs["lifetime kilometers"], :]
)
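        # Editorial note (assumption): unlike the surrounding inputs, this exchange
        # carries a positive sign, presumably because the dismantling dataset follows
        # the waste-treatment convention (negative reference flow), so a positive
        # coefficient still expresses a demand for the dismantling service.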
self.A[
:,
self.inputs[
(
"market for internal combustion engine, passenger car",
"GLO",
"kilogram",
"internal combustion engine, for passenger car",
)
],
-self.number_of_cars :,
] = (
(
array[
[
self.array_inputs[l]
for l in ["combustion engine mass", "powertrain mass"]
],
:,
].sum(axis=0)
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Ancillary BoP", "GLO", "kilogram", "Ancillary BoP")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell ancillary BoP mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Essential BoP", "GLO", "kilogram", "Essential BoP")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell essential BoP mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
self.A[
:,
self.inputs[("Stack", "GLO", "kilowatt", "Stack")],
-self.number_of_cars :,
] = (
array[self.array_inputs["fuel cell stack mass"], :]
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
# Start of printout
print(
"****************** IMPORTANT BACKGROUND PARAMETERS ******************",
end="\n * ",
)
# Energy storage
print(
"The country of use is " + self.country, end="\n * ",
)
battery_tech = self.background_configuration["energy storage"]["electric"][
"type"
]
battery_origin = self.background_configuration["energy storage"]["electric"][
"origin"
]
print(
"Power and energy batteries produced in "
+ battery_origin
+ " using "
+ battery_tech
+ " chemistry.",
end="\n * ",
)
# Use the NMC inventory of Schmidt et al. 2019
self.A[
:,
self.inputs[("Battery BoP", "GLO", "kilogram", "Battery BoP")],
-self.number_of_cars :,
] = (
(
array[self.array_inputs["battery BoP mass"], :]
* (1 + array[self.array_inputs["battery lifetime replacements"], :])
)
/ array[self.array_inputs["lifetime kilometers"], :]
* -1
)
battery_cell_label = (
"Battery cell, " + battery_tech,
"GLO",
"kilogram",
"Battery cell",
)
        # Battery cells are replaced according to the battery lifetime replacement
        # count, mirroring the Battery BoP block above.
        self.A[:, self.inputs[battery_cell_label], -self.number_of_cars :,] = (
            (
                array[self.array_inputs["battery cell mass"], :]
                * (1 + array[self.array_inputs["battery lifetime replacements"], :])
            )
            / array[self.array_inputs["lifetime kilometers"], :]
            * -1
        )
# Set an input of electricity, given the country of manufacture
self.A[
:,
self.inputs[
(
"market group for electricity, medium voltage",
"World",
"kilowatt hour",
"electricity, medium voltage",
)
],
self.inputs[battery_cell_label],
] = 0
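        # Editorial note: the battery cell dataset's generic world electricity input
        # is zeroed here and re-introduced in the loop below as year-specific
        # "electricity market for energy storage production" inputs, scaled by the
        # cell's production electricity demand (intent inferred from that loop).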
for y in self.scope["year"]:
index = self.get_index_vehicle_from_array(y)
self.A[
np.ix_(
np.arange(self.iterations),
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity market for energy storage production" in i[0]
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0]
],
)
] = (
array[
self.array_inputs["battery cell production electricity"], :, index
].T
* self.A[
:,
self.inputs[battery_cell_label],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0]
],
]
).reshape(
self.iterations, 1, -1
)
index_A = [
self.inputs[c]
for c in self.inputs
if any(
ele in c[0]
for ele in ["ICEV-d", "ICEV-p", "HEV-p", "PHEV-p", "PHEV-d", "HEV-d"]
)
]
index = self.get_index_vehicle_from_array(
["ICEV-d", "ICEV-p", "HEV-p", "PHEV-p", "PHEV-d", "HEV-d"]
)
self.A[
:,
self.inputs[
(
"polyethylene production, high density, granulate",
"RER",
"kilogram",
"polyethylene, high density, granulate",
)
],
index_A,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
index = self.get_index_vehicle_from_array("ICEV-g")
self.A[
:,
self.inputs[
(
"glass fibre reinforced plastic production, polyamide, injection moulded",
"RER",
"kilogram",
"glass fibre reinforced plastic, polyamide, injection moulded",
)
],
self.index_cng,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
if "hydrogen" in self.background_configuration["energy storage"]:
# If a customization dict is passed
hydro_tank_technology = self.background_configuration["energy storage"][
"hydrogen"
]["type"]
else:
hydro_tank_technology = "carbon fiber"
dict_tank_map = {
"carbon fiber": (
"Fuel tank, compressed hydrogen gas, 700bar",
"GLO",
"kilogram",
"Fuel tank, compressed hydrogen gas, 700bar",
),
"hdpe": (
"Fuel tank, compressed hydrogen gas, 700bar, with HDPE liner",
"RER",
"kilogram",
"Hydrogen tank",
),
"aluminium": (
"Fuel tank, compressed hydrogen gas, 700bar, with aluminium liner",
"RER",
"kilogram",
"Hydrogen tank",
),
}
index = self.get_index_vehicle_from_array("FCEV")
self.A[
:, self.inputs[dict_tank_map[hydro_tank_technology]], self.index_fuel_cell,
] = (
array[self.array_inputs["fuel tank mass"], :, index]
/ array[self.array_inputs["lifetime kilometers"], :, index]
* -1
).T
for y in self.scope["year"]:
sum_renew, co2_intensity_tech = self.define_renewable_rate_in_mix()
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ ", % of renewable: "
+ str(np.round(sum_renew * 100, 0))
+ "%"
+ ", GHG intensity per kWh: "
+ str(
int(
np.sum(
co2_intensity_tech * self.mix[self.scope["year"].index(y)]
)
)
)
+ " g. CO2-eq.",
end=end_str,
)
if any(
True for x in ["BEV", "PHEV-p", "PHEV-d"] if x in self.scope["powertrain"]
):
for y in self.scope["year"]:
index = self.get_index_vehicle_from_array(
["BEV", "PHEV-p", "PHEV-d"], y, method="and"
)
self.A[
np.ix_(
np.arange(self.iterations),
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "electricity supply for electric vehicles" in i[0]
],
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(
True for x in ["BEV", "PHEV-p", "PHEV-d"] if x in i[0]
)
],
)
] = (
array[self.array_inputs["electricity consumption"], :, index] * -1
).T.reshape(
self.iterations, 1, -1
)
if "FCEV" in self.scope["powertrain"]:
index = self.get_index_vehicle_from_array("FCEV")
print(
"{} is completed by {}.".format(
self.fuel_blends["hydrogen"]["primary"]["type"],
self.fuel_blends["hydrogen"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["hydrogen"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
# Primary fuel share
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0] and "FCEV" in i[0]
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "fuel supply for hydrogen vehicles" in i[0]
],
ind_A,
] = (
array[self.array_inputs["fuel mass"], :, ind_array]
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if "ICEV-g" in self.scope["powertrain"]:
index = self.get_index_vehicle_from_array("ICEV-g")
print(
"{} is completed by {}.".format(
self.fuel_blends["cng"]["primary"]["type"],
self.fuel_blends["cng"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
# Primary fuel share
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "Passenger" in i[0] and "ICEV-g" in i[0]
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "fuel supply for gas vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Fuel-based emissions from CNG, CO2
                # The share and CO2 emission factor of CNG are retrieved, if used
share_fossil = 0
CO2_fossil = 0
if self.fuel_blends["cng"]["primary"]["type"] == "cng":
share_fossil += self.fuel_blends["cng"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
if self.fuel_blends["cng"]["secondary"]["type"] == "cng":
share_fossil += self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
                # Fuel-based CO2 emission from alternative gas
# The share of non-fossil gas in the blend is retrieved
# As well as the CO2 emission factor of the fuel
share_non_fossil = 0
CO2_non_fossil = 0
if self.fuel_blends["cng"]["primary"]["type"] != "cng":
share_non_fossil += self.fuel_blends["cng"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["cng"]["primary"]["CO2"]
if self.fuel_blends["cng"]["secondary"]["type"] != "cng":
share_non_fossil += self.fuel_blends["cng"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["cng"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if [i for i in self.scope["powertrain"] if i in ["ICEV-d", "PHEV-d", "HEV-d"]]:
index = self.get_index_vehicle_from_array(["ICEV-d", "PHEV-d", "HEV-d"])
print(
"{} is completed by {}.".format(
self.fuel_blends["diesel"]["primary"]["type"],
self.fuel_blends["diesel"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(x in i[0] for x in ["ICEV-d", "PHEV-d", "HEV-d"])
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
# Fuel supply
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0] and "fuel supply for diesel vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_fossil = 0
CO2_fossil = 0
                # Fuel-based CO2 emission from conventional diesel
if self.fuel_blends["diesel"]["primary"]["type"] == "diesel":
share_fossil += self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["diesel"]["primary"]["CO2"]
if self.fuel_blends["diesel"]["secondary"]["type"] == "diesel":
share_fossil += self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["diesel"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_non_fossil = 0
CO2_non_fossil = 0
                # Fuel-based CO2 emission from alternative diesel
# The share of non-fossil fuel in the blend is retrieved
# As well as the CO2 emission factor of the fuel
if self.fuel_blends["diesel"]["primary"]["type"] != "diesel":
share_non_fossil += self.fuel_blends["diesel"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["diesel"]["primary"]["CO2"]
if self.fuel_blends["diesel"]["secondary"]["type"] != "diesel":
share_non_fossil += self.fuel_blends["diesel"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["diesel"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Heavy metals emissions from conventional diesel
# Emission factors from Spielmann et al., Transport Services Data v.2 (2007)
# Cadmium, 0.01 mg/kg diesel
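                # Unit-conversion sketch (editorial): 0.01 mg/kg = 0.01e-6 kg/kg = 1e-8,
                # so each factor below is the mg/kg value converted to kg of metal per kg
                # of fossil fuel, then divided by range to give kg emitted per km.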
self.A[
:,
self.inputs[
("Cadmium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Copper, 1.7 mg/kg diesel
self.A[
:,
self.inputs[
("Copper", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.7e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium, 0.05 mg/kg diesel
self.A[
:,
self.inputs[
("Chromium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 5.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Nickel, 0.07 mg/kg diesel
self.A[
:,
self.inputs[
("Nickel", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 7.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Selenium, 0.01 mg/kg diesel
self.A[
:,
self.inputs[
("Selenium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Zinc, 1 mg/kg diesel
self.A[
:,
self.inputs[
("Zinc", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium VI, 0.0001 mg/kg diesel
self.A[
:,
self.inputs[
(
"Chromium VI",
("air", "urban air close to ground"),
"kilogram",
)
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-10
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
if [i for i in self.scope["powertrain"] if i in ["ICEV-p", "HEV-p", "PHEV-p"]]:
index = self.get_index_vehicle_from_array(["ICEV-p", "HEV-p", "PHEV-p"])
print(
"{} is completed by {}.".format(
self.fuel_blends["petrol"]["primary"]["type"],
self.fuel_blends["petrol"]["secondary"]["type"],
),
end="\n \t * ",
)
for y in self.scope["year"]:
if self.scope["year"].index(y) + 1 == len(self.scope["year"]):
end_str = "\n * "
else:
end_str = "\n \t * "
print(
"in "
+ str(y)
+ " _________________________________________ "
+ str(
np.round(
self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
* 100,
0,
)
)
+ "%",
end=end_str,
)
for y in self.scope["year"]:
ind_A = [
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "Passenger" in i[0]
and any(x in i[0] for x in ["ICEV-p", "HEV-p", "PHEV-p"])
]
ind_array = [
x for x in self.get_index_vehicle_from_array(y) if x in index
]
# Fuel supply
self.A[
:,
[
self.inputs[i]
for i in self.inputs
if str(y) in i[0]
and "fuel supply for gasoline vehicles" in i[0]
],
ind_A,
] = (
(array[self.array_inputs["fuel mass"], :, ind_array])
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_fossil = 0
CO2_fossil = 0
# Fuel-based CO2 emission from conventional petrol
if self.fuel_blends["petrol"]["primary"]["type"] == "petrol":
share_fossil += self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["petrol"]["primary"]["CO2"]
if self.fuel_blends["petrol"]["secondary"]["type"] == "petrol":
share_fossil += self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_fossil = self.fuel_blends["petrol"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, fossil", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil * CO2_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
share_non_fossil = 0
CO2_non_fossil = 0
# Fuel-based CO2 emission from alternative petrol
# The share of non-fossil fuel in the blend is retrieved
# As well as the CO2 emission factor of the fuel
if self.fuel_blends["petrol"]["primary"]["type"] != "petrol":
share_non_fossil += self.fuel_blends["petrol"]["primary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["petrol"]["primary"]["CO2"]
if self.fuel_blends["petrol"]["secondary"]["type"] != "petrol":
share_non_fossil += self.fuel_blends["petrol"]["secondary"]["share"][
self.scope["year"].index(y)
]
CO2_non_fossil = self.fuel_blends["petrol"]["secondary"]["CO2"]
self.A[
:,
self.inputs[("Carbon dioxide, from soil or biomass stock", ("air",), "kilogram")],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_non_fossil * CO2_non_fossil)
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Heavy metals emissions from conventional petrol
# Cadmium, 0.01 mg/kg gasoline
self.A[
:,
self.inputs[
("Cadmium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Copper, 1.7 mg/kg gasoline
self.A[
:,
self.inputs[
("Copper", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.7e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium, 0.05 mg/kg gasoline
self.A[
:,
self.inputs[
("Chromium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 5.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Nickel, 0.07 mg/kg gasoline
self.A[
:,
self.inputs[
("Nickel", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 7.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Selenium, 0.01 mg/kg gasoline
self.A[
:,
self.inputs[
("Selenium", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-8
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Zinc, 1 mg/kg gasoline
self.A[
:,
self.inputs[
("Zinc", ("air", "urban air close to ground"), "kilogram")
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-6
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Chromium VI, 0.0001 mg/kg gasoline
self.A[
:,
self.inputs[
(
"Chromium VI",
("air", "urban air close to ground"),
"kilogram",
)
],
ind_A,
] = (
(
(array[self.array_inputs["fuel mass"], :, ind_array] * share_fossil)
* 1.0e-10
)
/ array[self.array_inputs["range"], :, ind_array]
* -1
).T
# Non-exhaust emissions
self.A[
:,
self.inputs[
(
"market for road wear emissions, passenger car",
"GLO",
"kilogram",
"road wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 1e-08)
self.A[
:,
self.inputs[
(
"market for tyre wear emissions, passenger car",
"GLO",
"kilogram",
"tyre wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 6e-08)
self.A[
:,
self.inputs[
(
"market for brake wear emissions, passenger car",
"GLO",
"kilogram",
"brake wear emissions, passenger car",
)
],
-self.number_of_cars :,
] = (array[self.array_inputs["driving mass"], :] * 5e-09)
# Infrastructure
self.A[
:,
self.inputs[("market for road", "GLO", "meter-year", "road")],
-self.number_of_cars :,
] = (5.37e-7 * array[self.array_inputs["driving mass"], :] * -1)
# Infrastructure maintenance
self.A[
:,
self.inputs[
("market for road maintenance", "RER", "meter-year", "road maintenance")
],
-self.number_of_cars :,
] = (1.29e-3 * -1)
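        # Editorial note (assumption on units): 5.37e-7 metre-year of road per kg of
        # driving mass per km, and a mass-independent 1.29e-3 metre-year of road
        # maintenance per km, both entered as negative (consumed) inputs.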
# Exhaust emissions
# Non-fuel based emissions
self.A[:, self.index_emissions, -self.number_of_cars :] = (
array[
[
self.array_inputs[self.map_non_fuel_emissions[self.rev_inputs[x]]]
for x in self.index_emissions
]
]
* -1
).transpose([1, 0, 2])
# Noise emissions
self.A[:, self.index_noise, -self.number_of_cars :] = (
array[
[
self.array_inputs[self.map_noise_emissions[self.rev_inputs[x]]]
for x in self.index_noise
]
]
* -1
).transpose([1, 0, 2])
print("*********************************************************************")
| 0
| 0
| 0
| 133,839
| 0
| 0
| 0
| 34
| 287
|
544162d5b108b9011d584715752e360c5e3a3bf6
| 4,955
|
py
|
Python
|
project/project/settings.py
|
gtrafimenkov/example-django-kubernetes
|
ddcf1d0b06152ca3615230be53cf9a5f837c09d9
|
[
"BSD-3-Clause"
] | null | null | null |
project/project/settings.py
|
gtrafimenkov/example-django-kubernetes
|
ddcf1d0b06152ca3615230be53cf9a5f837c09d9
|
[
"BSD-3-Clause"
] | 6
|
2021-02-02T22:59:52.000Z
|
2021-06-10T20:35:55.000Z
|
project/project/settings.py
|
gtrafimenkov/example-django-kubernetes
|
ddcf1d0b06152ca3615230be53cf9a5f837c09d9
|
[
"BSD-3-Clause"
] | null | null | null |
# Django settings for gtd project.
import os
from django.contrib.messages import constants as message_constants
DEBUG = get_debug_settings()
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "America/Los_Angeles"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "todo:lists"
LOGOUT_REDIRECT_URL = "home"
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SECURITY_WARN_AFTER = 5
SESSION_SECURITY_EXPIRE_AFTER = 12
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "project.wsgi.application"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.flatpages",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
"todo",
"django_extensions",
)
# Static files and uploads
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "project", "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Uploaded media
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Without this, uploaded files > 4MB end up with perm 0600, unreadable by web server process
FILE_UPLOAD_PERMISSIONS = 0o644
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "project", "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Your stuff: custom template context processors go here
]
},
}
]
# Override CSS class for the ERROR tag level to match Bootstrap class name
MESSAGE_TAGS = {message_constants.ERROR: "danger"}
####################################################################
# Environment specific settings
####################################################################
SECRET_KEY = os.environ.get('SECRET_KEY', 'lksdf98wrhkjs88dsf8-324ksdm')
# DEBUG = True
ALLOWED_HOSTS = ["*"]
DATABASES = get_db_settings()
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# TODO-specific settings
TODO_STAFF_ONLY = False
TODO_DEFAULT_LIST_SLUG = 'tickets'
TODO_DEFAULT_ASSIGNEE = None
TODO_PUBLIC_SUBMIT_REDIRECT = '/'
####################################################################
#
####################################################################
| 32.81457
| 101
| 0.638143
|
# Django settings for gtd project.
import os
from django.contrib.messages import constants as message_constants
def get_debug_settings():
return os.environ.get("DJANGO_DEBUG", "").lower() in ["true", "1", "yes", "y"]
DEBUG = get_debug_settings()
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "America/Los_Angeles"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "todo:lists"
LOGOUT_REDIRECT_URL = "home"
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_SECURITY_WARN_AFTER = 5
SESSION_SECURITY_EXPIRE_AFTER = 12
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "project.wsgi.application"
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.flatpages",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
"todo",
"django_extensions",
)
# Static files and uploads
STATIC_URL = "/static/"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "project", "static")]
STATIC_ROOT = os.path.join(BASE_DIR, "static")
# Uploaded media
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
# Without this, uploaded files > 4MB end up with perm 0600, unreadable by web server process
FILE_UPLOAD_PERMISSIONS = 0o644
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "project", "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
# Your stuff: custom template context processors go here
]
},
}
]
# Override CSS class for the ERROR tag level to match Bootstrap class name
MESSAGE_TAGS = {message_constants.ERROR: "danger"}
####################################################################
# Environment specific settings
####################################################################
SECRET_KEY = os.environ.get('SECRET_KEY', 'lksdf98wrhkjs88dsf8-324ksdm')
# DEBUG = True
ALLOWED_HOSTS = ["*"]
def get_db_settings():
CPHTEST_ENVIRONMENT = os.environ.get('CPHTEST_ENVIRONMENT', 'local')
if CPHTEST_ENVIRONMENT == "local":
return {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if CPHTEST_ENVIRONMENT == "k8s":
return {
'default': {
'ENGINE': os.environ.get('DB_ENGINE', 'django.db.backends.postgresql'),
'NAME': os.environ.get('DB_NAME', 'cphtest'),
'USER': os.environ.get('DB_USER', 'cphtestuser'),
'PASSWORD': os.environ.get('DB_PASSWORD', 'django'),
'HOST': os.environ.get('DB_HOST', 'p1-postgresql.default.svc.cluster.local'),
'PORT': os.environ.get('DB_PORT', ''),
},
}
return {}
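# Hypothetical usage sketch (values are illustrative, not from the project):
#   CPHTEST_ENVIRONMENT=k8s DB_HOST=db.example.local DB_PASSWORD=... \
#       python manage.py migrate
# selects the PostgreSQL settings above; left unset, it falls back to local SQLite.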
DATABASES = get_db_settings()
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# TODO-specific settings
TODO_STAFF_ONLY = False
TODO_DEFAULT_LIST_SLUG = 'tickets'
TODO_DEFAULT_ASSIGNEE = None
TODO_PUBLIC_SUBMIT_REDIRECT = '/'
####################################################################
#
####################################################################
| 0
| 0
| 0
| 0
| 0
| 975
| 0
| 0
| 46
|
1f010d3368e8fe21a4c6b38d8a3a7ce2c8c7822f
| 964
|
py
|
Python
|
resources/search.py
|
DanielNery/api-list-mscs-genius
|
9febbbb4211ca86a210803981cb5968077d7de72
|
[
"MIT"
] | 1
|
2021-11-20T22:09:23.000Z
|
2021-11-20T22:09:23.000Z
|
resources/search.py
|
DanielNery/api-list-mscs-genius
|
9febbbb4211ca86a210803981cb5968077d7de72
|
[
"MIT"
] | null | null | null |
resources/search.py
|
DanielNery/api-list-mscs-genius
|
9febbbb4211ca86a210803981cb5968077d7de72
|
[
"MIT"
] | null | null | null |
import os
HEADER = {
'User-Agent': 'CompuServe Classic/1.22',
'Accept': 'application/json',
'Host': os.getenv("HOST"),
'Authorization': f'Bearer {os.getenv("ACESS_TOKEN")}'
}
| 25.368421
| 85
| 0.607884
|
from flask_restful import Resource
import requests
import json
import os
import redis
HEADER = {
'User-Agent': 'CompuServe Classic/1.22',
'Accept': 'application/json',
'Host': os.getenv("HOST"),
'Authorization': f'Bearer {os.getenv("ACESS_TOKEN")}'
}
class Search(Resource):
"""Recurso responsável por retornar lista de artistas, para o usuário escolher"""
def get(self, artist_name):
"""
        Returns a list of artists.
"""
querystring = {"q": artist_name}
url = f"https://{os.getenv('HOST')}/search"
try:
response = requests.get(url=url, headers=HEADER, params=querystring)
            if response.status_code != 200:  # status_code is an integer, not a string
return json.loads(response.text), response.status_code
except Exception as e:
print(e)
return {"message": "Internal server error."}, 500
data = json.loads(response.text)
return data, 200
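# Hypothetical wiring sketch (not in the original file): the resource would be
# registered on a flask_restful.Api instance, e.g.
#   api.add_resource(Search, "/search/<string:artist_name>")
# so that GET /search/<name> proxies the Genius /search endpoint.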
| 4
| 0
| 0
| 670
| 0
| 0
| 0
| -12
| 112
|
7baca6067411cc1ecfa07468272839cd744972f8
| 441
|
py
|
Python
|
string/firstUniqueCharacterInAString.py
|
G-MontaG/leetcode
|
444e8ee3f395c191a86eae0e42d028060ecd1686
|
[
"MIT"
] | 1
|
2021-02-10T18:14:55.000Z
|
2021-02-10T18:14:55.000Z
|
string/firstUniqueCharacterInAString.py
|
G-MontaG/leetcode
|
444e8ee3f395c191a86eae0e42d028060ecd1686
|
[
"MIT"
] | null | null | null |
string/firstUniqueCharacterInAString.py
|
G-MontaG/leetcode
|
444e8ee3f395c191a86eae0e42d028060ecd1686
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/first-unique-character-in-a-string/
| 27.5625
| 67
| 0.519274
|
# https://leetcode.com/problems/first-unique-character-in-a-string/
class Solution:
def firstUniqChar(self, s: str) -> int:
mapping = {}
for index, char in enumerate(s):
if char in mapping.keys():
mapping[char] = -1
else:
mapping[char] = index
for char in mapping:
if mapping[char] > -1:
return mapping[char]
return -1
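# Illustrative checks (editorial, not in the original):
#   Solution().firstUniqChar("leetcode")     -> 0   ('l' never repeats)
#   Solution().firstUniqChar("loveleetcode") -> 2   ('v')
#   Solution().firstUniqChar("aabb")         -> -1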
| 0
| 0
| 0
| 351
| 0
| 0
| 0
| 0
| 22
|
0cc5410e4e819af67fb7073f0bb5d856a89be207
| 453
|
py
|
Python
|
ktapp/migrations/0037_ktuser_fav_period.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | 5
|
2015-04-13T09:44:31.000Z
|
2017-10-19T01:07:58.000Z
|
ktapp/migrations/0037_ktuser_fav_period.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | 49
|
2015-02-15T07:12:05.000Z
|
2022-03-11T23:11:43.000Z
|
ktapp/migrations/0037_ktuser_fav_period.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
| 21.571429
| 74
| 0.602649
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ktapp', '0036_ktuser_bio'),
]
operations = [
migrations.AddField(
model_name='ktuser',
name='fav_period',
field=models.CharField(max_length=250, null=True, blank=True),
preserve_default=True,
),
]
| 0
| 0
| 0
| 323
| 0
| 0
| 0
| 19
| 46
|
45ca169aee71ee56ada82a211aa1e50134aad821
| 2,133
|
py
|
Python
|
mmtrack/datasets/youtube_vis_dataset.py
|
benxiao/mmtracking
|
4363a05659d5f26da97b9725075dcbb3b13f775f
|
[
"Apache-2.0"
] | 1
|
2022-03-12T21:36:42.000Z
|
2022-03-12T21:36:42.000Z
|
mmtrack/datasets/youtube_vis_dataset.py
|
Readpistol/mmtracking
|
131b8fb7c632324f88c3240229e411e801380f2a
|
[
"Apache-2.0"
] | null | null | null |
mmtrack/datasets/youtube_vis_dataset.py
|
Readpistol/mmtracking
|
131b8fb7c632324f88c3240229e411e801380f2a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
| 48.477273
| 79
| 0.506329
|
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.datasets import DATASETS
from .coco_video_dataset import CocoVideoDataset
@DATASETS.register_module()
class YouTubeVISDataset(CocoVideoDataset):
"""YouTube VIS dataset for video instance segmentation."""
CLASSES_2019_version = ('person', 'giant_panda', 'lizard', 'parrot',
'skateboard', 'sedan', 'ape', 'dog', 'snake',
'monkey', 'hand', 'rabbit', 'duck', 'cat', 'cow',
'fish', 'train', 'horse', 'turtle', 'bear',
'motorbike', 'giraffe', 'leopard', 'fox', 'deer',
'owl', 'surfboard', 'airplane', 'truck', 'zebra',
'tiger', 'elephant', 'snowboard', 'boat', 'shark',
'mouse', 'frog', 'eagle', 'earless_seal',
'tennis_racket')
CLASSES_2021_version = ('airplane', 'bear', 'bird', 'boat', 'car', 'cat',
'cow', 'deer', 'dog', 'duck', 'earless_seal',
'elephant', 'fish', 'flying_disc', 'fox', 'frog',
'giant_panda', 'giraffe', 'horse', 'leopard',
'lizard', 'monkey', 'motorbike', 'mouse', 'parrot',
'person', 'rabbit', 'shark', 'skateboard', 'snake',
'snowboard', 'squirrel', 'surfboard',
'tennis_racket', 'tiger', 'train', 'truck',
'turtle', 'whale', 'zebra')
def __init__(self, dataset_version, *args, **kwargs):
self.set_dataset_classes(dataset_version)
super().__init__(*args, **kwargs)
@classmethod
def set_dataset_classes(cls, dataset_version):
if dataset_version == '2019':
cls.CLASSES = cls.CLASSES_2019_version
elif dataset_version == '2021':
cls.CLASSES = cls.CLASSES_2021_version
else:
            raise NotImplementedError('Unsupported YouTubeVIS dataset '
                                      f'version: {dataset_version}')
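# Hypothetical usage sketch (constructor arguments follow the mmdet CustomDataset
# API and are assumptions, not taken from this file):
#   dataset = YouTubeVISDataset(dataset_version='2021',
#                               ann_file='annotations/train.json',
#                               img_prefix='train/JPEGImages',
#                               pipeline=[])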
| 0
| 1,975
| 0
| 0
| 0
| 0
| 0
| 41
| 68
|
b26e7b4e1b789021e57269548d99674e7d9e0fb6
| 2,198
|
py
|
Python
|
easyDiffractionApp/Logic/DisplayModels/StatusModel.py
|
rozyczko/easyDiffractionApp
|
6b088e3cb19f943e6eee0e86c3c23515b7c6a084
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T15:26:44.000Z
|
2021-05-25T15:26:44.000Z
|
easyDiffractionApp/Logic/DisplayModels/StatusModel.py
|
rozyczko/easyDiffractionApp
|
6b088e3cb19f943e6eee0e86c3c23515b7c6a084
|
[
"BSD-3-Clause"
] | 138
|
2021-02-12T07:59:04.000Z
|
2022-03-26T12:07:19.000Z
|
easyDiffractionApp/Logic/DisplayModels/StatusModel.py
|
rozyczko/easyDiffractionApp
|
6b088e3cb19f943e6eee0e86c3c23515b7c6a084
|
[
"BSD-3-Clause"
] | 3
|
2021-05-07T07:08:25.000Z
|
2021-11-02T09:53:26.000Z
|
# SPDX-FileCopyrightText: 2021 easyDiffraction contributors <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = 'github.com/andrewsazonov'
__version__ = '0.0.1'
| 27.475
| 104
| 0.641492
|
# SPDX-FileCopyrightText: 2021 easyDiffraction contributors <[email protected]>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyDiffraction project <https://github.com/easyScience/easyDiffractionApp>
__author__ = 'github.com/andrewsazonov'
__version__ = '0.0.1'
from random import random
from PySide2.QtCore import QPointF
from PySide2.QtCharts import QtCharts
class StatusModel:
    def __init__(self, parent=None):
        # Containers for the data source and references to the GUI ChartView series.
        self._dataObj = None
        self._lowerSeriesRefs = []
        self._upperSeriesRefs = []
def updateSeries(self):
"""
Generates new data and updates the GUI ChartView LineSeries.
"""
if not self._lowerSeriesRefs or not self._upperSeriesRefs:
return
lowerSeries = self._dataObj.get_lowerXY()
upperSeries = self._dataObj.get_upperXY()
for seriesRef in self._lowerSeriesRefs:
seriesRef.replace(lowerSeries)
for seriesRef in self._upperSeriesRefs:
seriesRef.replace(upperSeries)
def updateData(self, dataObj):
"""
        Replace the underlying data object and refresh the series.
"""
self._dataObj = dataObj
self.updateSeries()
def addLowerSeriesRef(self, seriesRef):
"""
Sets series to be a reference to the GUI ChartView LineSeries.
"""
self._lowerSeriesRefs.append(seriesRef)
def addUpperSeriesRef(self, seriesRef):
"""
Sets series to be a reference to the GUI ChartView LineSeries.
"""
self._upperSeriesRefs.append(seriesRef)
class CalculatedDataModel:
def __init__(self, dataObj=None):
self._seriesRef = None
self._dataObj = dataObj
def updateSeries(self):
"""
Generates new data and updates the GUI ChartView LineSeries.
"""
if self._seriesRef is None:
return
series = self._dataObj.get_fit_XY()
self._seriesRef.replace(series)
def updateData(self, dataObj):
"""
        Replace the underlying data object and refresh the series.
"""
self._dataObj = dataObj
self.updateSeries()
def setSeriesRef(self, seriesRef):
"""
Sets series to be a reference to the GUI ChartView LineSeries.
"""
self._seriesRef = seriesRef
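# Hypothetical usage sketch (QtCharts series assumed; names are illustrative):
#   model = CalculatedDataModel(data_obj)   # data_obj provides get_fit_XY()
#   model.setSeriesRef(line_series)         # line_series: QtCharts.QLineSeries
#   model.updateSeries()                    # pushes the (x, y) points to the GUI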
| 2
| 0
| 0
| 1,751
| 0
| 0
| 0
| 33
| 114
|
41c794a5523ae6175185d6430eee0502fa65573d
| 1,349
|
py
|
Python
|
run.py
|
iustce/cesa-web
|
8b6b1fd8a66277b7319fdbf327e19948cc56917d
|
[
"MIT"
] | 1
|
2018-10-13T19:48:05.000Z
|
2018-10-13T19:48:05.000Z
|
run.py
|
iustce/cesa-web
|
8b6b1fd8a66277b7319fdbf327e19948cc56917d
|
[
"MIT"
] | null | null | null |
run.py
|
iustce/cesa-web
|
8b6b1fd8a66277b7319fdbf327e19948cc56917d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# python imports
import os
import subprocess
import sys, traceback
from flask.ext.migrate import MigrateCommand
from flask.ext.script import Manager
from database import manager as database_manager
try:
from project import app
from project.application import configure_app
from project.config import DefaultConfig, DevelopmentConfig, ProductionConfig
except ImportError:
print ' *** please install/update requirements or fix the problem ***'
traceback.print_exc(file=sys.stdout)
exit(0)
manager = Manager(app)
manager.add_command('database', database_manager)
manager.add_command('migration', MigrateCommand)
fwpath = os.path.abspath(os.path.dirname(__file__))
venv_dir = os.path.join(fwpath, 'venv')
if __name__ == '__main__':
manager.run()
| 24.981481
| 111
| 0.731653
|
# -*- coding: utf-8 -*-
# python imports
import os
import subprocess
import sys, traceback
from flask.ext.migrate import MigrateCommand
from flask.ext.script import Manager
from database import manager as database_manager
try:
from project import app
from project.application import configure_app
from project.config import DefaultConfig, DevelopmentConfig, ProductionConfig
except ImportError:
print ' *** please install/update requirements or fix the problem ***'
traceback.print_exc(file=sys.stdout)
exit(0)
manager = Manager(app)
manager.add_command('database', database_manager)
manager.add_command('migration', MigrateCommand)
fwpath = os.path.abspath(os.path.dirname(__file__))
venv_dir = os.path.join(fwpath, 'venv')
@manager.command
def run():
configure_app(app, DevelopmentConfig())
app.run(host='0.0.0.0', port=5000)
@manager.command
def import_local_config_file(filename):
if not os.path.isabs(filename):
filename = os.path.join(os.getcwd(), filename)
configure_app(app, filename, is_pyfile=True)
app.run(host='0.0.0.0', port=5000)
@manager.command
def test():
pass
@manager.command
def update_requirements():
subprocess.call([os.path.join(venv_dir, 'bin/pip'), 'install', '-r', os.path.join(fwpath, 'requirements')])
if __name__ == '__main__':
manager.run()
| 0
| 454
| 0
| 0
| 0
| 0
| 0
| 0
| 92
|
3622ebf53eb605a0ad50e3ba80cbe1fe001d8264
| 11,174
|
py
|
Python
|
docs/model.py
|
DLR-SC/gitlab2prov
|
0a548cf85121faa63ef9abbbf0d43aa4e0bc3d57
|
[
"MIT"
] | 13
|
2019-10-14T19:28:04.000Z
|
2022-03-24T09:46:50.000Z
|
docs/model.py
|
DLR-SC/gitlab2prov
|
0a548cf85121faa63ef9abbbf0d43aa4e0bc3d57
|
[
"MIT"
] | 50
|
2019-10-15T09:05:09.000Z
|
2022-03-28T10:51:22.000Z
|
docs/model.py
|
DLR-SC/gitlab2prov
|
0a548cf85121faa63ef9abbbf0d43aa4e0bc3d57
|
[
"MIT"
] | 2
|
2020-05-16T15:40:04.000Z
|
2021-09-14T12:08:19.000Z
|
"""PROV model fpr GitLab2PROV."""
__author__ = "Claas de Boer, Andreas Schreiber, Lynn von Kurnatowski"
__copyright__ = "Copyright 2020, German Aerospace Center (DLR) and individual contributors"
__license__ = "MIT"
__version__ = "0.5"
__status__ = "Development"
from prov.model import ProvDocument
from prov.dot import prov_to_dot
add = ProvDocument()
add.set_default_namespace("gitlab2prov:")
add.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
add.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
add.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
add.entity("File Version", other_attributes={"prov:type": "file_version", "old_path": "", "new_path": ""})
add.wasInformedBy("Commit", "Parent Commit")
add.wasAssociatedWith("Commit", "Committer")
add.wasAssociatedWith("Commit", "Author")
add.wasGeneratedBy("File", "Commit")
add.wasGeneratedBy("File Version", "Commit")
add.wasAttributedTo("File", "Author")
add.wasAttributedTo("File Version", "Author")
add.specializationOf("File Version", "File")
mod = ProvDocument()
mod.set_default_namespace("gitlab2prov:")
mod.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
mod.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": "",})
mod.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
mod.entity("File Version N", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.entity("File Version N-1", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.wasInformedBy("Commit", "Parent Commit")
mod.wasAssociatedWith("Commit", "Author")
mod.wasAssociatedWith("Commit", "Committer")
mod.used("Commit", "File Version N-1")
mod.wasGeneratedBy("File Version N", "Commit")
mod.wasRevisionOf("File Version N", "File Version N-1")
mod.specializationOf("File Version N", "File")
mod.specializationOf("File Version N-1", "File")
mod.wasAttributedTo("File Version N", "Author")
rem = ProvDocument()
rem.set_default_namespace("gitlab2prov:")
rem.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
rem.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
rem.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
rem.entity("File Version", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
rem.wasInformedBy("Commit", "Parent Commit")
rem.wasAssociatedWith("Commit", "Committer")
rem.wasAssociatedWith("Commit", "Author")
rem.wasInvalidatedBy("File Version", "Commit")
rem.specializationOf("File Version", "File")
com = ProvDocument()
com.set_default_namespace("gitlab2prov:")
com.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
com.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
com.activity("Commit Creation", other_attributes={"prov:type": "creation", "prov:startedAt": "", "prov:endedAt": ""})
com.activity("Commit Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
com.activity("Git Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
com.wasInformedBy("Commit Creation", "Git Commit")
com.entity("Commit", other_attributes={"prov:type": "commit_resource", "title": "", "message": "", "short_id": "", "id": ""})
com.entity("Commit Version", other_attributes={"prov:type": "commit_resource_version"})
com.entity("Annotated Commit Version", other_attributes={"prov:type": "commit_resource_version"},)
com.wasAssociatedWith("Commit Creation", "Creator")
com.wasAttributedTo("Commit", "Creator")
com.wasAttributedTo("Commit Version", "Creator")
com.wasGeneratedBy("Commit", "Commit Creation")
com.wasGeneratedBy("Commit Version", "Commit Creation")
com.wasAttributedTo("Annotated Commit Version", "Annotator")
com.wasAssociatedWith("Commit Annotation", "Annotator")
com.used("Commit Annotation", "Commit Version")
com.wasInformedBy("Commit Annotation", "Commit Creation")
com.wasGeneratedBy("Annotated Commit Version", "Commit Annotation")
com.specializationOf("Commit Version", "Commit")
com.specializationOf("Annotated Commit Version", "Commit")
com.wasDerivedFrom("Annotated Commit Version", "Commit Version")
mr = ProvDocument()
mr.set_default_namespace("gitlab2prov:")
mr.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""},)
mr.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
mr.activity("Merge Request Creation", other_attributes={"prov:type": "merge_request_creation", "prov:startedAt": "", "prov:endedAt": ""})
mr.activity("Merge Request Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
mr.entity("Merge Request", other_attributes={"prov:type": "merge_request_resource", "id": "", "iid": "", "title": "", "description": "", "web_url": "", "project_id": "", "source_branch": "", "target_branch": "", "source_project_url": "", "target_project_url": ""})
mr.entity("Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.entity("Annotated Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.wasInformedBy("Merge Request Annotation", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request Version", "Merge Request Creation")
mr.wasGeneratedBy("Annotated Merge Request Version", "Merge Request Annotation")
mr.used("Merge Request Annotation", "Merge Request Version")
mr.specializationOf("Merge Request Version", "Merge Request")
mr.specializationOf("Annotated Merge Request Version", "Merge Request")
mr.wasDerivedFrom("Annotated Merge Request Version", "Merge Request Version")
mr.wasAttributedTo("Annotated Merge Request Version", "Annotator")
mr.wasAttributedTo("Merge Request Version", "Creator")
mr.wasAttributedTo("Merge Request", "Creator")
mr.wasAssociatedWith("Merge Request Creation", "Creator")
mr.wasAssociatedWith("Merge Request Annotation", "Annotator")
iss = ProvDocument()
iss.set_default_namespace("gitlab2prov:")
iss.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
iss.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
iss.activity("Issue Creation", other_attributes={"prov:type": "issue_creation", "prov:startedAt": "", "prov:endedAt": ""})
iss.activity("Issue Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
iss.entity("Issue", other_attributes={"prov:type": "issue_resource", "id": "", "iid": "", "title": "", "description": "", "project_id": "", "web_url": ""})
iss.entity("Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.entity("Annotated Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.wasInformedBy("Issue Annotation", "Issue Creation")
iss.wasGeneratedBy("Issue", "Issue Creation")
iss.wasGeneratedBy("Issue Version", "Issue Creation")
iss.wasGeneratedBy("Annotated Issue Version", "Issue Annotation")
iss.used("Issue Annotation", "Issue Version")
iss.specializationOf("Issue Version", "Issue")
iss.specializationOf("Annotated Issue Version", "Issue")
iss.wasDerivedFrom("Annotated Issue Version", "Issue Version")
iss.wasAttributedTo("Annotated Issue Version", "Annotator")
iss.wasAttributedTo("Issue Version", "Creator")
iss.wasAttributedTo("Issue", "Creator")
iss.wasAssociatedWith("Issue Creation", "Creator")
iss.wasAssociatedWith("Issue Annotation", "Annotator")
release_tag_model = ProvDocument()
release_tag_model.set_default_namespace("gitlab2prov:")
release_tag_model.agent("User", {"name": "", "email": ""})
release_tag_model.activity("Release_Event")
release_tag_model.activity("Tag_Event")
release_tag_model.activity("Commit_Event")
release_tag_model.entity("Tag", {"prov:type": "prov:Collection", "name": "", "message": "", "commit": "", "target_commit": ""})
release_tag_model.entity("Release", {"prov:type": "prov:Collection", "name": "", "tag_name": "", "description": "", "created_at": "", "released_at": "", "commit_path": "", "tag_path": ""})
release_tag_model.entity("Commit", {"id": "", "short_id": "", "title": "", "message": "", "web_url": "", "created_at": ""})
release_tag_model.entity("Release_Evidence", {"sha": "", "filepath": "", "collected_at": ""})
release_tag_model.entity("Release_Asset", {"uri": "", "format": "", "filepath": ""})
release_tag_model.hadMember("Release_Asset", "Release")
release_tag_model.hadMember("Release_Evidence", "Release")
release_tag_model.hadMember("Tag", "Release")
release_tag_model.hadMember("Commit", "Tag")
release_tag_model.wasAssociatedWith("Commit_Event", "User")
release_tag_model.wasAssociatedWith("Release_Event", "User")
release_tag_model.wasAssociatedWith("Tag_Event", "User")
release_tag_model.wasAttributedTo("Release", "User")
release_tag_model.wasAttributedTo("Tag", "User")
release_tag_model.wasAttributedTo("Commit", "User")
release_tag_model.wasGeneratedBy("Release", "Release_Event")
release_tag_model.wasGeneratedBy("Tag", "Tag_Event")
release_tag_model.wasGeneratedBy("Commit", "Commit_Event")
for title, doc in [
("git_commit_model_add", add),
("git_commit_model_mod", mod),
("git_commit_model_del", rem),
("gitlab_commit_model", com),
("gitlab_issue_model", iss),
("gitlab_merge_request_model", mr),
("gitlab_release_tag_model", release_tag_model)
]:
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_pdf(
f"pdfs/{title}.pdf"
)
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_svg(
f"svgs/{title}.svg"
)
| 61.060109
| 264
| 0.707625
|
"""PROV model fpr GitLab2PROV."""
__author__ = "Claas de Boer, Andreas Schreiber, Lynn von Kurnatowski"
__copyright__ = "Copyright 2020, German Aerospace Center (DLR) and individual contributors"
__license__ = "MIT"
__version__ = "0.5"
__status__ = "Development"
from prov.model import ProvDocument
from prov.constants import PROV_LABEL
from prov.dot import prov_to_dot
add = ProvDocument()
add.set_default_namespace("gitlab2prov:")
add.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
add.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
add.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
add.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
add.entity("File Version", other_attributes={"prov:type": "file_version", "old_path": "", "new_path": ""})
add.wasInformedBy("Commit", "Parent Commit")
add.wasAssociatedWith("Commit", "Committer")
add.wasAssociatedWith("Commit", "Author")
add.wasGeneratedBy("File", "Commit")
add.wasGeneratedBy("File Version", "Commit")
add.wasAttributedTo("File", "Author")
add.wasAttributedTo("File Version", "Author")
add.specializationOf("File Version", "File")
mod = ProvDocument()
mod.set_default_namespace("gitlab2prov:")
mod.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""},)
mod.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
mod.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": "",})
mod.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
mod.entity("File Version N", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.entity("File Version N-1", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
mod.wasInformedBy("Commit", "Parent Commit")
mod.wasAssociatedWith("Commit", "Author")
mod.wasAssociatedWith("Commit", "Committer")
mod.used("Commit", "File Version N-1")
mod.wasGeneratedBy("File Version N", "Commit")
mod.wasRevisionOf("File Version N", "File Version N-1")
mod.specializationOf("File Version N", "File")
mod.specializationOf("File Version N-1", "File")
mod.wasAttributedTo("File Version N", "Author")
rem = ProvDocument()
rem.set_default_namespace("gitlab2prov:")
rem.activity("Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.activity("Parent Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
rem.agent("Committer", other_attributes={"prov:type": "user", "prov:role": "committer", "name": "", "email": ""})
rem.agent("Author", other_attributes={"prov:type": "user", "prov:role": "author", "name": "", "email": ""})
rem.entity("File", other_attributes={"prov:type": "file", "path_at_addition": ""})
rem.entity("File Version", other_attributes={"prov:type": "file_version", "new_path": "", "old_path": ""})
rem.wasInformedBy("Commit", "Parent Commit")
rem.wasAssociatedWith("Commit", "Committer")
rem.wasAssociatedWith("Commit", "Author")
rem.wasInvalidatedBy("File Version", "Commit")
rem.specializationOf("File Version", "File")
com = ProvDocument()
com.set_default_namespace("gitlab2prov:")
com.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
com.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
com.activity("Commit Creation", other_attributes={"prov:type": "creation", "prov:startedAt": "", "prov:endedAt": ""})
com.activity("Commit Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
com.activity("Git Commit", other_attributes={"prov:type": "commit", "title": "", "message": "", "id": "", "short_id": "", "prov:startedAt": "", "prov:endedAt": ""})
com.wasInformedBy("Commit Creation", "Git Commit")
com.entity("Commit", other_attributes={"prov:type": "commit_resource", "title": "", "message": "", "short_id": "", "id": ""})
com.entity("Commit Version", other_attributes={"prov:type": "commit_resource_version"})
com.entity("Annotated Commit Version", other_attributes={"prov:type": "commit_resource_version"},)
com.wasAssociatedWith("Commit Creation", "Creator")
com.wasAttributedTo("Commit", "Creator")
com.wasAttributedTo("Commit Version", "Creator")
com.wasGeneratedBy("Commit", "Commit Creation")
com.wasGeneratedBy("Commit Version", "Commit Creation")
com.wasAttributedTo("Annotated Commit Version", "Annotator")
com.wasAssociatedWith("Commit Annotation", "Annotator")
com.used("Commit Annotation", "Commit Version")
com.wasInformedBy("Commit Annotation", "Commit Creation")
com.wasGeneratedBy("Annotated Commit Version", "Commit Annotation")
com.specializationOf("Commit Version", "Commit")
com.specializationOf("Annotated Commit Version", "Commit")
com.wasDerivedFrom("Annotated Commit Version", "Commit Version")
mr = ProvDocument()
mr.set_default_namespace("gitlab2prov:")
mr.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""},)
mr.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
mr.activity("Merge Request Creation", other_attributes={"prov:type": "merge_request_creation", "prov:startedAt": "", "prov:endedAt": ""})
mr.activity("Merge Request Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
mr.entity("Merge Request", other_attributes={"prov:type": "merge_request_resource", "id": "", "iid": "", "title": "", "description": "", "web_url": "", "project_id": "", "source_branch": "", "target_branch": "", "source_project_url": "", "target_project_url": ""})
mr.entity("Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.entity("Annotated Merge Request Version", other_attributes={"prov:type": "merge_request_resource_version"},)
mr.wasInformedBy("Merge Request Annotation", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request", "Merge Request Creation")
mr.wasGeneratedBy("Merge Request Version", "Merge Request Creation")
mr.wasGeneratedBy("Annotated Merge Request Version", "Merge Request Annotation")
mr.used("Merge Request Annotation", "Merge Request Version")
mr.specializationOf("Merge Request Version", "Merge Request")
mr.specializationOf("Annotated Merge Request Version", "Merge Request")
mr.wasDerivedFrom("Annotated Merge Request Version", "Merge Request Version")
mr.wasAttributedTo("Annotated Merge Request Version", "Annotator")
mr.wasAttributedTo("Merge Request Version", "Creator")
mr.wasAttributedTo("Merge Request", "Creator")
mr.wasAssociatedWith("Merge Request Creation", "Creator")
mr.wasAssociatedWith("Merge Request Annotation", "Annotator")
iss = ProvDocument()
iss.set_default_namespace("gitlab2prov:")
iss.agent("Creator", other_attributes={"prov:type": "user", "prov:role": "creator", "name": ""})
iss.agent("Annotator", other_attributes={"prov:type": "user", "prov:role": "initiator", "name": ""})
iss.activity("Issue Creation", other_attributes={"prov:type": "issue_creation", "prov:startedAt": "", "prov:endedAt": ""})
iss.activity("Issue Annotation", other_attributes={"prov:type": "event", "prov:startedAt": "", "prov:endedAt": "", "event": ""})
iss.entity("Issue", other_attributes={"prov:type": "issue_resource", "id": "", "iid": "", "title": "", "description": "", "project_id": "", "web_url": ""})
iss.entity("Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.entity("Annotated Issue Version", other_attributes={"prov:type": "issue_resource_version"})
iss.wasInformedBy("Issue Annotation", "Issue Creation")
iss.wasGeneratedBy("Issue", "Issue Creation")
iss.wasGeneratedBy("Issue Version", "Issue Creation")
iss.wasGeneratedBy("Annotated Issue Version", "Issue Annotation")
iss.used("Issue Annotation", "Issue Version")
iss.specializationOf("Issue Version", "Issue")
iss.specializationOf("Annotated Issue Version", "Issue")
iss.wasDerivedFrom("Annotated Issue Version", "Issue Version")
iss.wasAttributedTo("Annotated Issue Version", "Annotator")
iss.wasAttributedTo("Issue Version", "Creator")
iss.wasAttributedTo("Issue", "Creator")
iss.wasAssociatedWith("Issue Creation", "Creator")
iss.wasAssociatedWith("Issue Annotation", "Annotator")
release_tag_model = ProvDocument()
release_tag_model.set_default_namespace("gitlab2prov:")
release_tag_model.agent("User", {"name": "", "email": ""})
release_tag_model.activity("Release_Event")
release_tag_model.activity("Tag_Event")
release_tag_model.activity("Commit_Event")
release_tag_model.entity("Tag", {"prov:type": "prov:Collection", "name": "", "message": "", "commit": "", "target_commit": ""})
release_tag_model.entity("Release", {"prov:type": "prov:Collection", "name": "", "tag_name": "", "description": "", "created_at": "", "released_at": "", "commit_path": "", "tag_path": ""})
release_tag_model.entity("Commit", {"id": "", "short_id": "", "title": "", "message": "", "web_url": "", "created_at": ""})
release_tag_model.entity("Release_Evidence", {"sha": "", "filepath": "", "collected_at": ""})
release_tag_model.entity("Release_Asset", {"uri": "", "format": "", "filepath": ""})
release_tag_model.hadMember("Release_Asset", "Release")
release_tag_model.hadMember("Release_Evidence", "Release")
release_tag_model.hadMember("Tag", "Release")
release_tag_model.hadMember("Commit", "Tag")
release_tag_model.wasAssociatedWith("Commit_Event", "User")
release_tag_model.wasAssociatedWith("Release_Event", "User")
release_tag_model.wasAssociatedWith("Tag_Event", "User")
release_tag_model.wasAttributedTo("Release", "User")
release_tag_model.wasAttributedTo("Tag", "User")
release_tag_model.wasAttributedTo("Commit", "User")
release_tag_model.wasGeneratedBy("Release", "Release_Event")
release_tag_model.wasGeneratedBy("Tag", "Tag_Event")
release_tag_model.wasGeneratedBy("Commit", "Commit_Event")
for title, doc in [
("git_commit_model_add", add),
("git_commit_model_mod", mod),
("git_commit_model_del", rem),
("gitlab_commit_model", com),
("gitlab_issue_model", iss),
("gitlab_merge_request_model", mr),
("gitlab_release_tag_model", release_tag_model)
]:
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_pdf(
f"pdfs/{title}.pdf"
)
prov_to_dot(doc, show_nary=False, use_labels=False, direction="BT").write_svg(
f"svgs/{title}.svg"
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 16
| 22
|
f495910cbad974850a149f592f1022624205f1c7
| 9,479
|
py
|
Python
|
scripts/go_stats_utils.py
|
kltm/go-site
|
fe6797ed1291bd0d12df83b7c9d670c91a0fb526
|
[
"BSD-3-Clause"
] | 31
|
2016-11-01T13:11:43.000Z
|
2022-02-28T05:05:16.000Z
|
scripts/go_stats_utils.py
|
kltm/go-site
|
fe6797ed1291bd0d12df83b7c9d670c91a0fb526
|
[
"BSD-3-Clause"
] | 1,172
|
2015-01-29T23:47:53.000Z
|
2022-03-30T05:22:01.000Z
|
scripts/go_stats_utils.py
|
kltm/go-site
|
fe6797ed1291bd0d12df83b7c9d670c91a0fb526
|
[
"BSD-3-Clause"
] | 92
|
2015-02-11T03:10:55.000Z
|
2022-03-01T08:16:02.000Z
|
# This is a hard coded list of evidence, better organized for readability
ev_all = ['EXP', 'IDA', 'IMP', 'IGI', 'IPI', 'IEP', 'IGC', 'RCA', 'IBA', 'IKR', 'IC', 'NAS', 'ND', 'TAS', 'HDA', 'HEP', 'HGI', 'HMP', 'ISA', 'ISM', 'ISO', 'ISS', 'IEA']
# This is a hard coded list of reference genomes that should always be present in a GO release
REFERENCE_GENOME_IDS = [
"NCBITaxon:9606",
"NCBITaxon:10116",
"NCBITaxon:10090",
"NCBITaxon:3702",
"NCBITaxon:7955",
"NCBITaxon:6239",
"NCBITaxon:559292",
"NCBITaxon:7227",
"NCBITaxon:44689",
"NCBITaxon:4896",
"NCBITaxon:83333"
]
BP_TERM_ID = "GO:0008150"
MF_TERM_ID = "GO:0003674"
CC_TERM_ID = "GO:0005575"
# useful grouping of evidences as discussed with Pascale
EVIDENCE_GROUPS = {
"EXP": ["EXP", "IDA", "IEP", "IGI", "IMP", "IPI"],
"HTP": ["HDA", "HEP", "HGI", "HMP", "HTP"],
"PHYLO": ["IBA", "IRD", "IKR", "IMR"],
"IEA": ["IEA"],
"ND": ["ND"],
"OTHER": ["IC", "IGC", "ISA", "ISM", "ISO", "ISS", "NAS", "RCA", "TAS"]
}
EVIDENCE_MIN_GROUPS = {
"EXPERIMENTAL" : EVIDENCE_GROUPS["EXP"] + EVIDENCE_GROUPS["HTP"],
"COMPUTATIONAL" : EVIDENCE_GROUPS["PHYLO"] + EVIDENCE_GROUPS["IEA"] + EVIDENCE_GROUPS["OTHER"]
}
global_session = None
def fetch(url):
"""
    Error-proof method to get data from an HTTP request.
    If an error occurred, return None.
"""
global global_session
# Ensure we are using the same session - creating too many sessions could crash this script
if global_session is None:
        global_session = requests_retry(session=global_session)
try:
r = global_session.get(url)
return r
except Exception as x:
print("Query GET " , url , " failed: ", x)
return None
def golr_fetch(golr_base_url, select_query):
"""
    Error-proof method to get data from GOLr.
    If an HTTP error occurs, return None; otherwise return the JSON object.
"""
r = fetch(golr_base_url + select_query)
if r is None:
return None
response = r.json()
return response
# utility function to build a list from a solr/golr facet array
# utility function to transform a list [A, 1, B, 2] into a map {A: 1, B: 2}
# utility function to build a reverse map: { "a": 1, "b": 1, "c": 2 } -> {1: ["a", "b"], 2: ["c"]}
# utility function to cluster elements of an input map based on another map of synonyms
# similar as above but the value of each key is also a map
# reorder map (python 3.6 keeps order in which items are inserted in map: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value)
def bioentity_type(str_type):
"""
In a nutshell, collapse all RNA related types into RNA
"""
if "RNA" in str_type or "ribozyme" in str_type or "transcript" in str_type:
return "RNA_cluster"
return str_type
def sum_map_values(map):
"""
Utility function to sum up the values of a map. Assume the map values are all numbers
"""
total = 0
for key, val in map.items():
total += val
return total
| 31.387417
| 169
| 0.606815
|
import json
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from enum import Enum
# This is a hard coded list of evidence, better organized for readability
ev_all = ['EXP', 'IDA', 'IMP', 'IGI', 'IPI', 'IEP', 'IGC', 'RCA', 'IBA', 'IKR', 'IC', 'NAS', 'ND', 'TAS', 'HDA', 'HEP', 'HGI', 'HMP', 'ISA', 'ISM', 'ISO', 'ISS', 'IEA']
class CLOSURE_LABELS(Enum):
ISA = "isa_closure"
ISA_PARTOF = "isa_partof_closure"
REGULATES = "regulates_closure"
# This is a hard coded list of reference genomes that should always be present in a GO release
REFERENCE_GENOME_IDS = [
"NCBITaxon:9606",
"NCBITaxon:10116",
"NCBITaxon:10090",
"NCBITaxon:3702",
"NCBITaxon:7955",
"NCBITaxon:6239",
"NCBITaxon:559292",
"NCBITaxon:7227",
"NCBITaxon:44689",
"NCBITaxon:4896",
"NCBITaxon:83333"
]
BP_TERM_ID = "GO:0008150"
MF_TERM_ID = "GO:0003674"
CC_TERM_ID = "GO:0005575"
# useful grouping of evidences as discussed with Pascale
EVIDENCE_GROUPS = {
"EXP": ["EXP", "IDA", "IEP", "IGI", "IMP", "IPI"],
"HTP": ["HDA", "HEP", "HGI", "HMP", "HTP"],
"PHYLO": ["IBA", "IRD", "IKR", "IMR"],
"IEA": ["IEA"],
"ND": ["ND"],
"OTHER": ["IC", "IGC", "ISA", "ISM", "ISO", "ISS", "NAS", "RCA", "TAS"]
}
EVIDENCE_MIN_GROUPS = {
"EXPERIMENTAL" : EVIDENCE_GROUPS["EXP"] + EVIDENCE_GROUPS["HTP"],
"COMPUTATIONAL" : EVIDENCE_GROUPS["PHYLO"] + EVIDENCE_GROUPS["IEA"] + EVIDENCE_GROUPS["OTHER"]
}
def is_experimental(evidence_type):
return evidence_type in EVIDENCE_MIN_GROUPS["EXPERIMENTAL"]
def is_computational(evidence_type):
return evidence_type in EVIDENCE_MIN_GROUPS["COMPUTATIONAL"]
def get_evidence_min_group(evidence_type):
for group, codes in EVIDENCE_MIN_GROUPS.items():
if evidence_type in codes:
return group
return "ND"
def aspect_from_source(source):
if source == "molecular_function":
return "MF"
elif source == "biological_process":
return "BP"
elif source == "cellular_component":
return "CC"
return "UNK"
global_session = None
def requests_retry(retries = 3, backoff = 0.3, session = None):
session = session or requests.Session()
retry = Retry(
total = retries,
read = retries,
connect = retries,
backoff_factor = backoff,
status_forcelist = (429, 500, 502, 503, 504)
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("http://", adapter)
session.mount("https://", adapter)
return session
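# Usage sketch (hypothetical URL, not part of the original file): the session
# returned by requests_retry() retries requests on 429/5xx responses with
# exponential backoff before giving up.
# session = requests_retry(retries=5, backoff=0.5)
# response = session.get("https://example.org/api/health")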
def fetch(url):
"""
    Error-proof method to get data from an HTTP request.
    If an error occurred, return None.
"""
global global_session
# Ensure we are using the same session - creating too many sessions could crash this script
if global_session is None:
        global_session = requests_retry(session=global_session)
try:
r = global_session.get(url)
return r
except Exception as x:
print("Query GET " , url , " failed: ", x)
return None
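# Usage sketch (hypothetical URL): fetch() swallows exceptions and returns
# None, so callers must check the result before using it.
# resp = fetch("https://example.org/api/terms")
# if resp is not None:
#     data = resp.json()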
def post(url, params):
global global_session
    global_session = requests_retry(session=global_session)
try:
r = global_session.post(url, data = params)
return r
except Exception as x:
print("Query POST " , url , " failed: ", x)
return None
def golr_fetch(golr_base_url, select_query):
"""
    Error-proof method to get data from GOLr.
    If an HTTP error occurs, return None; otherwise return the JSON object.
"""
r = fetch(golr_base_url + select_query)
if r is None:
return None
response = r.json()
return response
def golr_fetch_by_taxon(golr_base_url, select_query, taxon):
return golr_fetch(golr_base_url, select_query + "&fq=taxon:\"" + taxon + "\"")
def golr_fetch_by_taxa(golr_base_url, select_query, taxa):
tmp = ""
if isinstance(taxa, list):
tmp = "&fq=taxon:(\"" + taxa.join("\" ") + "\")"
else:
tmp = "&fq=taxon:\"" + taxa + "\""
print("*** ", golr_base_url + select_query + tmp)
return golr_fetch(golr_base_url, select_query + tmp)
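# Example (hypothetical taxa): a list ["NCBITaxon:9606", "NCBITaxon:10090"]
# yields the filter &fq=taxon:("NCBITaxon:9606" "NCBITaxon:10090"), while a
# single string yields &fq=taxon:"NCBITaxon:9606".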
# utility function to build a list from a solr/golr facet array
def build_list(items_list, min_size = None):
ls = []
for i in range(0, len(items_list), 2):
if min_size is None or items_list[i + 1] > min_size:
ls.append(items_list[i])
return ls
# utility function to transform a list [A, 1, B, 2] into a map {A: 1, B: 2}
def build_map(items_list, min_size = None):
map = {}
for i in range(0, len(items_list), 2):
if min_size is None or items_list[i + 1] > min_size:
map[items_list[i]] = items_list[i + 1]
return map
# utility function to build a reverse map: { "a": 1, "b": 1, "c": 2 } -> {1: ["a", "b"], 2: ["c"]}
def build_reverse_map(map):
reverse_map = { }
for key, val in map.items():
ls = []
if val in reverse_map:
ls = reverse_map[val]
else:
reverse_map[val] = ls
ls.append(key)
return reverse_map
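# Worked example mirroring the comment above:
# build_reverse_map({"a": 1, "b": 1, "c": 2}) == {1: ["a", "b"], 2: ["c"]}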
# utility function to cluster elements of an input map based on another map of synonyms
def cluster_map(input_map, synonyms):
cluster = { }
for key, val in input_map.items():
temp = synonyms[key]
if temp in cluster:
val_cluster = cluster[temp]
cluster[temp] = val_cluster + val
else:
cluster[temp] = val
return cluster
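# Worked example (hypothetical synonym table):
# cluster_map({"mRNA": 3, "tRNA": 2, "protein": 5},
#             {"mRNA": "RNA_cluster", "tRNA": "RNA_cluster", "protein": "protein"})
# returns {"RNA_cluster": 5, "protein": 5}.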
# similar as above but the value of each key is also a map
def cluster_complex_map(input_map, synonyms):
cluster = { }
for key, val in input_map.items():
temp = synonyms[key]
# print("working on : " , key , val)
if temp in cluster:
temp_cluster = cluster[temp]
# print("cluster already found : ", temp , temp_cluster)
for key_cluster, val_cluster in temp_cluster.items():
temp_cluster[key_cluster] = val_cluster + val[key_cluster]
else:
cluster[temp] = val
return cluster
# reorder map (python 3.6 keeps order in which items are inserted in map: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value)
def ordered_map(map):
ordered_map = { }
for w in sorted(map, key=map.get, reverse=True):
ordered_map[w] = map[w]
return ordered_map
def extract_map(map, key_str):
extracted = { }
for key, val in map.items():
if key_str in key:
extracted[key] = val
return extracted
def merge_dict(dict_total, dict_diff):
new_dict = { }
for key, val in dict_total.items():
if type(val) == str:
new_dict[key] = val
elif type(val) == int or type(val) == float:
if val == 0:
diff_val = dict_diff[key] if key in dict_diff else 0
new_dict[key] = str(diff_val) + " / " + str(val) + "\t0%"
else:
diff_val = dict_diff[key] if key in dict_diff else 0
new_dict[key] = str(diff_val) + " / " + str(val) + "\t" + str(round(100 * diff_val / val, 2)) + "%"
elif type(val) == dict:
diff_val = dict_diff[key] if key in dict_diff else { }
new_dict[key] = merge_dict(val, diff_val)
else:
print("should not happened ! " , val , type(val))
return new_dict
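# Worked example: merge_dict({"terms": 10}, {"terms": 2}) returns
# {"terms": "2 / 10\t20.0%"}, i.e. "diff / total" plus the percentage.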
def minus_dict(dict1, dict2):
new_dict = { }
for key, val in dict1.items():
if type(val) == str:
new_dict[key] = val
elif type(val) == int or type(val) == float:
diff_val = dict2[key] if key in dict2 else 0
new_dict[key] = val - diff_val
elif type(val) == dict:
diff_val = dict2[key] if key in dict2 else { }
            new_dict[key] = minus_dict(val, diff_val)
else:
print("should not happened ! " , val , type(val))
return new_dict
def has_taxon(stats, taxon_id):
for taxon in stats["annotations"]["by_taxon"]:
if taxon_id in taxon:
return True
return False
def added_removed_species(current_stats, previous_stats):
results = {
"added" : { },
"removed" : { }
}
for taxon in current_stats["annotations"]["by_taxon"]:
taxon_id = taxon.split("|")[0]
if not has_taxon(previous_stats, taxon_id):
results["added"][taxon] = current_stats["annotations"]["by_taxon"][taxon]
for taxon in previous_stats["annotations"]["by_taxon"]:
taxon_id = taxon.split("|")[0]
if not has_taxon(current_stats, taxon_id):
results["removed"][taxon] = previous_stats["annotations"]["by_taxon"][taxon]
return results
def bioentity_type(str_type):
"""
In a nutshell, collapse all RNA related types into RNA
"""
if "RNA" in str_type or "ribozyme" in str_type or "transcript" in str_type:
return "RNA_cluster"
return str_type
def sum_map_values(map):
"""
Utility function to sum up the values of a map. Assume the map values are all numbers
"""
total = 0
for key, val in map.items():
total += val
return total
def write_json(key, content):
with open(key, 'w') as outfile:
try:
json.dump(content, outfile, indent=2)
finally:
outfile.close()
def write_text(key, content):
with open(key, 'w') as outfile:
try:
outfile.write(content)
finally:
outfile.close()
| 0
| 0
| 0
| 101
| 0
| 5,663
| 0
| 19
| 627
|
841c91691e5e3f9f8e364f9c80db23924bcbaafd
| 102
|
py
|
Python
|
notebooks/exercise_solutions/n00_python_intro_data-structures.py
|
pydy/pydy-tutorial-human-standing
|
72b1d8513e339e9b10e501bd3490caa3fa997bc4
|
[
"CC-BY-4.0"
] | 134
|
2015-05-19T15:24:18.000Z
|
2022-03-12T09:39:03.000Z
|
notebooks/exercise_solutions/n00_python_intro_data-structures.py
|
pydy/pydy-tutorial-human-standing
|
72b1d8513e339e9b10e501bd3490caa3fa997bc4
|
[
"CC-BY-4.0"
] | 46
|
2015-05-05T18:08:20.000Z
|
2022-01-28T11:12:42.000Z
|
notebooks/exercise_solutions/n00_python_intro_data-structures.py
|
pydy/pydy-tutorial-pycon-2014
|
72b1d8513e339e9b10e501bd3490caa3fa997bc4
|
[
"CC-BY-4.0"
] | 62
|
2015-06-16T01:50:51.000Z
|
2022-02-26T07:39:41.000Z
|
num_list = [1,2,3,4]
months = ['Jan', 'Feb', 'Mar', 'Apr']
months_dict = dict(zip(months, num_list))
| 20.4
| 41
| 0.607843
|
num_list = [1,2,3,4]
months = ['Jan', 'Feb', 'Mar', 'Apr']
months_dict = dict(zip(months, num_list))
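# The resulting dictionary pairs each month with its number:
# months_dict == {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4}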
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fe92b53f5b3777d23e5c45c05e94c9a44f57b7aa
| 345
|
py
|
Python
|
calingen/interfaces/__init__.py
|
Mischback/django-calingen
|
3354c751e29d301609ec44e64d69a8729ec36de4
|
[
"MIT"
] | null | null | null |
calingen/interfaces/__init__.py
|
Mischback/django-calingen
|
3354c751e29d301609ec44e64d69a8729ec36de4
|
[
"MIT"
] | 51
|
2021-11-15T20:44:19.000Z
|
2022-02-10T08:33:08.000Z
|
calingen/interfaces/__init__.py
|
Mischback/django-calingen
|
3354c751e29d301609ec44e64d69a8729ec36de4
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
"""The application's interfaces that are used to connect the different components.
Notes
-----
This package's code is not really specific to the Django framework. It is an
abstraction layer.
Primary focus is the provision of a plugin API, that allows the app to be
extendable with third-party applications.
"""
| 26.538462
| 82
| 0.773913
|
# SPDX-License-Identifier: MIT
"""The application's interfaces that are used to connect the different components.
Notes
-----
This package's code is not really specific to the Django framework. It is an
abstraction layer.
Primary focus is the provision of a plugin API, that allows the app to be
extendable with third-party applications.
"""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8b3f5bf2170e6f1f55a1f584308e631727c5174c
| 1,387
|
py
|
Python
|
Final Project/src/main.py
|
tig3r66/CMPUT275
|
dd5b94dcf0436e281f4696959db07b56f5c0b9d8
|
[
"MIT"
] | 1
|
2022-01-25T05:19:15.000Z
|
2022-01-25T05:19:15.000Z
|
Final Project/src/main.py
|
tig3r66/CMPUT275
|
dd5b94dcf0436e281f4696959db07b56f5c0b9d8
|
[
"MIT"
] | null | null | null |
Final Project/src/main.py
|
tig3r66/CMPUT275
|
dd5b94dcf0436e281f4696959db07b56f5c0b9d8
|
[
"MIT"
] | null | null | null |
# ===================================
# Name: Edward (Eddie) Guo
# ID: 1576381
# Partner: Jason Kim
# CMPUT 275, Fall 2020
#
# Final Assignment: EEG Visualizer
# ===================================
"""
Contains the QApplication which holds the PlotWindow QMainWindow object. The
controller class is here for convenient additions of extra QMainWindows.
"""
import sys
# for UI
from PyQt5 import QtWidgets
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
controller = Controller()
controller.show_plot_window()
sys.exit(app.exec_())
| 26.673077
| 79
| 0.651045
|
# ===================================
# Name: Edward (Eddie) Guo
# ID: 1576381
# Partner: Jason Kim
# CMPUT 275, Fall 2020
#
# Final Assignment: EEG Visualizer
# ===================================
"""
Contains the QApplication which holds the PlotWindow QMainWindow object. The
controller class is here for convenient additions of extra QMainWindows.
"""
import sys
# for UI
from PyQt5 import QtCore, QtWidgets
from plot_window import PlotWindow
class Controller:
"""Controller class for slave QMainWindows. Used for expandability in case
the user wishes to create additional windows for the program (ex: home
window).
"""
def show_plot_window(self):
"""Creates the main window (EEG and FFT plots) from plot_window.py.
"""
self.plot_window = QtWidgets.QMainWindow()
self.ui = PlotWindow()
self.ui.setup_ui(self.plot_window)
self.plot_window.setWindowFlags(QtCore.Qt.Window)
self.plot_window.show()
app.aboutToQuit.connect(self.close_threads)
def close_threads(self):
"""Helper function that closes all running threads when the application
is about to quit.
"""
self.ui.close_threads()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
controller = Controller()
controller.show_plot_window()
sys.exit(app.exec_())
| 0
| 0
| 0
| 741
| 0
| 0
| 0
| 21
| 45
|
9bc6d6d5809746ae0dfad11e6d7e815c885010be
| 929
|
py
|
Python
|
utilipy/data_utils/tests/test_init.py
|
nstarman/utilipy
|
17984942145d31126724df23500bafba18fb7516
|
[
"BSD-3-Clause"
] | 2
|
2020-11-15T01:48:45.000Z
|
2020-12-02T20:44:20.000Z
|
utilipy/data_utils/tests/test_init.py
|
nstarman/astroPHD
|
17984942145d31126724df23500bafba18fb7516
|
[
"BSD-3-Clause"
] | 22
|
2020-09-13T17:58:24.000Z
|
2022-02-04T19:05:23.000Z
|
utilipy/data_utils/tests/test_init.py
|
nstarman/utilipy
|
17984942145d31126724df23500bafba18fb7516
|
[
"BSD-3-Clause"
] | 1
|
2020-04-21T22:41:01.000Z
|
2020-04-21T22:41:01.000Z
|
# -*- coding: utf-8 -*-
"""Test Code in __init__."""
__all__ = [
"test_get_path_to_file",
]
##############################################################################
# IMPORTS
# BUILT-IN
# PROJECT-SPECIFIC
##############################################################################
# PARAMETERS
##############################################################################
# CODE
##############################################################################
# /def
# -------------------------------------------------------------------
##############################################################################
# END
| 21.113636
| 78
| 0.339074
|
# -*- coding: utf-8 -*-
"""Test Code in __init__."""
__all__ = [
"test_get_path_to_file",
]
##############################################################################
# IMPORTS
# BUILT-IN
import os.path
# PROJECT-SPECIFIC
from utilipy.data_utils.utils import get_path_to_file
##############################################################################
# PARAMETERS
##############################################################################
# CODE
##############################################################################
def test_get_path_to_file():
path = get_path_to_file("__init__.py", package="utilipy.data_utils")
assert isinstance(path, str)
assert os.path.join("utilipy", "data_utils", "__init__.py") in path
# /def
# -------------------------------------------------------------------
##############################################################################
# END
| 0
| 0
| 0
| 0
| 0
| 187
| 0
| 25
| 67
|
22e65f52c1dd2e9a786884bce3811c3aa03273e2
| 2,579
|
py
|
Python
|
eStore/migrations/0005_auto_20210420_2220.py
|
masrufjaman/gas-n-go
|
435e574a1b1bbd875a8a7aeade4d4c2dc1636b07
|
[
"MIT"
] | null | null | null |
eStore/migrations/0005_auto_20210420_2220.py
|
masrufjaman/gas-n-go
|
435e574a1b1bbd875a8a7aeade4d4c2dc1636b07
|
[
"MIT"
] | 9
|
2021-03-22T18:36:25.000Z
|
2021-04-20T17:39:47.000Z
|
eStore/migrations/0005_auto_20210420_2220.py
|
masrufjaman/gas-n-go
|
435e574a1b1bbd875a8a7aeade4d4c2dc1636b07
|
[
"MIT"
] | 2
|
2021-06-30T14:39:52.000Z
|
2021-08-12T19:41:11.000Z
|
# Generated by Django 3.1.7 on 2021-04-20 16:20
| 40.936508
| 150
| 0.598682
|
# Generated by Django 3.1.7 on 2021-04-20 16:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20210406_1400'),
('eStore', '0004_item_discount_price'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='items',
),
migrations.AddField(
model_name='order',
name='transaction_id',
field=models.BooleanField(max_length=200, null=True),
),
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='eStore.order'),
),
migrations.AddField(
model_name='orderitem',
name='quantity',
field=models.IntegerField(blank=True, default=0, null=True),
),
migrations.AddField(
model_name='orderitem',
name='username',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer'),
),
migrations.AlterField(
model_name='order',
name='username',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer', to_field='username'),
),
migrations.AlterField(
model_name='orderitem',
name='item',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='eStore.item'),
),
migrations.CreateModel(
name='ShippingAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('address', models.CharField(max_length=200, null=True)),
('city', models.CharField(max_length=200, null=True)),
('area', models.CharField(max_length=200, null=True)),
('road_no', models.CharField(max_length=200, null=True)),
('date_added', models.DateTimeField(auto_now_add=True)),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='eStore.order')),
('username', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='accounts.customer')),
],
),
]
| 0
| 0
| 0
| 2,432
| 0
| 0
| 0
| 30
| 68
|
72dfafe3d10bd2db54f014bbf5184b6be818ecf0
| 9,310
|
py
|
Python
|
sdk/python/kfp/v2/dsl/experimental/for_loop.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | 1
|
2021-10-23T00:39:47.000Z
|
2021-10-23T00:39:47.000Z
|
sdk/python/kfp/v2/dsl/experimental/for_loop.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/kfp/v2/dsl/experimental/for_loop.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods that supports argument for ParallelFor."""
import re
from typing import Any, Dict, List, Optional, Union
ItemList = List[Union[int, float, str, Dict[str, Any]]]
def _get_loop_item_type(type_name: str) -> Optional[str]:
"""Extracts the loop item type.
This method is used for extract the item type from a collection type.
For example:
List[str] -> str
typing.List[int] -> int
typing.Sequence[str] -> str
List -> None
str -> None
Args:
type_name: The collection type name, like `List`, Sequence`, etc.
Returns:
The collection item type or None if no match found.
"""
    match = re.match(r'(typing\.)?(?:\w+)(?:\[(?P<item_type>.+)\])', type_name)
    if match:
        return match.group('item_type').strip()
    else:
        return None
def _get_subvar_type(type_name: str) -> Optional[str]:
"""Extracts the subvar type.
    This method is used to extract the value type from a dictionary type.
For example:
Dict[str, int] -> int
typing.Mapping[str, float] -> float
Args:
type_name: The dictionary type.
Returns:
The dictionary value type or None if no match found.
"""
    match = re.match(
        r'(typing\.)?(?:\w+)(?:\[\s*(?:\w+)\s*,\s*(?P<value_type>.+)\])',
        type_name)
    if match:
        return match.group('value_type').strip()
    else:
        return None
| 34.868914
| 81
| 0.628249
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods that supports argument for ParallelFor."""
import re
from typing import Any, Dict, List, Optional, Tuple, Union, get_type_hints
from kfp.v2.components.experimental import pipeline_channel
ItemList = List[Union[int, float, str, Dict[str, Any]]]
def _get_loop_item_type(type_name: str) -> Optional[str]:
"""Extracts the loop item type.
This method is used for extract the item type from a collection type.
For example:
List[str] -> str
typing.List[int] -> int
typing.Sequence[str] -> str
List -> None
str -> None
Args:
type_name: The collection type name, like `List`, Sequence`, etc.
Returns:
The collection item type or None if no match found.
"""
    match = re.match(r'(typing\.)?(?:\w+)(?:\[(?P<item_type>.+)\])', type_name)
    if match:
        return match.group('item_type').strip()
    else:
        return None
def _get_subvar_type(type_name: str) -> Optional[str]:
"""Extracts the subvar type.
    This method is used to extract the value type from a dictionary type.
For example:
Dict[str, int] -> int
typing.Mapping[str, float] -> float
Args:
type_name: The dictionary type.
Returns:
The dictionary value type or None if no match found.
"""
    match = re.match(
        r'(typing\.)?(?:\w+)(?:\[\s*(?:\w+)\s*,\s*(?P<value_type>.+)\])',
        type_name)
    if match:
        return match.group('value_type').strip()
    else:
        return None
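# Quick checks mirroring the docstring examples above (editor's sketch, not
# part of the original file):
# assert _get_loop_item_type('List[str]') == 'str'
# assert _get_loop_item_type('typing.Sequence[str]') == 'str'
# assert _get_loop_item_type('List') is None
# assert _get_subvar_type('Dict[str, int]') == 'int'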
class LoopArgument(pipeline_channel.PipelineChannel):
"""Represents the argument that are looped over in a ParallelFor loop.
The class shouldn't be instantiated by the end user, rather it is
created automatically by a ParallelFor ops group.
To create a LoopArgument instance, use one of its factory methods::
LoopArgument.from_pipeline_channel(...)
LoopArgument.from_raw_items(...)
Attributes:
items_or_pipeline_channel: The raw items or the PipelineChannel object
this LoopArgument is associated to.
"""
LOOP_ITEM_NAME_BASE = 'loop-item'
LOOP_ITEM_PARAM_NAME_BASE = 'loop-item-param'
def __init__(
self,
items: Union[ItemList, pipeline_channel.PipelineChannel],
name_code: Optional[str] = None,
name_override: Optional[str] = None,
**kwargs,
):
"""Initializes a LoopArguments object.
Args:
            items: List of items to loop over. If this is a list of dicts,
                then all dicts must have the same keys and every key must be
                a legal Python variable name.
name_code: A unique code used to identify these loop arguments.
Should match the code for the ParallelFor ops_group which created
these LoopArguments. This prevents parameter name collisions.
name_override: The override name for PipelineChannel.
**kwargs: Any other keyword arguments passed down to PipelineChannel.
"""
if (name_code is None) == (name_override is None):
raise ValueError(
'Expect one and only one of `name_code` and `name_override` to '
'be specified.')
if name_override is None:
super().__init__(name=self._make_name(name_code), **kwargs)
else:
super().__init__(name=name_override, **kwargs)
if not isinstance(items,
(list, tuple, pipeline_channel.PipelineChannel)):
raise TypeError(
f'Expected list, tuple, or PipelineChannel, got {items}.')
if isinstance(items, tuple):
items = list(items)
self.items_or_pipeline_channel = items
self._referenced_subvars: Dict[str, LoopArgumentVariable] = {}
if isinstance(items, list) and isinstance(items[0], dict):
subvar_names = set(items[0].keys())
# then this block creates loop_arg.variable_a and loop_arg.variable_b
for subvar_name in subvar_names:
loop_arg_var = LoopArgumentVariable(
loop_argument=self,
subvar_name=subvar_name,
)
self._referenced_subvars[subvar_name] = loop_arg_var
setattr(self, subvar_name, loop_arg_var)
def __getattr__(self, name: str):
# this is being overridden so that we can access subvariables of the
# LoopArgument (i.e.: item.a) without knowing the subvariable names ahead
# of time.
return self._referenced_subvars.setdefault(
name, LoopArgumentVariable(
loop_argument=self,
subvar_name=name,
))
def _make_name(self, code: str):
"""Makes a name for this loop argument from a unique code."""
return '{}-{}'.format(self.LOOP_ITEM_PARAM_NAME_BASE, code)
@classmethod
def from_pipeline_channel(
cls,
channel: pipeline_channel.PipelineChannel,
) -> 'LoopArgument':
"""Creates a LoopArgument object from a PipelineChannel object."""
return LoopArgument(
items=channel,
name_override=channel.name + '-' + cls.LOOP_ITEM_NAME_BASE,
task_name=channel.task_name,
channel_type=_get_loop_item_type(channel.channel_type),
)
@classmethod
def from_raw_items(
cls,
raw_items: ItemList,
name_code: str,
) -> 'LoopArgument':
"""Creates a LoopArgument object from raw item list."""
if len(raw_items) == 0:
raise ValueError('Got an empty item list for loop argument.')
return LoopArgument(
items=raw_items,
name_code=name_code,
channel_type=type(raw_items[0]).__name__,
)
@classmethod
def name_is_loop_argument(cls, name: str) -> bool:
"""Returns True if the given channel name looks like a loop argument.
Either it came from a withItems loop item or withParams loop
item.
"""
return ('-' + cls.LOOP_ITEM_NAME_BASE) in name \
or (cls.LOOP_ITEM_PARAM_NAME_BASE + '-') in name
class LoopArgumentVariable(pipeline_channel.PipelineChannel):
"""Represents a subvariable for a loop argument.
This is used for cases where we're looping over maps, each of which contains
several variables. If the user ran:
with dsl.ParallelFor([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]) as item:
...
Then there's one LoopArgumentVariable for 'a' and another for 'b'.
Attributes:
loop_argument: The original LoopArgument object this subvariable is
attached to.
subvar_name: The subvariable name.
"""
SUBVAR_NAME_DELIMITER = '-subvar-'
LEGAL_SUBVAR_NAME_REGEX = re.compile(r'^[a-zA-Z_][0-9a-zA-Z_]*$')
def __init__(
self,
loop_argument: LoopArgument,
subvar_name: str,
):
"""Initializes a LoopArgumentVariable instance.
Args:
            loop_argument: The LoopArgument object this subvariable is
                attached to.
subvar_name: The name of this subvariable, which is the name of the
dict key that spawned this subvariable.
Raises:
            ValueError: if the subvar name is illegal.
"""
if not self._subvar_name_is_legal(subvar_name):
raise ValueError(
f'Tried to create subvariable named {subvar_name}, but that is '
'not a legal Python variable name.')
self.subvar_name = subvar_name
self.loop_argument = loop_argument
super().__init__(
name=self._get_name_override(
loop_arg_name=loop_argument.name,
subvar_name=subvar_name,
),
task_name=loop_argument.task_name,
channel_type=_get_subvar_type(loop_argument.channel_type),
)
def _subvar_name_is_legal(self, proposed_variable_name: str) -> bool:
"""Returns True if the subvar name is legal."""
return re.match(self.LEGAL_SUBVAR_NAME_REGEX,
proposed_variable_name) is not None
def _get_name_override(self, loop_arg_name: str, subvar_name: str) -> str:
"""Gets the name.
Args:
loop_arg_name: the name of the loop argument parameter that this
LoopArgumentVariable is attached to.
subvar_name: The name of this subvariable.
Returns:
The name of this loop arg variable.
"""
return f'{loop_arg_name}{self.SUBVAR_NAME_DELIMITER}{subvar_name}'
| 0
| 1,202
| 0
| 5,925
| 0
| 0
| 0
| 61
| 69
|
e0cb3175c59da0065800bb2675b16b000572cbc4
| 9,948
|
py
|
Python
|
.github/workflows/templates/generate.py
|
s0undt3ch/salt-bootstrap
|
11e5a237a922425c0e11608eec37bb4fde8d4577
|
[
"Apache-2.0"
] | null | null | null |
.github/workflows/templates/generate.py
|
s0undt3ch/salt-bootstrap
|
11e5a237a922425c0e11608eec37bb4fde8d4577
|
[
"Apache-2.0"
] | null | null | null |
.github/workflows/templates/generate.py
|
s0undt3ch/salt-bootstrap
|
11e5a237a922425c0e11608eec37bb4fde8d4577
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
os.chdir(os.path.abspath(os.path.dirname(__file__)))
LINUX_DISTROS = [
"almalinux-8",
"amazon-2",
"arch",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-7",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
OSX = WINDOWS = []
STABLE_DISTROS = [
"amazon-2",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"oraclelinux-7",
"oraclelinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
PY2_BLACKLIST = [
"almalinux-8",
"centos-8",
"debian-10",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3000 = [
"almalinux-8",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"opensuse-tumbleweed",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3001 = [
"almalinux-8",
"debian-11",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3001_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3002_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
SALT_BRANCHES = [
"3000",
"3001",
"3001-0",
"3002",
"3002-0",
"master",
"latest",
]
BRANCH_DISPLAY_NAMES = {
"3000": "v3000",
"3001": "v3001",
"3001-0": "v3001.0",
"3002": "v3002",
"3002-0": "v3002.0",
"master": "Master",
"latest": "Latest",
}
STABLE_BRANCH_BLACKLIST = []
LATEST_PKG_BLACKLIST = []
DISTRO_DISPLAY_NAMES = {
"almalinux-8": "AlmaLinux 8",
"amazon-2": "Amazon 2",
"arch": "Arch",
"centos-7": "CentOS 7",
"centos-8": "CentOS 8",
"debian-10": "Debian 10",
"debian-11": "Debian 11",
"debian-9": "Debian 9",
"fedora-33": "Fedora 33",
"fedora-34": "Fedora 34",
"fedora-35": "Fedora 35",
"gentoo": "Gentoo",
"gentoo-systemd": "Gentoo (systemd)",
"opensuse-15": "Opensuse 15",
"opensuse-tumbleweed": "Opensuse Tumbleweed",
"oraclelinux-7": "Oracle Linux 7",
"oraclelinux-8": "Oracle Linux 8",
"rockylinux-8": "Rocky Linux 8",
"ubuntu-1804": "Ubuntu 18.04",
"ubuntu-2004": "Ubuntu 20.04",
"ubuntu-2104": "Ubuntu 21.04",
}
TIMEOUT_DEFAULT = 20
TIMEOUT_OVERRIDES = {
"gentoo": 90,
"gentoo-systemd": 90,
}
BRANCH_ONLY_OVERRIDES = [
"gentoo",
"gentoo-systemd",
]
if __name__ == "__main__":
generate_test_jobs()
| 28.918605
| 157
| 0.441998
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import datetime
os.chdir(os.path.abspath(os.path.dirname(__file__)))
LINUX_DISTROS = [
"almalinux-8",
"amazon-2",
"arch",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-7",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
OSX = WINDOWS = []
STABLE_DISTROS = [
"amazon-2",
"centos-7",
"centos-8",
"debian-10",
"debian-11",
"debian-9",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"oraclelinux-7",
"oraclelinux-8",
"ubuntu-1804",
"ubuntu-2004",
"ubuntu-2104",
]
PY2_BLACKLIST = [
"almalinux-8",
"centos-8",
"debian-10",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"gentoo",
"gentoo-systemd",
"opensuse-15",
"opensuse-tumbleweed",
"oraclelinux-8",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3000 = [
"almalinux-8",
"debian-11",
"fedora-33",
"fedora-34",
"fedora-35",
"opensuse-tumbleweed",
"rockylinux-8",
"ubuntu-2004",
"ubuntu-2104",
]
BLACKLIST_3001 = [
"almalinux-8",
"debian-11",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3001_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
BLACKLIST_3002_0 = [
"almalinux-8",
"debian-11",
"gentoo",
"gentoo-systemd",
"rockylinux-8",
"ubuntu-2104",
]
SALT_BRANCHES = [
"3000",
"3001",
"3001-0",
"3002",
"3002-0",
"master",
"latest",
]
BRANCH_DISPLAY_NAMES = {
"3000": "v3000",
"3001": "v3001",
"3001-0": "v3001.0",
"3002": "v3002",
"3002-0": "v3002.0",
"master": "Master",
"latest": "Latest",
}
STABLE_BRANCH_BLACKLIST = []
LATEST_PKG_BLACKLIST = []
DISTRO_DISPLAY_NAMES = {
"almalinux-8": "AlmaLinux 8",
"amazon-2": "Amazon 2",
"arch": "Arch",
"centos-7": "CentOS 7",
"centos-8": "CentOS 8",
"debian-10": "Debian 10",
"debian-11": "Debian 11",
"debian-9": "Debian 9",
"fedora-33": "Fedora 33",
"fedora-34": "Fedora 34",
"fedora-35": "Fedora 35",
"gentoo": "Gentoo",
"gentoo-systemd": "Gentoo (systemd)",
"opensuse-15": "Opensuse 15",
"opensuse-tumbleweed": "Opensuse Tumbleweed",
"oraclelinux-7": "Oracle Linux 7",
"oraclelinux-8": "Oracle Linux 8",
"rockylinux-8": "Rocky Linux 8",
"ubuntu-1804": "Ubuntu 18.04",
"ubuntu-2004": "Ubuntu 20.04",
"ubuntu-2104": "Ubuntu 21.04",
}
TIMEOUT_DEFAULT = 20
TIMEOUT_OVERRIDES = {
"gentoo": 90,
"gentoo-systemd": 90,
}
BRANCH_ONLY_OVERRIDES = [
"gentoo",
"gentoo-systemd",
]
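# Per-distro timeouts fall back to the default when no override exists, e.g.
# TIMEOUT_OVERRIDES.get("gentoo", TIMEOUT_DEFAULT) == 90
# TIMEOUT_OVERRIDES.get("arch", TIMEOUT_DEFAULT) == 20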
def generate_test_jobs():
test_jobs = ""
branch_only_test_jobs = ""
for distro in LINUX_DISTROS + OSX + WINDOWS:
        timeout_minutes = TIMEOUT_OVERRIDES.get(distro, TIMEOUT_DEFAULT)
needs = " needs: lint"
if distro in BRANCH_ONLY_OVERRIDES:
needs = ""
current_test_jobs = ""
for branch in SALT_BRANCHES:
if branch == "latest":
if distro in LATEST_PKG_BLACKLIST:
continue
if distro in LINUX_DISTROS:
template = "linux.yml"
elif distro in OSX:
template = "osx.yml"
elif distro in WINDOWS:
template = "windows.yml"
                else:
                    print("Don't know how to handle {}".format(distro))
                    continue
with open(template) as rfh:
current_test_jobs += "\n{}\n".format(
rfh.read()
.replace(
"{python_version}-{bootstrap_type}-{branch}-{distro}",
"{branch}-{distro}",
)
.format(
distro=distro,
branch=branch,
display_name="{} Latest packaged release".format(
DISTRO_DISPLAY_NAMES[distro],
),
timeout_minutes=timeout_minutes,
needs=needs,
)
)
continue
for python_version in ("py2", "py3"):
if branch == "master" and python_version == "py2":
# Salt's master branch no longer supports Python 2
continue
try:
if int(branch.split("-")[0]) >= 3000 and python_version == "py2":
                        # Salt's 300X versions no longer support Python 2
continue
except ValueError:
pass
for bootstrap_type in ("stable", "git"):
if bootstrap_type == "stable":
if branch == "master":
# For the master branch there's no stable build
continue
if distro not in STABLE_DISTROS:
continue
if branch in STABLE_BRANCH_BLACKLIST:
continue
if distro.startswith("fedora") and branch != "latest":
# Fedora does not keep old builds around
continue
if bootstrap_type == "git":
# .0 versions are a virtual version for pinning to the first point release of a major release, such as 3001, there is no git version.
if branch.endswith("-0"):
continue
if python_version == "py3":
if distro in ("arch"):
allowed_branches = ["master"]
try:
int_branch = int(branch)
if int_branch > 3000:
allowed_branches.append(branch)
except ValueError:
pass
if branch not in allowed_branches:
                                # Arch defaults to py3.8
continue
if branch == "3000" and distro in BLACKLIST_3000:
continue
if branch == "3001" and distro in BLACKLIST_3001:
continue
if branch == "3001-0" and distro in BLACKLIST_3001_0:
continue
if branch == "3002-0" and distro in BLACKLIST_3002_0:
continue
if python_version == "py2" and distro in PY2_BLACKLIST:
continue
if distro in LINUX_DISTROS:
template = "linux.yml"
elif distro in OSX:
template = "osx.yml"
elif distro in WINDOWS:
template = "windows.yml"
                    else:
                        print("Don't know how to handle {}".format(distro))
                        continue
with open(template) as rfh:
current_test_jobs += "\n{}\n".format(
rfh.read().format(
distro=distro,
branch=branch,
python_version=python_version,
bootstrap_type=bootstrap_type,
display_name="{} {} {} {}".format(
DISTRO_DISPLAY_NAMES[distro],
BRANCH_DISPLAY_NAMES[branch],
python_version.capitalize(),
bootstrap_type.capitalize(),
),
timeout_minutes=timeout_minutes,
needs=needs,
)
)
if distro in BRANCH_ONLY_OVERRIDES:
branch_only_test_jobs += current_test_jobs
else:
test_jobs += current_test_jobs
with open("lint.yml") as rfh:
lint_job = "\n{}\n".format(rfh.read())
with open("pre-commit.yml") as rfh:
pre_commit_job = "\n{}\n".format(rfh.read())
with open("../main.yml", "w") as wfh:
with open("main.yml") as rfh:
wfh.write(
"{}\n".format(
rfh.read()
.format(
jobs="{pre_commit}{lint}{test}".format(
lint=lint_job, test=test_jobs, pre_commit=pre_commit_job,
),
on="push, pull_request",
name="Testing",
)
.strip()
)
)
with open("../main-branch-only.yml", "w") as wfh:
with open("main.yml") as rfh:
wfh.write(
"{}\n".format(
rfh.read()
.format(
jobs="{test}".format(test=branch_only_test_jobs,),
on="push",
name="Branch Testing",
)
.strip()
)
)
if __name__ == "__main__":
generate_test_jobs()
| 0
| 0
| 0
| 0
| 0
| 6,927
| 0
| -6
| 45
|
4c2b178af364b6b782db82646942cb0a6c95a702
| 17,831
|
py
|
Python
|
qiskit/aqua/algorithms/single_sample/shor/shor.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | 1
|
2020-11-06T01:09:28.000Z
|
2020-11-06T01:09:28.000Z
|
qiskit/aqua/algorithms/single_sample/shor/shor.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/algorithms/single_sample/shor/shor.py
|
Nick-Singstock/qiskit-aqua
|
8c2bc57b78dec447faec3adbc966471a3206c2ef
|
[
"Apache-2.0"
] | 1
|
2020-11-06T01:09:43.000Z
|
2020-11-06T01:09:43.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Shor's Factoring algorithm.
"""
import logging
logger = logging.getLogger(__name__)
| 36.464213
| 119
| 0.555213
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM Corp. 2017 and later.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Shor's Factoring algorithm.
"""
import math
import array
import fractions
import logging
import numpy as np
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.aqua.utils.arithmetic import is_power
from qiskit.aqua import AquaError, Pluggable
from qiskit.aqua.utils import get_subsystem_density_matrix
from qiskit.aqua.algorithms import QuantumAlgorithm
from qiskit.aqua.circuits import FourierTransformCircuits as ftc
from qiskit.aqua.circuits.gates import mcu1
from qiskit.aqua.utils import summarize_circuits
logger = logging.getLogger(__name__)
class Shor(QuantumAlgorithm):
"""
The Shor's Factoring algorithm.
Adapted from https://github.com/ttlion/ShorAlgQiskit
"""
PROP_N = 'N'
PROP_A = 'a'
CONFIGURATION = {
'name': 'Shor',
'description': "The Shor's Factoring Algorithm",
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'shor_schema',
'type': 'object',
'properties': {
PROP_N: {
'type': 'integer',
'default': 15,
'minimum': 3
},
PROP_A: {
'type': 'integer',
'default': 2,
'minimum': 2
},
},
'additionalProperties': False
},
'problems': ['factoring'],
}
def __init__(self, N=15, a=2):
"""
Constructor.
Args:
N (int): The integer to be factored.
a (int): A random integer a that satisfies a < N and gcd(a, N) = 1
"""
self.validate(locals())
super().__init__()
# check the input integer
if N < 1 or N % 2 == 0:
raise AquaError('The input needs to be an odd integer greater than 1.')
self._N = N
if a >= N or math.gcd(a, self._N) != 1:
raise AquaError('The integer a needs to satisfy a < N and gcd(a, N) = 1.')
self._a = a
self._ret = {'factors': []}
# check if the input integer is a power
tf, b, p = is_power(N, return_decomposition=True)
if tf:
logger.info('The input integer is a power: {}={}^{}.'.format(N, b, p))
self._ret['factors'].append(b)
@classmethod
def init_params(cls, params, algo_input):
"""
Initialize via parameters dictionary and algorithm input instance.
Args:
params: parameters dictionary
algo_input: input instance
"""
if algo_input is not None:
raise AquaError("Input instance not supported.")
shor_params = params.get(Pluggable.SECTION_KEY_ALGORITHM)
N = shor_params.get(Shor.PROP_N)
return cls(N)
def _get_angles(self, a):
"""
Calculate the array of angles to be used in the addition in Fourier Space
"""
s = bin(int(a))[2:].zfill(self._n + 1)
angles = np.zeros([self._n + 1])
for i in range(0, self._n + 1):
for j in range(i, self._n + 1):
if s[j] == '1':
angles[self._n - i] += math.pow(2, -(j - i))
angles[self._n - i] *= np.pi
return angles
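    # Editor's note: these are the phases of a Draper-style adder in Fourier
    # space. For qubit index i, angles[n - i] = pi * sum_{j >= i} s[j] * 2^(i - j),
    # where s is the (n + 1)-bit binary representation of a.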
def _phi_add(self, circuit, q, inverse=False):
"""
Creation of the circuit that performs addition by a in Fourier Space
Can also be used for subtraction by setting the parameter inverse=True
"""
angle = self._get_angles(self._N)
for i in range(0, self._n + 1):
circuit.u1(-angle[i] if inverse else angle[i], q[i])
def _controlled_phi_add(self, circuit, q, ctl, inverse=False):
"""
Single controlled version of the _phi_add circuit
"""
angles = self._get_angles(self._N)
for i in range(0, self._n + 1):
angle = (-angles[i] if inverse else angles[i]) / 2
circuit.u1(angle, ctl)
circuit.cx(ctl, q[i])
circuit.u1(-angle, q[i])
circuit.cx(ctl, q[i])
circuit.u1(angle, q[i])
def _controlled_controlled_phi_add(self, circuit, q, ctl1, ctl2, a, inverse=False):
"""
Doubly controlled version of the _phi_add circuit
"""
angle = self._get_angles(a)
for i in range(self._n + 1):
# ccphase(circuit, -angle[i] if inverse else angle[i], ctl1, ctl2, q[i])
circuit.mcu1(-angle[i] if inverse else angle[i], [ctl1, ctl2], q[i])
def _controlled_controlled_phi_add_mod_N(self, circuit, q, ctl1, ctl2, aux, a):
"""
Circuit that implements doubly controlled modular addition by a
"""
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
self._phi_add(circuit, q, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.cx(q[self._n], aux)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_phi_add(circuit, q, aux)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.u3(np.pi, 0, np.pi, q[self._n])
circuit.cx(q[self._n], aux)
circuit.u3(np.pi, 0, np.pi, q[self._n])
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
def _controlled_controlled_phi_add_mod_N_inv(self, circuit, q, ctl1, ctl2, aux, a):
"""
Circuit that implements the inverse of doubly controlled modular addition by a
"""
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.u3(np.pi, 0, np.pi, q[self._n])
circuit.cx(q[self._n], aux)
circuit.u3(np.pi, 0, np.pi, q[self._n])
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a)
self._controlled_phi_add(circuit, q, aux, inverse=True)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
circuit.cx(q[self._n], aux)
ftc.construct_circuit(
circuit=circuit,
qubits=[q[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
self._phi_add(circuit, q)
self._controlled_controlled_phi_add(circuit, q, ctl1, ctl2, a, inverse=True)
def _controlled_multiple_mod_N(self, circuit, ctl, q, aux, a):
"""
Circuit that implements single controlled modular multiplication by a
"""
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
for i in range(0, self._n):
self._controlled_controlled_phi_add_mod_N(
circuit,
aux,
q[i],
ctl,
aux[self._n + 1],
(2 ** i) * a % self._N
)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
for i in range(0, self._n):
circuit.cswap(ctl, q[i], aux[i])
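        # After the controlled swaps q holds (a*x) mod N and aux holds x; the
        # inverse run below, using a's modular inverse, clears the aux register.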
def modinv(a, m):
def egcd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
a_inv = modinv(a, self._N)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False
)
for i in reversed(range(self._n)):
self._controlled_controlled_phi_add_mod_N_inv(
circuit,
aux,
q[i],
ctl,
aux[self._n + 1],
                (2 ** i) * a_inv % self._N
)
ftc.construct_circuit(
circuit=circuit,
qubits=[aux[i] for i in reversed(range(self._n + 1))],
do_swaps=False,
inverse=True
)
def construct_circuit(self):
"""Construct circuit.
Returns:
QuantumCircuit: quantum circuit.
"""
# Get n value used in Shor's algorithm, to know how many qubits are used
self._n = math.ceil(math.log(self._N, 2))
# quantum register where the sequential QFT is performed
self._up_qreg = QuantumRegister(2 * self._n, name='up')
# quantum register where the multiplications are made
self._down_qreg = QuantumRegister(self._n, name='down')
        # auxiliary quantum register used in addition and multiplication
self._aux_qreg = QuantumRegister(self._n + 2, name='aux')
# Create Quantum Circuit
circuit = QuantumCircuit(self._up_qreg, self._down_qreg, self._aux_qreg)
# Initialize down register to 1 and create maximal superposition in top register
circuit.u2(0, np.pi, self._up_qreg)
circuit.u3(np.pi, 0, np.pi, self._down_qreg[0])
        # Apply the multiplication gates as shown in the report in order to create the exponentiation
for i in range(0, 2 * self._n):
self._controlled_multiple_mod_N(
circuit,
self._up_qreg[i],
self._down_qreg,
self._aux_qreg,
int(pow(self._a, pow(2, i)))
)
# Apply inverse QFT
ftc.construct_circuit(circuit=circuit, qubits=self._up_qreg, do_swaps=True, inverse=True)
logger.info(summarize_circuits(circuit))
return circuit
def _get_factors(self, output_desired, t_upper):
"""
Apply the continued fractions to find r and the gcd to find the desired factors.
"""
x_value = int(output_desired, 2)
logger.info('In decimal, x_final value for this result is: {0}.'.format(x_value))
if x_value <= 0:
self._ret['results'][output_desired] = 'x_value is <= 0, there are no continued fractions.'
return False
logger.debug('Running continued fractions for this case.')
# Calculate T and x/T
T = pow(2, t_upper)
x_over_T = x_value / T
# Cycle in which each iteration corresponds to putting one more term in the
# calculation of the Continued Fraction (CF) of x/T
# Initialize the first values according to CF rule
i = 0
b = array.array('i')
t = array.array('f')
b.append(math.floor(x_over_T))
t.append(x_over_T - b[i])
while i >= 0:
# From the 2nd iteration onwards, calculate the new terms of the CF based
# on the previous terms as the rule suggests
if i > 0:
b.append(math.floor(1 / t[i - 1]))
t.append((1 / t[i - 1]) - b[i])
# Calculate the CF using the known terms
aux = 0
j = i
while j > 0:
aux = 1 / (b[j] + aux)
j = j - 1
aux = aux + b[0]
# Get the denominator from the value obtained
frac = fractions.Fraction(aux).limit_denominator()
denominator = frac.denominator
logger.debug('Approximation number {0} of continued fractions:'.format(i + 1))
logger.debug("Numerator:{0} \t\t Denominator: {1}.".format(frac.numerator, frac.denominator))
# Increment i for next iteration
i = i + 1
if denominator % 2 == 1:
if i >= self._N:
self._ret['results'][output_desired] = 'unable to find factors after too many attempts.'
return False
logger.debug('Odd denominator, will try next iteration of continued fractions.')
continue
# If denominator even, try to get factors of N
# Get the exponential a^(r/2)
exponential = 0
if denominator < 1000:
                exponential = pow(self._a, denominator // 2)
# Check if the value is too big or not
if math.isinf(exponential) or exponential > 1000000000:
self._ret['results'][output_desired] = 'denominator of continued fraction is too big.'
return False
            # If the value is not too big (infinite), then get the right values and do the proper gcd()
putting_plus = int(exponential + 1)
putting_minus = int(exponential - 1)
one_factor = math.gcd(putting_plus, self._N)
other_factor = math.gcd(putting_minus, self._N)
# Check if the factors found are trivial factors or are the desired factors
if one_factor == 1 or one_factor == self._N or other_factor == 1 or other_factor == self._N:
logger.debug('Found just trivial factors, not good enough.')
# Check if the number has already been found, use i-1 because i was already incremented
if t[i - 1] == 0:
self._ret['results'][output_desired] = 'the continued fractions found exactly x_final/(2^(2n)).'
return False
if i >= self._N:
self._ret['results'][output_desired] = 'unable to find factors after too many attempts.'
return False
else:
logger.debug('The factors of {0} are {1} and {2}.'.format(self._N, one_factor, other_factor))
logger.debug('Found the desired factors.')
self._ret['results'][output_desired] = (one_factor, other_factor)
factors = sorted((one_factor, other_factor))
if factors not in self._ret['factors']:
self._ret['factors'].append(factors)
return True
def _run(self):
if not self._ret['factors']:
logger.debug('Running with N={} and a={}.'.format(self._N, self._a))
circuit = self.construct_circuit()
if self._quantum_instance.is_statevector:
                logger.warning('The statevector_simulator might cause subsequent computations to use too much memory.')
result = self._quantum_instance.execute(circuit)
complete_state_vec = result.get_statevector(circuit)
# TODO: this uses too much memory
up_qreg_density_mat = get_subsystem_density_matrix(
complete_state_vec,
range(2 * self._n, 4 * self._n + 2)
)
up_qreg_density_mat_diag = np.diag(up_qreg_density_mat)
counts = dict()
for i, v in enumerate(up_qreg_density_mat_diag):
if not v == 0:
counts[bin(int(i))[2:].zfill(2 * self._n)] = v ** 2
else:
up_cqreg = ClassicalRegister(2 * self._n, name='m')
circuit.add_register(up_cqreg)
circuit.measure(self._up_qreg, up_cqreg)
counts = self._quantum_instance.execute(circuit).get_counts(circuit)
self._ret['results'] = dict()
# For each simulation result, print proper info to user and try to calculate the factors of N
for output_desired in list(counts.keys()):
# Get the x_value from the final state qubits
logger.info("------> Analyzing result {0}.".format(output_desired))
self._ret['results'][output_desired] = None
success = self._get_factors(output_desired, int(2 * self._n))
if success:
logger.info('Found factors {} from measurement {}.'.format(
self._ret['results'][output_desired], output_desired
))
else:
logger.info('Cannot find factors from measurement {} because {}'.format(
output_desired, self._ret['results'][output_desired]
))
return self._ret
| 0
| 460
| 0
| 16,237
| 0
| 0
| 0
| 231
| 290
|
14d1776a23dbeff91b7b113a7ec6193886a74ae5
| 3,802
|
py
|
Python
|
src/game.py
|
Ale-XYX/Contrast
|
6daf08e14826fbe382a6a8bbaa53f6c5a0494383
|
[
"Apache-2.0"
] | null | null | null |
src/game.py
|
Ale-XYX/Contrast
|
6daf08e14826fbe382a6a8bbaa53f6c5a0494383
|
[
"Apache-2.0"
] | null | null | null |
src/game.py
|
Ale-XYX/Contrast
|
6daf08e14826fbe382a6a8bbaa53f6c5a0494383
|
[
"Apache-2.0"
] | 2
|
2020-02-03T14:04:11.000Z
|
2020-05-15T16:44:33.000Z
|
# :^)
| 25.689189
| 80
| 0.583377
|
import re
import bz2
import pygame
import public
import sprites
import functions
import dictionaries
import random
def title(debug):
pygame.display.set_caption('Contrast')
pygame.display.set_icon(pygame.image.fromstring(bz2.decompress(
dictionaries.MEDIA['icon']), (32, 32), 'RGBA'))
info_text = public.FONT_LG.render(
'ENTER TO BEGIN', False, [public.WHITE] * 3)
play_button = sprites.Button((343, 290), 'Play')
music_button = sprites.Button((407, 290), 'Music')
button_cover = pygame.Surface((public.SWIDTH, 10))
button_cover.fill([public.BLACK] * 3)
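    # Optional CLI arg: a map filename such as 'map_3.tmx' jumps straight to
    # that level and mutes the music (handy for debugging).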
if len(debug) != 1:
public.music = False
m = re.search('map_(.+?).tmx', debug[1])
if m:
public.level = int(m.group(1))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return 0
public.all_sprites.update()
if public.end_title:
game()
return 0
public.screen.fill([public.BLACK] * 3)
public.screen.blit(dictionaries.IMAGES['Logo'], functions.center(
dictionaries.IMAGES['Logo']))
for sprite in public.all_sprites:
sprite.draw()
public.screen.blit(button_cover, (0, 345))
pygame.display.flip()
public.clock.tick(public.FPS)
def game():
if public.music:
dictionaries.MEDIA['greetings'].play(-1)
functions.generate_clouds()
functions.generate_level(True)
dt = public.clock.tick(public.FPS) / 1000
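    # Full-screen black surface whose alpha is ramped up to fade the scene
    # out once the player wins.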
cover_alpha = 0
cover_surf = pygame.Surface((public.SWIDTH, public.SHEIGHT))
cover_surf.set_alpha(cover_alpha)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return 0
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_w:
public.player.jump()
elif event.key == pygame.K_SPACE:
public.player.flip()
keys = pygame.key.get_pressed()
if keys[pygame.K_d] and not (public.player.died or public.player.won):
public.player.move('right')
elif keys[pygame.K_a] and not (public.player.died or public.player.won):
public.player.move('left')
else:
public.player.accelerating = False
if public.player.won and cover_alpha != 255:
cover_alpha += 1
cover_surf.set_alpha(cover_alpha)
if cover_alpha == 255:
end('A GAME BY TEAM-ABSTRACTANDROID')
return 0
if public.level == public.level_max:
end('More levels to come soon!')
return 0
public.all_sprites.update()
sorted_sprites = sorted(
public.all_sprites.sprites(), key=lambda x: x.layer)
public.screen.fill([public.bg_type] * 3)
for sprite in sorted_sprites:
sprite.draw()
public.screen.blit(cover_surf, (0, 0))
pygame.display.flip()
public.clock.tick(public.FPS)
def end(msg):
text_alpha = 0
credits_text = public.FONT_LG.render(
msg, False, [public.WHITE] * 3)
credits_text.set_alpha(text_alpha)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return 0
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
return 0
if text_alpha != 255:
text_alpha += 5
credits_text.set_alpha(text_alpha)
public.screen.fill([public.BLACK] * 3)
public.screen.blit(credits_text, functions.center(credits_text))
pygame.display.flip()
public.clock.tick(public.FPS)
# :^)
| 0
| 0
| 0
| 0
| 0
| 3,608
| 0
| -61
| 245
|
2e0aa5f7b3230ca90001a4c7c190460a296a87de
| 6,243
|
py
|
Python
|
tabledataextractor/input/from_html.py
|
ELchem/tabledataextractor
|
9eb38faf57611c26cdcaa8df13fd4e1cf36a4c21
|
[
"MIT"
] | 4
|
2021-09-01T18:28:10.000Z
|
2022-03-29T09:43:34.000Z
|
tabledataextractor/input/from_html.py
|
ELchem/tabledataextractor
|
9eb38faf57611c26cdcaa8df13fd4e1cf36a4c21
|
[
"MIT"
] | 3
|
2021-11-13T21:17:27.000Z
|
2021-11-15T18:29:14.000Z
|
tabledataextractor/input/from_html.py
|
ELchem/tabledataextractor
|
9eb38faf57611c26cdcaa8df13fd4e1cf36a4c21
|
[
"MIT"
] | 2
|
2021-10-07T01:20:39.000Z
|
2021-11-02T17:56:06.000Z
|
# -*- coding: utf-8 -*-
"""
Reads an `html` formatted table.
"""
import numpy as np
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
import copy
import logging
from tabledataextractor.exceptions import InputError
log = logging.getLogger(__name__)
def makearray(html_table):
"""
Creates a numpy array from an `.html` file, taking `rowspan` and `colspan` into account.
Modified from:
John Ricco, https://johnricco.github.io/2017/04/04/python-html/, *Using Python to scrape HTML tables with merged cells*
Added functionality for duplicating cell content for cells with `rowspan`/`colspan`.
The table has to be :math:`n*m`, rectangular, with the same number of columns in every row.
"""
n_cols = 0
n_rows = 0
for row in html_table.findAll("tr"):
col_tags = row.find_all(["td", "th"])
if len(col_tags) > 0:
n_rows += 1
if len(col_tags) > n_cols:
n_cols = len(col_tags)
# according to numpy documentation fill_value should be of type Union[int, float, complex]
# however, 'str' works just fine
array = np.full((n_rows, n_cols), fill_value="", dtype='<U60')
# list to store rowspan values
skip_index = [0 for i in range(0, n_cols)]
# iterating over each row in the table
row_counter = 0
for row in html_table.findAll("tr"):
# skip row if it's empty
if len(row.find_all(["td", "th"])) == 0:
continue
else:
# get all the cells containing data in this row
columns = row.find_all(["td", "th"])
col_dim = []
row_dim = []
col_dim_counter = -1
row_dim_counter = -1
col_counter = -1
this_skip_index = copy.deepcopy(skip_index)
for col in columns:
# determine all cell dimensions
colspan = col.get("colspan")
if not colspan:
col_dim.append(1)
else:
col_dim.append(int(colspan))
col_dim_counter += 1
rowspan = col.get("rowspan")
if not rowspan:
row_dim.append(1)
else:
row_dim.append(int(rowspan))
row_dim_counter += 1
# adjust column counter
if col_counter == -1:
col_counter = 0
else:
col_counter = col_counter + col_dim[col_dim_counter - 1]
while skip_index[col_counter] > 0:
col_counter += 1
# get cell contents
cell_data = col.get_text()
# insert data into cell
array[row_counter, col_counter] = cell_data
# Insert data into neighbouring rowspan/colspan cells
if colspan:
for spanned_col in range(col_counter+1, col_counter + int(colspan)):
array[row_counter, spanned_col] = cell_data
if rowspan:
for spanned_row in range(row_counter+1, row_counter + int(rowspan)):
array[spanned_row, col_counter] = cell_data
                # record column skipping index
if row_dim[row_dim_counter] > 1:
this_skip_index[col_counter] = row_dim[row_dim_counter]
# adjust row counter
row_counter += 1
# adjust column skipping index
skip_index = [i - 1 if i > 0 else i for i in this_skip_index]
return array
def read_file(file_path, table_number=1):
"""Reads an .html file and returns a numpy array."""
file = open(file_path, encoding='UTF-8')
html_soup = BeautifulSoup(file, features='lxml')
file.close()
html_table = html_soup.find_all("table")[table_number-1]
array = makearray(html_table)
return array
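# A minimal usage sketch (the file name here is hypothetical):
#   table = read_file('tables.html', table_number=1)
#   print(table.shape, table[0])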
def configure_selenium(browser='Firefox'):
"""
Configuration for `Selenium <https://selenium-python.readthedocs.io/>`_. Sets the path to ``geckodriver.exe``
:param browser: Which browser to use
:type browser: str
:return: Selenium driver
"""
if browser == 'Firefox':
options = FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'C:\Users\juras\System\geckodriver\geckodriver.exe')
return driver
else:
return None
def read_url(url, table_number=1):
"""
    Reads in a table from a URL and returns a numpy array. Will try `Requests <http://docs.python-requests.org/en/master/>`_ first. If it doesn't succeed, `Selenium <https://selenium-python.readthedocs.io/>`_ will be used.
:param url: Url of the page where the table is located
:type url: str
:param table_number: Number of Table on the web page.
:type table_number: int
"""
if not isinstance(table_number, int):
msg = 'Table number is not valid.'
log.critical(msg)
raise TypeError(msg)
# first try the requests package, if it fails do the selenium, which is much slower
try:
html_file = requests.get(url)
html_soup = BeautifulSoup(html_file.text, features='lxml')
html_table = html_soup.find_all("table")[table_number - 1]
array = makearray(html_table)
log.info("Package 'requests' was used.")
return array
except Exception:
driver = configure_selenium()
driver.get(url)
html_file = driver.page_source
html_soup = BeautifulSoup(html_file, features='lxml')
try:
html_table = html_soup.find_all("table")[table_number-1]
except IndexError:
raise InputError("table_number={} is out of range".format(table_number))
else:
array = makearray(html_table)
log.info("Package 'selenium' was used.")
return array
| 33.745946
| 223
| 0.606279
|
# -*- coding: utf-8 -*-
"""
Reads an `html` formatted table.
"""
import numpy as np
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.ie.options import Options as IeOptions
import copy
import logging
from tabledataextractor.exceptions import InputError
log = logging.getLogger(__name__)
def makearray(html_table):
"""
Creates a numpy array from an `.html` file, taking `rowspan` and `colspan` into account.
Modified from:
John Ricco, https://johnricco.github.io/2017/04/04/python-html/, *Using Python to scrape HTML tables with merged cells*
Added functionality for duplicating cell content for cells with `rowspan`/`colspan`.
The table has to be :math:`n*m`, rectangular, with the same number of columns in every row.
"""
n_cols = 0
n_rows = 0
for row in html_table.findAll("tr"):
col_tags = row.find_all(["td", "th"])
if len(col_tags) > 0:
n_rows += 1
if len(col_tags) > n_cols:
n_cols = len(col_tags)
# according to numpy documentation fill_value should be of type Union[int, float, complex]
# however, 'str' works just fine
array = np.full((n_rows, n_cols), fill_value="", dtype='<U60')
# list to store rowspan values
skip_index = [0 for i in range(0, n_cols)]
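    # skip_index[c] counts how many more rows column c stays occupied by a
    # cell that spans down from an earlier row.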
# iterating over each row in the table
row_counter = 0
for row in html_table.findAll("tr"):
# skip row if it's empty
if len(row.find_all(["td", "th"])) == 0:
continue
else:
# get all the cells containing data in this row
columns = row.find_all(["td", "th"])
col_dim = []
row_dim = []
col_dim_counter = -1
row_dim_counter = -1
col_counter = -1
this_skip_index = copy.deepcopy(skip_index)
for col in columns:
# determine all cell dimensions
colspan = col.get("colspan")
if not colspan:
col_dim.append(1)
else:
col_dim.append(int(colspan))
col_dim_counter += 1
rowspan = col.get("rowspan")
if not rowspan:
row_dim.append(1)
else:
row_dim.append(int(rowspan))
row_dim_counter += 1
# adjust column counter
if col_counter == -1:
col_counter = 0
else:
col_counter = col_counter + col_dim[col_dim_counter - 1]
while skip_index[col_counter] > 0:
col_counter += 1
# get cell contents
cell_data = col.get_text()
# insert data into cell
array[row_counter, col_counter] = cell_data
# Insert data into neighbouring rowspan/colspan cells
if colspan:
for spanned_col in range(col_counter+1, col_counter + int(colspan)):
array[row_counter, spanned_col] = cell_data
if rowspan:
for spanned_row in range(row_counter+1, row_counter + int(rowspan)):
array[spanned_row, col_counter] = cell_data
                # record column skipping index
if row_dim[row_dim_counter] > 1:
this_skip_index[col_counter] = row_dim[row_dim_counter]
# adjust row counter
row_counter += 1
# adjust column skipping index
skip_index = [i - 1 if i > 0 else i for i in this_skip_index]
return array
def read_file(file_path, table_number=1):
"""Reads an .html file and returns a numpy array."""
file = open(file_path, encoding='UTF-8')
html_soup = BeautifulSoup(file, features='lxml')
file.close()
html_table = html_soup.find_all("table")[table_number-1]
array = makearray(html_table)
return array
def configure_selenium(browser='Firefox'):
"""
Configuration for `Selenium <https://selenium-python.readthedocs.io/>`_. Sets the path to ``geckodriver.exe``
:param browser: Which browser to use
:type browser: str
:return: Selenium driver
"""
if browser == 'Firefox':
options = FirefoxOptions()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'C:\Users\juras\System\geckodriver\geckodriver.exe')
return driver
else:
return None
def read_url(url, table_number=1):
"""
    Reads in a table from a URL and returns a numpy array. Will try `Requests <http://docs.python-requests.org/en/master/>`_ first. If it doesn't succeed, `Selenium <https://selenium-python.readthedocs.io/>`_ will be used.
:param url: Url of the page where the table is located
:type url: str
:param table_number: Number of Table on the web page.
:type table_number: int
"""
if not isinstance(table_number, int):
msg = 'Table number is not valid.'
log.critical(msg)
raise TypeError(msg)
# first try the requests package, if it fails do the selenium, which is much slower
try:
html_file = requests.get(url)
html_soup = BeautifulSoup(html_file.text, features='lxml')
html_table = html_soup.find_all("table")[table_number - 1]
array = makearray(html_table)
log.info("Package 'requests' was used.")
return array
except Exception:
driver = configure_selenium()
driver.get(url)
html_file = driver.page_source
html_soup = BeautifulSoup(html_file, features='lxml')
try:
html_table = html_soup.find_all("table")[table_number-1]
except IndexError:
raise InputError("table_number={} is out of range".format(table_number))
else:
array = makearray(html_table)
log.info("Package 'selenium' was used.")
return array
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 135
| 66
|
8a2527c8ebf711cd89d50a2c1b007f80d07a457b
| 924
|
py
|
Python
|
07/script.py
|
has-ctrl/advent-of-code-2021
|
09d309feb5082f108ab690f9e37abf6150b7283d
|
[
"MIT"
] | null | null | null |
07/script.py
|
has-ctrl/advent-of-code-2021
|
09d309feb5082f108ab690f9e37abf6150b7283d
|
[
"MIT"
] | null | null | null |
07/script.py
|
has-ctrl/advent-of-code-2021
|
09d309feb5082f108ab690f9e37abf6150b7283d
|
[
"MIT"
] | null | null | null |
import numpy as np
test_data = np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])
np_data = np.loadtxt("data.txt", delimiter=",", dtype=int)
def one(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they
spend to align to that position?
"""
median = np.median(data).astype(int)
return np.absolute(data - median).sum()
def two(data: np.ndarray) -> int:
    """
    Determine the horizontal position that the crabs can align to using the least fuel possible so they can make you an
    escape route! How much fuel must they spend to align to that position?
    """
    # The optimum sits at the mean, which is generally fractional, so check
    # both neighbouring integers; truncating the mean alone can miss the
    # minimum (it does on the sample input).
    mean = np.mean(data)
    costs = []
    for pos in (int(np.floor(mean)), int(np.ceil(mean))):
        diff = np.absolute(data - pos)
        # 'Factorial for addition' is the same as (X^2 + X) / 2
        costs.append(((diff * diff + diff) / 2).astype(int).sum())
    return min(costs)
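# Sanity checks against the worked sample from the puzzle statement
# (37 and 168 are the published answers for test_data).
assert one(test_data) == 37
assert two(test_data) == 168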
print(f"1. {one(np_data)}")
print(f"2. {two(np_data)}")
| 30.8
| 120
| 0.650433
|
import numpy as np
test_data = np.array([16, 1, 2, 0, 4, 2, 7, 1, 2, 14])
np_data = np.loadtxt("data.txt", delimiter=",", dtype=int)
def one(data: np.ndarray) -> int:
"""
Determine the horizontal position that the crabs can align to using the least fuel possible. How much fuel must they
spend to align to that position?
"""
median = np.median(data).astype(int)
return np.absolute(data - median).sum()
def two(data: np.ndarray) -> int:
    """
    Determine the horizontal position that the crabs can align to using the least fuel possible so they can make you an
    escape route! How much fuel must they spend to align to that position?
    """
    # The optimum sits at the mean, which is generally fractional, so check
    # both neighbouring integers; truncating the mean alone can miss the
    # minimum (it does on the sample input).
    mean = np.mean(data)
    costs = []
    for pos in (int(np.floor(mean)), int(np.ceil(mean))):
        diff = np.absolute(data - pos)
        # 'Factorial for addition' is the same as (X^2 + X) / 2
        costs.append(((diff * diff + diff) / 2).astype(int).sum())
    return min(costs)
print(f"1. {one(np_data)}")
print(f"2. {two(np_data)}")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
46445e6276cdd339ed1cb28a14605af7c00ee8a9
| 787
|
py
|
Python
|
docs/src/callbackgen.py
|
aristanetworks/ctypegen
|
379f8e5c712c8deb0ed27cbf005d7706fa11e6e8
|
[
"Apache-2.0"
] | 17
|
2018-06-12T10:07:42.000Z
|
2022-03-23T14:03:33.000Z
|
docs/src/callbackgen.py
|
aristanetworks/ctypegen
|
379f8e5c712c8deb0ed27cbf005d7706fa11e6e8
|
[
"Apache-2.0"
] | 4
|
2018-10-29T17:55:34.000Z
|
2021-10-08T07:19:12.000Z
|
docs/src/callbackgen.py
|
aristanetworks/ctypegen
|
379f8e5c712c8deb0ed27cbf005d7706fa11e6e8
|
[
"Apache-2.0"
] | 7
|
2018-12-20T19:35:45.000Z
|
2021-05-18T03:42:17.000Z
|
# Copyright (c) 2018 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
#
# DON'T EDIT THIS FILE. It was generated by
# /usr/local/lib/python2.7/dist-packages/CTypeGen.py
# Please see AID/3558 for details on the contents of this file
#
from ctypes import * # pylint: disable=wildcard-import
from CTypeGenRun import * # pylint: disable=wildcard-import
# pylint: disable=unnecessary-pass,protected-access
Callback = CFUNCTYPE( c_int, c_int
, c_int
)
functionTypes = {
'callme': CFUNCTYPE( c_int, c_int
, c_int
, Callback
),
}
if __name__ == "__main__":
test_classes()
| 21.861111
| 64
| 0.684879
|
# Copyright (c) 2018 Arista Networks, Inc. All rights reserved.
# Arista Networks, Inc. Confidential and Proprietary.
#
# DON'T EDIT THIS FILE. It was generated by
# /usr/local/lib/python2.7/dist-packages/CTypeGen.py
# Please see AID/3558 for details on the contents of this file
#
from ctypes import * # pylint: disable=wildcard-import
from CTypeGenRun import * # pylint: disable=wildcard-import
# pylint: disable=unnecessary-pass,protected-access
Callback = CFUNCTYPE( c_int, c_int
, c_int
)
def decorateFunctions( lib ):
lib.callme.restype = c_int
lib.callme.argtypes = [
c_int,
c_int,
Callback ]
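# A minimal usage sketch (the library path is hypothetical): load the shared
# object, attach the prototypes, then call through with a Python callback.
#   lib = CDLL("./libcallme.so")
#   decorateFunctions(lib)
#   result = lib.callme(1, 2, Callback(lambda a, b: a + b))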
functionTypes = {
'callme': CFUNCTYPE( c_int, c_int
, c_int
, Callback
),
}
if __name__ == "__main__":
test_classes()
| 0
| 0
| 0
| 0
| 0
| 108
| 0
| 3
| 135
|
f5ccf91f07f564599f0a2cf7b1cc3268aa005d97
| 1,296
|
py
|
Python
|
generated-libraries/python/ports.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/ports.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/ports.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.connection import NaErrorResponse, NaPagedResponse
from netapp.net import NetConnection
from netapp.net.net_port_info import NetPortInfo
conn = NetConnection("192.168.135.100", "admin", "mehmeh123")
print "LISTING ALL PORTS:"
print "-----------------------------------------------"
query = NetPortInfo(node="radontap-02")
response = conn.net_port_get_iter( desired_attributes="node,port".split(","), query=query )
if isinstance(response, NaPagedResponse):
for npi in response.output:
print "{}: {}".format( npi.port, npi )
while response.has_more():
next = response.next()
if isinstance(next.result, NaErrorResponse):
print "There was an error: {} : {}".format( next.result.error_code, next.result.reason )
else:
for npi in next.output:
print "{}: {}".format( npi.port, npi )
elif isinstance(response, NaErrorResponse):
print "There was an error: {} : {}".format( response.error_code, response.reason )
else:
for npi in response:
print "{}: {}".format( npi.port, npi )
print "GET A SINGLE PORT:"
print "-----------------------------------------------"
port_info = conn.net_port_get( node="radontap-02", port="e0c", desired_attributes="node,port".split(",") )
print port_info
| 35.027027
| 106
| 0.622685
|
from netapp.connection import NaErrorResponse, NaPagedResponse
from netapp.net import NetConnection
from netapp.net.net_port_info import NetPortInfo
conn = NetConnection("192.168.135.100", "admin", "mehmeh123")
print "LISTING ALL PORTS:"
print "-----------------------------------------------"
query = NetPortInfo(node="radontap-02")
response = conn.net_port_get_iter( desired_attributes="node,port".split(","), query=query )
if isinstance(response, NaPagedResponse):
for npi in response.output:
print "{}: {}".format( npi.port, npi )
while response.has_more():
next = response.next()
if isinstance(next.result, NaErrorResponse):
print "There was an error: {} : {}".format( next.result.error_code, next.result.reason )
else:
for npi in next.output:
print "{}: {}".format( npi.port, npi )
elif isinstance(response, NaErrorResponse):
print "There was an error: {} : {}".format( response.error_code, response.reason )
else:
for npi in response:
print "{}: {}".format( npi.port, npi )
print "GET A SINGLE PORT:"
print "-----------------------------------------------"
port_info = conn.net_port_get( node="radontap-02", port="e0c", desired_attributes="node,port".split(",") )
print port_info
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4041a20fc51def3b3801556656d9b21062ae0f2d
| 185
|
py
|
Python
|
torch/fx/experimental/unification/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 60,067
|
2017-01-18T17:21:31.000Z
|
2022-03-31T21:37:45.000Z
|
torch/fx/experimental/unification/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 66,955
|
2017-01-18T17:21:38.000Z
|
2022-03-31T23:56:11.000Z
|
torch/fx/experimental/unification/__init__.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 19,210
|
2017-01-18T17:45:04.000Z
|
2022-03-31T23:51:56.000Z
|
# type: ignore[attr-defined]
| 37
| 68
| 0.724324
|
# type: ignore[attr-defined]
from .core import unify, reify # noqa: F403
from .more import unifiable # noqa: F403
from .variable import var, isvar, vars, variables, Var # noqa: F403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 108
|
3be09ddb058024d53f0d37a425c547e2ad46cc57
| 2,147
|
py
|
Python
|
psinsights/rules.py
|
paulcronk/psinsights
|
cd465f20254fbdb30032ce40b6fe30d32de0d524
|
[
"Apache-2.0"
] | null | null | null |
psinsights/rules.py
|
paulcronk/psinsights
|
cd465f20254fbdb30032ce40b6fe30d32de0d524
|
[
"Apache-2.0"
] | null | null | null |
psinsights/rules.py
|
paulcronk/psinsights
|
cd465f20254fbdb30032ce40b6fe30d32de0d524
|
[
"Apache-2.0"
] | null | null | null |
###############################################################################
# Copyright 2012 FastSoft Inc.
# Copyright 2012 Devin Anderson <danderson (at) fastsoft (dot) com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
###############################################################################
| 28.626667
| 79
| 0.583605
|
###############################################################################
# Copyright 2012 FastSoft Inc.
# Copyright 2012 Devin Anderson <danderson (at) fastsoft (dot) com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
###############################################################################
from psinsights.rule import Rule as _Rule
class Rules(object):
def __contains__(self, name):
return name in self.__data
def __del__(self):
self.__data = None
self.__rule_map = None
def __getitem__(self, name):
rule = self.get(name)
if rule is None:
raise KeyError(name)
return rule
def __init__(self, data):
self.__data = data
self.__rule_map = {}
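        # Rule objects are built lazily on first access and cached here by name.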
def __iter__(self):
return self.__data.iterkeys()
def __len__(self):
return len(self.__data)
def get(self, name, default=None):
rule_map = self.__rule_map
rule = rule_map.get(name)
if rule is None:
data = self.__data
rule_data = data.get(name)
if rule_data is None:
return default
rule = _Rule(rule_data)
rule_map[name] = rule
return rule
def items(self):
return list(self.iteritems())
def iteritems(self):
get = self.get
return ((k, get(k)) for k in self.__data.iterkeys())
iterkeys = __iter__
def itervalues(self):
get = self.get
return (get(k) for k in self.__data.iterkeys())
def keys(self):
return list(iter(self))
def values(self):
return list(self.itervalues())
| 0
| 0
| 0
| 1,274
| 0
| 0
| 0
| 20
| 46
|
662efd1261fb763f2ca5bdab861633e763419ddb
| 552
|
py
|
Python
|
exercicio-condicional/questao-1.py
|
maumneto/exercicio-python
|
bd57cd9f3b48c76ea3f8195544d347bc1b0c943e
|
[
"MIT"
] | null | null | null |
exercicio-condicional/questao-1.py
|
maumneto/exercicio-python
|
bd57cd9f3b48c76ea3f8195544d347bc1b0c943e
|
[
"MIT"
] | null | null | null |
exercicio-condicional/questao-1.py
|
maumneto/exercicio-python
|
bd57cd9f3b48c76ea3f8195544d347bc1b0c943e
|
[
"MIT"
] | 1
|
2020-04-27T15:01:10.000Z
|
2020-04-27T15:01:10.000Z
|
'''
Write a program that reads a worker's salary and the value of a loan
installment. If the installment is greater than 20% of the salary, print
"Empréstimo não concedido" (loan not granted); otherwise print
"Empréstimo concedido" (loan granted).
'''
# read the input values
salarao = float(input('Digite o valor do salario: '))
prestacao = float(input('Digite o valor da prestacao: '))
# conditional check
if (prestacao > 0.2*salarao):
print('Emprestimo nao concedido!')
else:
print('Emprestimo concedido!')
# end-of-algorithm message
print('Fim do algoritmo!')
| 30.666667
| 95
| 0.733696
|
'''
Write a program that reads a worker's salary and the value of a loan
installment. If the installment is greater than 20% of the salary, print
"Empréstimo não concedido" (loan not granted); otherwise print
"Empréstimo concedido" (loan granted).
'''
# read the input values
salarao = float(input('Digite o valor do salario: '))
prestacao = float(input('Digite o valor da prestacao: '))
# conditional check
if (prestacao > 0.2*salarao):
print('Emprestimo nao concedido!')
else:
print('Emprestimo concedido!')
# end-of-algorithm message
print('Fim do algoritmo!')
| 38
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4b998a8b759bc5a4cf2d3b91ee6979cd04cfc889
| 12,997
|
py
|
Python
|
Firefly/services/firefly_security_and_monitoring/firefly_monitoring.py
|
Firefly-Automation/Firefly
|
fccf40b8f6e015ef34c292264184090eb8d860b7
|
[
"Apache-2.0"
] | 20
|
2017-03-24T08:25:50.000Z
|
2020-07-07T16:09:34.000Z
|
Firefly/services/firefly_security_and_monitoring/firefly_monitoring.py
|
Firefly-Automation/Firefly
|
fccf40b8f6e015ef34c292264184090eb8d860b7
|
[
"Apache-2.0"
] | 1
|
2017-11-02T17:46:48.000Z
|
2017-11-02T17:46:48.000Z
|
Firefly/services/firefly_security_and_monitoring/firefly_monitoring.py
|
Firefly-Automation/Firefly
|
fccf40b8f6e015ef34c292264184090eb8d860b7
|
[
"Apache-2.0"
] | 5
|
2017-04-11T02:27:38.000Z
|
2020-12-11T07:44:00.000Z
|
"""
Firefly Security and Monitoring
This is the core Firefly Security and Monitoring Service. It should require almost zero configuration from the user, and Firefly will monitor the entire house.
- Alarm System (Away)
- Alarm System (Night)
- Vacation Lighting
- Battery Monitor
- Smoke Alerts
- Flooding Alerts
"""
ALARM_DISARMED = 'disarmed'
ALARM_ARMED = 'armed'
ALARM_ARMED_MOTION = 'armed_motion'
ALARM_ARMED_NO_MOTION = 'armed_no_motion'
ALARM_TRIGGERED = 'triggered'
SYSTEM_DISABLED = 'system_disabled'
| 38.11437
| 183
| 0.701239
|
"""
Firefly Security and Monitoring
This is the core Firefly Security and Monitoring Service. It should require almost zero configuration from the user, and Firefly will monitor the entire house.
- Alarm System (Away)
- Alarm System (Night)
- Vacation Lighting
- Battery Monitor
- Smoke Alerts
- Flooding Alerts
"""
from Firefly import logging, scheduler, aliases
from Firefly.const import COMMAND_NOTIFY, EVENT_TYPE_BROADCAST, FIREFLY_SECURITY_MONITORING, SERVICE_NOTIFICATION, SOURCE_LOCATION, TYPE_DEVICE, WATER, SENSOR_DRY, SENSOR_WET
from Firefly.helpers.device import BATTERY, CONTACT, CONTACT_CLOSE, CONTACT_OPEN, MOTION, MOTION_ACTIVE, MOTION_INACTIVE
from Firefly.helpers.events import Command, Event
from Firefly.services.firefly_security_and_monitoring.battery_monitor import check_battery_from_event, generate_battery_notification_message
from Firefly.services.firefly_security_and_monitoring.secueity_settings import FireflySecuritySettings
from Firefly.services.firefly_security_and_monitoring.security_monitor import (check_all_security_contact_sensors, check_all_security_motion_sensors, generate_contact_warning_message,
process_contact_change, process_motion_change)
from Firefly.util.firefly_util import command_from_dict
from .const import ALARM_ARMED_MESSAGE_MOTION, ALARM_ARMED_MESSAGE_NO_MOTION, BATTERY_LOW, BATTERY_NO_NOTIFY_STATES, STATUS_TEMPLATE
ALARM_DISARMED = 'disarmed'
ALARM_ARMED = 'armed'
ALARM_ARMED_MOTION = 'armed_motion'
ALARM_ARMED_NO_MOTION = 'armed_no_motion'
ALARM_TRIGGERED = 'triggered'
SYSTEM_DISABLED = 'system_disabled'
class FireflySecurityAndMonitoring(object):
def __init__(self, firefly, enabled=True):
self.firefly = firefly
self.enabled = enabled
self.status = STATUS_TEMPLATE
self.alarm_status = ALARM_DISARMED
self.settings = FireflySecuritySettings()
def shutdown(self, **kwargs):
self.settings.save_config()
def get_alarm_status(self, **kwargs):
if not self.enabled:
return SYSTEM_DISABLED
return self.alarm_status
def event(self, event: Event, **kwargs):
logging.info('[FIREFLY SECURITY] event received: %s' % str(event))
if not self.enabled:
logging.info('[FIREFLY SECURITY] security and monitoring not enabled')
return
# Process Battery Notifications
if BATTERY in event.event_action:
self.process_battery_event(event)
# Process water event only if monitoring is enabled for the device.
if WATER in event.event_action:
if self.check_security_enabled(event.source):
self.process_water_event(event)
# Enter Secure Mode
if event.source == SOURCE_LOCATION and 'mode' in event.event_action:
mode = event.event_action['mode']
if self.check_secure_mode(mode):
self.enter_secure_mode()
# Exit secure mode
last_mode = self.firefly.location.lastMode
if not self.check_secure_mode(mode) and self.check_secure_mode(last_mode):
self.alarm_status = ALARM_DISARMED
self.status['status']['alarm'] = self.alarm_status
self.firefly.update_security_firebase(self.status)
self.send_notification('Security alarm disabled.')
self.broadcast_status()
return
if event.source not in self.firefly.components:
logging.info('[FIREFLY SECURITY] event source not in components: %s' % event.source)
return
# Process Events while in secure mode
if self.check_secure_mode():
if not self.check_security_enabled(event.source):
logging.info('[FIREFLY SECURITY] event source is not device')
return
self.process_event_secure_mode(event)
self.update_status(event)
def startup(self, **kwargs):
if self.check_secure_mode():
self.enter_secure_mode()
def check_secure_mode(self, mode=None, no_motion=True, motion=True):
"""
Args:
mode: The mode to check.
no_motion: Check for modes with no motion active.
motion: Check for modes with motion active.
Returns: (bool) is in secure mode
"""
if mode is None:
mode = self.firefly.location.mode
mode_secure_no_motion = mode in self.settings.secure_modes_no_motion
mode_secure_motion = mode in self.settings.secure_modes_motion
if no_motion and motion:
return mode_secure_motion or mode_secure_no_motion
elif no_motion:
return mode_secure_no_motion
elif motion:
return mode_secure_motion
return False
# TODO: Move this into security monitor
def generate_status(self, **kwargs):
if not self.enabled:
return
contact_states = check_all_security_contact_sensors(self.firefly.components, self.firefly.current_state)
motion_states = check_all_security_motion_sensors(self.firefly.components, self.firefly.current_state)
status_data = {
'status': {
'message': 'Message Placeholder',
'alarm': self.alarm_status
},
CONTACT: {
'message': '',
CONTACT_OPEN: {
'count': len(contact_states[CONTACT_OPEN]),
'devices': contact_states[CONTACT_OPEN]
},
CONTACT_CLOSE: {
'count': len(contact_states[CONTACT_CLOSE]),
'devices': contact_states[CONTACT_CLOSE]
}
},
MOTION: {
'message': '',
MOTION_ACTIVE: {
'count': len(motion_states[MOTION_ACTIVE]),
'devices': motion_states[MOTION_ACTIVE]
},
MOTION_INACTIVE: {
'count': len(motion_states[MOTION_INACTIVE]),
'devices': motion_states[MOTION_INACTIVE]
}
}
}
self.status = status_data
self.firefly.update_security_firebase(self.status)
def check_security_enabled(self, ff_id: str, filter_type=TYPE_DEVICE) -> bool:
if ff_id not in self.firefly.components:
logging.info('[FIREFLY SECURITY] component not found: %s' % ff_id)
return False
try:
component = self.firefly.components[ff_id]
return component.security and component.type == filter_type
except:
return False
# TODO: Move this into security monitor
def update_status(self, event: Event):
ff_id = event.source
if not self.check_security_enabled(ff_id):
return
# Update Contact Status
if CONTACT in event.event_action:
if event.event_action[CONTACT] == CONTACT_OPEN:
self.status[CONTACT][CONTACT_OPEN]['devices'].append(ff_id)
self.status[CONTACT][CONTACT_OPEN]['count'] = len(self.status[CONTACT][CONTACT_OPEN]['devices'])
try:
self.status[CONTACT][CONTACT_CLOSE]['devices'].remove(ff_id)
self.status[CONTACT][CONTACT_CLOSE]['count'] = len(self.status[CONTACT][CONTACT_CLOSE]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
if event.event_action[CONTACT] == CONTACT_CLOSE:
self.status[CONTACT][CONTACT_CLOSE]['devices'].append(ff_id)
self.status[CONTACT][CONTACT_CLOSE]['count'] = len(self.status[CONTACT][CONTACT_CLOSE]['devices'])
try:
self.status[CONTACT][CONTACT_OPEN]['devices'].remove(ff_id)
self.status[CONTACT][CONTACT_OPEN]['count'] = len(self.status[CONTACT][CONTACT_OPEN]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
# Update Motion Status
if MOTION in event.event_action:
if event.event_action[MOTION] == MOTION_ACTIVE:
self.status[MOTION][MOTION_ACTIVE]['devices'].append(ff_id)
self.status[MOTION][MOTION_ACTIVE]['count'] = len(self.status[MOTION][MOTION_ACTIVE]['devices'])
try:
self.status[MOTION][MOTION_INACTIVE]['devices'].remove(ff_id)
self.status[MOTION][MOTION_INACTIVE]['count'] = len(self.status[MOTION][MOTION_INACTIVE]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
if event.event_action[MOTION] == MOTION_INACTIVE:
self.status[MOTION][MOTION_INACTIVE]['devices'].append(ff_id)
self.status[MOTION][MOTION_INACTIVE]['count'] = len(self.status[MOTION][MOTION_INACTIVE]['devices'])
try:
self.status[MOTION][MOTION_ACTIVE]['devices'].remove(ff_id)
self.status[MOTION][MOTION_ACTIVE]['count'] = len(self.status[MOTION][MOTION_ACTIVE]['devices'])
except Exception as e:
logging.error('[FIREFLY SECURITY] error updating status: %s' % e)
self.firefly.update_security_firebase(self.status)
def process_event_secure_mode(self, event: Event):
alarm_triggered = False
contact_data = process_contact_change(event)
if contact_data['contact_event']:
self.send_notification(contact_data['message'])
if contact_data['alarm']:
alarm_triggered = True
logging.info('[FIREFLY SECURITY] ALARM TRIGGERED')
# TODO: Turn on listed lights, if no lights listed then turn on all lights
if self.check_secure_mode(no_motion=False):
motion_data = process_motion_change(event)
if motion_data['alarm']:
alarm_triggered = True
self.send_notification(motion_data['message'])
logging.info('[FIREFLY SECURITY] ALARM TRIGGERED')
if alarm_triggered:
self.trigger_alarm()
def trigger_alarm(self, **kwargs):
logging.info('TRIGGERING ALARM')
self.alarm_status = ALARM_TRIGGERED
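    # Fall back to tag lookup ('light' / 'alarm' tags) when no explicit
    # device lists are configured in settings.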
lights = self.settings.lights
if not lights:
lights = self.get_devices_by_tag()
for ff_id in lights:
command = command_from_dict(ff_id, self.id, self.settings.light_command)
      logging.info('[FIREFLY SECURITY] sending command %s' % str(command))
self.firefly.send_command(command)
alarms = self.settings.alarms
if not alarms:
alarms = self.get_devices_by_tag(tags=['alarm'])
for ff_id in alarms:
command = Command(ff_id, self.id, self.settings.alarm_command)
self.firefly.send_command(command)
self.broadcast_status()
self.status['status']['alarm'] = self.alarm_status.replace('_', ' ')
self.firefly.update_security_firebase(self.status)
def enter_secure_mode(self, **kwargs):
logging.info('[FIREFLY SECURITY] Entering Secure Mode.')
# Grab snapshot of current state
current_state = self.firefly.current_state.copy()
components = self.firefly.components
contact_states = check_all_security_contact_sensors(components, current_state)
if contact_states[CONTACT_OPEN]:
message = generate_contact_warning_message(contact_states)
self.send_notification(message)
# If no contacts open then send notification that alarm is now armed.
if self.check_secure_mode(no_motion=False):
self.send_notification(ALARM_ARMED_MESSAGE_MOTION)
self.alarm_status = ALARM_ARMED_MOTION
else:
self.send_notification(ALARM_ARMED_MESSAGE_NO_MOTION)
self.alarm_status = ALARM_ARMED_NO_MOTION
self.status['status']['alarm'] = self.alarm_status.replace('_', ' ')
self.firefly.update_security_firebase(self.status)
self.broadcast_status()
def broadcast_status(self, **kwargs):
event = Event(self.id, EVENT_TYPE_BROADCAST, {
'status': self.alarm_status,
})
self.firefly.send_event(event)
def get_devices_by_tag(self, tags=['light'], **kwargs):
devices = []
for ff_id, component in self.firefly.components.items():
if component.type != TYPE_DEVICE:
continue
try:
for tag in component.tags:
if tag in tags:
devices.append(ff_id)
continue
except:
pass
return devices
def process_water_event(self, event: Event, **kwargs):
alias = aliases.get_alias(event.source)
if event.event_action.get(WATER) == SENSOR_WET:
self.send_notification('ALERT!!! Water detected by: %s' % alias)
self.trigger_alarm()
return
if event.event_action.get(WATER) == SENSOR_DRY:
self.send_notification('ALERT!!! Water no longer detected by: %s' % alias)
return
def process_battery_event(self, event: Event, **kwargs):
(battery_state, battery_level) = check_battery_from_event(event)
if battery_state in BATTERY_NO_NOTIFY_STATES:
if scheduler.cancel('%s_battery_notify' % event.source):
        self.send_notification('Battery in %s has been replaced.' % event.source)
return
message = generate_battery_notification_message(event.source, battery_state, battery_level)
self.send_notification(message)
if battery_state == BATTERY_LOW:
return
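    # For anything worse than a plain low-battery reading, keep reminding
    # every four hours; replacing the battery cancels the scheduled job above.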
scheduler.runEveryH(4, self.send_notification, job_id='%s_battery_notify' % event.source, message=message)
return
def send_notification(self, message):
notify = Command(SERVICE_NOTIFICATION, self.id, COMMAND_NOTIFY, message=message)
self.firefly.send_command(notify)
@property
def id(self):
return FIREFLY_SECURITY_MONITORING
| 0
| 43
| 0
| 11,293
| 0
| 0
| 0
| 939
| 221
|
0c76885b70fe7b575d9278df97a40daf190c7e04
| 324
|
py
|
Python
|
optirocket/library/constants.py
|
Keith-Maxwell/OptiRocket
|
d99ac8d2b868b60a2bbf32f5a8a31ecdcaeea5b0
|
[
"MIT"
] | null | null | null |
optirocket/library/constants.py
|
Keith-Maxwell/OptiRocket
|
d99ac8d2b868b60a2bbf32f5a8a31ecdcaeea5b0
|
[
"MIT"
] | 3
|
2021-01-14T15:09:51.000Z
|
2021-02-12T17:05:18.000Z
|
optirocket/library/constants.py
|
Keith-Maxwell/OptiRocket
|
d99ac8d2b868b60a2bbf32f5a8a31ecdcaeea5b0
|
[
"MIT"
] | 1
|
2021-01-11T02:34:29.000Z
|
2021-01-11T02:34:29.000Z
|
# standard gravitational parameter for Earth = G*M
EARTH_GRAV_CONST = 3.986005e5 # (km^3/s^2)
# Earth Radius
EARTH_RADIUS = 6378.137 # (km)
# Earth rotation speed (calculated from sidereal period)
EARTH_ROT_RATE = 6.300387486749 / 86164 # (rad/s)
# Earth gravitational acceleration at sea level
EARTH_GRAV_SEA_LVL = 9.80665  # (m/s^2)
| 27
| 55
| 0.725309
|
# standard gravitational parameter for Earth = G*M
EARTH_GRAV_CONST = 3.986005e5 # (km^3/s^2)
# Earth Radius
EARTH_RADIUS = 6378.137 # (km)
# Earth rotation speed (calculated from sidereal period)
EARTH_ROT_RATE = 6.300387486749 / 86164 # (rad/s)
# Earth gravitational acceleration at sea level
EARTH_GRAV_SEA_LVL = 9.80665  # (m/s^2)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
42488845e1b00797f2c42f02abc38006597e292a
| 4,539
|
py
|
Python
|
my_utils/misc.py
|
Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction
|
09cceb0efaf4d074ee16d11d8f91292ce9dec854
|
[
"MIT"
] | 4
|
2021-10-22T01:33:16.000Z
|
2022-03-09T06:39:54.000Z
|
my_utils/misc.py
|
Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction
|
09cceb0efaf4d074ee16d11d8f91292ce9dec854
|
[
"MIT"
] | null | null | null |
my_utils/misc.py
|
Jennifercheukyin/High-Speed-Pedestrian-Crossing-Prediction
|
09cceb0efaf4d074ee16d11d8f91292ce9dec854
|
[
"MIT"
] | null | null | null |
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
    - init_params: net parameter initialization.
    - progress_bar: progress bar mimicking xlua.progress.
'''
import errno
import os
import torch.nn as nn
import torch.nn.init as init
import torch
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'MovingAverage', 'AverageMeter_Mat', 'Timer']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
            if m.bias is not None:
init.constant(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| 30.463087
| 118
| 0.592642
|
'''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
    - init_params: net parameter initialization.
    - progress_bar: progress bar mimicking xlua.progress.
'''
import errno
import os
import sys
import time
import math
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import numpy as np
import pdb
import torch
__all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'MovingAverage', 'AverageMeter_Mat', 'Timer']
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
            if m.bias is not None:
init.constant(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class MovingAverage(object):
def __init__(self, length):
self.length = length
self.count = 0
self.pointer = 0
self.values = np.zeros(length)
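        # Fixed-size circular buffer: `pointer` wraps around and `count` grows
        # until the window is full, so avg() covers at most `length` samples.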
# self.avg = 0
def update(self, val):
self.values[self.pointer] = val
self.pointer += 1
if self.pointer == self.length:
self.pointer = 0
self.count += 1
self.count = np.minimum(self.count, self.length)
def avg(self):
return self.values.sum() / float(self.count)
def reset(self):
self.count = 0
self.pointer = 0
# self.avg = 0
self.values.fill(0)
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
# pdb.set_trace()
self.count += n
self.avg = self.sum / self.count
class AverageMeter_Mat(object):
def __init__(self,number_ID):
self.number_ID = number_ID
self.reset()
def reset(self):
# self.sum = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda())
# self.num = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda())
self.center = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda(), requires_grad=False)
# self.dif = Variable(torch.Tensor(self.number_ID,64).fill_(0).cuda())
self.sum = torch.Tensor(self.number_ID,64).fill_(0).cuda()
self.num = torch.Tensor(self.number_ID,64).fill_(0).cuda()
# self.center = torch.Tensor(self.number_ID,64).fill_(0).cuda()
# self.dif = torch.Tensor(self.number_ID,64).fill_(0).cuda()
# self.sum = torch.Tensor(self.number_ID,64).fill_(0)
# self.num = torch.Tensor(self.number_ID,64).fill_(0)
# self.center = torch.Tensor(self.number_ID,64).fill_(0)
# self.dif = torch.Tensor(self.number_ID,64).fill_(0)
def update(self, SIR, ID, n):
# pdb.set_trace()
self.sum[ID,:] += SIR.data
# pdb.set_trace()
self.num[ID,:] += 1*n
self.center[ID,:] = self.sum[ID] / self.num[ID]
# self.dif[ID,:] = SIR - Variable(self.center[ID])
# self.avg = 0.5*torch.mean(self.dif**2)
class Timer(object):
def __init__(self):
pass
def reset(self):
self.T = time.time()
def time(self, reset=False):
ti = time.time() - self.T
if reset:
self.reset()
return ti
| 0
| 0
| 0
| 2,632
| 0
| 0
| 0
| -31
| 236
|
ca91f55ea74fe8da53eabdf2dc43a829dbcf7253
| 1,697
|
py
|
Python
|
gradient_decent_simple_linear_regression.py
|
eshanmherath/linear-regression
|
5b473586679a4b4594706faeb2bb7e4922c7ab38
|
[
"MIT"
] | 1
|
2020-12-09T04:19:46.000Z
|
2020-12-09T04:19:46.000Z
|
gradient_decent_simple_linear_regression.py
|
eshanmherath/linear-regression
|
5b473586679a4b4594706faeb2bb7e4922c7ab38
|
[
"MIT"
] | null | null | null |
gradient_decent_simple_linear_regression.py
|
eshanmherath/linear-regression
|
5b473586679a4b4594706faeb2bb7e4922c7ab38
|
[
"MIT"
] | null | null | null |
import numpy as np
np.random.seed(111)
'''
The data is generated adding noise to the values from y = 0.8x + 2 equation
Therefore the expectation of the auto encoder is to get the values w and b closer to 0.8 and 2 respectively
'''
'''generate random x values'''
X_train = np.random.random((1, 50))[0]
'''get the reference y value'''
y_reference = 0.8*X_train + 2
'''add noise to the reference y value'''
y_train = y_reference + np.sqrt(0.01)*np.random.random((1, 50))[0]
W = np.random.random()
b = np.random.random()
'''number of training examples'''
m = len(X_train)
'''parameters'''
learning_rate = 0.01
epochs = 5000
def gradient_descent(X, y):
    global W, b, learning_rate, epochs
    for _epoch in range(epochs):
        hypothesis = W*X + b
        '''cost function'''
        cost = np.divide(1, 2*m) * np.sum((hypothesis-y) ** 2)
        '''partial derivatives of the cost function with respect to W and b'''
        gradient_w = np.divide(1, m) * np.sum((hypothesis-y)*X)
        gradient_b = np.divide(1, m) * np.sum(hypothesis-y)
        '''calculating new W and b values simultaneously'''
        temp_w = W - learning_rate*gradient_w
        temp_b = b - learning_rate*gradient_b
        '''updating W and b simultaneously'''
        W = temp_w
        b = temp_b
        print('\nepoch ' + str(_epoch) + ' W : ' + str(W) + ' b : ' + str(b) + ' Cost : ' + str(cost))
'''send data to the gradient optimizer to optimize values for W and b'''
gradient_descent(X_train, y_train)
print('\nGradient optimization completed')
print('W Expected : 0.8' + ' Learned : ' + str(W))
print('b Expected : 2.0' + ' Learned : ' + str(b))
| 28.762712
| 107
| 0.625221
|
import numpy as np
np.random.seed(111)
'''
The data is generated adding noise to the values from y = 0.8x + 2 equation
Therefore the expectation of the auto encoder is to get the values w and b closer to 0.8 and 2 respectively
'''
'''generate random x values'''
X_train = np.random.random((1, 50))[0]
'''get the reference y value'''
y_reference = 0.8*X_train + 2
'''add noise to the reference y value'''
y_train = y_reference + np.sqrt(0.01)*np.random.random((1, 50))[0]
W = np.random.random()
b = np.random.random()
'''number of training examples'''
m = len(X_train)
'''parameters'''
learning_rate = 0.01
epochs = 5000
def gradient_descent(X, y):
global W, b, learning_rate, epochs
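    # For J(W, b) = 1/(2m) * sum((W*x + b - y)^2) the partial derivatives are
    # dJ/dW = (1/m) * sum((h - y) * x) and dJ/db = (1/m) * sum(h - y), which
    # is exactly what gradient_w and gradient_b compute below.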
for _epoch in range(epochs):
hypothesis = W*X + b
'''cost function'''
cost = np.divide(1, 2*m) * np.sum((hypothesis-y) ** 2)
'''partial derivatives of the cost function with respect to W and b'''
gradient_w = np.divide(1, m) * np.sum((hypothesis-y)*X)
gradient_b = np.divide(1, m) * np.sum(hypothesis-y)
'''calculating new W and b values simultaneously'''
temp_w = W - learning_rate*gradient_w
temp_b = b - learning_rate*gradient_b
'''updating W and b simultaneously'''
W = temp_w
b = temp_b
print('\nepoch ' + str(_epoch) + ' W : ' + str(W) + ' b : ' + str(b) + ' Cost : ' + str(cost))
'''send data to the gradient optimizer to optimize values for W and b'''
gradient_descent(X_train, y_train)
print('\nGradient optimization completed')
print('W Expected : 0.8' + ' Learned : ' + str(W))
print('b Expected : 2.0' + ' Learned : ' + str(b))
| 0
| 0
| 0
| 0
| 0
| 788
| 0
| 0
| 23
|
b2107b59ecdecdb0d53f298a0ed4ee2762c4cc8c
| 458
|
py
|
Python
|
1_mundo_exercicios/ex018.py
|
GuilhermeLima182/CursoDePython
|
7e72b117142794c38cbb14284d0fa6e1dbee5bf6
|
[
"MIT"
] | null | null | null |
1_mundo_exercicios/ex018.py
|
GuilhermeLima182/CursoDePython
|
7e72b117142794c38cbb14284d0fa6e1dbee5bf6
|
[
"MIT"
] | null | null | null |
1_mundo_exercicios/ex018.py
|
GuilhermeLima182/CursoDePython
|
7e72b117142794c38cbb14284d0fa6e1dbee5bf6
|
[
"MIT"
] | null | null | null |
# Write a program that reads an arbitrary angle and shows on screen
# the sine, cosine and tangent of that angle.
from math import radians, sin, cos, tan
angulo = int(input('Enter an angle: '))
seno = sin(radians(angulo))
cosseno = cos(radians(angulo))
tangente = tan(radians(angulo))
print('The sine of angle {} is {:.2f}'.format(angulo, seno))
print('The cosine of angle {} is {:.2f}'.format(angulo, cosseno))
print('The tangent of angle {} is {:.2f}'.format(angulo, tangente))
| 38.166667
| 62
| 0.696507
|
# Write a program that reads an arbitrary angle and shows on screen
# the sine, cosine and tangent of that angle.
from math import radians, sin, cos, tan
angulo = int(input('Enter an angle: '))
seno = sin(radians(angulo))
cosseno = cos(radians(angulo))
tangente = tan(radians(angulo))
print('The sine of angle {} is {:.2f}'.format(angulo, seno))
print('The cosine of angle {} is {:.2f}'.format(angulo, cosseno))
print('The tangent of angle {} is {:.2f}'.format(angulo, tangente))
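# Example run (input in degrees): for angulo = 30 the program prints
# sine 0.50, cosine 0.87 and tangent 0.58.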
| 20
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
624916c3d5ec04f32ee59e6547283d5f7ef4f28e
| 1,313
|
py
|
Python
|
source/_sample/sympy/stereograph.py
|
showa-yojyo/notebook
|
82c15074c24d64a1dfcb70a526bc1deb2ecffe68
|
[
"MIT"
] | 14
|
2016-04-13T08:10:02.000Z
|
2021-04-19T09:42:51.000Z
|
source/_sample/sympy/stereograph.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | 88
|
2017-09-27T15:07:05.000Z
|
2019-10-02T04:05:03.000Z
|
source/_sample/sympy/stereograph.py
|
showa-yojyo/note
|
5f262ecda3df132cb66206c465d16e174061d6b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""stereograph.py: Compute length of a geodesic in the unit sphere.
"""
if __name__ == '__main__':
main()
| 32.825
| 76
| 0.581112
|
#!/usr/bin/env python
"""stereograph.py: Compute length of a geodesic in the unit sphere.
"""
from sympy import (symbols, Function, Matrix, factor, simplify, sqrt)
from sympy.abc import t
from sympy.printing import print_latex
def main():
u, v, R = symbols('u v R', real=True)
xi, eta = symbols(r'\xi \eta', cls=Function)
numer = 4*R**2
denom = u**2 + v**2 + numer
# inverse of a stereographic projection from the south pole
# onto the XY plane:
pinv = Matrix([numer * u / denom,
numer * v / denom,
-(2 * R * (u**2 + v**2)) / denom]) # OK
if False:
# textbook style
Dpinv = simplify(pinv.jacobian([u, v]))
print_latex(Dpinv, mat_str='pmatrix', mat_delim=None) # OK?
tDpinvDpinv = factor(Dpinv.transpose() @ Dpinv)
print_latex(tDpinvDpinv, mat_str='pmatrix', mat_delim=None) # OK
tDpinvDpinv = tDpinvDpinv.subs([(u, xi(t)), (v, eta(t))])
dcdt = Matrix([xi(t).diff(), eta(t).diff()])
print_latex(simplify(
sqrt((dcdt.transpose() @ tDpinvDpinv).dot(dcdt))))
else:
# directly
dpinvc = pinv.subs([(u, xi(t)), (v, eta(t))]).diff(t, 1)
print_latex(sqrt(factor(dpinvc.dot(dpinvc))))
if __name__ == '__main__':
main()
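# Numeric sanity check (appended sketch, not in the original script): points
# produced by the inverse stereographic projection above should lie on the
# sphere of radius R centred at (0, 0, -R).
u0, v0, R0 = 0.7, -1.3, 1.0
denom0 = u0**2 + v0**2 + 4*R0**2
x0, y0, z0 = 4*R0**2*u0/denom0, 4*R0**2*v0/denom0, -2*R0*(u0**2 + v0**2)/denom0
assert abs(x0**2 + y0**2 + (z0 + R0)**2 - R0**2) < 1e-12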
| 0
| 0
| 0
| 0
| 0
| 1,005
| 0
| 85
| 90
|
9ee6af17b80095ba1ce3ce97e7b719c8cc0ba35d
| 357
|
py
|
Python
|
visdialch/decoders/__init__.py
|
mohitsudhakar/visual-dialog-experiments
|
77cc65938b0ce99fc52b839b7821f29c7a6b32a0
|
[
"BSD-3-Clause"
] | 1
|
2020-11-15T07:40:18.000Z
|
2020-11-15T07:40:18.000Z
|
visdialch/decoders/__init__.py
|
mohitsudhakar/visual-dialog-experiments
|
77cc65938b0ce99fc52b839b7821f29c7a6b32a0
|
[
"BSD-3-Clause"
] | 3
|
2020-11-13T19:53:06.000Z
|
2020-11-16T01:23:10.000Z
|
visdialch/decoders/__init__.py
|
mohitsudhakar/visual-dialog-experiments
|
77cc65938b0ce99fc52b839b7821f29c7a6b32a0
|
[
"BSD-3-Clause"
] | null | null | null |
# from visdialch.decoders.gen import GenerativeDecoder
#from visdialch.decoders.disc import DiscriminativeDecoder
| 44.625
| 76
| 0.812325
|
from visdialch.decoders.gen import GenerativeDecoder
# from visdialch.decoders.disc import DiscriminativeDecoder  # superseded by the decoder module below
from visdialch.decoders.decoder import DiscriminativeDecoder
def Decoder(model_config, *args):
name_dec_map = {"disc": DiscriminativeDecoder, "gen": GenerativeDecoder}
return name_dec_map[model_config["decoder"]](model_config, *args)
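# Usage sketch (argument names are assumptions, not from this file):
#   decoder = Decoder(model_config, vocabulary)
# returns DiscriminativeDecoder(...) when model_config["decoder"] == "disc"
# and GenerativeDecoder(...) when it is "gen"; extra args are forwarded.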
| 0
| 0
| 0
| 0
| 0
| 159
| 0
| 39
| 45
|
3687c80748ad58f744cedde41cab9e69281efc9e
| 44,472
|
py
|
Python
|
nitorch/io/volumes/mapping.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 46
|
2020-07-31T10:14:05.000Z
|
2022-03-24T12:51:46.000Z
|
nitorch/io/volumes/mapping.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 36
|
2020-10-06T19:01:38.000Z
|
2022-02-03T18:07:35.000Z
|
nitorch/io/volumes/mapping.py
|
balbasty/nitorch
|
d30c3125a8a66ea1434f2b39ed03338afd9724b4
|
[
"MIT"
] | 6
|
2021-01-05T14:59:05.000Z
|
2021-11-18T18:26:45.000Z
|
def cat(arrays, dim=0):
"""Concatenate mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to concatenate the arrays
Returns
-------
CatArray
A symbolic concatenation of all input arrays.
Its shape along dimension `dim` is the sum of all input shapes
along dimension `dim`.
"""
return CatArray(arrays, dim)
def stack(arrays, dim=0):
"""Stack mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to stack the arrays
Returns
-------
CatArray
A symbolic stack of all input arrays.
"""
arrays = [array.unsqueeze(dim=dim) for array in arrays]
return cat(arrays, dim=dim)
| 37.277452
| 93
| 0.565727
|
from copy import copy
import torch
from nitorch.core.py import make_list
from nitorch.core import dtypes
from nitorch.spatial import affine_sub, affine_permute, voxel_size as affvx
from nitorch.io.utils.indexing import (expand_index, guess_shape, compose_index, neg2pos,
is_droppedaxis, is_newaxis, is_sliceaxis,
invert_permutation, invert_slice, slice_navigator)
from ..utils import volutils
from ..mapping import MappedFile
class MappedArray(MappedFile):
"""Base class for mapped arrays.
Mapped arrays are usually stored on-disk, along with (diverse) metadata.
They can be symbolically sliced, allowing for partial reading and
(sometimes) writing of data from/to disk.
Chaining of symbolic slicing operations is implemented in this base
class. The actual io must be implemented by the child class.
Abstract Methods
----------------
Child classes MUST implement:
* self.data(...)
Child classes SHOULD implement:
* self.metadata(...) default -> returns empty dict
Child classes MAY implement:
* self.set_data(...) default -> raises cls.FailedWriteError
* self.set_metadata(...) default -> raises cls.FailedWriteError
* cls.save_new(...) default -> raises cls.FailedWriteError
* cls.savef_new(...) default -> raises cls.FailedWriteError
Child classes SHOULD register themselves in `readers.reader_classes`.
If they implement `save_new`, child classes SHOULD register
themselves in `writers.writer_classes`.
Properties
----------
dtype : np.dtype On-disk data type
slope : float Intensity slope from on-disk to unit
inter : float Intensity shift from on-disk to unit
affine : tensor Orientation matrix: maps spatial axes to 'world'
spatial : tuple[bool] Mask of 'spatial' axes (x, y, z, ...)
slicer : tuple[index_like] Indexing into the full on-disk array
permutation : tuple[int] Permutation of the original in-disk axes.
dim : int Number of axes
voxel_size : tuple[float] World size of the spatial dimensions
readable : AccessType See `AccessType`
writable : AccessType See `AccessType`
Types
-----
FailedReadError Error raised when failing to load
FailedWriteError Error raised when failing to save
Methods
-------
slice(tuple[index_like]) Subslice the array
permute(tuple[int]) Permute axes
transpose(int, int) Permute two axes
unsqueeze(int) Insert singleton dimension
squeeze(int) Remove singleton dimension
unbind -> tuple Unstack arrays along a dimension
chunk -> tuple Unstack arrays along a dimension by chunks
split -> tuple Unstack arrays along a dimension by chunks
data(...) -> tensor Load raw data to memory
fdata(...) -> tensor Load scaled floating-point data to memory
metadata(...) -> dict Load metadata to memory
set_data(dat, ...) Write raw data to disk
set_fdata(dat, ...) Write scaled floating-point data to disk
set_metadata(**meta) Write metadata to disk
Class methods
-------------
save_new(dat, file_like) Write new file populated with `dat`
savef_new(dat, file_like) Write new file populated with (scaled) `dat`
External functions
------------------
map(file_like) -> MappedArray Build a MappedArray
load(file_like) -> tensor Load raw data to memory from a file
loadf(file_like) -> tensor Load scaled data to memory from a file
save(dat, file_like) -> Save raw data into a new file
savef(dat, file_like) -> Save scaled data into a new file
cat(tuple[MappedArray]) Concatenate arrays along a dimension
Syntaxic sugar
--------------
__call__ -> fdata Load scaled floating-point data to memory
__array__ -> fdata Load scaled floating-point data to memory
__getitem__ -> slice Subslice the array
__setitem__ -> set_fdata Write scaled floating-point data to disk
__len__ Size of the first dimension (or 0 if scalar)
"""
fname: str = None # filename (can be None if in-memory proxy)
fileobj = None # file-like object (`write`, `seek`, etc)
is_compressed: bool = None # is compressed
dtype: torch.dtype = None # on-disk data type
slope: float = 1 # intensity slope
inter: float = 0 # intensity shift
affine = None # sliced voxel-to-world
_affine = None # original voxel-to-world
spatial: tuple = None # sliced spatial mask (len -> dim)
_spatial: tuple = None # original spatial mask (len -> _dim)
shape: tuple = None # sliced shape (len -> dim)
_shape: tuple = None # original shape (len -> _dim)
slicer: tuple = None # indexing into the parent
permutation: tuple = None # permutation of original dim (len -> _dim)
dim = property(lambda self: len(self.shape)) # Nb of sliced dimensions
_dim = property(lambda self: len(self._shape)) # Nb of original dimensions
voxel_size = property(lambda self: affvx(self.affine))
def __init__(self, **kwargs):
self._init(**kwargs)
def _init(self, **kwargs):
        for key, val in kwargs.items():
setattr(self, key, val)
if self.permutation is None:
self.permutation = tuple(range(self._dim))
if self.slicer is None:
# same layout as on-disk
self.spatial = self._spatial
self.affine = self._affine
self.shape = self._shape
self.slicer = expand_index([Ellipsis], self._shape)
return self
def __str__(self):
return '{}(shape={}, dtype={})'.format(
type(self).__name__, self.shape, self.dtype)
__repr__ = __str__
def __len__(self):
if len(self.shape) > 0:
return self.shape[0]
else:
return 0
@classmethod
def possible_extensions(cls):
"""List all possible extensions"""
return tuple()
def __getitem__(self, index):
"""Extract a sub-part of the array.
Indices can only be slices, ellipses, integers or None.
Parameters
----------
index : tuple[slice or ellipsis or int or None]
Returns
-------
subarray : type(self)
MappedArray object, with the indexing operations and affine
matrix relating to the new sub-array.
"""
return self.slice(index)
def slice(self, index, new_shape=None, _pre_expanded=False):
"""Extract a sub-part of the array.
Indices can only be slices, ellipses, integers or None.
Parameters
----------
index : tuple[slice or ellipsis or int or None]
Other Parameters
----------------
new_shape : sequence[int], optional
Output shape of the sliced object
_pre_expanded : bool, default=False
Set to True of `expand_index` has already been called on `index`
Returns
-------
subarray : type(self)
MappedArray object, with the indexing operations and affine
matrix relating to the new sub-array.
"""
        if not _pre_expanded:
            index = expand_index(index, self.shape)
        if new_shape is None:
            new_shape = guess_shape(index, self.shape)
        if sum(isinstance(idx, list) for idx in index) > 1:
raise ValueError('List indices not currently supported '
'(otherwise we enter advanced indexing '
'territory and it becomes too complicated).')
new = copy(self)
new.shape = new_shape
# compute new affine
if self.affine is not None:
spatial_shape = [sz for sz, msk in zip(self.shape, self.spatial)
if msk]
spatial_index = [idx for idx in index if not is_newaxis(idx)]
spatial_index = [idx for idx, msk in zip(spatial_index, self.spatial)
if msk]
affine, _ = affine_sub(self.affine, spatial_shape, tuple(spatial_index))
else:
affine = None
new.affine = affine
# compute new slicer
perm_shape = [self._shape[d] for d in self.permutation]
new.slicer = compose_index(self.slicer, index, perm_shape)
# compute new spatial mask
spatial = []
i = 0
for idx in new.slicer:
if is_newaxis(idx):
spatial.append(False)
else:
# original axis
if not is_droppedaxis(idx):
spatial.append(self._spatial[self.permutation[i]])
i += 1
new.spatial = tuple(spatial)
return new
def __setitem__(self, index, value):
"""Write scaled data to disk.
Parameters
----------
index : tuple
Tuple of indices (see `__getitem__`)
value : array or tensor
Array-like with shape `self[index].shape`
Returns
-------
self : type(self)
"""
if isinstance(value, MappedArray):
raise NotImplementedError
else:
self.__getitem__(index).set_fdata(value)
return self
def __call__(self, *args, **kwargs):
"""Get floating point data. See `fdata()`"""
return self.fdata(*args, **kwargs)
def __array__(self, dtype=None):
"""Convert to numpy array"""
return self.fdata(dtype=dtype, numpy=True)
def permute(self, dims):
"""Permute dimensions
Parameters
----------
dims : sequence[int]
A permutation of `range(self.dim)`
Returns
-------
permarray : type(self)
MappedArray object, with the indexing operations and affine
matrix reflecting the permutation.
"""
dims = list(dims)
if len(dims) != self.dim or len(dims) != len(set(dims)):
raise ValueError('there should be as many (unique) dimensions '
'as the array\'s dimension. Got {} and {}.'
.format(len(set(dims)), self.dim))
# permute tuples that relate to the current spatial dimensions
# (that part is easy)
shape = tuple(self.shape[d] for d in dims)
spatial = tuple(self.spatial[d] for d in dims)
# permute slicer
# 1) permute non-dropped dimensions
slicer_nodrop = list(filter(lambda x: not is_droppedaxis(x), self.slicer))
slicer_nodrop = [slicer_nodrop[d] for d in dims]
# 2) insert dropped dimensions
slicer = []
for idx in self.slicer:
if is_droppedaxis(idx):
slicer.append(idx)
else:
new_idx, *slicer_nodrop = slicer_nodrop
slicer.append(new_idx)
# permute permutation
# 1) insert None where new axes and remove dropped axes
old_perm = self.permutation
new_perm = []
drop_perm = []
for idx in self.slicer:
if is_newaxis(idx):
new_perm.append(None)
continue
p, *old_perm = old_perm
if not is_droppedaxis(idx):
new_perm.append(p)
else:
drop_perm.append(p)
# 2) permute
new_perm = [new_perm[d] for d in dims]
# 3) insert back dropped axes and remove new axes
perm = []
for idx in self.slicer:
if is_droppedaxis(idx):
p, *drop_perm = drop_perm
perm.append(p)
continue
p, *new_perm = new_perm
if not is_newaxis(p):
perm.append(p)
# permute affine
# (it's a bit more complicated: we need to find the
# permutation of the *current* *spatial* dimensions)
perm_spatial = [p for p in dims if self.spatial[p]]
perm_spatial = sorted(range(len(perm_spatial)),
key=lambda k: perm_spatial[k])
affine, _ = affine_permute(self.affine, perm_spatial, self.shape)
# create new object
new = copy(self)
new.shape = shape
new.spatial = spatial
new.permutation = tuple(perm)
new.slicer = tuple(slicer)
new.affine = affine
return new
def movedim(self, source, destination):
dim = self.dim
source = make_list(source)
destination = make_list(destination)
if len(destination) == 1:
# we assume that the user wishes to keep moved dimensions
# in the order they were provided
destination = destination[0]
if destination >= 0:
destination = list(range(destination, destination + len(source)))
else:
destination = list(range(destination + 1 - len(source), destination + 1))
if len(source) != len(destination):
raise ValueError('Expected as many source as destination positions.')
source = [dim + src if src < 0 else src for src in source]
destination = [dim + dst if dst < 0 else dst for dst in destination]
if len(set(source)) != len(source):
raise ValueError(f'Expected source positions to be unique but got '
f'{source}')
if len(set(destination)) != len(destination):
raise ValueError(f'Expected destination positions to be unique but got '
f'{destination}')
# compute permutation
positions_in = list(range(dim))
positions_out = [None] * dim
for src, dst in zip(source, destination):
positions_out[dst] = src
positions_in[src] = None
positions_in = filter(lambda x: x is not None, positions_in)
for i, pos in enumerate(positions_out):
if pos is None:
positions_out[i], *positions_in = positions_in
return self.permute(positions_out)
def transpose(self, dim0, dim1):
"""Transpose two dimensions
Parameters
----------
dim0 : int
First dimension
dim1 : int
Second dimension
Returns
-------
permarray : type(self)
MappedArray object, with the indexing operations and affine
matrix reflecting the transposition.
"""
permutation = list(range(self.dim))
permutation[dim0] = dim1
permutation[dim1] = dim0
return self.permute(permutation)
def data(self, dtype=None, device=None, casting='unsafe', rand=True,
cutoff=None, dim=None, numpy=False):
"""Load the array in memory
Parameters
----------
dtype : type or torch.dtype or np.dtype, optional
Output data type. By default, keep the on-disk data type.
device : torch.device, default='cpu'
Output device.
rand : bool, default=False
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale'}, default='unsafe'
Controls what kind of data casting may occur:
* 'no': the data types should not be cast at all.
* 'equiv': only byte-order changes are allowed.
* 'safe': only casts which can preserve values are allowed.
* 'same_kind': only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe': any data conversions may be done.
* 'rescale': the input data is rescaled to match the dynamic
range of the output type. The minimum value in the data
is mapped to the minimum value of the data type and the
maximum value in the data is mapped to the maximum value
of the data type.
* 'rescale_zero': the input data is rescaled to match the
dynamic range of the output type, but ensuring that
zero maps to zero.
> If the data is signed and cast to a signed datatype,
zero maps to zero, and the scaling is chosen so that
both the maximum and minimum value in the data fit
in the output dynamic range.
> If the data is signed and cast to an unsigned datatype,
negative values "wrap around" (as with an unsafe cast).
> If the data is unsigned and cast to a signed datatype,
values are kept positive (the negative range is unused).
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
Returns
-------
dat : tensor[dtype]
"""
pass
def fdata(self, dtype=None, device=None, rand=False, cutoff=None,
dim=None, numpy=False):
"""Load the scaled array in memory
This function differs from `data` in several ways:
* The output data type should be a floating point type.
* If an affine scaling (slope, intercept) is defined in the
file, it is applied to the data.
* the default output data type is `torch.get_default_dtype()`.
Parameters
----------
dtype : dtype_like, optional
Output data type. By default, use `torch.get_default_dtype()`.
Should be a floating point type.
device : torch.device, default='cpu'
Output device.
rand : bool, default=False
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
Returns
-------
dat : tensor[dtype]
"""
# --- sanity check ---
dtype = torch.get_default_dtype() if dtype is None else dtype
info = dtypes.dtype(dtype)
if not info.is_floating_point:
raise TypeError('Output data type should be a floating point '
'type but got {}.'.format(dtype))
# --- get unscaled data ---
dat = self.data(dtype=dtype, device=device, rand=rand,
cutoff=cutoff, dim=dim, numpy=numpy)
# --- scale ---
if self.slope != 1:
dat *= float(self.slope)
if self.inter != 0:
dat += float(self.inter)
return dat
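    # Worked example of the scaling above (illustrative values, not from any
    # real file): with slope=0.5 and inter=10.0, an on-disk raw value of 100
    # is returned by fdata() as 100 * 0.5 + 10.0 = 60.0.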
def set_data(self, dat, casting='unsafe'):
"""Write (partial) data to disk.
Parameters
----------
dat : tensor
Tensor to write on disk. It should have shape `self.shape`.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale'}, default='unsafe'
Controls what kind of data casting may occur:
* 'no': the data types should not be cast at all.
* 'equiv': only byte-order changes are allowed.
* 'safe': only casts which can preserve values are allowed.
* 'same_kind': only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe': any data conversions may be done.
* 'rescale': the input data is rescaled to match the dynamic
range of the output type. The minimum value in the data
is mapped to the minimum value of the data type and the
maximum value in the data is mapped to the maximum value
of the data type.
* 'rescale_zero': the input data is rescaled to match the
dynamic range of the output type, but ensuring that
zero maps to zero.
> If the data is signed and cast to a signed datatype,
zero maps to zero, and the scaling is chosen so that
both the maximum and minimum value in the data fit
in the output dynamic range.
> If the data is signed and cast to an unsigned datatype,
negative values "wrap around" (as with an unsafe cast).
> If the data is unsigned and cast to a signed datatype,
values are kept positive (the negative range is unused).
Returns
-------
self : type(self)
"""
raise self.FailedWriteError("Method not implemented in class {}."
.format(type(self).__name__))
def set_fdata(self, dat):
"""Write (partial) scaled data to disk.
Parameters
----------
dat : tensor
Tensor to write on disk. It should have shape `self.shape`
and a floating point data type.
Returns
-------
self : type(self)
"""
# --- sanity check ---
info = dtypes.dtype(dat.dtype)
if not info.is_floating_point:
raise TypeError('Input data type should be a floating point '
'type but got {}.'.format(dat.dtype))
if dat.shape != self.shape:
raise TypeError('Expected input shape {} but got {}.'
.format(self.shape, dat.shape))
# --- detach ---
if torch.is_tensor(dat):
dat = dat.detach()
# --- unscale ---
if self.inter != 0 or self.slope != 1:
dat = dat.clone() if torch.is_tensor(dat) else dat.copy()
if self.inter != 0:
dat -= float(self.inter)
if self.slope != 1:
dat /= float(self.slope)
# --- set unscaled data ---
self.set_data(dat)
return self
def metadata(self, keys=None):
"""Read metadata
.. note:: The values returned by this function always relate to
the full volume, even if we're inside a view. That is,
we always return the affine of the original volume.
To get an affine matrix that relates to the view,
use `self.affine`.
Parameters
----------
keys : sequence[str], optional
List of metadata to load. They can either be one of the
generic metadata keys define in `io.metadata`, or a
format-specific metadata key.
By default, all generic keys that are found in the file
are returned.
Returns
-------
metadata : dict
A dictionary of metadata
"""
return dict()
def set_metadata(self, **meta):
"""Write metadata
Parameters
----------
meta : dict, optional
Dictionary of metadata.
Fields that are absent from the dictionary or that have
value `None` are kept untouched.
Returns
-------
self : type(self)
"""
raise NotImplementedError("Method not implemented in class {}."
.format(type(self).__name__))
@classmethod
def save_new(cls, dat, file_like, like=None, casting='unsafe', **metadata):
"""Write an array to disk.
This function makes educated choices for the file format and
its metadata based on the file extension, the data type and the
other options provided.
Parameters
----------
dat : tensor or array or MappedArray
Data to write
file_like : str or file object
Path to file or file object (with methods `seek`, `read`).
If the extension is known, it gets priority over `like` when
choosing the output format.
like : file or MappedArray
An array on-disk that should be used as a template for the new
file. Its metadata/layout/etc will be mimicked as much as possible.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe', 'rescale'}, default='unsafe'
Controls what kind of data casting may occur.
See `MappedArray.set_data`
metadata : dict
Metadata to store on disk. Values provided there will have
priority over `like`.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
raise cls.FailedWriteError("Method not implemented in class {}."
.format(cls.__name__))
@classmethod
def savef_new(cls, dat, file_like, like=None, **metadata):
"""Write a scaled array to disk.
This function makes educated choices for the file format and
its metadata based on the file extension, the data type and the
other options provided.
The input data type must be a floating point type.
Parameters
----------
dat : tensor or array or MappedArray
Data to write
file_like : str or file object
Path to file or file object (with methods `seek`, `read`).
If the extension is known, it gets priority over `like` when
choosing the output format.
like : file or MappedArray
An array on-disk that should be used as a template for the new
file. Its metadata/layout/etc will be mimicked as much as possible.
metadata : dict
Metadata to store on disk. Values provided there will have
priority over `like`.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
raise cls.FailedWriteError("Method not implemented in class {}."
.format(cls.__name__))
def unsqueeze(self, dim, ndim=1):
"""Add a dimension of size 1 in position `dim`.
Parameters
----------
dim : int
The dimension is added to the right of `dim` if `dim < 0`
else it is added to the left of `dim`.
Returns
-------
MappedArray
"""
index = [slice(None)] * self.dim
if dim < 0:
dim = self.dim + dim + 1
index = index[:dim] + ([None] * ndim) + index[dim:]
return self[tuple(index)]
    def squeeze(self, dim=None):
"""Remove all dimensions of size 1.
Parameters
----------
dim : int or sequence[int], optional
If provided, only this dimension is squeezed. It *must* be a
dimension of size 1.
Returns
-------
MappedArray
"""
if dim is None:
dim = [d for d in range(self.dim) if self.shape[d] == 1]
dim = make_list(dim)
ndim = len(self.shape)
dim = [ndim + d if d < 0 else d for d in dim]
if any(self.shape[d] != 1 for d in dim):
raise ValueError('Impossible to squeeze non-singleton dimensions.')
index = [slice(None) if d not in dim else 0 for d in range(self.dim)]
return self[tuple(index)]
def unbind(self, dim=0, keepdim=False):
"""Extract all arrays along dimension `dim` and drop that dimension.
Parameters
----------
dim : int, default=0
Dimension along which to unstack.
keepdim : bool, default=False
Do not drop the unstacked dimension.
Returns
-------
list[MappedArray]
"""
index = [slice(None)] * self.dim
if keepdim:
index = index[:dim+1] + [None] + index[dim+1:]
out = []
for i in range(self.shape[dim]):
index[dim] = i
out.append(self[tuple(index)])
return out
def chunk(self, chunks, dim=0):
"""Split the array into smaller arrays of size `chunk` along `dim`.
Parameters
----------
chunks : int
Number of chunks.
dim : int, default=0
Dimensions along which to split.
Returns
-------
list[MappedArray]
"""
index = [slice(None)] * self.dim
out = []
for i in range(self.shape[dim]):
index[dim] = slice(i*chunks, (i+1)*chunks)
out.append(self[tuple(index)])
return out
def split(self, chunks, dim=0):
"""Split the array into smaller arrays along `dim`.
Parameters
----------
chunks : int or list[int]
If `int`: Number of chunks (see `self.chunk`)
Else: Size of each chunk. Must sum to `self.shape[dim]`.
dim : int, default=0
Dimensions along which to split.
Returns
-------
list[MappedArray]
"""
if isinstance(chunks, int):
return self.chunk(chunks, dim)
chunks = make_list(chunks)
if sum(chunks) != self.shape[dim]:
raise ValueError('Chunks must cover the full dimension. '
'Got {} and {}.'
.format(sum(chunks), self.shape[dim]))
index = [slice(None)] * self.dim
previous_chunks = 0
out = []
for chunk in chunks:
index[dim] = slice(previous_chunks, previous_chunks+chunk)
out.append(self[tuple(index)])
previous_chunks += chunk
return out
def channel_first(self, atleast=0):
"""Permute the dimensions such that all spatial axes are on the right.
Parameters
----------
atleast : int, default=0
Make sure that at least this number of non-spatial dimensions
exist (new axes are inserted accordingly).
Returns
-------
MappedArray
"""
# 1) move spatial dimensions to the right
perm = []
spatial = []
for d, is_spatial in enumerate(self.spatial):
if is_spatial:
spatial.append(d)
else:
perm.append(d)
nb_channels = len(perm)
perm = perm + spatial
new = self.permute(perm)
# 2) add channel axes
add_channels = max(0, atleast - nb_channels)
if add_channels:
index = [slice(None)] * nb_channels \
+ [None] * add_channels \
+ [Ellipsis]
new = new.slice(tuple(index))
return new
def channel_last(self, atleast=0):
"""Permute the dimensions such that all spatial axes are on the left.
Parameters
----------
atleast : int, default=0
Make sure that at least this number of non-spatial dimensions
exist (new axes are inserted accordingly).
Returns
-------
MappedArray
"""
# 1) move spatial dimensions to the right
perm = []
spatial = []
for d, is_spatial in enumerate(self.spatial):
if is_spatial:
spatial.append(d)
else:
perm.append(d)
nb_channels = len(perm)
perm = spatial + perm
new = self.permute(perm)
# 2) add channel axes
add_channels = max(0, atleast - nb_channels)
if add_channels:
index = [Ellipsis] + [None] * add_channels
new = new.slice(tuple(index))
return new
class CatArray(MappedArray):
"""A concatenation of mapped arrays.
This is largely inspired by virtual concatenation of file_array in
SPM: https://github.com/spm/spm12/blob/master/@file_array/cat.m
"""
_arrays: tuple = []
_dim_cat: int = None
# defer attributes
fname = property(lambda self: tuple(a.fname for a in self._arrays))
fileobj = property(lambda self: tuple(a.fileobj for a in self._arrays))
is_compressed = property(lambda self: tuple(a.is_compressed for a in self._arrays))
dtype = property(lambda self: tuple(a.dtype for a in self._arrays))
slope = property(lambda self: tuple(a.slope for a in self._arrays))
inter = property(lambda self: tuple(a.inter for a in self._arrays))
_shape = property(lambda self: tuple(a._shape for a in self._arrays))
_dim = property(lambda self: tuple(a._dim for a in self._arrays))
affine = property(lambda self: tuple(a.affine for a in self._arrays))
_affine = property(lambda self: tuple(a._affine for a in self._arrays))
spatial = property(lambda self: tuple(a.spatial for a in self._arrays))
_spatial = property(lambda self: tuple(a._spatial for a in self._arrays))
slicer = property(lambda self: tuple(a.slicer for a in self._arrays))
permutation = property(lambda self: tuple(a.permutation for a in self._arrays))
voxel_size = property(lambda self: tuple(a.voxel_size for a in self._arrays))
def __init__(self, arrays, dim=0):
"""
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
            Dimension along which to concatenate the arrays
"""
super().__init__()
arrays = list(arrays)
dim = dim or 0
self._dim_cat = dim
# sanity checks
shapes = []
for i, array in enumerate(arrays):
if not isinstance(array, MappedArray):
raise TypeError('Input arrays should be `MappedArray` '
                                'instances. Got {}.'.format(type(array)))
shape = list(array.shape)
del shape[dim]
shapes.append(shape)
shape0, *shapes = shapes
if not all(shape == shape0 for shape in shapes):
raise ValueError('Shapes of all concatenated arrays should '
'be equal except in the concatenation dimension.')
# compute output shape
shape = list(arrays[0].shape)
dims = [array.shape[dim] for array in arrays]
shape[dim] = sum(dims)
self.shape = tuple(shape)
# concatenate
self._arrays = tuple(arrays)
def __str__(self):
dtype_str = tuple(str(dt) for dt in self.dtype)
dtype_str = '(' + ', '.join(dtype_str) + ')'
return '{}(shape={}, dtype={})'.format(
type(self).__name__, self.shape, dtype_str)
__repr__ = __str__
def slice(self, index, new_shape=None):
# overload slicer -> slice individual arrays
index = expand_index(index, self.shape)
        if new_shape is None:
            new_shape = guess_shape(index, self.shape)
assert len(index) > 0, "index should never be empty here"
        if sum(isinstance(idx, list) for idx in index) > 1:
raise ValueError('List indices not currently supported '
'(otherwise we enter advanced indexing '
'territory and it becomes too complicated).')
index = list(index)
shape_cat = self.shape[self._dim_cat]
# find out which index corresponds to the concatenated dimension
# + compute the concatenated dimension in the output array
new_dim_cat = self._dim_cat
nb_old_dim = -1
for map_dim_cat, idx in enumerate(index):
if is_newaxis(idx):
# an axis was added: dim_cat moves to the right
new_dim_cat = new_dim_cat + 1
elif is_droppedaxis(idx):
# an axis was dropped: dim_cat moves to the left
new_dim_cat = new_dim_cat - 1
nb_old_dim += 1
else:
nb_old_dim += 1
if nb_old_dim >= self._dim_cat:
# found the concatenated dimension
break
index_cat = index[map_dim_cat]
index_cat = neg2pos(index_cat, shape_cat) # /!\ do not call it again
if is_droppedaxis(index_cat):
# if the concatenated dimension is dropped, return the
# corresponding array (sliced)
if index_cat < 0 or index_cat >= shape_cat:
raise IndexError('Index {} out of bounds [0, {}]'
.format(index_cat, shape_cat))
nb_pre = 0
for i in range(len(self._arrays)):
if nb_pre < index_cat:
# we haven't found the volume yet
nb_pre += self._arrays[i].shape[self._dim_cat]
continue
if i > index_cat:
# we've passed the volume
i = i - 1
nb_pre -= self._arrays[i].shape[self._dim_cat]
index_cat = index_cat - nb_pre
index[map_dim_cat] = index_cat
return self._arrays[i].slice(tuple(index), new_shape)
# else, we may have to drop some volumes and slice the others
assert is_sliceaxis(index_cat), "This should not happen"
arrays = self._arrays
step = index_cat.step or 1
if step < 0:
# if negative step:
# 1) invert everything
invert_index = [slice(None)] * self.dim
invert_index[self._dim_cat] = slice(None, None, -1)
arrays = [array[tuple(invert_index)] for array in arrays]
# 2) update index_cat
index_cat = invert_slice(index_cat, shape_cat, neg2pos=False)
# compute navigator
# (step is positive)
start, step, nb_elem_total = slice_navigator(index_cat, shape_cat, do_neg2pos=False)
nb_pre = 0 # nb of slices on the left of the cursor
kept_arrays = [] # arrays at least partly in bounds
starts = [] # start in each kept array
stops = [] # stop in each kept array
size_since_start = 0 # nb of in-bounds slices left of the cursor
while len(arrays) > 0:
# pop array
array, *arrays = arrays
size_cat = array.shape[self._dim_cat]
if nb_pre + size_cat < start:
# discarded volumes at the beginning
nb_pre += size_cat
continue
if nb_pre < start:
# first volume
kept_arrays.append(array)
starts.append(start - nb_pre)
elif index_cat.stop is None or nb_pre < index_cat.stop:
# other kept volume
kept_arrays.append(array)
skip = size_since_start - (size_since_start // step) * step
starts.append(skip)
# compute stopping point
nb_elem_prev = size_since_start // step
nb_elem_remaining = nb_elem_total - nb_elem_prev
nb_elem_this_volume = (size_cat - starts[-1]) // step
if nb_elem_remaining <= nb_elem_this_volume:
# last volume
stops.append(nb_elem_remaining)
break
# read as much as possible
size_since_start += size_cat
nb_pre += size_cat
stops.append(None)
continue
# slice kept arrays
arrays = []
for array, start, stop in zip(kept_arrays, starts, stops):
index[map_dim_cat] = slice(start, stop, step)
arrays.append(array[tuple(index)])
# create new CatArray
new = copy(self)
new._arrays = arrays
new._dim_cat = new_dim_cat
new.shape = new_shape
return new
def permute(self, dims):
# overload permutation -> permute individual arrays
new = copy(self)
new._arrays = [array.permute(dims) for array in new._arrays]
iperm = invert_permutation(dims)
new._dim_cat = iperm[new._dim_cat]
new.shape = tuple(self.shape[d] for d in dims)
return new
def data(self, *args, **kwargs):
# read individual arrays and concatenate them
# TODO: it would be more efficient to preallocate the whole
# array and pass the appropriate buffer to each reader but
# (1) we don't have the option to provide a buffer yet
# (2) everything's already quite inefficient
dats = [array.data(*args, **kwargs) for array in self._arrays]
return volutils.cat(dats, dim=self._dim_cat)
def fdata(self, *args, **kwargs):
# read individual arrays and concatenate them
# TODO: it would be more efficient to preallocate the whole
# array and pass the appropriate buffer to each reader but
# (1) we don't have the option to provide a buffer yet
# (2) everything's already quite inefficient
dats = [array.fdata(*args, **kwargs) for array in self._arrays]
return volutils.cat(dats, dim=self._dim_cat)
def set_data(self, dat, *args, **kwargs):
# slice the input data and write it into each array
size_prev = 0
        index = [slice(None)] * self.dim
        for array in self._arrays:
            size_cat = array.shape[self._dim_cat]
            index[self._dim_cat] = slice(size_prev, size_prev + size_cat)
            array.set_data(dat[tuple(index)], *args, **kwargs)
            size_prev += size_cat
def set_fdata(self, dat, *args, **kwargs):
# slice the input data and write it into each array
size_prev = 0
        index = [slice(None)] * self.dim
        for array in self._arrays:
            size_cat = array.shape[self._dim_cat]
            index[self._dim_cat] = slice(size_prev, size_prev + size_cat)
            array.set_fdata(dat[tuple(index)], *args, **kwargs)
            size_prev += size_cat
def metadata(self, *args, **kwargs):
return tuple(array.metadata(*args, **kwargs) for array in self._arrays)
def set_metadata(self, **meta):
raise NotImplementedError('Cannot write metadata into concatenated '
'array')
def cat(arrays, dim=0):
"""Concatenate mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to concatenate the arrays
Returns
-------
CatArray
A symbolic concatenation of all input arrays.
Its shape along dimension `dim` is the sum of all input shapes
along dimension `dim`.
"""
return CatArray(arrays, dim)
def stack(arrays, dim=0):
"""Stack mapped arrays along a dimension.
Parameters
----------
arrays : sequence[MappedArray]
Arrays to concatenate. Their shapes should be identical
except along dimension `dim`.
dim : int, default=0
        Dimension along which to stack the arrays
Returns
-------
CatArray
A symbolic stack of all input arrays.
"""
arrays = [array.unsqueeze(dim=dim) for array in arrays]
return cat(arrays, dim=dim)
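# Shape-semantics sketch for cat/stack (appended; mirrors eager torch.cat and
# torch.stack; MyMappedArray stands in for any concrete MappedArray subclass):
#   a, b = MyMappedArray(...), MyMappedArray(...)   # both shaped (4, 5)
#   cat([a, b], dim=0).shape    -> (8, 5)    # sizes summed along dim 0
#   stack([a, b], dim=0).shape  -> (2, 4, 5) # new leading axis of length 2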
| 0
| 2,897
| 0
| 39,913
| 0
| 0
| 0
| 328
| 222
|
1a2deaef0215145916e743664ab5b8b9ed9d9543
| 302
|
py
|
Python
|
blit.py
|
rwberendsen/blit
|
f025a286b04774ec6dc6a47823254484d3942b78
|
[
"MIT"
] | null | null | null |
blit.py
|
rwberendsen/blit
|
f025a286b04774ec6dc6a47823254484d3942b78
|
[
"MIT"
] | null | null | null |
blit.py
|
rwberendsen/blit
|
f025a286b04774ec6dc6a47823254484d3942b78
|
[
"MIT"
] | null | null | null |
"""
blit.py
Call this script if you want to run everything
"""
import sys
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 10.785714
| 39
| 0.63245
|
"""
blit.py
Call this script if you want to run everything
"""
import json
import sys
import integrate
def main(argv):
with open('config.json', 'r') as f:
config = json.load(f)
integrate.integrate(**config)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
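# Illustrative config.json (keys are assumptions; whatever the file contains
# is passed straight to integrate.integrate as keyword arguments):
#   {"start": 0.0, "stop": 1.0, "steps": 100}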
| 0
| 0
| 0
| 0
| 0
| 112
| 0
| -27
| 91
|
33a51d04c0e22dbd80245e03d033a309d7a8fdfd
| 367
|
py
|
Python
|
pacote dowlond/curso python/exercicio100.py
|
Kaue-Marin/Curso-Python
|
45f7920e288a49724a4284f14c7212bb1662ab5b
|
[
"MIT"
] | null | null | null |
pacote dowlond/curso python/exercicio100.py
|
Kaue-Marin/Curso-Python
|
45f7920e288a49724a4284f14c7212bb1662ab5b
|
[
"MIT"
] | null | null | null |
pacote dowlond/curso python/exercicio100.py
|
Kaue-Marin/Curso-Python
|
45f7920e288a49724a4284f14c7212bb1662ab5b
|
[
"MIT"
] | null | null | null |
numeros = []
# main program
sorteia()
somapar()
| 22.9375
| 47
| 0.577657
|
from random import randint
numeros = []
def sorteia():
    for _ in range(4):
        numeros.append(randint(1, 9))
    print(f'the values in the list are {numeros}')
def somapar():
    spar = 0
    for c2 in numeros:
        if c2 % 2 == 0:
            spar += c2
    print(f'the sum of the even numbers is {spar}')
# main program
sorteia()
somapar()
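# Example run (values vary with the random draw):
#   the values in the list are [8, 3, 2, 5]
#   the sum of the even numbers is 10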
| 4
| 0
| 0
| 0
| 0
| 241
| 0
| 5
| 66
|
7f79dcf3d85037aa0b27e51ab5ee77202b2f17ac
| 3,802
|
py
|
Python
|
ch03/pro1.py
|
Lucid-ak/deeplearnig_practice
|
e196d733ee9b910a9c7648e61e6934aea9d255b3
|
[
"MIT"
] | null | null | null |
ch03/pro1.py
|
Lucid-ak/deeplearnig_practice
|
e196d733ee9b910a9c7648e61e6934aea9d255b3
|
[
"MIT"
] | null | null | null |
ch03/pro1.py
|
Lucid-ak/deeplearnig_practice
|
e196d733ee9b910a9c7648e61e6934aea9d255b3
|
[
"MIT"
] | null | null | null |
import numpy as np  # non-linear perceptron
import sys, os
sys.path.append(os.pardir)
def step_function(x):
'''
y = x > 0
    return y.astype(np.int)  # np.int and dtype=int play the same role.
    '''
    return np.array(x>0, dtype=int)  # dtype casts the result to the desired data type, e.g. dtype=int
'''
network=init_network()
x=np.array([100,40])
y=forward(network, x)
print(y)
#print(y)
#plt.plot(x,y)
#plt.ylim(-0.1,1.1)
#plt.show()
'''
x_test,t_test = get_data()
network=init_networrk_mnist()
batch_size=100
accuracy_ct=0
for i in range(0, len(x_test), batch_size):  # len() gives the size of x_test, no need to hard-code it
x_batch = x_test[i:i+batch_size]
y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)  # take the index with the highest probability
print(np.sum(p == t_test[i:i+batch_size]))
accuracy_ct += np.sum(p == t_test[i:i+batch_size])
'''
y=predict(network, x_test[i])
p=np.argmax(y)
if p==t_test[i] :
accuracy_ct+=1
'''
print("Accuracy:",str(float(accuracy_ct)/len(x_test)))
print(accuracy_ct)
'''
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
img = img.reshape(28,28)  # must reshape to 28x28 or the image does not come out right
print(img.shape)
img_show(img)
'''
| 22.104651
| 99
| 0.584955
|
import pickle
import numpy as np  # non-linear perceptron
import matplotlib.pylab as plt
import sys, os
sys.path.append(os.pardir)
from dataset.mnist import load_mnist
from PIL import Image
def AND(x1, x2):
x=np.array([x1,x2])
w=np.array([0.5, 0.5])
b= -0.7
theta = 0
tmp = np.sum(w*x)+b
if tmp<=theta:
return 0
elif tmp>theta:
return 1
def OR(x1, x2):
x = np.array([x1,x2])
w = np.array([0.5,0.5])
b = -0.3
theta = 0
sig=np.sum(w*x)+b
if sig>=theta:
return 1
else :
return 0
def NAND(x1, x2):
x=np.array([x1,x2])
w=np.array([0.5, 0.5])
b=-0.7
theta=0
sig=np.sum(w*x)+b
if sig<theta :
return 1
else :
return 0
def XOR(x1, x2):
y1=OR(x1,x2)
y2=NAND(x1,x2)
y=AND(y1,y2)
return y
def step_function(x):
'''
y = x > 0
    return y.astype(np.int)  # np.int and dtype=int play the same role.
'''
    return np.array(x>0, dtype=int)  # dtype casts the output to the desired data type, e.g. dtype=int
def sigmoid(x):
    return 1/(1+np.exp(-x))  # broadcasting: computed element-wise and the results are returned as an array
def ReLU(x):
return np.array(np.maximum(0, x))
def softmax(x):  # c is the maximum of the input values
c=np.max(x)
exp_x=np.exp(x-c)
sum_exp_x=sum(exp_x)
    y = exp_x/sum_exp_x  # could return exp_x/sum_exp_x directly, but y is assigned separately for readability
return y
def identity_function(x):
return x
def init_network():  # store each weight and bias in the network dict under a label
network={}
network['W1']=np.array([[0.1,0.3,0.5],[0.2,0.4,0.6]])
network['b1']=np.array([0.1, 0.2, 0.3])
network['W2']=np.array([[0.1,0.4],[0.2,0.5],[0.3,0.6]])
network['b2']=np.array([0.1,0.2])
network['W3']=np.array([[0.1,0.3],[0.2,0.4]])
network['b3']=np.array([0.1,0.2])
return network
def init_networrk_mnist():  # load pretrained weights and biases from the library sample file
with open("sample_weight.pkl",'rb') as f:
network = pickle.load(f)
return network
def predict(network, x):
W1,W2, W3= network['W1'], network['W2'], network['W3']
b1,b2,b3 = network['b1'],network['b2'],network['b3']
a1 = np.dot(x,W1)+b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y=softmax(a3)
return y
def forward(network, x):  # forward pass (input -> output); always yields similar values
W1,W2, W3= network['W1'], network['W2'], network['W3']
b1,b2,b3=network['b1'],network['b2'],network['b3']
a1 = np.dot(x,W1)+b1
z1 = softmax(a1)
a2 = np.dot(z1, W2) + b2
z2 = softmax(a2)
a3 = np.dot(z2, W3) + b3
y=identity_function(a3)
return y
def img_show(img):
pil_img=Image.fromarray(np.uint8(img))
pil_img.show()
def get_data():
(x_train, t_train), (x_test, t_test) = \
load_mnist(flatten=True, normalize=True, one_hot_label=False)
return x_test, t_test
'''
network=init_network()
x=np.array([100,40])
y=forward(network, x)
print(y)
#print(y)
#plt.plot(x,y)
#plt.ylim(-0.1,1.1)
#plt.show()
'''
x_test,t_test = get_data()
network=init_networrk_mnist()
batch_size=100
accuracy_ct=0
for i in range(0, len(x_test), batch_size):  # len() gives the size of x_test, no need to hard-code it
x_batch = x_test[i:i+batch_size]
y_batch = predict(network, x_batch)
    p = np.argmax(y_batch, axis=1)  # take the index with the highest probability
print(np.sum(p == t_test[i:i+batch_size]))
accuracy_ct += np.sum(p == t_test[i:i+batch_size])
'''
y=predict(network, x_test[i])
p=np.argmax(y)
if p==t_test[i] :
accuracy_ct+=1
'''
print("Accuracy:",str(float(accuracy_ct)/len(x_test)))
print(accuracy_ct)
'''
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
img = img.reshape(28,28)  # must reshape to 28x28 or the image does not come out right -> could this be used for encryption or compression?
print(img.shape)
img_show(img)
'''
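# Note on the softmax trick above (appended sketch): subtracting the maximum
# before exponentiating avoids overflow without changing the result.
big = np.array([1010.0, 1000.0, 990.0])
naive = np.exp(big) / np.sum(np.exp(big))  # overflows: nan entries
stable = softmax(big)                      # valid: ~[1.0, 4.5e-05, 2.1e-09]
print(naive, stable)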
| 654
| 0
| 0
| 0
| 0
| 2,029
| 0
| 16
| 407
|
206b2d2a2c251900c661943dfaa5e9366d3668b1
| 9,055
|
py
|
Python
|
slideatlas/security/blueprint.py
|
SlideAtlas/SlideAtlas-Server
|
3b9cbd56eaa29ae08ae521e75616ea230fe26397
|
[
"Apache-2.0"
] | 3
|
2015-10-10T10:17:26.000Z
|
2020-12-14T09:42:19.000Z
|
slideatlas/security/blueprint.py
|
SlideAtlas/SlideAtlas-Server
|
3b9cbd56eaa29ae08ae521e75616ea230fe26397
|
[
"Apache-2.0"
] | 41
|
2015-02-03T19:47:28.000Z
|
2017-02-06T23:24:26.000Z
|
slideatlas/security/blueprint.py
|
SlideAtlas/SlideAtlas-Server
|
3b9cbd56eaa29ae08ae521e75616ea230fe26397
|
[
"Apache-2.0"
] | 2
|
2016-04-04T18:23:27.000Z
|
2017-11-14T22:34:58.000Z
|
# coding=utf-8
from flask import Markup
################################################################################
__all__ = ('blueprint', 'register_with_app')
################################################################################
################################################################################
# TODO: find a way of automatically registering Shibboleth users with the
# appropriate group, similar to facebook_id
################################################################################
def add_config(app):
"""
Set Flask application configuration options.
These are options that should never change.
"""
# Flask-Security configuration
app.config.update(
### Frontend ###
SECURITY_FLASH_MESSAGES=True,
SECURITY_LOGIN_URL='/login',
SECURITY_LOGIN_USER_TEMPLATE='security/login.html',
SECURITY_MSG_DISABLED_ACCOUNT=('Password login is disabled for this account.', 'error'),
SECURITY_LOGOUT_URL='/logout',
# TODO: change '/sessions' to an endpoint name
SECURITY_POST_LOGIN_VIEW='/sessions',
SECURITY_POST_LOGOUT_VIEW='home',
### Password login options ###
SECURITY_DEFAULT_REMEMBER_ME=False,
## New account registration
SECURITY_REGISTERABLE=True,
SECURITY_REGISTER_URL='/login/password/register',
SECURITY_REGISTER_USER_TEMPLATE='security/register.html',
SECURITY_SEND_REGISTER_EMAIL=True,
SECURITY_EMAIL_SUBJECT_REGISTER='SlideAtlas: Account Created',
# uses 'welcome' email body template
# TODO: change the email body template, as the default contains a password confirmation link, and we want non-password users to receive a welcome email too
## Confirmation of user's email address
SECURITY_CONFIRMABLE=True,
SECURITY_CONFIRM_URL='/login/password/confirm',
SECURITY_SEND_CONFIRMATION_TEMPLATE='security/resend_confirmation.html',
SECURITY_EMAIL_SUBJECT_CONFIRM='SlideAtlas: Account Confirmation',
# uses 'confirmation_instructions' email body template
SECURITY_CONFIRM_EMAIL_WITHIN='5 days',
SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
SECURITY_MSG_EMAIL_CONFIRMED=(
Markup(
'Welcome to SlideAtlas! Your account has been confirmed.<br>'
'<br>'
'Site administrators may now grant you access to additional content. '
'You can also contact <a href="mailto:%(email)s">%(email)s</a> with any requests.' %
{'email': app.config['SLIDEATLAS_ADMIN_EMAIL']}
),
'success'),
## Recover / reset a lost password
SECURITY_RECOVERABLE=True,
SECURITY_RESET_URL='/login/password/reset',
SECURITY_FORGOT_PASSWORD_TEMPLATE='security/password_reset_1.html', # step 1
SECURITY_RESET_PASSWORD_TEMPLATE='security/password_reset_2.html', # step 2
SECURITY_EMAIL_SUBJECT_PASSWORD_RESET='SlideAtlas: Password Reset Instructions',
# uses 'reset_instructions' email body template
SECURITY_RESET_PASSWORD_WITHIN='5 days',
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE='SlideAtlas: Password Reset Successful',
# uses 'reset_notice' email body template
## Change a password
SECURITY_CHANGEABLE=True,
SECURITY_CHANGE_URL='/login/password/change',
SECURITY_CHANGE_PASSWORD_TEMPLATE='security/password_change.html',
SECURITY_SEND_PASSWORD_CHANGE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE='SlideAtlas: Password Change Successful',
        # uses 'change_notice' email body template
### Other options ###
SECURITY_TRACKABLE=True, # record login statistics in User model
SECURITY_PASSWORDLESS=False, # an experimental feature
# custom salts can also be set for several other tokens, but this shouldn't be necessary
# TODO: there are a few other undocumented config settings in Flask-Security, explore them
)
# Flask-Login configuration
app.config.update(
SESSION_PROTECTION='basic', # some extra security for cookies, see documentation for details
REMEMBER_COOKIE_DOMAIN=app.session_interface.get_cookie_domain(app),
REMEMBER_COOKIE_HTTPONLY=True,
REMEMBER_COOKIE_SECURE=app.config['SLIDEATLAS_HTTPS'],
)
################################################################################
################################################################################
| 45.049751
| 163
| 0.661182
|
# coding=utf-8
import copy
from flask import Markup, url_for
from flask.ext.security import Security, MongoEngineUserDatastore, user_registered
from flask.ext.security.core import _SecurityState
from flask.ext.security.core import _context_processor as security_default_context_processor
from flask.ext.security.views import create_blueprint as security_create_blueprint
from flask.ext.security.views import send_confirmation as security_send_confirmation
from flask.ext.security.utils import send_mail
from slideatlas import models
from . import forms, views, login_provider
from .principal import register_principal
################################################################################
__all__ = ('blueprint', 'register_with_app')
################################################################################
def register_with_app(app):
add_config(app)
security, blueprint = create_security(app)
register_principal(app, security)
login_provider.add_views(app, blueprint)
# TODO: move the 'site_url' value to config file
security.mail_context_processor(lambda: dict(site_url='https://slide-atlas.org/'))
# TODO: make logins timeout
# may use the 'flask.ext.login.user_loaded_from_*' signals for this, to update the timeout
# furthermore, see the documentation 'flask.ext.login.needs_refresh', and implement re-login
# redirection directly to the user's corresponding login provider if a user's session becomes stale
user_registered.connect(on_user_registered, app)
################################################################################
# TODO: find a way of automatically registering Shibboleth users with the
# appropriate group, similar to facebook_id
def on_user_registered(app, user, confirm_token):
if isinstance(user, models.ShibbolethUser) or user.email.endswith('brown.edu'):
brown_group = models.Group.objects.with_id('529d244959a3aee20f8a00ae')
user.groups.append(brown_group)
user.save()
send_mail(
'SlideAtlas: New User Registered',
app.config['SLIDEATLAS_ADMIN_EMAIL'],
'new_user_notify',
user=user,
admin_user_url=url_for('%sview.edit_view' % user.__class__.__name__.lower(),
id=str(user.id),
_external=True)
)
################################################################################
def add_config(app):
"""
Set Flask application configuration options.
These are options that should never change.
"""
# Flask-Security configuration
app.config.update(
### Frontend ###
SECURITY_FLASH_MESSAGES=True,
SECURITY_LOGIN_URL='/login',
SECURITY_LOGIN_USER_TEMPLATE='security/login.html',
SECURITY_MSG_DISABLED_ACCOUNT=('Password login is disabled for this account.', 'error'),
SECURITY_LOGOUT_URL='/logout',
# TODO: change '/sessions' to an endpoint name
SECURITY_POST_LOGIN_VIEW='/sessions',
SECURITY_POST_LOGOUT_VIEW='home',
### Password login options ###
SECURITY_DEFAULT_REMEMBER_ME=False,
## New account registration
SECURITY_REGISTERABLE=True,
SECURITY_REGISTER_URL='/login/password/register',
SECURITY_REGISTER_USER_TEMPLATE='security/register.html',
SECURITY_SEND_REGISTER_EMAIL=True,
SECURITY_EMAIL_SUBJECT_REGISTER='SlideAtlas: Account Created',
# uses 'welcome' email body template
# TODO: change the email body template, as the default contains a password confirmation link, and we want non-password users to receive a welcome email too
## Confirmation of user's email address
SECURITY_CONFIRMABLE=True,
SECURITY_CONFIRM_URL='/login/password/confirm',
SECURITY_SEND_CONFIRMATION_TEMPLATE='security/resend_confirmation.html',
SECURITY_EMAIL_SUBJECT_CONFIRM='SlideAtlas: Account Confirmation',
# uses 'confirmation_instructions' email body template
SECURITY_CONFIRM_EMAIL_WITHIN='5 days',
SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
SECURITY_MSG_EMAIL_CONFIRMED=(
Markup(
'Welcome to SlideAtlas! Your account has been confirmed.<br>'
'<br>'
'Site administrators may now grant you access to additional content. '
'You can also contact <a href="mailto:%(email)s">%(email)s</a> with any requests.' %
{'email': app.config['SLIDEATLAS_ADMIN_EMAIL']}
),
'success'),
## Recover / reset a lost password
SECURITY_RECOVERABLE=True,
SECURITY_RESET_URL='/login/password/reset',
SECURITY_FORGOT_PASSWORD_TEMPLATE='security/password_reset_1.html', # step 1
SECURITY_RESET_PASSWORD_TEMPLATE='security/password_reset_2.html', # step 2
SECURITY_EMAIL_SUBJECT_PASSWORD_RESET='SlideAtlas: Password Reset Instructions',
# uses 'reset_instructions' email body template
SECURITY_RESET_PASSWORD_WITHIN='5 days',
SECURITY_SEND_PASSWORD_RESET_NOTICE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_NOTICE='SlideAtlas: Password Reset Successful',
# uses 'reset_notice' email body template
## Change a password
SECURITY_CHANGEABLE=True,
SECURITY_CHANGE_URL='/login/password/change',
SECURITY_CHANGE_PASSWORD_TEMPLATE='security/password_change.html',
SECURITY_SEND_PASSWORD_CHANGE_EMAIL=False, # TODO: do we want to send a confirmation email?
SECURITY_EMAIL_SUBJECT_PASSWORD_CHANGE_NOTICE='SlideAtlas: Password Change Successful',
        # uses 'change_notice' email body template
### Other options ###
SECURITY_TRACKABLE=True, # record login statistics in User model
SECURITY_PASSWORDLESS=False, # an experimental feature
# custom salts can also be set for several other tokens, but this shouldn't be necessary
# TODO: there are a few other undocumented config settings in Flask-Security, explore them
)
# Flask-Login configuration
app.config.update(
SESSION_PROTECTION='basic', # some extra security for cookies, see documentation for details
REMEMBER_COOKIE_DOMAIN=app.session_interface.get_cookie_domain(app),
REMEMBER_COOKIE_HTTPONLY=True,
REMEMBER_COOKIE_SECURE=app.config['SLIDEATLAS_HTTPS'],
)
################################################################################
def create_security(app):
# register Flask-Security with app and get blueprint
security = Security(app, SlideatlasMongoEngineUserDatastore(),
register_blueprint=False,
confirm_register_form=forms.RegisterForm,
login_form=forms.LoginForm)
# prevent Flask-Security from automatically creating register and confirm views
# by calling 'security_create_blueprint' with a different state
security_blueprint_state = copy.copy(security._state)
security_blueprint_state.registerable = False
security_blueprint_state.confirmable = False
blueprint = security_create_blueprint(security_blueprint_state, 'flask_security.core')
# add SlideAtlas's own register view, which doesn't immediately require a password
blueprint.add_url_rule(security.register_url,
endpoint='register',
view_func=views.register,
methods=['GET', 'POST'])
# use the Flask-Security's built-in view for re-sending a confirmation, which
# needs to be manually added, since 'confirmable' was set to False
blueprint.add_url_rule(security.confirm_url,
endpoint='send_confirmation',
view_func=security_send_confirmation,
methods=['GET', 'POST'])
# add SlideAtlas's own confirm view, which requires the user to set a password
blueprint.add_url_rule(security.confirm_url + '/<token>',
endpoint='confirm_email',
view_func=views.confirm_email,
methods=['GET', 'POST'])
# do work that Flask-Security would have done if 'register_blueprint' were True
app.register_blueprint(blueprint)
app.context_processor(security_default_context_processor)
return security, blueprint
################################################################################
class SlideatlasMongoEngineUserDatastore(MongoEngineUserDatastore):
def __init__(self):
# 'db' parameter is not necessary for this subclass
super(SlideatlasMongoEngineUserDatastore, self).__init__(None, models.User, None)
self.user_creation_model = models.PasswordUser
def create_user(self, **kwargs):
"""Creates and returns a new user from the given parameters."""
user = self.user_creation_model(**kwargs)
return self.put(user)
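# An editorial usage sketch (hypothetical; the accepted kwargs depend on the
# PasswordUser model defined elsewhere in this codebase):
#   datastore = SlideatlasMongoEngineUserDatastore()
#   user = datastore.create_user(email='[email protected]')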
| 0
| 0
| 0
| 465
| 0
| 3,170
| 0
| 358
| 310
|
6e1958f96728d11d2e7418e4925be857a7286b3c
| 1,616
|
py
|
Python
|
flight/views.py
|
NedyalkoKr/airline
|
d704e8cd98901dc4bb0bf672cc2363432ada3f84
|
[
"MIT"
] | null | null | null |
flight/views.py
|
NedyalkoKr/airline
|
d704e8cd98901dc4bb0bf672cc2363432ada3f84
|
[
"MIT"
] | null | null | null |
flight/views.py
|
NedyalkoKr/airline
|
d704e8cd98901dc4bb0bf672cc2363432ada3f84
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import Http404
from flight.models import Flight, Passenger
def index(request):
''' display all flights '''
context = {
'main_header': 'Flights',
'title': 'Flights',
'flights': Flight.objects.all()
}
return render(request, 'flight/index.html', context)
def flight(request, flight_id):
''' return individual flight details and passengers on this flight'''
try:
flight = Flight.objects.get(pk=flight_id)
except Flight.DoesNotExist:
        raise Http404(f'Flight {flight_id} does not exist.')
context = {
'flight': flight,
'passengers': flight.passengers.all(),
'non_passengers': Passenger.objects.exclude(flight=flight).all(),
'number_of_passengers': flight.passengers.count()
}
return render(request, 'flight/flight.html', context)
| 36.727273
| 99
| 0.678218
|
from django.shortcuts import render
from django.urls import reverse
from django.http import Http404, HttpResponseRedirect
from flight.models import Flight, Passenger
def index(request):
''' display all flights '''
context = {
'main_header': 'Flights',
'title': 'Flights',
'flights': Flight.objects.all()
}
return render(request, 'flight/index.html', context)
def flight(request, flight_id):
''' return individual flight details and passengers on this flight'''
try:
flight = Flight.objects.get(pk=flight_id)
except Flight.DoesNotExist:
        raise Http404(f'Flight {flight_id} does not exist.')
context = {
'flight': flight,
'passengers': flight.passengers.all(),
'non_passengers': Passenger.objects.exclude(flight=flight).all(),
'number_of_passengers': flight.passengers.count()
}
return render(request, 'flight/flight.html', context)
def book(request, flight_id):
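    ''' book a selected passenger on this flight, then redirect to it '''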
try:
passenger_id = int(request.POST['passenger'])
passenger = Passenger.objects.get(pk=passenger_id)
flight = Flight.objects.get(pk=flight_id)
except KeyError:
return render(request, 'flight/error.html', {'message': 'No passenger selected'})
except Flight.DoesNotExist:
        return render(request, 'flight/error.html', {'message': 'No such flight exists'})
except Passenger.DoesNotExist:
        return render(request, 'flight/error.html', {'message': 'No passenger with that id exists'})
passenger.flight.add(flight)
return HttpResponseRedirect(reverse('flight', args=(flight_id,)))
| 0
| 0
| 0
| 0
| 0
| 651
| 0
| 32
| 45
|
c2f3cfc4cf7bad08a1bd21dc39bb6765de3670b2
| 419
|
py
|
Python
|
setup1.py
|
Alexander437/Learning_repo
|
4e40ad419f8117d014f789119f4b3583067020bb
|
[
"CC0-1.0"
] | null | null | null |
setup1.py
|
Alexander437/Learning_repo
|
4e40ad419f8117d014f789119f4b3583067020bb
|
[
"CC0-1.0"
] | null | null | null |
setup1.py
|
Alexander437/Learning_repo
|
4e40ad419f8117d014f789119f4b3583067020bb
|
[
"CC0-1.0"
] | null | null | null |
from setuptools import setup, find_packages
from torch.utils import cpp_extension
setup(
name='my_lib',
version='0.0',
description='Learning setup',
packages=find_packages(),
ext_package='trt_pose',
ext_modules=[cpp_extension.CppExtension('plugins', [
'Learn_cpp/learn.cpp',
])],
cmdclass={'build_ext': cpp_extension.BuildExtension},
install_requires=[
],
)
| 23.277778
| 57
| 0.687351
|
from setuptools import setup, find_packages, Extension
from torch.utils import cpp_extension
setup(
name='my_lib',
version='0.0',
description='Learning setup',
packages=find_packages(),
ext_package='trt_pose',
ext_modules=[cpp_extension.CppExtension('plugins', [
'Learn_cpp/learn.cpp',
])],
cmdclass={'build_ext': cpp_extension.BuildExtension},
install_requires=[
],
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0
|
8678ddca56a8e9b76f05e9a0a06fe329c6224b43
| 8,342
|
py
|
Python
|
bookops_callno/normalizer.py
|
BookOps-CAT/bookops-callno
|
a8f1d2744b3b53844dc97a5400ae87a2db92cd4c
|
[
"MIT"
] | null | null | null |
bookops_callno/normalizer.py
|
BookOps-CAT/bookops-callno
|
a8f1d2744b3b53844dc97a5400ae87a2db92cd4c
|
[
"MIT"
] | null | null | null |
bookops_callno/normalizer.py
|
BookOps-CAT/bookops-callno
|
a8f1d2744b3b53844dc97a5400ae87a2db92cd4c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Optional
from pymarc import Field
from unidecode import unidecode, UnidecodeError
from bookops_callno.errors import CallNoConstructorError
def remove_trailing_punctuation(value: str) -> str:
"""
Removes any trailing periods, commas, etc.
Args:
value: string to be processed
Returns:
value
"""
if not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
    while value and value[-1] in ".,:;-() ":
value = value[:-1]
return value
def normalize_value(value: str) -> str:
"""
Removes diacritics from string and changes to uppercase
"""
if not value:
return ""
elif not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
try:
value = value.replace("\u02b9", "") # Russian: modifier letter prime
value = value.replace("\u02bb", "") # Arabic modifier letter turned comma
value = value.replace("'", "")
value = unidecode(value, errors="strict")
value = remove_trailing_punctuation(value).upper()
return value
except UnidecodeError as exc:
raise CallNoConstructorError(
f"Unsupported character encountered. Error: '{exc}'."
)
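# Editorial sketch (invented value) of the two helpers above working together:
# transliterate with unidecode, drop the trailing comma, and uppercase.
if __name__ == "__main__":
    print(normalize_value("Dvořák, Antonín,"))  # -> DVORAK, ANTONIN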
def corporate_name_first_word(field: Field = None) -> Optional[str]:
"""
    Returns the uppercase first word of the corporate entity from
the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
words = field["a"].strip().split(" ")
name = normalize_value(words[0])
return name
def corporate_name_full(field: Field = None) -> Optional[str]:
"""
Returns an uppercase full name of corporate entity.
Uses the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("110", "610"):
return None
phrases = field["a"].strip().split("(")
name = normalize_value(phrases[0])
return name
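# Editorial note: the split on "(" above drops any parenthetical qualifier,
# so a 110 field reading "United Nations (1945- )" normalizes to
# "UNITED NATIONS".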
def corporate_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the uppercase first letter of the corporate entity
based on the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
name = field["a"]
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the first letter of the last name of a personal author
Args:
field: pymarc.Field instance
Returns
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "100":
return None
name = field["a"].strip()
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_surname(field: Field = None) -> Optional[str]:
"""
Returns an uppercase surname of personal author. Includes any numeration from
    the subfield $b of 100 or 600 MARC tag.
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("100", "600"):
return None
elif field.indicator1 not in ("0", "1"):
return None
sub_a = field["a"].strip()
# include subfield $b if present
try:
sub_b = field["b"].strip()
name = f"{sub_a} {sub_b}"
except AttributeError:
name = sub_a
name = normalize_value(name)
# stop at comma to select surname
try:
stop = name.index(",")
name = name[:stop]
except ValueError:
pass
return name
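# Editorial sketch (field values invented; assumes pymarc's classic
# list-style subfields constructor): subfield $b is appended before the cut
# at the first comma, so $a "Adams, Douglas," with $b "II" yields "ADAMS".
if __name__ == "__main__":
    heading = Field(tag="100", indicators=["1", " "],
                    subfields=["a", "Adams, Douglas,", "b", "II"])
    print(personal_name_surname(heading))  # -> ADAMS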
def subject_corporate_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase corporate name to be used in subject segment
of the call number based on MARC tag 610
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "610":
return None
name = corporate_name_full(field)
return name
def subject_family_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase family name based on the 600 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
elif field.indicator1 != "3":
return None
try:
stop = field["a"].index("family")
name = field["a"][:stop]
except ValueError:
return None
name = normalize_value(name)
return name
def subject_personal_name(field: Field = None) -> Optional[str]:
"""
Returns personal name to be used in subject segment of the call
    number. Use for biography or Dewey + Name patterns, examples:
    biography: B LOUIS XIV C
    criticism of works of an author: 813 ADAMS C
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
name = personal_name_surname(field)
return name
def subject_topic(field: Field = None) -> Optional[str]:
"""
Returns an uppercase topic to be used in the subject segment of the call
number based on MARC tag 650. Valid only for BPL call numbers.
Examples: programming language, name of operating system, etc.
Args:
field: pymarc.Field instance
Returns:
topic
"""
pass
def title_first_word(field: Field = None) -> Optional[str]:
"""
Returns an uppercase first word (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
word
"""
pass
def title_initial(field: Field = None) -> Optional[str]:
"""
Returns an uppercase initial (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "245":
return None
try:
ind2 = int(field.indicator2)
except ValueError:
return None
title = field["a"][ind2:]
title = normalize_value(title)
initial = title[0]
return initial
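# Editorial sketch (invented record): indicator 2 of the 245 tag counts the
# non-filing characters, so "The hobbit :" with indicator2 "4" skips "The ".
if __name__ == "__main__":
    title = Field(tag="245", indicators=["0", "4"],
                  subfields=["a", "The hobbit :"])
    print(title_initial(title))  # -> H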
| 23.902579
| 82
| 0.593743
|
# -*- coding: utf-8 -*-
from typing import Optional
from pymarc import Field
from unidecode import unidecode, UnidecodeError
from bookops_callno.errors import CallNoConstructorError
def remove_trailing_punctuation(value: str) -> str:
"""
Removes any trailing periods, commas, etc.
Args:
value: string to be processed
Returns:
value
"""
if not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
    while value and value[-1] in ".,:;-() ":
value = value[:-1]
return value
def normalize_value(value: str) -> str:
"""
Removes diacritics from string and changes to uppercase
"""
if not value:
return ""
elif not isinstance(value, str):
raise CallNoConstructorError(
"Invalid 'value' type used in argument. Must be a string."
)
try:
value = value.replace("\u02b9", "") # Russian: modifier letter prime
value = value.replace("\u02bb", "") # Arabic modifier letter turned comma
value = value.replace("'", "")
value = unidecode(value, errors="strict")
value = remove_trailing_punctuation(value).upper()
return value
except UnidecodeError as exc:
raise CallNoConstructorError(
f"Unsupported character encountered. Error: '{exc}'."
)
def corporate_name_first_word(field: Field = None) -> Optional[str]:
"""
    Returns the uppercase first word of the corporate entity from
the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
words = field["a"].strip().split(" ")
name = normalize_value(words[0])
return name
def corporate_name_full(field: Field = None) -> Optional[str]:
"""
Returns an uppercase full name of corporate entity.
Uses the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("110", "610"):
return None
phrases = field["a"].strip().split("(")
name = normalize_value(phrases[0])
return name
def corporate_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the uppercase first letter of the corporate entity
based on the 110 MARC tag
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "110":
return None
name = field["a"]
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_initial(field: Field = None) -> Optional[str]:
"""
Returns the first letter of the last name of a personal author
Args:
field: pymarc.Field instance
Returns
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "100":
return None
name = field["a"].strip()
name = normalize_value(name)
initial = name[0]
return initial
def personal_name_surname(field: Field = None) -> Optional[str]:
"""
Returns an uppercase surname of personal author. Includes any numeration from
    the subfield $b of 100 or 600 MARC tag.
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag not in ("100", "600"):
return None
elif field.indicator1 not in ("0", "1"):
return None
sub_a = field["a"].strip()
# include subfield $b if present
try:
sub_b = field["b"].strip()
name = f"{sub_a} {sub_b}"
except AttributeError:
name = sub_a
name = normalize_value(name)
# stop at comma to select surname
try:
stop = name.index(",")
name = name[:stop]
except ValueError:
pass
return name
def subject_corporate_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase corporate name to be used in subject segment
of the call number based on MARC tag 610
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "610":
return None
name = corporate_name_full(field)
return name
def subject_family_name(field: Field = None) -> Optional[str]:
"""
Returns an uppercase family name based on the 600 MARC tag
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
elif field.indicator1 != "3":
return None
try:
stop = field["a"].index("family")
name = field["a"][:stop]
except ValueError:
return None
name = normalize_value(name)
return name
def subject_personal_name(field: Field = None) -> Optional[str]:
"""
Returns personal name to be used in subject segment of the call
    number. Use for biography or Dewey + Name patterns, examples:
    biography: B LOUIS XIV C
    criticism of works of an author: 813 ADAMS C
Args:
field: pymarc.Field instance
Returns:
name
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "600":
return None
name = personal_name_surname(field)
return name
def subject_topic(field: Field = None) -> Optional[str]:
"""
Returns an uppercase topic to be used in the subject segment of the call
number based on MARC tag 650. Valid only for BPL call numbers.
Examples: programming language, name of operating system, etc.
Args:
field: pymarc.Field instance
Returns:
topic
"""
pass
def title_first_word(field: Field = None) -> Optional[str]:
"""
Returns an uppercase first word (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
word
"""
pass
def title_initial(field: Field = None) -> Optional[str]:
"""
Returns an uppercase initial (skipping any articles) of
the title field (245 MARC tag subfield $a).
Args:
field: pymarc.Field instance
Returns:
initial
"""
if field is None:
return None
elif not isinstance(field, Field):
raise CallNoConstructorError(
"Invalid 'field' argument type. Must be pymarc.Field instance."
)
if field.tag != "245":
return None
try:
ind2 = int(field.indicator2)
except ValueError:
return None
title = field["a"][ind2:]
title = normalize_value(title)
initial = title[0]
return initial
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b721c28c4d1d01229eaf38efadeba74addb10f97
| 1,310
|
py
|
Python
|
ex31.py
|
Lorranysousc/ExerciciosDeRepeticao
|
4b8ac1c4eb3ac5d2739456a4f967e094fad70256
|
[
"MIT"
] | null | null | null |
ex31.py
|
Lorranysousc/ExerciciosDeRepeticao
|
4b8ac1c4eb3ac5d2739456a4f967e094fad70256
|
[
"MIT"
] | null | null | null |
ex31.py
|
Lorranysousc/ExerciciosDeRepeticao
|
4b8ac1c4eb3ac5d2739456a4f967e094fad70256
|
[
"MIT"
] | null | null | null |
'''Mr. Manoel Joaquim expanded his business beyond the 1.99 stores and now owns a convenience store. Write a program that implements a rudimentary cash register. The program should accept an unknown number of values for the prices of the goods. The operator enters a value of zero to indicate the end of the purchase. The program should then show the purchase total, ask how much cash the customer handed over, and then compute and show the change. After this operation, the program should return to the starting point to register the next purchase. The output should follow the example below: '''
from time import sleep
start = 1
while start == 1: # Restarts the program when it reaches the end.
print('LOJAS TABAJARA')
cont = 1
valor_produto = ''
total_compra = 0
    while valor_produto != 0: # Reads the price of each purchased product.
valor_produto = float(input(f'Produto {cont}: R$ '))
total_compra += valor_produto
cont += 1
    if valor_produto == 0: # Finishes the purchase.
print(f'Total: R$ {total_compra:.2f}')
dinheiro_cliente = float(input('Dinheiro: R$ '))
troco = dinheiro_cliente - total_compra
print(f'Troco: R$ {troco:.2f}')
sleep(3)
| 65.5
| 660
| 0.703817
|
'''Mr. Manoel Joaquim expanded his business beyond the 1.99 stores and now owns a convenience store. Write a program that implements a rudimentary cash register. The program should accept an unknown number of values for the prices of the goods. The operator enters a value of zero to indicate the end of the purchase. The program should then show the purchase total, ask how much cash the customer handed over, and then compute and show the change. After this operation, the program should return to the starting point to register the next purchase. The output should follow the example below: '''
from time import sleep
start = 1
while start == 1: # Restarts the program when it reaches the end.
print('LOJAS TABAJARA')
cont = 1
valor_produto = ''
total_compra = 0
    while valor_produto != 0: # Reads the price of each purchased product.
valor_produto = float(input(f'Produto {cont}: R$ '))
total_compra += valor_produto
cont += 1
    if valor_produto == 0: # Finishes the purchase.
print(f'Total: R$ {total_compra:.2f}')
dinheiro_cliente = float(input('Dinheiro: R$ '))
troco = dinheiro_cliente - total_compra
print(f'Troco: R$ {troco:.2f}')
sleep(3)
| 32
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c9bdceebaee8f789e4c6a4a1d04b4ef5a1c5d7f9
| 399
|
py
|
Python
|
tests/unit/dummy/__init__.py
|
fabiannagel/schnetkit
|
bf0b9055bdc393d01ac6c3d5f17bb9db13297e32
|
[
"MIT"
] | 1
|
2021-11-03T15:13:48.000Z
|
2021-11-03T15:13:48.000Z
|
tests/unit/dummy/__init__.py
|
fabiannagel/schnetkit
|
bf0b9055bdc393d01ac6c3d5f17bb9db13297e32
|
[
"MIT"
] | null | null | null |
tests/unit/dummy/__init__.py
|
fabiannagel/schnetkit
|
bf0b9055bdc393d01ac6c3d5f17bb9db13297e32
|
[
"MIT"
] | 1
|
2022-02-02T17:34:05.000Z
|
2022-02-02T17:34:05.000Z
|
models = [Dummy]
| 15.96
| 37
| 0.573935
|
from schnetkit.engine import Stateful
class Dummy(Stateful):
def __init__(self, a=2):
self.a = a
self.state = "great"
def get_dict(self):
return {"a": self.a}
def get_state(self):
return {"state": self.state}
def restore(self, payload):
self.state = payload["state"]
def work(self):
self.state = "tired"
models = [Dummy]
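# Editorial sketch of the Stateful round trip this test double exercises
# (only the methods defined above are assumed):
if __name__ == "__main__":
    d = Dummy()
    d.work()                      # state -> "tired"
    fresh = Dummy()
    fresh.restore(d.get_state())  # fresh.state == "tired"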
| 0
| 0
| 0
| 318
| 0
| 0
| 0
| 16
| 45
|
9af2d928d6cc2a53fd788a67b6c0a78899bbda9e
| 1,106
|
py
|
Python
|
tracks/BamFeatures.py
|
goeckslab/jbrowse-archive-creator
|
438557136c9dd4eb0db89835e5d253e44b50a7a3
|
[
"AFL-3.0"
] | null | null | null |
tracks/BamFeatures.py
|
goeckslab/jbrowse-archive-creator
|
438557136c9dd4eb0db89835e5d253e44b50a7a3
|
[
"AFL-3.0"
] | null | null | null |
tracks/BamFeatures.py
|
goeckslab/jbrowse-archive-creator
|
438557136c9dd4eb0db89835e5d253e44b50a7a3
|
[
"AFL-3.0"
] | null | null | null |
#!/usr/bin/env python2
| 38.137931
| 114
| 0.699819
|
#!/usr/bin/env python2
import os
import json
import logging
from TrackDb import TrackDb
from util import subtools
from util import santitizer
class BamFeatures(TrackDb):
def __init__(self, trackName, trackLabel, trackDataURL, trackType, dataType, extraSettings=None):
super(BamFeatures, self).__init__(trackName, trackLabel, trackDataURL, trackType, dataType, extraSettings)
def prepareExtraSetting(self):
if 'category' not in self.extraSettings or not self.extraSettings['category']:
self.extraSettings['category'] = "Default group"
bam_track = dict()
bam_track['type'] = 'JBrowse/View/Track/Alignments2'
bam_track['storeClass'] = 'JBrowse/Store/SeqFeature/BAM'
bam_track['urlTemplate'] = os.path.join('bbi', self.trackName)
bam_track['baiUrlTemplate'] = os.path.join('bbi', self.extraSettings['index'])
bam_track['label'] = self.trackLabel
bam_track['category'] = self.extraSettings['category']
#extraConfigs = json.dumps(bam_track)
extraConfigs = bam_track
return extraConfigs
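    # Example of the returned configuration (values illustrative), assuming a
    # trackName of 'sample.bam' and an 'index' of 'sample.bam.bai':
    #   {'type': 'JBrowse/View/Track/Alignments2',
    #    'storeClass': 'JBrowse/Store/SeqFeature/BAM',
    #    'urlTemplate': 'bbi/sample.bam',
    #    'baiUrlTemplate': 'bbi/sample.bam.bai',
    #    'label': <trackLabel>, 'category': 'Default group'}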
| 0
| 0
| 0
| 934
| 0
| 0
| 0
| -13
| 156
|
defb13f18fc11dc096d17386bf5d7d31a9e0c762
| 5,573
|
py
|
Python
|
coolamqp/uplink/handshake.py
|
smok-serwis/coolamqp
|
d57ada0d478bd1ca94743ae341f6819ba85ea253
|
[
"MIT"
] | 4
|
2018-06-20T13:59:35.000Z
|
2021-08-31T12:03:59.000Z
|
coolamqp/uplink/handshake.py
|
piotrmaslanka/coolamqp
|
d57ada0d478bd1ca94743ae341f6819ba85ea253
|
[
"MIT"
] | 33
|
2016-06-03T11:41:09.000Z
|
2020-07-09T17:48:28.000Z
|
coolamqp/uplink/handshake.py
|
smok-serwis/coolamqp
|
d57ada0d478bd1ca94743ae341f6819ba85ea253
|
[
"MIT"
] | null | null | null |
# coding=UTF-8
from __future__ import absolute_import, division, print_function
"""
Provides reactors that can authenticate an AMQP session
"""
import logging
from coolamqp import __version__
PUBLISHER_CONFIRMS = b'publisher_confirms'
CONSUMER_CANCEL_NOTIFY = b'consumer_cancel_notify'
CONNECTION_BLOCKED = b'connection.blocked'
SUPPORTED_EXTENSIONS = [
PUBLISHER_CONFIRMS,
    CONSUMER_CANCEL_NOTIFY,  # partial support - we just .cancel the consumer, see #12
CONNECTION_BLOCKED
]
CLIENT_DATA = [
    # because RabbitMQ does not allow these fields
    # to be of type short-string
(b'product', (b'CoolAMQP', 'S')),
(b'version', (__version__.encode('utf8'), 'S')),
(b'copyright', (b'Copyright (C) 2016-2021 SMOK sp. z o.o.', 'S')),
(
b'information', (
b'Licensed under the MIT License.\nSee https://github.com/smok-serwis/coolamqp for details',
'S')),
(b'capabilities',
([(capa, (True, 't')) for capa in SUPPORTED_EXTENSIONS], 'F')),
]
WATCHDOG_TIMEOUT = 10
logger = logging.getLogger(__name__)
| 39.524823
| 104
| 0.624978
|
# coding=UTF-8
from __future__ import absolute_import, division, print_function
"""
Provides reactors that can authenticate an AMQP session
"""
import six
import typing as tp
import copy
import logging
from coolamqp.framing.definitions import ConnectionStart, ConnectionStartOk, \
ConnectionTune, ConnectionTuneOk, ConnectionOpen, ConnectionOpenOk
from coolamqp.framing.frames import AMQPMethodFrame
from coolamqp.uplink.connection.states import ST_ONLINE
from coolamqp.uplink.heartbeat import Heartbeater
from coolamqp import __version__
PUBLISHER_CONFIRMS = b'publisher_confirms'
CONSUMER_CANCEL_NOTIFY = b'consumer_cancel_notify'
CONNECTION_BLOCKED = b'connection.blocked'
SUPPORTED_EXTENSIONS = [
PUBLISHER_CONFIRMS,
    CONSUMER_CANCEL_NOTIFY,  # partial support - we just .cancel the consumer, see #12
CONNECTION_BLOCKED
]
CLIENT_DATA = [
    # because RabbitMQ does not allow these fields
    # to be of type short-string
(b'product', (b'CoolAMQP', 'S')),
(b'version', (__version__.encode('utf8'), 'S')),
(b'copyright', (b'Copyright (C) 2016-2021 SMOK sp. z o.o.', 'S')),
(
b'information', (
b'Licensed under the MIT License.\nSee https://github.com/smok-serwis/coolamqp for details',
'S')),
(b'capabilities',
([(capa, (True, 't')) for capa in SUPPORTED_EXTENSIONS], 'F')),
]
WATCHDOG_TIMEOUT = 10
logger = logging.getLogger(__name__)
class Handshaker(object):
"""
Object that given a connection rolls the handshake.
"""
def __init__(self, connection, # type: coolamqp.uplink.connection.Connection
node_definition, # type: coolamqp.objects.NodeDefinition
on_success, # type: tp.Callable[[], None]
extra_properties=None # type: tp.Dict[bytes, tp.Tuple[tp.Any, str]]
):
"""
:param connection: Connection instance to use
:type node_definition: NodeDefinition
:param on_success: callable/0, on success
"""
self.connection = connection
self.login = node_definition.user.encode('utf8')
self.password = node_definition.password.encode('utf8')
self.virtual_host = node_definition.virtual_host.encode('utf8')
self.heartbeat = node_definition.heartbeat or 0
self.connection.watch_for_method(0, ConnectionStart,
self.on_connection_start)
# Callbacks
self.on_success = on_success
self.EXTRA_PROPERTIES = extra_properties or []
# Called by internal setup
def on_watchdog(self):
"""
Called WATCHDOG_TIMEOUT seconds after setup begins
        If we are not ST_ONLINE by then, something is wrong, and we kill this connection.
        """
        # Not connected within the watchdog timeout - abort
if self.connection.state != ST_ONLINE:
# closing the connection this way will get to Connection by channels of ListenerThread
self.connection.send(None)
def on_connection_start(self, payload # type: coolamqp.framing.base.AMQPPayload
):
sasl_mechanisms = payload.mechanisms.tobytes().split(b' ')
locale_supported = payload.locales.tobytes().split(b' ')
# Select a mechanism
if b'PLAIN' not in sasl_mechanisms:
raise ValueError('Server does not support PLAIN')
# Select capabilities
server_props = dict(payload.server_properties)
if b'capabilities' in server_props:
for label, fv in server_props[b'capabilities'][0]:
if label in SUPPORTED_EXTENSIONS:
if fv[0]:
self.connection.extensions.append(label)
self.connection.watchdog(WATCHDOG_TIMEOUT, self.on_watchdog)
self.connection.watch_for_method(0, ConnectionTune,
self.on_connection_tune)
CLIENT_DATA_c = copy.copy(CLIENT_DATA)
CLIENT_DATA_c.extend(self.EXTRA_PROPERTIES)
self.connection.send([
AMQPMethodFrame(0,
ConnectionStartOk(CLIENT_DATA_c, b'PLAIN',
b'\x00' + self.login + b'\x00' + self.password,
locale_supported[0]
))
])
def on_connection_tune(self, payload # type: coolamqp.framing.base.AMQPPayload
):
self.connection.frame_max = payload.frame_max
self.connection.heartbeat = min(payload.heartbeat, self.heartbeat)
self.connection.free_channels.extend(six.moves.xrange(1, (
65535 if payload.channel_max == 0 else payload.channel_max) + 1))
self.connection.watch_for_method(0, ConnectionOpenOk,
self.on_connection_open_ok)
self.connection.send([
AMQPMethodFrame(0, ConnectionTuneOk(payload.channel_max,
payload.frame_max,
self.connection.heartbeat)),
AMQPMethodFrame(0, ConnectionOpen(self.virtual_host))
])
# Install heartbeat handlers NOW, if necessary
if self.connection.heartbeat > 0:
Heartbeater(self.connection, self.connection.heartbeat)
def on_connection_open_ok(self, payload # type: coolamqp.framing.base.AMQPPayload
):
self.on_success()
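
# Editorial note: the ConnectionStartOk reply above uses SASL PLAIN, whose
# payload is simply NUL-delimited authzid/authcid/password. A standalone
# sketch of the same encoding (names invented):
#   def plain_response(login, password):
#       return b'\x00' + login + b'\x00' + password
#   plain_response(b'guest', b'guest')  # -> b'\x00guest\x00guest'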
| 0
| 0
| 0
| 4,096
| 0
| 0
| 0
| 197
| 177
|
08045555ebdef5af831c50bb02363844d684733e
| 10,852
|
py
|
Python
|
nodejs-mobile/test/testpy/__init__.py
|
xuelongqy/cnode
|
ac256264d329e68b6c5ae3281b0e7bb5a95ae164
|
[
"MIT"
] | null | null | null |
nodejs-mobile/test/testpy/__init__.py
|
xuelongqy/cnode
|
ac256264d329e68b6c5ae3281b0e7bb5a95ae164
|
[
"MIT"
] | 4
|
2020-03-13T14:45:49.000Z
|
2020-03-15T16:31:22.000Z
|
nodejs-mobile/test/testpy/__init__.py
|
xuelongqy/cnode
|
ac256264d329e68b6c5ae3281b0e7bb5a95ae164
|
[
"MIT"
] | 1
|
2020-03-15T16:02:18.000Z
|
2020-03-15T16:02:18.000Z
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
chakraBannedFlags = ["--expose_externalize_string"]
| 39.176895
| 89
| 0.646056
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
from os.path import join, dirname, exists, splitext, isdir, basename
import re
import ast
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
chakraBannedFlags = ["--expose_externalize_string"]
class SimpleTestCase(test.TestCase):
def __init__(self, path, file, arch, mode, context, config, jsEngine, additional=None):
super(SimpleTestCase, self).__init__(context, path, arch, mode)
self.file = file
self.config = config
self.arch = arch
self.mode = mode
self.jsEngine = jsEngine
if additional is not None:
self.additional_flags = additional
else:
self.additional_flags = []
def GetLabel(self):
return "%s %s" % (self.mode, self.GetName())
def GetName(self):
return self.path[-1]
def GetCommand(self):
result = [self.config.context.GetVm(self.arch, self.mode)]
source = open(self.file).read()
flags_match = FLAGS_PATTERN.search(source)
if flags_match:
flag = flags_match.group(1).strip().split()
if self.jsEngine == "chakracore":
flag = filter(lambda x: x not in chakraBannedFlags, flag)
# The following block reads config.gypi to extract the v8_enable_inspector
# value. This is done to check if the inspector is disabled in which case
# the '--inspect' flag cannot be passed to the node process as it will
# cause node to exit and report the test as failed. The use case
# is currently when Node is configured --without-ssl and the tests should
# still be runnable but skip any tests that require ssl (which includes
# the inspector related tests). Also, if there is no ssl support the
# options '--use-bundled-ca' and '--use-openssl-ca' will also cause a
# similar failure so such tests are also skipped.
if len(flag) == 0:
pass
elif ('--inspect' in flag[0] or \
'--use-bundled-ca' in flag[0] or \
'--use-openssl-ca' in flag[0]) and \
self.context.v8_enable_inspector == 0:
print('Skipping as node was configured --without-ssl')
else:
result += flag
    files_match = FILES_PATTERN.search(source)
additional_files = []
if files_match:
additional_files += files_match.group(1).strip().split()
for a_file in additional_files:
result.append(join(dirname(self.config.root), '..', a_file))
if self.additional_flags:
result += self.additional_flags
result += [self.file]
return result
def GetSource(self):
return open(self.file).read()
class MessageTestCase(SimpleTestCase):
def __init__(self, path, file, arch, mode, context, config, expected,
jsEngine, additional=None):
super(MessageTestCase, self).__init__(path, file, arch, mode, context,
config, jsEngine, additional)
self.expected = expected
def IgnoreLine(self, str):
"""Ignore empty lines and valgrind output."""
if not str.strip(): return True
else: return str.startswith('==') or str.startswith('**')
def IsFailureOutput(self, output):
f = file(self.expected)
# Skip initial '#' comment and spaces
#for line in f:
# if (not line.startswith('#')) and (not line.strip()):
# break
# Convert output lines to regexps that we can match
env = { 'basename': basename(self.file) }
patterns = [ ]
for line in f:
if not line.strip():
continue
pattern = re.escape(line.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
pattern = '^%s$' % pattern
patterns.append(pattern)
# Compare actual output with the expected
raw_lines = (output.stdout + output.stderr).split('\n')
outlines = [ s for s in raw_lines if not self.IgnoreLine(s) ]
if len(outlines) != len(patterns):
print "length differs."
print "expect=%d" % len(patterns)
print "actual=%d" % len(outlines)
print "patterns:"
for i in xrange(len(patterns)):
print "pattern = %s" % patterns[i]
print "outlines:"
for i in xrange(len(outlines)):
print "outline = %s" % outlines[i]
return True
for i in xrange(len(patterns)):
if not re.match(patterns[i], outlines[i]):
print "match failed"
print "line=%d" % i
print "expect=%s" % patterns[i]
print "actual=%s" % outlines[i]
return True
return False
def GetSource(self):
return (open(self.file).read()
+ "\n--- expected output ---\n"
+ open(self.expected).read())
class SimpleTestConfiguration(test.TestConfiguration):
def __init__(self, context, root, section, additional=None):
super(SimpleTestConfiguration, self).__init__(context, root)
self.section = section
if additional is not None:
self.additional_flags = additional
else:
self.additional_flags = []
def Ls(self, path):
return [f for f in os.listdir(path) if re.match('^test-.*\.m?js$', f)]
def ListTests(self, current_path, path, arch, mode, jsEngine):
all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], ""))
test_name = test[:-1] + [splitext(test[-1])[0]]
result.append(SimpleTestCase(test_name, file_path, arch, mode,
self.context, self, jsEngine,
self.additional_flags))
return result
def GetBuildRequirements(self):
return ['sample', 'sample=shell']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, '%s.status' % (self.section))
if exists(status_file):
test.ReadConfigurationInto(status_file, sections, defs)
class ParallelTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(ParallelTestConfiguration, self).__init__(context, root, section,
additional)
def ListTests(self, current_path, path, arch, mode, jsEngine):
result = super(ParallelTestConfiguration, self).ListTests(
current_path, path, arch, mode, jsEngine)
for test in result:
test.parallel = True
return result
class AddonTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(AddonTestConfiguration, self).__init__(context, root, section,
additional)
def Ls(self, path):
def SelectTest(name):
return name.endswith('.js')
result = []
for subpath in os.listdir(path):
if os.path.isdir(join(path, subpath)):
for f in os.listdir(join(path, subpath)):
if SelectTest(f):
result.append([subpath, f[:-3]])
return result
def ListTests(self, current_path, path, arch, mode, jsEngine):
all_tests = [current_path + t for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], "") + ".js")
result.append(
SimpleTestCase(test, file_path, arch, mode, self.context, self,
jsEngine, self.additional_flags))
return result
class AbortTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(AbortTestConfiguration, self).__init__(context, root, section,
additional)
def ListTests(self, current_path, path, arch, mode, jsEngine):
result = super(AbortTestConfiguration, self).ListTests(
current_path, path, arch, mode, jsEngine)
for test in result:
test.disable_core_files = True
return result
class MessageTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=None):
super(MessageTestConfiguration, self).__init__(context, root, section,
additional)
def Ls(self, path):
if isdir(path):
return [f for f in os.listdir(path)
if f.endswith('.js') or f.endswith('.mjs')]
else:
return []
def ListTests(self, current_path, path, arch, mode, jsEngine):
all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
test_name = test[:-1] + [splitext(test[-1])[0]]
file_path = join(self.root, reduce(join, test[1:], ''))
file_prefix = file_path[:file_path.rfind('.')]
engine_output_path = file_prefix + (".%s.out" % jsEngine)
output_path = file_prefix + '.out'
if exists(engine_output_path):
output_path = engine_output_path
else:
if not exists(output_path):
raise Exception("Could not find %s" % output_path)
result.append(MessageTestCase(test_name, file_path, arch, mode,
self.context, self, output_path, jsEngine,
self.additional_flags))
return result
| 0
| 0
| 0
| 8,858
| 0
| 0
| 0
| 14
| 250
|
3e13d9f04c5b9e380942a3048140fa5f7f9bee3d
| 919
|
py
|
Python
|
patterns/creational/factory_method.py
|
zhaijingrong/patterns_in_python
|
8cb53a58cbb78dc7ed578887a8e7c481cfa72c80
|
[
"MIT"
] | null | null | null |
patterns/creational/factory_method.py
|
zhaijingrong/patterns_in_python
|
8cb53a58cbb78dc7ed578887a8e7c481cfa72c80
|
[
"MIT"
] | null | null | null |
patterns/creational/factory_method.py
|
zhaijingrong/patterns_in_python
|
8cb53a58cbb78dc7ed578887a8e7c481cfa72c80
|
[
"MIT"
] | null | null | null |
"""
--
1.
"""
if __name__ == '__main__':
cream_cake_factory = CreamCakeFactory()
cream_cake = cream_cake_factory.make_cake()
print(cream_cake)
fruit_cake_factory = FruitCakeFactory()
fruit_cake = fruit_cake_factory.make_cake()
print(fruit_cake)
| 19.145833
| 47
| 0.671382
|
"""
Factory Method -- an object creational pattern
1. Intent
Define an interface for creating an object, but let subclasses decide which class to instantiate, deferring a class's instantiation to its subclasses.
"""
class CakeFactory(object):
def make_cake(self):
print('make a cake')
class CreamCakeFactory(CakeFactory):
def make_cake(self):
print('make a cream cake')
return CreamCake()
class FruitCakeFactory(CakeFactory):
def make_cake(self):
print('make a fruit cake')
return FruitCake()
class Cake(object):
def __repr__(self):
return 'This is a cake'
class CreamCake(Cake):
def __repr__(self):
return 'This is a cream cake'
class FruitCake(Cake):
def __repr__(self):
return 'This is a fruit cake'
if __name__ == '__main__':
cream_cake_factory = CreamCakeFactory()
cream_cake = cream_cake_factory.make_cake()
print(cream_cake)
fruit_cake_factory = FruitCakeFactory()
fruit_cake = fruit_cake_factory.make_cake()
print(fruit_cake)
| 168
| 0
| 0
| 443
| 0
| 0
| 0
| 0
| 138
|
8a8e864f9097a33ac84f3576473fa8671c78d0e2
| 1,583
|
py
|
Python
|
website/account/models.py
|
divmoe/DASHBOARD
|
42927dfca3797e0bde3e59288a156e33aec6790d
|
[
"MIT"
] | null | null | null |
website/account/models.py
|
divmoe/DASHBOARD
|
42927dfca3797e0bde3e59288a156e33aec6790d
|
[
"MIT"
] | null | null | null |
website/account/models.py
|
divmoe/DASHBOARD
|
42927dfca3797e0bde3e59288a156e33aec6790d
|
[
"MIT"
] | null | null | null |
# Create your models here.
| 34.413043
| 77
| 0.722678
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user=models.OneToOneField(User,null=True,on_delete=models.CASCADE)
name = models.CharField(max_length=100,null=True)
email= models.CharField(max_length=100,null=True)
phone= models.CharField(max_length=100,null=True)
photo=models.ImageField(null=True,blank=True)
    def __str__(self):
        return self.name
class Tag(models.Model):
name = models.CharField(max_length=100,null=True)
    def __str__(self):
        return self.name
class Product(models.Model):
CATOGORY={
('indoor','indoor'),
('OUT DOOR','OUT DOOR')
}
name=models.CharField(max_length=100,null=True)
price=models.FloatField(null=True)
catogory=models.CharField(max_length=100,null=True,choices=CATOGORY)
description=models.CharField(max_length=100,null=True,blank=True)
date_created=models.DateTimeField(auto_now_add=True,null=True)
tag = models.ManyToManyField(Tag)
    def __str__(self):
        return self.name
class Order(models.Model):
STATUS={('pending','pending'),
('out for delivery','out for delivery'),
('Delivered','Delivered')
}
customer =models.ForeignKey(Customer,null=True,on_delete=models.SET_NULL)
product =models.ForeignKey(Product,null=True,on_delete=models.SET_NULL)
status= models.CharField(max_length=100,null=True,choices=STATUS)
date_created = models.DateTimeField(auto_now_add=True,null=True)
def __str__(self):
return self.product.name
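# Editorial sketch (hypothetical data) of how the models above relate:
#   customer = Customer.objects.create(name='Ada', email='[email protected]')
#   product = Product.objects.create(name='Tent', price=99.0, catogory='OUT DOOR')
#   Order.objects.create(customer=customer, product=product, status='pending')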
| 0
| 0
| 0
| 1,392
| 0
| 0
| 0
| 29
| 134
|
0ce133badac8ace62355d38651dd265c044af4eb
| 1,169
|
py
|
Python
|
chromeos/tools/concat_dbus_conf_files.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
chromeos/tools/concat_dbus_conf_files.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86
|
2015-10-21T13:02:42.000Z
|
2022-03-14T07:50:50.000Z
|
chromeos/tools/concat_dbus_conf_files.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Concatenates D-Bus busconfig files."""
_BUSCONFIG_FILE_HEADER = b"""<!DOCTYPE busconfig
PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
"""
if __name__ == '__main__':
main()
| 26.568182
| 72
| 0.6929
|
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Concatenates D-Bus busconfig files."""
import sys
import xml.etree.ElementTree
_BUSCONFIG_FILE_HEADER = b"""<!DOCTYPE busconfig
PUBLIC "-//freedesktop//DTD D-Bus Bus Configuration 1.0//EN"
"http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
"""
def main():
if len(sys.argv) < 3:
sys.stderr.write('Usage: %s OUTFILE INFILES\n' % (sys.argv[0]))
sys.exit(1)
out_path = sys.argv[1]
in_paths = sys.argv[2:]
# Parse the first input file.
tree = xml.etree.ElementTree.parse(in_paths[0])
assert(tree.getroot().tag == 'busconfig')
# Append the remaining input files to the first file.
for path in in_paths[1:]:
current_tree = xml.etree.ElementTree.parse(path)
assert(current_tree.getroot().tag == 'busconfig')
for child in current_tree.getroot():
tree.getroot().append(child)
# Output the result.
with open(out_path, "wb") as f:
f.write(_BUSCONFIG_FILE_HEADER)
tree.write(f)
if __name__ == '__main__':
main()
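# Example invocation (paths illustrative): every child of each extra
# <busconfig> root is appended to the first file's root.
#   python concat_dbus_conf_files.py merged.conf base.conf extra.conf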
| 0
| 0
| 0
| 0
| 0
| 657
| 0
| -4
| 68
|
02525ed7d476b11f1d77ac07f48e44ec57a3ff58
| 282
|
py
|
Python
|
rand.py
|
sriharikapu/RandomSequenceGenerator
|
7491e43b117be3e24eb5b7d66762699ef4d7593a
|
[
"CC0-1.0"
] | 1
|
2022-02-08T01:47:03.000Z
|
2022-02-08T01:47:03.000Z
|
rand.py
|
sriharikapu/RandomSequenceGenerator
|
7491e43b117be3e24eb5b7d66762699ef4d7593a
|
[
"CC0-1.0"
] | null | null | null |
rand.py
|
sriharikapu/RandomSequenceGenerator
|
7491e43b117be3e24eb5b7d66762699ef4d7593a
|
[
"CC0-1.0"
] | null | null | null |
import sys
import numpy as np
import pandas as pd
np.set_printoptions(threshold=sys.maxsize)
# replace the range, sample size with your custom numbers
arr = np.array(np.random.choice(range(10000), 10000, replace=False))
print(arr)
DF = pd.DataFrame(arr)
DF.to_csv("temp.csv")
| 25.636364
| 69
| 0.755319
|
import sys
import numpy as np
import pandas as pd
np.set_printoptions(threshold=sys.maxsize)
# replace the range, sample size with your custom numbers
arr = np.array(np.random.choice(range(10000), 10000, replace=False))
print(arr)
DF = pd.DataFrame(arr)
DF.to_csv("temp.csv")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
029bd971467b50c7eb6bed9f96f5aee865257af7
| 6,358
|
py
|
Python
|
capresize.py
|
fictorial/pygame-capresize
|
4084b05c7646be6d45acaedf7b274d9164bea7bf
|
[
"MIT"
] | 3
|
2017-01-03T09:21:06.000Z
|
2017-09-17T02:47:56.000Z
|
capresize.py
|
fictorial/pygame-capresize
|
4084b05c7646be6d45acaedf7b274d9164bea7bf
|
[
"MIT"
] | null | null | null |
capresize.py
|
fictorial/pygame-capresize
|
4084b05c7646be6d45acaedf7b274d9164bea7bf
|
[
"MIT"
] | null | null | null |
"""This module creates a Pygame surface from a source surface that
has "end caps" on its corners. The caps remain unscaled in the
destination surface and the rest is scaled/tiled.
This was inspired by Android's NinePatch and iOS'
resizableImageWithCapInsets
"""
import pygame
AUTHOR = 'Brian Hammond <[email protected]>'
LICENSE = 'MIT'
COPYRIGHT = 'Copyright (C) 2012 Fictorial LLC'
__version__ = '1.0.0'
def resize_with_caps(src, dst_size, cap_insets=None, grow='scale'):
"""Stretch nine-grid source surface to surface of desired size.
src
The source surface.
dst_size
The destination surface size (width, height). If height is
0 maintains aspect ratio of source surface.
cap_insets
The size of each of the 4 end caps (left, top, right,
bottom).
If None, the left and right end caps are taken as 1/2 the
source surface width; and, the top and bottom end caps are
taken as 1/2 the source surface height. In this case it's
expected that the center stretchy part is 1x1 pixel.
grow
The method used to grow portions of the source image that
are not end caps. The default is 'scale' which means the
relevant source surface portions are scaled before being
copied to the destination surface. The other option is
'tile' which instead tiles the relevant source surface
portions into the destination surface.
Source and destination surfaces are laid out as follows.
A B C
D E F
G H I
A, C, G, and I are the end caps; B and H stretch horizontally;
D and F stretch vertically; and E stretches in both directions.
Returns the destination surface.
"""
# s:source, d:destination,
# c:cap, m:middle/stretchable portion
# l:left, t:top, b:bottom, r:right
# w:width, h:height
sw, sh = src.get_size()
if cap_insets is None:
assert sw % 2 == 1 and sh % 2 == 1
        cl = cr = sw // 2
        ct = cb = sh // 2
else:
cl, ct, cr, cb = cap_insets
dw, dh = dst_size
if dh == 0:
dh = int(sh * dw / float(sw))
dst = pygame.surface.Surface((dw, dh), pygame.SRCALPHA, 32)
smw = sw - cl - cr
smh = sh - cb - ct
dmw = dw - cl - cr
dmh = dh - cb - ct
r = pygame.Rect
# render caps: A, C, G, I in that order
dst.blit(src, r(0, 0, cl, ct), r(0, 0, cl, ct))
dst.blit(src, r(dw - cr, 0, cr, ct), r(sw - cr, 0, cr, ct))
dst.blit(src, r(0, dh - cb, cl, cb), r(0, sh - cb, cl, cb))
dst.blit(src, r(dw - cr, dh - cb, cr, cb), r(sw - cr, sh - cb, cr, cb))
# extract subsurfaces from src for growable portions
B = src.subsurface(r(cl, 0, smw, ct))
D = src.subsurface(r(0, ct, cl, smh))
E = src.subsurface(r(cl, ct, smw, smh))
F = src.subsurface(r(sw - cr, ct, cr, smh))
H = src.subsurface(r(cl, sh - cb, smw, cb))
if grow == 'scale' or grow == 'stretch':
sc = pygame.transform.smoothscale
dst.blit(sc(B, (dmw, ct)), (cl, 0))
dst.blit(sc(D, (cl, dmh)), (0, ct))
dst.blit(sc(E, (dmw, dmh)), (cl, ct))
dst.blit(sc(F, (cr, dmh)), (dw - cr, ct))
dst.blit(sc(H, (dmw, cb)), (cl, dh - cb))
elif grow == 'tile':
n_across = dmw // smw
rem_px_across = dmw - n_across * smw
n_down = dmh // smh
rem_px_down = dmh - n_down * smh
        # helpers (restored from the original file) that tile a strip and
        # blit the partial leftover, so this branch is self-contained
        def render_across(tile, y, h):
            x = cl
            for i in range(int(n_across)):
                dst.blit(tile, (x, y))
                x += smw
            if rem_px_across > 0:
                dst.blit(tile, (x, y), r(0, 0, rem_px_across, h))

        def render_down(tile, x, w):
            y = ct
            for i in range(int(n_down)):
                dst.blit(tile, (x, y))
                y += smh
            if rem_px_down > 0:
                dst.blit(tile, (x, y), r(0, 0, w, rem_px_down))

        render_across(B, 0, ct)
render_across(H, dh - smh, cb)
render_down(D, 0, cl)
render_down(F, dw - smw, cr)
y = ct
for i in range(int(n_down)):
render_across(E, y, smh)
y += smh
if rem_px_down > 0:
x = cl
for i in range(int(n_across)):
dst.blit(E, (x, y), r(0, 0, smw, rem_px_down))
x += smw
if rem_px_across > 0:
dst.blit(E, (x, y), r(0, 0, rem_px_across, rem_px_down))
return dst
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode((640, 800))
title = 'L: sources; R: stretch, tile, tile w/ leftovers, stretched button'
pygame.display.set_caption(title)
template = pygame.image.load('template.png').convert_alpha()
template_cap_insets = (24, 24, 24, 24)
template_tiled = resize_with_caps(template, (24 * 15, 24 * 9),
template_cap_insets, 'tile')
template_tiled1 = resize_with_caps(template, (24 * 7 + 4, 24 * 6 + 6),
template_cap_insets, 'tile')
template_stretched = resize_with_caps(template, (24 * 15, 24 * 9),
template_cap_insets, 'stretch')
#button = pygame.image.load('button.png').convert_alpha()
#button_stretched = resize_with_caps(button, (450, 120), (10, 9), 'scale')
button = pygame.image.load('textfield.png').convert_alpha()
button_cap_insets = (1, 6, 1, 4)
button_stretched = resize_with_caps(button, (450, 120),
button_cap_insets, 'scale')
clock = pygame.time.Clock()
running = True
while running:
dt = clock.tick(4) / 1000.0
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
if not running:
break
screen.fill((255, 255, 255))
screen.blit(template, (10, 10))
screen.blit(template_stretched, (150, 10))
screen.blit(template_tiled, (150, 24 * 9 + 20))
screen.blit(template_tiled1, (150, 2 * 24 * 9 + 30))
screen.blit(button, (10, 640))
screen.blit(button_stretched, (150, 640))
pygame.display.flip()
| 30.714976
| 79
| 0.56134
|
"""This module creates a Pygame surface from a source surface that
has "end caps" on its corners. The caps remain unscaled in the
destination surface and the rest is scaled/tiled.
This was inspired by Android's NinePatch and iOS'
resizableImageWithCapInsets
"""
import pygame
AUTHOR = 'Brian Hammond <[email protected]>'
LICENSE = 'MIT'
COPYRIGHT = 'Copyright (C) 2012 Fictorial LLC'
__version__ = '1.0.0'
def resize_with_caps(src, dst_size, cap_insets=None, grow='scale'):
"""Stretch nine-grid source surface to surface of desired size.
src
The source surface.
dst_size
The destination surface size (width, height). If height is
0 maintains aspect ratio of source surface.
cap_insets
The size of each of the 4 end caps (left, top, right,
bottom).
If None, the left and right end caps are taken as 1/2 the
source surface width; and, the top and bottom end caps are
taken as 1/2 the source surface height. In this case it's
expected that the center stretchy part is 1x1 pixel.
grow
The method used to grow portions of the source image that
are not end caps. The default is 'scale' which means the
relevant source surface portions are scaled before being
copied to the destination surface. The other option is
'tile' which instead tiles the relevant source surface
portions into the destination surface.
Source and destination surfaces are laid out as follows.
A B C
D E F
G H I
A, C, G, and I are the end caps; B and H stretch horizontally;
D and F stretch vertically; and E stretches in both directions.
Returns the destination surface.
"""
# s:source, d:destination,
# c:cap, m:middle/stretchable portion
# l:left, t:top, b:bottom, r:right
# w:width, h:height
sw, sh = src.get_size()
if cap_insets is None:
assert sw % 2 == 1 and sh % 2 == 1
        cl = cr = sw // 2
        ct = cb = sh // 2
else:
cl, ct, cr, cb = cap_insets
dw, dh = dst_size
if dh == 0:
dh = int(sh * dw / float(sw))
dst = pygame.surface.Surface((dw, dh), pygame.SRCALPHA, 32)
smw = sw - cl - cr
smh = sh - cb - ct
dmw = dw - cl - cr
dmh = dh - cb - ct
r = pygame.Rect
# render caps: A, C, G, I in that order
dst.blit(src, r(0, 0, cl, ct), r(0, 0, cl, ct))
dst.blit(src, r(dw - cr, 0, cr, ct), r(sw - cr, 0, cr, ct))
dst.blit(src, r(0, dh - cb, cl, cb), r(0, sh - cb, cl, cb))
dst.blit(src, r(dw - cr, dh - cb, cr, cb), r(sw - cr, sh - cb, cr, cb))
# extract subsurfaces from src for growable portions
B = src.subsurface(r(cl, 0, smw, ct))
D = src.subsurface(r(0, ct, cl, smh))
E = src.subsurface(r(cl, ct, smw, smh))
F = src.subsurface(r(sw - cr, ct, cr, smh))
H = src.subsurface(r(cl, sh - cb, smw, cb))
if grow == 'scale' or grow == 'stretch':
sc = pygame.transform.smoothscale
dst.blit(sc(B, (dmw, ct)), (cl, 0))
dst.blit(sc(D, (cl, dmh)), (0, ct))
dst.blit(sc(E, (dmw, dmh)), (cl, ct))
dst.blit(sc(F, (cr, dmh)), (dw - cr, ct))
dst.blit(sc(H, (dmw, cb)), (cl, dh - cb))
elif grow == 'tile':
n_across = dmw // smw
rem_px_across = dmw - n_across * smw
n_down = dmh // smh
rem_px_down = dmh - n_down * smh
def render_across(tile, y, h):
x = cl
for i in range(int(n_across)):
dst.blit(tile, (x, y))
x += smw
if rem_px_across > 0:
dst.blit(tile, (x, y), r(0, 0, rem_px_across, h))
render_across(B, 0, ct)
        render_across(H, dh - cb, cb)
def render_down(tile, x, w):
y = ct
for i in range(int(n_down)):
dst.blit(tile, (x, y))
y += smh
if rem_px_down > 0:
dst.blit(tile, (x, y), r(0, 0, w, rem_px_down))
render_down(D, 0, cl)
        render_down(F, dw - cr, cr)
y = ct
for i in range(int(n_down)):
render_across(E, y, smh)
y += smh
if rem_px_down > 0:
x = cl
for i in range(int(n_across)):
dst.blit(E, (x, y), r(0, 0, smw, rem_px_down))
x += smw
if rem_px_across > 0:
dst.blit(E, (x, y), r(0, 0, rem_px_across, rem_px_down))
return dst
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode((640, 800))
title = 'L: sources; R: stretch, tile, tile w/ leftovers, stretched button'
pygame.display.set_caption(title)
template = pygame.image.load('template.png').convert_alpha()
template_cap_insets = (24, 24, 24, 24)
template_tiled = resize_with_caps(template, (24 * 15, 24 * 9),
template_cap_insets, 'tile')
template_tiled1 = resize_with_caps(template, (24 * 7 + 4, 24 * 6 + 6),
template_cap_insets, 'tile')
template_stretched = resize_with_caps(template, (24 * 15, 24 * 9),
template_cap_insets, 'stretch')
#button = pygame.image.load('button.png').convert_alpha()
#button_stretched = resize_with_caps(button, (450, 120), (10, 9), 'scale')
button = pygame.image.load('textfield.png').convert_alpha()
button_cap_insets = (1, 6, 1, 4)
button_stretched = resize_with_caps(button, (450, 120),
button_cap_insets, 'scale')
clock = pygame.time.Clock()
running = True
while running:
dt = clock.tick(4) / 1000.0
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
if not running:
break
screen.fill((255, 255, 255))
screen.blit(template, (10, 10))
screen.blit(template_stretched, (150, 10))
screen.blit(template_tiled, (150, 24 * 9 + 20))
screen.blit(template_tiled1, (150, 2 * 24 * 9 + 30))
screen.blit(button, (10, 640))
screen.blit(button_stretched, (150, 640))
pygame.display.flip()
| 0
| 0
| 0
| 0
| 0
| 462
| 0
| 0
| 62
|
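The nine-grid arithmetic in resize_with_caps is easy to get wrong: the cap cells keep their source size, and only the middle row and column absorb the size change. Below is a minimal, pygame-free sketch of just the rectangle bookkeeping (the helper name nine_grid_rects is invented for illustration), checked against the demo's 24-pixel caps:

def nine_grid_rects(size, cap_insets):
    """Return the nine (x, y, w, h) cells of a surface of `size`
    split by `cap_insets` = (left, top, right, bottom)."""
    w, h = size
    cl, ct, cr, cb = cap_insets
    mw, mh = w - cl - cr, h - ct - cb     # stretchable middle
    xs = [(0, cl), (cl, mw), (w - cr, cr)]
    ys = [(0, ct), (ct, mh), (h - cb, cb)]
    return [(x, y, cw, ch) for (y, ch) in ys for (x, cw) in xs]

# Source and destination grids share cap sizes; only the middle differs.
src = nine_grid_rects((72, 72), (24, 24, 24, 24))
dst = nine_grid_rects((24 * 15, 24 * 9), (24, 24, 24, 24))
assert src[0] == dst[0] == (0, 0, 24, 24)        # cap A is never scaled
assert src[4] == (24, 24, 24, 24)                # source middle E
assert dst[4] == (24, 24, 24 * 13, 24 * 7)       # destination middle E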
4e046307ff114abcb6a89f63202cc7a58a62ddb1
| 2,231
|
py
|
Python
|
Exercises/Exercise_Database-and-SQL/script2.py
|
npinak/Python-Projects
|
6e6463f4fde175fde60c9cca045e3c114b854505
|
[
"MIT"
] | 1
|
2021-10-16T16:22:14.000Z
|
2021-10-16T16:22:14.000Z
|
Exercises/Exercise_Database-and-SQL/script2.py
|
npinak/Python-Projects
|
6e6463f4fde175fde60c9cca045e3c114b854505
|
[
"MIT"
] | null | null | null |
Exercises/Exercise_Database-and-SQL/script2.py
|
npinak/Python-Projects
|
6e6463f4fde175fde60c9cca045e3c114b854505
|
[
"MIT"
] | null | null | null |
#insert("Water Glass", 10, 5)
#delete("Wine Glass")
#update(12,6,"Water Glass")
#print(view())
#create_table()
#insert("Orange",10,15)
delete("Orange")
print(view())
| 43.745098
| 207
| 0.693859
|
import psycopg2
def create_table():
    conn = psycopg2.connect("dbname='database1' user='postgres' password ='naak' host ='localhost' port='5432'") # Connect to the database; psycopg2 will not create 'database1' if it does not already exist
cur = conn.cursor() # Create cursor object
cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
conn.commit()
conn.close()
def insert(item,quantity,price):
    conn = psycopg2.connect("dbname='database1' user='postgres' password ='naak' host ='localhost' port='5432'") # Connect to the database; psycopg2 will not create 'database1' if it does not already exist
cur = conn.cursor() # Create cursor object
#cur.execute("INSERT INTO store VALUES('%s','%s','%s')" %(item,quantity,price))
cur.execute("INSERT INTO store VALUES(%s,%s,%s)", (item,quantity,price))
conn.commit()
conn.close()
#insert("Water Glass", 10, 5)
def view():
    conn = psycopg2.connect("dbname='database1' user='postgres' password ='naak' host ='localhost' port='5432'") # Connect to the database; psycopg2 will not create 'database1' if it does not already exist
cur = conn.cursor() # Create cursor object
cur.execute("SELECT * from store")
rows = cur.fetchall()
conn.close()
return rows
def delete(item):
    conn = psycopg2.connect("dbname='database1' user='postgres' password ='naak' host ='localhost' port='5432'") # Connect to the database; psycopg2 will not create 'database1' if it does not already exist
cur = conn.cursor() # Create cursor object
cur.execute("DELETE FROM store WHERE item=%s",(item,))
conn.commit()
conn.close()
def update(quantity,price,item):
    conn = psycopg2.connect("dbname='database1' user='postgres' password ='naak' host ='localhost' port='5432'") # Connect to the database; psycopg2 will not create 'database1' if it does not already exist
cur = conn.cursor() # Create cursor object
cur.execute("UPDATE store SET quantity =%s, price=%s WHERE item = %s",(quantity,price,item))
conn.commit()
conn.close()
#delete("Wine Glass")
#update(12,6,"Water Glass")
#print(view())
#create_table()
#insert("Orange",10,15)
delete("Orange")
print(view())
| 0
| 0
| 0
| 0
| 0
| 1,930
| 0
| -6
| 137
|
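The script above builds one throwaway connection per call and, in the commented-out insert, shows the string-interpolation style it rightly abandons. As a minimal sketch of the same CRUD pattern, here it is with sqlite3, purely so the example runs without a PostgreSQL server; psycopg2 uses %s placeholders where sqlite3 uses ?, but passing parameters separately from the SQL string works the same way:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)")
# Parameters are passed as a separate tuple (never interpolated into the
# SQL string), which avoids SQL injection and quoting bugs.
cur.execute("INSERT INTO store VALUES (?, ?, ?)", ("Orange", 10, 15.0))
cur.execute("UPDATE store SET quantity = ?, price = ? WHERE item = ?", (12, 6.0, "Orange"))
print(cur.execute("SELECT * FROM store").fetchall())   # [('Orange', 12, 6.0)]
cur.execute("DELETE FROM store WHERE item = ?", ("Orange",))
conn.commit()
conn.close()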
5da3320712a6045998829ed16068f41ad2ccdd8a
| 6,391
|
py
|
Python
|
scripts/nuscenes/eval/common/utils.py
|
mengmengliu1998/qd-3dt
|
9fcd1c0b165793e259deb46a64fcbbdc33735f2f
|
[
"BSD-3-Clause"
] | 1
|
2019-11-28T10:39:36.000Z
|
2019-11-28T10:39:36.000Z
|
scripts/nuscenes/eval/common/utils.py
|
mengmengliu1998/qd-3dt
|
9fcd1c0b165793e259deb46a64fcbbdc33735f2f
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/nuscenes/eval/common/utils.py
|
mengmengliu1998/qd-3dt
|
9fcd1c0b165793e259deb46a64fcbbdc33735f2f
|
[
"BSD-3-Clause"
] | null | null | null |
# nuScenes dev-kit.
# Code written by Holger Caesar, 2018.
from typing import List, Dict, Any
import numpy as np
from pyquaternion import Quaternion
from nuscenes.eval.common.data_classes import EvalBox
from nuscenes.utils.data_classes import Box
DetectionBox = Any # Workaround as direct imports lead to cyclic dependencies.
def center_distance(gt_box: EvalBox, pred_box: EvalBox) -> float:
"""
L2 distance between the box centers (xy only).
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: L2 distance.
"""
return np.linalg.norm(np.array(pred_box.translation[:2]) - np.array(gt_box.translation[:2]))
def velocity_l2(gt_box: EvalBox, pred_box: EvalBox) -> float:
"""
L2 distance between the velocity vectors (xy only).
If the predicted velocities are nan, we return inf, which is subsequently clipped to 1.
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: L2 distance.
"""
return np.linalg.norm(np.array(pred_box.velocity) - np.array(gt_box.velocity))
def yaw_diff(gt_box: EvalBox, eval_box: EvalBox, period: float = 2*np.pi) -> float:
"""
Returns the yaw angle difference between the orientation of two boxes.
:param gt_box: Ground truth box.
:param eval_box: Predicted box.
:param period: Periodicity in radians for assessing angle difference.
:return: Yaw angle difference in radians in [0, pi].
"""
yaw_gt = quaternion_yaw(Quaternion(gt_box.rotation))
yaw_est = quaternion_yaw(Quaternion(eval_box.rotation))
return abs(angle_diff(yaw_gt, yaw_est, period))
def angle_diff(x: float, y: float, period: float):
"""
Get the smallest angle difference between 2 angles: the angle from y to x.
:param x: To angle.
:param y: From angle.
:param period: Periodicity in radians for assessing angle difference.
:return: <float>. Signed smallest between-angle difference in range (-pi, pi).
"""
# calculate angle difference, modulo to [0, 2*pi]
diff = (x - y + period / 2) % period - period / 2
if diff > np.pi:
diff = diff - (2 * np.pi) # shift (pi, 2*pi] to (-pi, 0]
return diff
def attr_acc(gt_box: DetectionBox, pred_box: DetectionBox) -> float:
"""
Computes the classification accuracy for the attribute of this class (if any).
If the GT class has no attributes or the annotation is missing attributes, we assign an accuracy of nan, which is
ignored later on.
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: Attribute classification accuracy (0 or 1) or nan if GT annotation does not have any attributes.
"""
if gt_box.attribute_name == '':
# If the class does not have attributes or this particular sample is missing attributes, return nan, which is
# ignored later. Note that about 0.4% of the sample_annotations have no attributes, although they should.
acc = np.nan
else:
# Check that label is correct.
acc = float(gt_box.attribute_name == pred_box.attribute_name)
return acc
def scale_iou(sample_annotation: EvalBox, sample_result: EvalBox) -> float:
"""
This method compares predictions to the ground truth in terms of scale.
It is equivalent to intersection over union (IOU) between the two boxes in 3D,
if we assume that the boxes are aligned, i.e. translation and rotation are considered identical.
:param sample_annotation: GT annotation sample.
:param sample_result: Predicted sample.
:return: Scale IOU.
"""
# Validate inputs.
sa_size = np.array(sample_annotation.size)
sr_size = np.array(sample_result.size)
assert all(sa_size > 0), 'Error: sample_annotation sizes must be >0.'
assert all(sr_size > 0), 'Error: sample_result sizes must be >0.'
# Compute IOU.
min_wlh = np.minimum(sa_size, sr_size)
volume_annotation = np.prod(sa_size)
volume_result = np.prod(sr_size)
intersection = np.prod(min_wlh) # type: float
union = volume_annotation + volume_result - intersection # type: float
iou = intersection / union
return iou
def quaternion_yaw(q: Quaternion) -> float:
"""
Calculate the yaw angle from a quaternion.
Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
It does not work for a box in the camera frame.
:param q: Quaternion of interest.
:return: Yaw angle in radians.
"""
# Project into xy plane.
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
# Measure yaw using arctan.
yaw = np.arctan2(v[1], v[0])
return yaw
def boxes_to_sensor(boxes: List[EvalBox], pose_record: Dict, cs_record: Dict):
"""
Map boxes from global coordinates to the vehicle's sensor coordinate system.
:param boxes: The boxes in global coordinates.
:param pose_record: The pose record of the vehicle at the current timestamp.
:param cs_record: The calibrated sensor record of the sensor.
:return: The transformed boxes.
"""
boxes_out = []
for box in boxes:
# Create Box instance.
box = Box(box.translation, box.size, Quaternion(box.rotation))
# Move box to ego vehicle coord system.
box.translate(-np.array(pose_record['translation']))
box.rotate(Quaternion(pose_record['rotation']).inverse)
# Move box to sensor coord system.
box.translate(-np.array(cs_record['translation']))
box.rotate(Quaternion(cs_record['rotation']).inverse)
boxes_out.append(box)
return boxes_out
def cummean(x: np.ndarray) -> np.ndarray:
"""
    Computes the cumulative mean up to each position in a NaN-sensitive way:
    - If all values are NaN, return an array of ones.
    - If some values are NaN, accumulate while discarding those entries.
"""
if sum(np.isnan(x)) == len(x):
        # All values in the array are NaN.
        return np.ones(len(x))  # If all errors are NaN, set the error to 1 for all operating points.
else:
# Accumulate in a nan-aware manner.
sum_vals = np.nancumsum(x.astype(float)) # Cumulative sum ignoring nans.
count_vals = np.cumsum(~np.isnan(x)) # Number of non-nans up to each position.
return np.divide(sum_vals, count_vals, out=np.zeros_like(sum_vals), where=count_vals != 0)
| 37.594118
| 117
| 0.688625
|
# nuScenes dev-kit.
# Code written by Holger Caesar, 2018.
from typing import List, Dict, Any
import numpy as np
from pyquaternion import Quaternion
from nuscenes.eval.common.data_classes import EvalBox
from nuscenes.utils.data_classes import Box
DetectionBox = Any # Workaround as direct imports lead to cyclic dependencies.
def center_distance(gt_box: EvalBox, pred_box: EvalBox) -> float:
"""
L2 distance between the box centers (xy only).
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: L2 distance.
"""
return np.linalg.norm(np.array(pred_box.translation[:2]) - np.array(gt_box.translation[:2]))
def velocity_l2(gt_box: EvalBox, pred_box: EvalBox) -> float:
"""
L2 distance between the velocity vectors (xy only).
If the predicted velocities are nan, we return inf, which is subsequently clipped to 1.
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: L2 distance.
"""
return np.linalg.norm(np.array(pred_box.velocity) - np.array(gt_box.velocity))
def yaw_diff(gt_box: EvalBox, eval_box: EvalBox, period: float = 2*np.pi) -> float:
"""
Returns the yaw angle difference between the orientation of two boxes.
:param gt_box: Ground truth box.
:param eval_box: Predicted box.
:param period: Periodicity in radians for assessing angle difference.
:return: Yaw angle difference in radians in [0, pi].
"""
yaw_gt = quaternion_yaw(Quaternion(gt_box.rotation))
yaw_est = quaternion_yaw(Quaternion(eval_box.rotation))
return abs(angle_diff(yaw_gt, yaw_est, period))
def angle_diff(x: float, y: float, period: float):
"""
Get the smallest angle difference between 2 angles: the angle from y to x.
:param x: To angle.
:param y: From angle.
:param period: Periodicity in radians for assessing angle difference.
:return: <float>. Signed smallest between-angle difference in range (-pi, pi).
"""
# calculate angle difference, modulo to [0, 2*pi]
diff = (x - y + period / 2) % period - period / 2
if diff > np.pi:
diff = diff - (2 * np.pi) # shift (pi, 2*pi] to (-pi, 0]
return diff
def attr_acc(gt_box: DetectionBox, pred_box: DetectionBox) -> float:
"""
Computes the classification accuracy for the attribute of this class (if any).
If the GT class has no attributes or the annotation is missing attributes, we assign an accuracy of nan, which is
ignored later on.
:param gt_box: GT annotation sample.
:param pred_box: Predicted sample.
:return: Attribute classification accuracy (0 or 1) or nan if GT annotation does not have any attributes.
"""
if gt_box.attribute_name == '':
# If the class does not have attributes or this particular sample is missing attributes, return nan, which is
# ignored later. Note that about 0.4% of the sample_annotations have no attributes, although they should.
acc = np.nan
else:
# Check that label is correct.
acc = float(gt_box.attribute_name == pred_box.attribute_name)
return acc
def scale_iou(sample_annotation: EvalBox, sample_result: EvalBox) -> float:
"""
This method compares predictions to the ground truth in terms of scale.
It is equivalent to intersection over union (IOU) between the two boxes in 3D,
if we assume that the boxes are aligned, i.e. translation and rotation are considered identical.
:param sample_annotation: GT annotation sample.
:param sample_result: Predicted sample.
:return: Scale IOU.
"""
# Validate inputs.
sa_size = np.array(sample_annotation.size)
sr_size = np.array(sample_result.size)
assert all(sa_size > 0), 'Error: sample_annotation sizes must be >0.'
assert all(sr_size > 0), 'Error: sample_result sizes must be >0.'
# Compute IOU.
min_wlh = np.minimum(sa_size, sr_size)
volume_annotation = np.prod(sa_size)
volume_result = np.prod(sr_size)
intersection = np.prod(min_wlh) # type: float
union = volume_annotation + volume_result - intersection # type: float
iou = intersection / union
return iou
def quaternion_yaw(q: Quaternion) -> float:
"""
Calculate the yaw angle from a quaternion.
Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
It does not work for a box in the camera frame.
:param q: Quaternion of interest.
:return: Yaw angle in radians.
"""
# Project into xy plane.
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
# Measure yaw using arctan.
yaw = np.arctan2(v[1], v[0])
return yaw
def boxes_to_sensor(boxes: List[EvalBox], pose_record: Dict, cs_record: Dict):
"""
Map boxes from global coordinates to the vehicle's sensor coordinate system.
:param boxes: The boxes in global coordinates.
:param pose_record: The pose record of the vehicle at the current timestamp.
:param cs_record: The calibrated sensor record of the sensor.
:return: The transformed boxes.
"""
boxes_out = []
for box in boxes:
# Create Box instance.
box = Box(box.translation, box.size, Quaternion(box.rotation))
# Move box to ego vehicle coord system.
box.translate(-np.array(pose_record['translation']))
box.rotate(Quaternion(pose_record['rotation']).inverse)
# Move box to sensor coord system.
box.translate(-np.array(cs_record['translation']))
box.rotate(Quaternion(cs_record['rotation']).inverse)
boxes_out.append(box)
return boxes_out
def cummean(x: np.ndarray) -> np.ndarray:
"""
    Computes the cumulative mean up to each position in a NaN-sensitive way:
    - If all values are NaN, return an array of ones.
    - If some values are NaN, accumulate while discarding those entries.
"""
if sum(np.isnan(x)) == len(x):
        # All values in the array are NaN.
        return np.ones(len(x))  # If all errors are NaN, set the error to 1 for all operating points.
else:
# Accumulate in a nan-aware manner.
sum_vals = np.nancumsum(x.astype(float)) # Cumulative sum ignoring nans.
count_vals = np.cumsum(~np.isnan(x)) # Number of non-nans up to each position.
return np.divide(sum_vals, count_vals, out=np.zeros_like(sum_vals), where=count_vals != 0)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
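Three of the helpers above repay a concrete numeric check: angle_diff's modulo trick for the signed smallest angle, cummean's NaN-aware running mean, and scale_iou's aligned-box intersection. A self-contained numpy sketch reproducing all three computations inline (no nuScenes imports assumed):

import numpy as np

# angle_diff wraps into (-pi, pi]: 350deg -> 10deg is a +20deg step.
x, y, period = np.deg2rad(10), np.deg2rad(350), 2 * np.pi
diff = (x - y + period / 2) % period - period / 2
print(np.rad2deg(diff))                      # ~20.0, not -340.0

# cummean ignores NaNs instead of poisoning the running mean.
e = np.array([1.0, np.nan, 3.0])
s = np.nancumsum(e)                          # [1., 1., 4.]
n = np.cumsum(~np.isnan(e))                  # [1, 1, 2]
print(s / n)                                 # [1., 1., 2.]

# scale_iou under the aligned-boxes assumption: the element-wise
# minimum of the two sizes is exactly the intersection box.
sa, sr = np.array([2.0, 2.0, 2.0]), np.array([1.0, 2.0, 4.0])
inter = np.prod(np.minimum(sa, sr))          # 1 * 2 * 2 = 4
union = np.prod(sa) + np.prod(sr) - inter    # 8 + 8 - 4 = 12
print(inter / union)                         # 0.333...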
0a483189639b429b0cd7104b8693e926d0cfbfc5
| 46,506
|
py
|
Python
|
parsetab.py
|
XgDuan/gem5-nvp-hw
|
c25d5511aac1816f3b4e31ced71017504c285e78
|
[
"BSD-3-Clause"
] | null | null | null |
parsetab.py
|
XgDuan/gem5-nvp-hw
|
c25d5511aac1816f3b4e31ced71017504c285e78
|
[
"BSD-3-Clause"
] | null | null | null |
parsetab.py
|
XgDuan/gem5-nvp-hw
|
c25d5511aac1816f3b4e31ced71017504c285e78
|
[
"BSD-3-Clause"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = 'l\x16\x9efz%\xc1\x8b\xa8O\x02{\x0b\xd1qJ'
_lr_action_items = {'PEEK':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[242,242,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'TRANS':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[8,8,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,8,-18,-16,-79,-20,8,-19,-21,-13,-8,-15,-12,-9,-11,]),'STAR':([5,11,20,22,61,81,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,185,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,227,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,33,-62,-61,-60,128,-136,-135,-112,-104,-101,-134,-103,-107,-139,147,-137,-106,147,147,147,-102,-130,-127,147,128,147,-108,-131,-138,147,147,-113,147,147,147,147,147,147,147,147,-114,147,147,33,147,-132,-110,-133,-105,147,-134,147,147,147,-109,-99,-111,147,-100,147,]),'SLASH':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,157,-137,-106,157,157,157,-102,-130,-127,157,157,-108,-131,-138,157,157,-113,157,157,157,157,157,157,157,157,-114,157,157,157,-132,-110,-133,-105,157,-134,157,157,157,-109,-99,-111,157,-100,157,]),'FLOATNUMBER':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,-95,-80,83,83,83,83,-79,83,-86,83,-94,83,-87,-92,-96,83,-88,-90,-97,-98,-91,-89,-93,]),'VOID':([0,1,2,5,7,15,45,56,62,73,120,149,161,172,178,220,229,230,233,235,237,238,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[3,3,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,3,-10,-33,-17,3,-26,3,-27,-28,-14,-80,-41,-36,-38,-40,-35,-37,3,-18,-16,-79,-20,3,-19,-21,-13,-8,-15,-12,-9,-11,]),'GLOBAL':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[4,4,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,4,-18,-16,-79,-20,4,-19,-21,-13,-8,-15,-12,-9,-11,]),'NUMBER':([54,88,89,91,98,103,105,123,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,188,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[84,84,84,84,84,84,84,182,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,257,84,84,-95,-80,84,84,84,84,-79,84,-86,84,-94,84,-87,-92,-96,84,-88,-90,-97,-98,-91,-89,-93,]),',':([5,20,22,32,44,45,46,48,49,55,58,59,60,61,63,64,65,68,75,77,78,81,83,84,87,93,94,96,97,99,100,102,116,120,121,126,127,130,132,135,136,140,142,160,163,164,171,180,181,182,189,191,194,197,199,200,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,241,256,257,258,261,263,266,268,272,279,281,285,302,305,320,323,324,327,328,330,331,336,351,353,356,357,],[-70,-62,-61,51,51,-65,67,71,51,51,51,51,51,-60,51,51,114,119,-78,124,125,129,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,174,-64,177,51,-51,51,-106,-129,-128,-102,-130,-127,51,51,232,-76,-75,-77,-52,129,-139,262,-108,265,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,51,-56,-54,-55,-132,-110,-
133,-105,51,51,314,51,-53,329,340,342,343,346,-109,-99,-111,51,51,361,364,-100,]),'GT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,155,-137,-106,155,155,155,-102,-130,-127,155,155,-108,-131,-138,155,-126,-113,155,155,-119,-120,-116,-117,-118,-115,-114,-125,155,155,-132,-110,-133,-105,155,-134,155,155,155,-109,-99,-111,155,-100,155,]),'NEW':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,-95,-80,86,86,86,86,-79,86,-86,86,-94,86,-87,-92,-96,86,-88,-90,-97,-98,-91,-89,-93,]),'RIGHTSHIFT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,158,-137,-106,158,158,158,-102,-130,-127,158,158,-108,-131,-138,158,-126,-113,158,158,158,158,-116,158,158,-115,-114,-125,158,158,-132,-110,-133,-105,158,-134,158,158,158,-109,-99,-111,158,-100,158,]),'DOT':([5,20,22,61,83,84,87,93,94,96,97,99,100,102,132,140,199,201,205,261,263,266,268,305,328,330,331,357,],[-70,-62,-61,-60,-136,-135,134,-104,-101,-134,-103,-107,-139,-137,-106,-102,-108,-131,-138,-132,-110,-133,-105,-134,-109,-99,-111,-100,]),'LEFTSHIFT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,146,-137,-106,146,146,146,-102,-130,-127,146,146,-108,-131,-138,146,-126,-113,146,146,146,146,-116,146,146,-115,-114,-125,146,146,-132,-110,-133,-105,146,-134,146,146,146,-109,-99,-111,146,-100,146,]),'INCR':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,-95,-80,89,89,89,89,-79,89,-86,89,-94,89,-87,-92,-96,89,-88,-90,-97,-98,-91,-89,-93,]),'LE':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,151,-137,-106,151,151,151,-102,-130,-127,151,151,-108,-131,-138,151,-126,-113,151,151,-119,-120,-116,-117,-118,-115,-114,-125,151,151,-132,-110,-133,-105,151,-134,151,151,151,-109,-99,-111,151,-100,151,]),'SEMI':([5,20,22,32,34,40,50,52,55,61,68,75,76,77,83,84,87,93,94,96,97,99,100,101,102,104,126,130,132,135,136,140,142,160,162,163,164,168,180,181,182,183,187,193,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,221,222,251,261,263,266,268,272,279,292,308,319,326,328,330,331,336,341,349,357,362,374,],[-70,-62,-61,-2,56,62,73,-72,-2,-60,117,-78,-71,-74,-136,-135,-112,-104,-101,-134,-103,-107,-139,149,-137,161,-2,-2,-106,-129,-128,-102,-130,-127,220,-2,-2,229,-76,-75,-77,-73,255,260,-108,-131,-138,-
123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,270,271,299,-132,-110,-133,-105,-2,-2,321,332,339,345,-109,-99,-111,-2,352,358,-100,368,376,]),'STATIC_CAST':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,-95,-80,90,90,90,90,-79,90,-86,90,-94,90,-87,-92,-96,90,-88,-90,-97,-98,-91,-89,-93,]),')':([5,20,22,44,45,49,52,53,57,58,59,60,61,63,64,66,72,75,76,77,78,79,80,81,82,83,84,87,93,94,96,97,99,100,102,106,107,108,109,111,112,113,116,120,125,127,129,132,135,136,138,140,142,143,160,180,181,182,183,184,186,189,190,191,192,194,195,196,197,199,201,202,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,240,241,256,257,258,261,262,263,264,266,267,268,285,290,302,303,304,305,306,307,317,322,325,328,330,331,347,351,353,354,357,359,367,371,],[-70,-62,-61,-2,-65,-2,-72,-2,-2,-2,110,-2,-60,-2,-2,115,122,-78,-71,-74,-49,126,-50,-58,130,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,163,164,165,166,168,169,170,173,-64,-2,-51,-2,-106,-129,-128,201,-102,-130,-2,-127,-76,-75,-77,-73,-48,-50,-52,-57,-58,-59,-139,261,-85,-84,-108,-131,266,268,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,289,-2,-56,-54,-55,-132,-2,-110,-2,-133,-2,-105,-2,319,-53,-83,328,-134,330,331,337,341,344,-109,-99,-111,357,-2,360,362,-100,365,373,374,]),'(':([4,5,8,9,10,17,18,19,23,24,26,32,35,54,85,88,89,90,91,92,98,100,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,199,203,242,243,245,246,247,248,249,250,252,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[28,-70,29,30,31,36,37,38,41,42,43,53,57,91,131,91,91,137,91,139,91,143,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,264,267,291,91,294,295,91,-95,297,298,301,-80,91,91,91,91,-79,91,-86,91,-94,91,-87,-92,-96,91,-88,-90,-97,-98,-91,-89,-93,]),'IS_INVALID':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,-95,-80,92,92,92,92,-79,92,-86,92,-94,92,-87,-92,-96,92,-88,-90,-97,-98,-91,-89,-93,]),'NE':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,148,-137,-106,148,148,148,-102,-130,-127,148,148,-108,-131,-138,148,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,148,148,-132,-110,-133,-105,148,-134,148,148,148,-109,-99,-111,148,-100,148,]),'OUT_PORT':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[9,9,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,9,-18,-16,-79,-20,9,-19,-21,-13,-8,-15,-12,-9,-11,]),'ENQUEUE':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[246,246,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'LT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,3
06,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,154,-137,-106,154,154,154,-102,-130,-127,154,154,-108,-131,-138,154,-126,-113,154,154,-119,-120,-116,-117,-118,-115,-114,-125,154,154,-132,-110,-133,-105,154,-134,154,154,154,-109,-99,-111,154,-100,154,]),'DOUBLE_COLON':([5,20,22,61,95,100,],[-70,39,-61,-60,141,-61,]),'PLUS':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,156,-137,-106,156,156,156,-102,-130,-127,156,156,-108,-131,-138,156,156,-113,156,156,156,156,-116,156,156,-115,-114,156,156,156,-132,-110,-133,-105,156,-134,156,156,156,-109,-99,-111,156,-100,156,]),'DECR':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,-95,-80,88,88,88,88,-79,88,-86,88,-94,88,-87,-92,-96,88,-88,-90,-97,-98,-91,-89,-93,]),'ACTION':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[10,10,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,10,-18,-16,-79,-20,10,-19,-21,-13,-8,-15,-12,-9,-11,]),':':([5,100,110,166,335,],[-70,144,167,224,144,]),'=':([5,74,],[-70,123,]),'ASSIGN':([5,20,22,32,55,61,83,84,87,93,94,96,97,99,100,102,127,132,135,136,140,142,160,189,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,279,328,330,331,357,],[-70,-62,-61,54,105,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,188,-106,-129,-128,-102,-130,-127,259,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,300,-132,-110,-133,-105,54,-109,-99,-111,-100,]),'$end':([0,1,2,5,6,7,12,13,15,25,27,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,286,288,293,309,313,315,318,334,338,339,348,350,],[-2,-2,-29,-70,0,-34,-5,-3,-39,-1,-4,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,-18,-16,-79,-20,-19,-21,-13,-8,-15,-12,-9,-11,]),'IDENT':([0,1,2,3,5,7,11,15,16,20,22,28,29,30,31,33,36,37,38,39,41,42,43,45,47,51,53,54,56,57,61,62,67,68,71,73,81,86,88,89,91,95,98,100,103,105,114,117,119,120,123,124,125,128,129,131,133,134,137,139,141,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,161,167,172,173,174,177,178,179,185,220,223,224,225,227,229,230,231,232,233,235,237,238,239,243,247,248,253,254,255,260,262,264,265,267,269,270,271,274,278,283,286,288,289,291,293,294,295,297,298,299,300,301,309,311,313,314,315,318,321,329,332,334,338,339,340,342,343,345,348,350,352,355,358,361,366,368,369,370,372,375,376,],[5,5,-29,-63,-70,-34,5,-39,5,-62,-61,5,5,5,5,5,5,5,5,5,5,5,5,-65,5,5,5,5,-6,5,-60,-7,5,5,5,-30,5,5,5,5,5,5,5,-61,5,5,5,5,5,-64,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,-32,5,5,5,5,5,5,5,5,5,5,-31,5,5,5,5,5,-10,5,5,-33,5,5,5,5,-17,5,5,5,-26,5,-27,-28,5,5,5,-95,-80,-41,-36,-38,5,5,5,5,-40,-35,-37,5,5,5,-18,-16,5,5,-79,5,5,5,5,-86,5,5,-20,5,-19,5,-21,5,-94,5,-44,-8,-15,-12,5,5,5,-87,-9,-11,-92,-96,-47,5,-88,-90,-97,-98,-91,-89,-93,]),'PROTOCOL':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[14,14,-29,-70,-34,-39
,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,14,-18,-16,-79,-20,14,-19,-21,-13,-8,-15,-12,-9,-11,]),'STRING':([14,21,51,54,88,89,91,98,103,105,123,124,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,188,243,247,248,253,259,262,264,265,267,293,298,299,300,321,329,345,346,352,355,361,364,366,368,369,370,372,375,376,],[34,40,75,96,96,96,96,96,96,96,181,75,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,256,96,96,-95,-80,302,96,96,305,96,-79,96,-86,96,-94,96,-87,356,-92,-96,96,371,-88,-90,-97,-98,-91,-89,-93,]),'STALL_AND_WAIT':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[249,249,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'OOD':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,-95,-80,99,99,99,99,-79,99,-86,99,-94,99,-87,-92,-96,99,-88,-90,-97,-98,-91,-89,-93,]),'ENUM':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[17,17,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,17,-18,-16,-79,-20,17,-19,-21,-13,-8,-15,-12,-9,-11,]),'ELSE':([253,293,355,],[-80,-79,363,]),'MACHINE':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[18,18,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,18,-18,-16,-79,-20,18,-19,-21,-13,-8,-15,-12,-9,-11,]),'GE':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,152,-137,-106,152,152,152,-102,-130,-127,152,152,-108,-131,-138,152,-126,-113,152,152,-119,-120,-116,-117,-118,-115,-114,-125,152,152,-132,-110,-133,-105,152,-134,152,152,152,-109,-99,-111,152,-100,152,]),'EXTERN_TYPE':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[19,19,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,19,-18,-16,-79,-20,19,-19,-21,-13,-8,-15,-12,-9,-11,]),'[':([5,20,22,61,83,84,87,93,94,96,97,99,100,102,132,140,199,201,205,261,263,266,268,305,328,330,331,357,],[-70,-62,-61,-60,-136,-135,133,-104,-101,-134,-103,-107,-139,-137,-106,-102,-108,-131,-138,-132,-110,-133,-105,-134,-109,-99,-111,-100,]),'INCLUDE':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[21,21,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,21,-18,-16,-79,-20,21,-19,-21,-13,-8,-15,-12,-9,-11,]),']':([5,20,22,61,83,84,87,93,94,96,97,99,100,102,132,133,135,136,140,142,160,196,197,198,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,261,262,263,266,268,303,328,330,331,357,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,-106,-2,-129,-128,-102,-130,-127,-85,-84,263,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,-132,-2,-110,-133,-105,-83,-109,-99,-111,-100,]),'IF':([179,247,248,253,293,299,321,345,352,355,3
63,366,368,369,370,372,375,376,],[250,250,-95,-80,-79,-86,-94,-87,-92,-96,250,-88,-90,-97,-98,-91,-89,-93,]),'AND':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,145,-137,-106,145,145,145,-102,-130,-127,145,145,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,145,-132,-110,-133,-105,145,-134,145,145,145,-109,-99,-111,145,-100,145,]),'DASH':([5,20,22,54,61,83,84,87,88,89,91,93,94,96,97,98,99,100,101,102,103,105,132,133,135,136,138,140,142,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,160,162,179,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,243,247,248,251,253,261,262,263,264,265,266,267,268,292,293,298,299,300,305,306,321,325,326,328,329,330,331,345,347,352,355,357,361,366,367,368,369,370,372,375,376,],[-70,-62,-61,98,-60,-136,-135,-112,98,98,98,-104,-101,-134,-103,98,-107,-139,153,-137,98,98,-106,98,153,153,153,-102,-130,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,-127,153,98,153,-108,-131,-138,153,153,-113,153,153,153,153,-116,153,153,-115,-114,153,153,98,98,-95,153,-80,-132,98,-110,98,98,-133,98,-105,153,-79,98,-86,98,-134,153,-94,153,153,-109,98,-99,-111,-87,153,-92,-96,-100,98,-88,153,-90,-97,-98,-91,-89,-93,]),'RETURN':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[243,243,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'EQ':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,150,-137,-106,150,150,150,-102,-130,-127,150,150,-108,-131,-138,150,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,150,150,-132,-110,-133,-105,150,-134,150,150,150,-109,-99,-111,150,-100,150,]),'STRUCT':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[23,23,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,23,-18,-16,-79,-20,23,-19,-21,-13,-8,-15,-12,-9,-11,]),'CHECK_STOP_SLOTS':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[252,252,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'STATE_DECL':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[24,24,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,24,-18,-16,-79,-20,24,-19,-21,-13,-8,-15,-12,-9,-11,]),'CHECK_ALLOCATE':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[245,245,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'LIT_BOOL':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,188,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,258,102,102,-95,-80,102,102,102,102,-79,102,-86,102,-94,102,-87,-92,-96,102,-88,-90,-97,-98,-91,-89,-93,]),'IS_VALID':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,26
4,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,-95,-80,85,85,85,85,-79,85,-86,85,-94,85,-87,-92,-96,85,-88,-90,-97,-98,-91,-89,-93,]),'NOT':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,-95,-80,103,103,103,103,-79,103,-86,103,-94,103,-87,-92,-96,103,-88,-90,-97,-98,-91,-89,-93,]),'{':([5,29,37,45,52,67,73,75,76,77,115,120,122,126,149,161,163,165,167,169,170,173,180,181,182,183,187,220,221,224,225,226,228,239,276,277,289,318,337,344,360,363,365,373,],[-70,47,47,-65,-72,47,-30,-78,-71,-74,172,-64,179,-2,-32,-31,-2,223,-2,230,231,47,-76,-75,-77,-73,179,-33,179,-2,-2,278,-23,47,311,-22,47,47,179,179,179,179,179,179,]),'}':([1,2,5,7,12,13,15,27,45,47,56,62,68,69,70,73,117,118,119,120,149,161,172,175,176,178,179,220,223,229,230,231,233,234,235,236,237,238,239,244,247,248,253,254,255,260,269,270,271,273,274,275,278,280,282,283,284,286,287,288,293,296,299,309,310,311,312,313,315,316,318,321,332,333,334,338,339,345,348,350,352,355,358,366,368,369,370,372,375,376,],[-2,-29,-70,-34,-5,-3,-39,-4,-65,-2,-6,-7,-2,120,-69,-30,-2,-68,-2,-64,-32,-31,-2,-66,-67,-10,253,-33,-2,-17,-2,-2,-26,286,-2,-25,-27,-28,-14,293,-82,-95,-80,-41,-36,-38,-40,-35,-37,309,-2,-43,-2,313,315,-2,-46,-18,-24,-16,-79,-81,-86,-20,-42,-2,334,-19,-21,-45,-13,-94,-44,348,-8,-15,-12,-87,-9,-11,-92,-96,-47,-88,-90,-97,-98,-91,-89,-93,]),'OR':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,159,-137,-106,159,159,159,-102,-130,-127,159,159,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,159,-132,-110,-133,-105,159,-134,159,159,159,-109,-99,-111,159,-100,159,]),'IN_PORT':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[26,26,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,26,-18,-16,-79,-20,26,-19,-21,-13,-8,-15,-12,-9,-11,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'decl':([0,1,278,311,],[1,1,1,1,]),'obj_decl':([0,1,167,172,224,225,230,235,278,311,],[2,2,225,233,225,225,233,233,2,2,]),'statements':([122,187,221,337,344,360,363,365,373,],[178,254,269,350,355,366,369,372,375,]),'type_enums':([223,274,],[273,310,]),'pairsx':([51,124,],[76,183,]),'type_members':([172,230,235,],[234,280,287,]),'statements_inner':([179,247,],[244,296,]),'enumeration':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,314,329,361,],[93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,336,93,93,]),'file':([0,],[6,]),'type_state':([231,283,],[283,283,]),'type_member':([172,230,235,],[235,235,235,]),'aexpr':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,329,361,],[87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,]),'param':([53,57,125,],[78,78,78,]),'literal':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,329,361,],[97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,]),'params':([53,57,125,],[79,106,184,]),'statement':([179,247,],[247,247,]),'var':([54,88,89,91,98,103,105,131,133,139,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,177,179,232,243,247,262,264,265,267,291,294,295,297,298,300,301,329,343,361,],[94,94,94,94,94,94,94,195,94,202,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,241,94,285,94,94,94,94,94,94,320,322,323,324,94,94,327,94,354,94,]),'if_statement':([179,247,363,],[248,248,370,]),'type':([0,1,28,36,38,41,42,53,54,57,71,86,88,89,91,98,103,105,114,125,129,133,137,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,167,172,179,224,225,230,235,243,247,262,264,265,267,278,298,300,311,329,340,342,361,],[11,11,44,58,60,63,64,81,95,81,121,132,95,95,95,95,95,95,171,185,191,95,200,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,227,11,95,227,227,11,11,95,95,95,95,95,95,11,95,95,11,95,351,353,95,]),'empty':([0,1,32,44,47,49,53,55,57,58,59,60,63,64,68,117,119,125,126,129,130,133,143,163,164,167,172,223,224,225,230,231,235,241,262,264,267,272,274,278,279,283,285,311,336,351,],[12,12,52,52,70,52,80,52,80,52,52,52,52,52,70,70,70,186,52,192,52,196,196,52,52,228,236,275,228,228,236,284,236,52,196,196,196,52,275,12,52,284,52,12,52,52,]),'declsx':([0,1,278,311,],[13,27,13,13,]),'func_decl':([0,1,172,230,235,278,311,],[7,7,237,237,237,7,7,]),'func_def':([0,1,172,230,235,278,311,],[15,15,238,238,238,15,15,]),'idents':([29,37,67,173,239,289,318,],[46,59,116,239,288,318,338,]),'void':([0,1,172,230,235,278,311,],[16,16,16,16,16,16,16,]),'identx':([47,68,117,119,],[69,118,175,176,]),'type_states':([231,283,],[282,316,]),'pair':([51,124,],[77,77,]),'type_enum':([223,274,],[274,274,]),'typestr':([0,1,28,36,38,41,42,53,54,57,71,86,88,89,91,98,103,105,114,125,129,133,137,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,167,172,179,224,225,230,235,243,247,262,264,265,267,278,298,300,311,329,340,342,361,],[20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,]),'types':([53,57,129,],[82,107,190,]),'pairs':([32,44,49,55,58,59,60,63,64,126,130,163,164,241,272,279,285,336,351,],[50,66,72,104,108,109,111,112,113,187,193,221,222,290,308,50,317,349,359,]),'ide
nt':([0,1,11,16,28,29,30,31,33,36,37,38,39,41,42,43,47,51,53,54,57,67,68,71,81,86,88,89,91,95,98,103,105,114,117,119,123,124,125,128,129,131,133,134,137,139,141,143,144,145,146,147,148,150,151,152,153,154,155,156,157,158,159,167,172,173,174,177,179,185,223,224,225,227,230,231,232,235,239,243,247,262,264,265,267,274,278,283,289,291,294,295,297,298,300,301,311,314,318,329,340,342,343,361,],[22,22,32,35,22,45,48,49,55,22,45,22,61,22,22,65,68,74,22,100,22,45,68,22,127,22,100,100,100,140,100,100,100,22,68,68,180,74,22,189,22,194,100,199,22,194,203,100,205,100,100,100,100,100,100,100,100,100,100,100,100,100,100,22,22,45,240,194,100,127,272,22,22,279,22,281,194,22,45,100,100,100,100,100,100,272,22,281,45,194,194,194,194,100,100,194,22,335,45,100,22,22,194,100,]),'obj_decls':([167,224,225,],[226,276,277,]),'expr':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,329,361,],[101,135,136,138,142,160,162,197,197,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,292,251,197,197,306,197,325,326,347,367,]),'exprs':([133,143,262,264,267,],[198,204,303,304,307,]),'decls':([0,278,311,],[25,312,333,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
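# Shape of the tables (hand-written illustrative comment, not generated
# output): the *_items dicts above map each token to a pair of parallel
# lists (states, actions); the two loops invert that into
# state -> {token: action}. For example an entry like
# {'GT': ([5, 20], [-70, -62])} becomes {5: {'GT': -70}, 20: {'GT': -62}},
# which is the lookup shape the LALR driver uses at parse time.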
_lr_productions = [
("S' -> file","S'",1,None,None,None),
('file -> decls','file',1,'p_file','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',218),
('empty -> <empty>','empty',0,'p_empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',222),
('decls -> declsx','decls',1,'p_decls','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',225),
('declsx -> decl declsx','declsx',2,'p_declsx__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',229),
('declsx -> empty','declsx',1,'p_declsx__none','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',239),
('decl -> PROTOCOL STRING SEMI','decl',3,'p_decl__protocol','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',243),
('decl -> INCLUDE STRING SEMI','decl',3,'p_decl__include','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',252),
('decl -> MACHINE ( idents ) : obj_decls { decls }','decl',9,'p_decl__machine0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',261),
('decl -> MACHINE ( idents pairs ) : obj_decls { decls }','decl',10,'p_decl__machine1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',265),
('decl -> ACTION ( ident pairs ) statements','decl',6,'p_decl__action','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',269),
('decl -> IN_PORT ( ident , type , var pairs ) statements','decl',10,'p_decl__in_port','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',273),
('decl -> OUT_PORT ( ident , type , var pairs ) SEMI','decl',10,'p_decl__out_port','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',277),
('decl -> TRANS ( idents , idents , ident ) idents','decl',9,'p_decl__trans0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',281),
('decl -> TRANS ( idents , idents ) idents','decl',7,'p_decl__trans1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',285),
('decl -> TRANS ( idents , idents , ident ) idents idents','decl',10,'p_decl__trans2','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',289),
('decl -> TRANS ( idents , idents ) idents idents','decl',8,'p_decl__trans3','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',293),
('decl -> EXTERN_TYPE ( type pairs ) SEMI','decl',6,'p_decl__extern0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',297),
('decl -> GLOBAL ( type pairs ) { type_members }','decl',8,'p_decl__global','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',302),
('decl -> STRUCT ( type pairs ) { type_members }','decl',8,'p_decl__struct','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',307),
('decl -> ENUM ( type pairs ) { type_enums }','decl',8,'p_decl__enum','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',311),
('decl -> STATE_DECL ( type pairs ) { type_states }','decl',8,'p_decl__state_decl','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',316),
('obj_decls -> obj_decl obj_decls','obj_decls',2,'p_obj_decls__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',323),
('obj_decls -> empty','obj_decls',1,'p_obj_decls__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',327),
('type_members -> type_member type_members','type_members',2,'p_type_members__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',331),
('type_members -> empty','type_members',1,'p_type_members__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',335),
('type_member -> obj_decl','type_member',1,'p_type_member__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',339),
('type_member -> func_decl','type_member',1,'p_type_member__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',340),
('type_member -> func_def','type_member',1,'p_type_member__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',341),
('decl -> obj_decl','decl',1,'p_decl__obj_decl','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',346),
('obj_decl -> type ident pairs SEMI','obj_decl',4,'p_obj_decl__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',350),
('obj_decl -> type STAR ident pairs SEMI','obj_decl',5,'p_obj_decl__1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',354),
('obj_decl -> type ident ASSIGN expr SEMI','obj_decl',5,'p_obj_decl__2','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',358),
('obj_decl -> type STAR ident ASSIGN expr SEMI','obj_decl',6,'p_obj_decl__3','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',363),
('decl -> func_decl','decl',1,'p_decl__func_decl','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',369),
('func_decl -> void ident ( params ) pairs SEMI','func_decl',7,'p_func_decl__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',373),
('func_decl -> type ident ( params ) pairs SEMI','func_decl',7,'p_func_decl__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',374),
('func_decl -> void ident ( types ) pairs SEMI','func_decl',7,'p_func_decl__1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',378),
('func_decl -> type ident ( types ) pairs SEMI','func_decl',7,'p_func_decl__1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',379),
('decl -> func_def','decl',1,'p_decl__func_def','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',383),
('func_def -> void ident ( params ) pairs statements','func_def',7,'p_func_def__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',387),
('func_def -> type ident ( params ) pairs statements','func_def',7,'p_func_def__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',388),
('type_enums -> type_enum type_enums','type_enums',2,'p_type_enums__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',393),
('type_enums -> empty','type_enums',1,'p_type_enums__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',397),
('type_enum -> ident pairs SEMI','type_enum',3,'p_type_enum','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',401),
('type_states -> type_state type_states','type_states',2,'p_type_states__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',406),
('type_states -> empty','type_states',1,'p_type_states__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',410),
('type_state -> ident , enumeration pairs SEMI','type_state',5,'p_type_state','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',414),
('params -> param , params','params',3,'p_params__many','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',419),
('params -> param','params',1,'p_params__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',423),
('params -> empty','params',1,'p_params__none','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',427),
('param -> type ident','param',2,'p_param','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',431),
('param -> type STAR ident','param',3,'p_param__pointer','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',435),
('param -> type STAR ident ASSIGN STRING','param',5,'p_param__pointer_default','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',439),
('param -> type ident ASSIGN NUMBER','param',4,'p_param__default_number','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',443),
('param -> type ident ASSIGN LIT_BOOL','param',4,'p_param__default_bool','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',447),
('param -> type ident ASSIGN STRING','param',4,'p_param__default_string','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',451),
('types -> type , types','types',3,'p_types__multiple','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',456),
('types -> type','types',1,'p_types__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',460),
('types -> empty','types',1,'p_types__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',464),
('typestr -> typestr DOUBLE_COLON ident','typestr',3,'p_typestr__multi','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',468),
('typestr -> ident','typestr',1,'p_typestr__single','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',472),
('type -> typestr','type',1,'p_type__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',476),
('void -> VOID','void',1,'p_void','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',480),
('idents -> { identx }','idents',3,'p_idents__braced','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',485),
('idents -> ident','idents',1,'p_idents__bare','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',489),
('identx -> ident SEMI identx','identx',3,'p_identx__multiple_1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',493),
('identx -> ident , identx','identx',3,'p_identx__multiple_1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',494),
('identx -> ident identx','identx',2,'p_identx__multiple_2','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',498),
('identx -> empty','identx',1,'p_identx__single','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',502),
('ident -> IDENT','ident',1,'p_ident','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',506),
('pairs -> , pairsx','pairs',2,'p_pairs__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',511),
('pairs -> empty','pairs',1,'p_pairs__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',515),
('pairsx -> pair , pairsx','pairsx',3,'p_pairsx__many','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',519),
('pairsx -> pair','pairsx',1,'p_pairsx__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',524),
('pair -> ident = STRING','pair',3,'p_pair__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',529),
('pair -> ident = ident','pair',3,'p_pair__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',530),
('pair -> ident = NUMBER','pair',3,'p_pair__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',531),
('pair -> STRING','pair',1,'p_pair__literal','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',535),
('statements -> { statements_inner }','statements',3,'p_statements__inner','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',540),
('statements -> { }','statements',2,'p_statements__none','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',544),
('statements_inner -> statement statements_inner','statements_inner',2,'p_statements_inner__many','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',548),
('statements_inner -> statement','statements_inner',1,'p_statements_inner__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',552),
('exprs -> expr , exprs','exprs',3,'p_exprs__multiple','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',556),
('exprs -> expr','exprs',1,'p_exprs__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',560),
('exprs -> empty','exprs',1,'p_exprs__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',564),
('statement -> expr SEMI','statement',2,'p_statement__expression','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',568),
('statement -> expr ASSIGN expr SEMI','statement',4,'p_statement__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',572),
('statement -> ENQUEUE ( var , type ) statements','statement',7,'p_statement__enqueue','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',576),
('statement -> ENQUEUE ( var , type , expr ) statements','statement',9,'p_statement__enqueue_latency','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',580),
('statement -> STALL_AND_WAIT ( var , var ) SEMI','statement',7,'p_statement__stall_and_wait','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',584),
('statement -> PEEK ( var , type pairs ) statements','statement',8,'p_statement__peek','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',588),
('statement -> CHECK_ALLOCATE ( var ) SEMI','statement',5,'p_statement__check_allocate','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',592),
('statement -> CHECK_STOP_SLOTS ( var , STRING , STRING ) SEMI','statement',9,'p_statement__check_stop','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',596),
('statement -> RETURN expr SEMI','statement',3,'p_statement__return','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',600),
('statement -> if_statement','statement',1,'p_statement__if','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',604),
('if_statement -> IF ( expr ) statements','if_statement',5,'p_if_statement__if','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',608),
('if_statement -> IF ( expr ) statements ELSE statements','if_statement',7,'p_if_statement__if_else','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',612),
('if_statement -> IF ( expr ) statements ELSE if_statement','if_statement',7,'p_statement__if_else_if','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',616),
('aexpr -> STATIC_CAST ( type , expr )','aexpr',6,'p_expr__static_cast','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',621),
('aexpr -> STATIC_CAST ( type , STRING , expr )','aexpr',8,'p_expr__static_cast_ptr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',625),
('aexpr -> var','aexpr',1,'p_expr__var','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',629),
('aexpr -> type ident','aexpr',2,'p_expr__localvar','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',633),
('aexpr -> literal','aexpr',1,'p_expr__literal','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',637),
('aexpr -> enumeration','aexpr',1,'p_expr__enumeration','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',641),
('aexpr -> ident ( exprs )','aexpr',4,'p_expr__func_call','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',645),
('aexpr -> NEW type','aexpr',2,'p_expr__new','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',649),
('aexpr -> OOD','aexpr',1,'p_expr__null','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',653),
('aexpr -> aexpr DOT ident','aexpr',3,'p_expr__member','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',657),
('aexpr -> aexpr DOT ident ( exprs )','aexpr',6,'p_expr__member_method_call','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',661),
('aexpr -> aexpr [ exprs ]','aexpr',4,'p_expr__member_method_call_lookup','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',665),
('aexpr -> type DOUBLE_COLON ident ( exprs )','aexpr',6,'p_expr__class_method_call','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',669),
('expr -> aexpr','expr',1,'p_expr__aexpr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',673),
('expr -> expr STAR expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',677),
('expr -> expr SLASH expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',678),
('expr -> expr PLUS expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',679),
('expr -> expr DASH expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',680),
('expr -> expr LT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',681),
('expr -> expr GT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',682),
('expr -> expr LE expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',683),
('expr -> expr GE expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',684),
('expr -> expr EQ expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',685),
('expr -> expr NE expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',686),
('expr -> expr AND expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',687),
('expr -> expr OR expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',688),
('expr -> expr RIGHTSHIFT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',689),
('expr -> expr LEFTSHIFT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',690),
('expr -> NOT expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',695),
('expr -> INCR expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',696),
('expr -> DECR expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',697),
('expr -> DASH expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',698),
('aexpr -> ( expr )','aexpr',3,'p_expr__parens','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',702),
('aexpr -> IS_VALID ( var )','aexpr',4,'p_expr__is_valid_ptr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',706),
('aexpr -> IS_INVALID ( var )','aexpr',4,'p_expr__is_invalid_ptr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',710),
('literal -> STRING','literal',1,'p_literal__string','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',714),
('literal -> NUMBER','literal',1,'p_literal__number','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',718),
('literal -> FLOATNUMBER','literal',1,'p_literal__float','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',722),
('literal -> LIT_BOOL','literal',1,'p_literal__bool','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',726),
('enumeration -> ident : ident','enumeration',3,'p_enumeration','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',730),
('var -> ident','var',1,'p_var','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',734),
]
| 275.183432
| 23804
| 0.667505
|
# parsetab.py
# This file is automatically generated. Do not edit.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = 'l\x16\x9efz%\xc1\x8b\xa8O\x02{\x0b\xd1qJ'
_lr_action_items = {'PEEK':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[242,242,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'TRANS':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[8,8,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,8,-18,-16,-79,-20,8,-19,-21,-13,-8,-15,-12,-9,-11,]),'STAR':([5,11,20,22,61,81,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,185,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,227,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,33,-62,-61,-60,128,-136,-135,-112,-104,-101,-134,-103,-107,-139,147,-137,-106,147,147,147,-102,-130,-127,147,128,147,-108,-131,-138,147,147,-113,147,147,147,147,147,147,147,147,-114,147,147,33,147,-132,-110,-133,-105,147,-134,147,147,147,-109,-99,-111,147,-100,147,]),'SLASH':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,157,-137,-106,157,157,157,-102,-130,-127,157,157,-108,-131,-138,157,157,-113,157,157,157,157,157,157,157,157,-114,157,157,157,-132,-110,-133,-105,157,-134,157,157,157,-109,-99,-111,157,-100,157,]),'FLOATNUMBER':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,83,-95,-80,83,83,83,83,-79,83,-86,83,-94,83,-87,-92,-96,83,-88,-90,-97,-98,-91,-89,-93,]),'VOID':([0,1,2,5,7,15,45,56,62,73,120,149,161,172,178,220,229,230,233,235,237,238,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[3,3,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,3,-10,-33,-17,3,-26,3,-27,-28,-14,-80,-41,-36,-38,-40,-35,-37,3,-18,-16,-79,-20,3,-19,-21,-13,-8,-15,-12,-9,-11,]),'GLOBAL':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[4,4,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,4,-18,-16,-79,-20,4,-19,-21,-13,-8,-15,-12,-9,-11,]),'NUMBER':([54,88,89,91,98,103,105,123,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,188,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[84,84,84,84,84,84,84,182,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,84,257,84,84,-95,-80,84,84,84,84,-79,84,-86,84,-94,84,-87,-92,-96,84,-88,-90,-97,-98,-91,-89,-93,]),',':([5,20,22,32,44,45,46,48,49,55,58,59,60,61,63,64,65,68,75,77,78,81,83,84,87,93,94,96,97,99,100,102,116,120,121,126,127,130,132,135,136,140,142,160,163,164,171,180,181,182,189,191,194,197,199,200,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,241,256,257,258,261,263,266,268,272,279,281,285,302,305,320,323,324,327,328,330,331,336,351,353,356,357,],[-70,-62,-61,51,51,-65,67,71,51,51,51,51,51,-60,51,51,114,119,-78,124,125,129,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,174,-64,177,51,-51,51,-106,-129,-128,-102,-130,-127,51,51,232,-76,-75,-77,-52,129,-139,262,-108,265,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,51,-56,-54,-55,-132,-110,-
133,-105,51,51,314,51,-53,329,340,342,343,346,-109,-99,-111,51,51,361,364,-100,]),'GT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,155,-137,-106,155,155,155,-102,-130,-127,155,155,-108,-131,-138,155,-126,-113,155,155,-119,-120,-116,-117,-118,-115,-114,-125,155,155,-132,-110,-133,-105,155,-134,155,155,155,-109,-99,-111,155,-100,155,]),'NEW':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,86,-95,-80,86,86,86,86,-79,86,-86,86,-94,86,-87,-92,-96,86,-88,-90,-97,-98,-91,-89,-93,]),'RIGHTSHIFT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,158,-137,-106,158,158,158,-102,-130,-127,158,158,-108,-131,-138,158,-126,-113,158,158,158,158,-116,158,158,-115,-114,-125,158,158,-132,-110,-133,-105,158,-134,158,158,158,-109,-99,-111,158,-100,158,]),'DOT':([5,20,22,61,83,84,87,93,94,96,97,99,100,102,132,140,199,201,205,261,263,266,268,305,328,330,331,357,],[-70,-62,-61,-60,-136,-135,134,-104,-101,-134,-103,-107,-139,-137,-106,-102,-108,-131,-138,-132,-110,-133,-105,-134,-109,-99,-111,-100,]),'LEFTSHIFT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,146,-137,-106,146,146,146,-102,-130,-127,146,146,-108,-131,-138,146,-126,-113,146,146,146,146,-116,146,146,-115,-114,-125,146,146,-132,-110,-133,-105,146,-134,146,146,146,-109,-99,-111,146,-100,146,]),'INCR':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,89,-95,-80,89,89,89,89,-79,89,-86,89,-94,89,-87,-92,-96,89,-88,-90,-97,-98,-91,-89,-93,]),'LE':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,151,-137,-106,151,151,151,-102,-130,-127,151,151,-108,-131,-138,151,-126,-113,151,151,-119,-120,-116,-117,-118,-115,-114,-125,151,151,-132,-110,-133,-105,151,-134,151,151,151,-109,-99,-111,151,-100,151,]),'SEMI':([5,20,22,32,34,40,50,52,55,61,68,75,76,77,83,84,87,93,94,96,97,99,100,101,102,104,126,130,132,135,136,140,142,160,162,163,164,168,180,181,182,183,187,193,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,221,222,251,261,263,266,268,272,279,292,308,319,326,328,330,331,336,341,349,357,362,374,],[-70,-62,-61,-2,56,62,73,-72,-2,-60,117,-78,-71,-74,-136,-135,-112,-104,-101,-134,-103,-107,-139,149,-137,161,-2,-2,-106,-129,-128,-102,-130,-127,220,-2,-2,229,-76,-75,-77,-73,255,260,-108,-131,-138,-
123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,270,271,299,-132,-110,-133,-105,-2,-2,321,332,339,345,-109,-99,-111,-2,352,358,-100,368,376,]),'STATIC_CAST':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,90,-95,-80,90,90,90,90,-79,90,-86,90,-94,90,-87,-92,-96,90,-88,-90,-97,-98,-91,-89,-93,]),')':([5,20,22,44,45,49,52,53,57,58,59,60,61,63,64,66,72,75,76,77,78,79,80,81,82,83,84,87,93,94,96,97,99,100,102,106,107,108,109,111,112,113,116,120,125,127,129,132,135,136,138,140,142,143,160,180,181,182,183,184,186,189,190,191,192,194,195,196,197,199,201,202,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,240,241,256,257,258,261,262,263,264,266,267,268,285,290,302,303,304,305,306,307,317,322,325,328,330,331,347,351,353,354,357,359,367,371,],[-70,-62,-61,-2,-65,-2,-72,-2,-2,-2,110,-2,-60,-2,-2,115,122,-78,-71,-74,-49,126,-50,-58,130,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,163,164,165,166,168,169,170,173,-64,-2,-51,-2,-106,-129,-128,201,-102,-130,-2,-127,-76,-75,-77,-73,-48,-50,-52,-57,-58,-59,-139,261,-85,-84,-108,-131,266,268,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,289,-2,-56,-54,-55,-132,-2,-110,-2,-133,-2,-105,-2,319,-53,-83,328,-134,330,331,337,341,344,-109,-99,-111,357,-2,360,362,-100,365,373,374,]),'(':([4,5,8,9,10,17,18,19,23,24,26,32,35,54,85,88,89,90,91,92,98,100,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,199,203,242,243,245,246,247,248,249,250,252,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[28,-70,29,30,31,36,37,38,41,42,43,53,57,91,131,91,91,137,91,139,91,143,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,91,264,267,291,91,294,295,91,-95,297,298,301,-80,91,91,91,91,-79,91,-86,91,-94,91,-87,-92,-96,91,-88,-90,-97,-98,-91,-89,-93,]),'IS_INVALID':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,92,-95,-80,92,92,92,92,-79,92,-86,92,-94,92,-87,-92,-96,92,-88,-90,-97,-98,-91,-89,-93,]),'NE':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,148,-137,-106,148,148,148,-102,-130,-127,148,148,-108,-131,-138,148,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,148,148,-132,-110,-133,-105,148,-134,148,148,148,-109,-99,-111,148,-100,148,]),'OUT_PORT':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[9,9,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,9,-18,-16,-79,-20,9,-19,-21,-13,-8,-15,-12,-9,-11,]),'ENQUEUE':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[246,246,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'LT':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,3
06,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,154,-137,-106,154,154,154,-102,-130,-127,154,154,-108,-131,-138,154,-126,-113,154,154,-119,-120,-116,-117,-118,-115,-114,-125,154,154,-132,-110,-133,-105,154,-134,154,154,154,-109,-99,-111,154,-100,154,]),'DOUBLE_COLON':([5,20,22,61,95,100,],[-70,39,-61,-60,141,-61,]),'PLUS':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,156,-137,-106,156,156,156,-102,-130,-127,156,156,-108,-131,-138,156,156,-113,156,156,156,156,-116,156,156,-115,-114,156,156,156,-132,-110,-133,-105,156,-134,156,156,156,-109,-99,-111,156,-100,156,]),'DECR':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,88,-95,-80,88,88,88,88,-79,88,-86,88,-94,88,-87,-92,-96,88,-88,-90,-97,-98,-91,-89,-93,]),'ACTION':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[10,10,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,10,-18,-16,-79,-20,10,-19,-21,-13,-8,-15,-12,-9,-11,]),':':([5,100,110,166,335,],[-70,144,167,224,144,]),'=':([5,74,],[-70,123,]),'ASSIGN':([5,20,22,32,55,61,83,84,87,93,94,96,97,99,100,102,127,132,135,136,140,142,160,189,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,279,328,330,331,357,],[-70,-62,-61,54,105,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,188,-106,-129,-128,-102,-130,-127,259,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,300,-132,-110,-133,-105,54,-109,-99,-111,-100,]),'$end':([0,1,2,5,6,7,12,13,15,25,27,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,286,288,293,309,313,315,318,334,338,339,348,350,],[-2,-2,-29,-70,0,-34,-5,-3,-39,-1,-4,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,-18,-16,-79,-20,-19,-21,-13,-8,-15,-12,-9,-11,]),'IDENT':([0,1,2,3,5,7,11,15,16,20,22,28,29,30,31,33,36,37,38,39,41,42,43,45,47,51,53,54,56,57,61,62,67,68,71,73,81,86,88,89,91,95,98,100,103,105,114,117,119,120,123,124,125,128,129,131,133,134,137,139,141,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,161,167,172,173,174,177,178,179,185,220,223,224,225,227,229,230,231,232,233,235,237,238,239,243,247,248,253,254,255,260,262,264,265,267,269,270,271,274,278,283,286,288,289,291,293,294,295,297,298,299,300,301,309,311,313,314,315,318,321,329,332,334,338,339,340,342,343,345,348,350,352,355,358,361,366,368,369,370,372,375,376,],[5,5,-29,-63,-70,-34,5,-39,5,-62,-61,5,5,5,5,5,5,5,5,5,5,5,5,-65,5,5,5,5,-6,5,-60,-7,5,5,5,-30,5,5,5,5,5,5,5,-61,5,5,5,5,5,-64,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,-32,5,5,5,5,5,5,5,5,5,5,-31,5,5,5,5,5,-10,5,5,-33,5,5,5,5,-17,5,5,5,-26,5,-27,-28,5,5,5,-95,-80,-41,-36,-38,5,5,5,5,-40,-35,-37,5,5,5,-18,-16,5,5,-79,5,5,5,5,-86,5,5,-20,5,-19,5,-21,5,-94,5,-44,-8,-15,-12,5,5,5,-87,-9,-11,-92,-96,-47,5,-88,-90,-97,-98,-91,-89,-93,]),'PROTOCOL':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[14,14,-29,-70,-34,-39
,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,14,-18,-16,-79,-20,14,-19,-21,-13,-8,-15,-12,-9,-11,]),'STRING':([14,21,51,54,88,89,91,98,103,105,123,124,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,188,243,247,248,253,259,262,264,265,267,293,298,299,300,321,329,345,346,352,355,361,364,366,368,369,370,372,375,376,],[34,40,75,96,96,96,96,96,96,96,181,75,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,96,256,96,96,-95,-80,302,96,96,305,96,-79,96,-86,96,-94,96,-87,356,-92,-96,96,371,-88,-90,-97,-98,-91,-89,-93,]),'STALL_AND_WAIT':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[249,249,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'OOD':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,-95,-80,99,99,99,99,-79,99,-86,99,-94,99,-87,-92,-96,99,-88,-90,-97,-98,-91,-89,-93,]),'ENUM':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[17,17,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,17,-18,-16,-79,-20,17,-19,-21,-13,-8,-15,-12,-9,-11,]),'ELSE':([253,293,355,],[-80,-79,363,]),'MACHINE':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[18,18,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,18,-18,-16,-79,-20,18,-19,-21,-13,-8,-15,-12,-9,-11,]),'GE':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,152,-137,-106,152,152,152,-102,-130,-127,152,152,-108,-131,-138,152,-126,-113,152,152,-119,-120,-116,-117,-118,-115,-114,-125,152,152,-132,-110,-133,-105,152,-134,152,152,152,-109,-99,-111,152,-100,152,]),'EXTERN_TYPE':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[19,19,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,19,-18,-16,-79,-20,19,-19,-21,-13,-8,-15,-12,-9,-11,]),'[':([5,20,22,61,83,84,87,93,94,96,97,99,100,102,132,140,199,201,205,261,263,266,268,305,328,330,331,357,],[-70,-62,-61,-60,-136,-135,133,-104,-101,-134,-103,-107,-139,-137,-106,-102,-108,-131,-138,-132,-110,-133,-105,-134,-109,-99,-111,-100,]),'INCLUDE':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[21,21,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,21,-18,-16,-79,-20,21,-19,-21,-13,-8,-15,-12,-9,-11,]),']':([5,20,22,61,83,84,87,93,94,96,97,99,100,102,132,133,135,136,140,142,160,196,197,198,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,261,262,263,266,268,303,328,330,331,357,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,-137,-106,-2,-129,-128,-102,-130,-127,-85,-84,263,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,-132,-2,-110,-133,-105,-83,-109,-99,-111,-100,]),'IF':([179,247,248,253,293,299,321,345,352,355,3
63,366,368,369,370,372,375,376,],[250,250,-95,-80,-79,-86,-94,-87,-92,-96,250,-88,-90,-97,-98,-91,-89,-93,]),'AND':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,145,-137,-106,145,145,145,-102,-130,-127,145,145,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,145,-132,-110,-133,-105,145,-134,145,145,145,-109,-99,-111,145,-100,145,]),'DASH':([5,20,22,54,61,83,84,87,88,89,91,93,94,96,97,98,99,100,101,102,103,105,132,133,135,136,138,140,142,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,160,162,179,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,243,247,248,251,253,261,262,263,264,265,266,267,268,292,293,298,299,300,305,306,321,325,326,328,329,330,331,345,347,352,355,357,361,366,367,368,369,370,372,375,376,],[-70,-62,-61,98,-60,-136,-135,-112,98,98,98,-104,-101,-134,-103,98,-107,-139,153,-137,98,98,-106,98,153,153,153,-102,-130,98,98,98,98,98,98,98,98,98,98,98,98,98,98,98,-127,153,98,153,-108,-131,-138,153,153,-113,153,153,153,153,-116,153,153,-115,-114,153,153,98,98,-95,153,-80,-132,98,-110,98,98,-133,98,-105,153,-79,98,-86,98,-134,153,-94,153,153,-109,98,-99,-111,-87,153,-92,-96,-100,98,-88,153,-90,-97,-98,-91,-89,-93,]),'RETURN':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[243,243,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'EQ':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,150,-137,-106,150,150,150,-102,-130,-127,150,150,-108,-131,-138,150,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,150,150,-132,-110,-133,-105,150,-134,150,150,150,-109,-99,-111,150,-100,150,]),'STRUCT':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[23,23,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,23,-18,-16,-79,-20,23,-19,-21,-13,-8,-15,-12,-9,-11,]),'CHECK_STOP_SLOTS':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[252,252,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'STATE_DECL':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[24,24,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,24,-18,-16,-79,-20,24,-19,-21,-13,-8,-15,-12,-9,-11,]),'CHECK_ALLOCATE':([179,247,248,253,293,299,321,345,352,355,366,368,369,370,372,375,376,],[245,245,-95,-80,-79,-86,-94,-87,-92,-96,-88,-90,-97,-98,-91,-89,-93,]),'LIT_BOOL':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,188,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,102,258,102,102,-95,-80,102,102,102,102,-79,102,-86,102,-94,102,-87,-92,-96,102,-88,-90,-97,-98,-91,-89,-93,]),'IS_VALID':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,26
4,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,85,-95,-80,85,85,85,85,-79,85,-86,85,-94,85,-87,-92,-96,85,-88,-90,-97,-98,-91,-89,-93,]),'NOT':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,248,253,262,264,265,267,293,298,299,300,321,329,345,352,355,361,366,368,369,370,372,375,376,],[103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,103,-95,-80,103,103,103,103,-79,103,-86,103,-94,103,-87,-92,-96,103,-88,-90,-97,-98,-91,-89,-93,]),'{':([5,29,37,45,52,67,73,75,76,77,115,120,122,126,149,161,163,165,167,169,170,173,180,181,182,183,187,220,221,224,225,226,228,239,276,277,289,318,337,344,360,363,365,373,],[-70,47,47,-65,-72,47,-30,-78,-71,-74,172,-64,179,-2,-32,-31,-2,223,-2,230,231,47,-76,-75,-77,-73,179,-33,179,-2,-2,278,-23,47,311,-22,47,47,179,179,179,179,179,179,]),'}':([1,2,5,7,12,13,15,27,45,47,56,62,68,69,70,73,117,118,119,120,149,161,172,175,176,178,179,220,223,229,230,231,233,234,235,236,237,238,239,244,247,248,253,254,255,260,269,270,271,273,274,275,278,280,282,283,284,286,287,288,293,296,299,309,310,311,312,313,315,316,318,321,332,333,334,338,339,345,348,350,352,355,358,366,368,369,370,372,375,376,],[-2,-29,-70,-34,-5,-3,-39,-4,-65,-2,-6,-7,-2,120,-69,-30,-2,-68,-2,-64,-32,-31,-2,-66,-67,-10,253,-33,-2,-17,-2,-2,-26,286,-2,-25,-27,-28,-14,293,-82,-95,-80,-41,-36,-38,-40,-35,-37,309,-2,-43,-2,313,315,-2,-46,-18,-24,-16,-79,-81,-86,-20,-42,-2,334,-19,-21,-45,-13,-94,-44,348,-8,-15,-12,-87,-9,-11,-92,-96,-47,-88,-90,-97,-98,-91,-89,-93,]),'OR':([5,20,22,61,83,84,87,93,94,96,97,99,100,101,102,132,135,136,138,140,142,160,162,197,199,201,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,261,263,266,268,292,305,306,325,326,328,330,331,347,357,367,],[-70,-62,-61,-60,-136,-135,-112,-104,-101,-134,-103,-107,-139,159,-137,-106,159,159,159,-102,-130,-127,159,159,-108,-131,-138,-123,-126,-113,-122,-121,-119,-120,-116,-117,-118,-115,-114,-125,-124,159,-132,-110,-133,-105,159,-134,159,159,159,-109,-99,-111,159,-100,159,]),'IN_PORT':([0,1,2,5,7,15,45,56,62,73,120,149,161,178,220,229,239,253,254,255,260,269,270,271,278,286,288,293,309,311,313,315,318,334,338,339,348,350,],[26,26,-29,-70,-34,-39,-65,-6,-7,-30,-64,-32,-31,-10,-33,-17,-14,-80,-41,-36,-38,-40,-35,-37,26,-18,-16,-79,-20,26,-19,-21,-13,-8,-15,-12,-9,-11,]),}
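# Editorial note on the generated tables (standard PLY layout, stated here
# as an assumption about ply.yacc's parsetab format): each entry above maps
# a terminal to two parallel lists (states, actions), and the loops below
# invert them into per-state lookup dictionaries. A positive action value
# means "shift to that state", a negative value means "reduce by production
# number -n", and 0 means "accept".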
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'decl':([0,1,278,311,],[1,1,1,1,]),'obj_decl':([0,1,167,172,224,225,230,235,278,311,],[2,2,225,233,225,225,233,233,2,2,]),'statements':([122,187,221,337,344,360,363,365,373,],[178,254,269,350,355,366,369,372,375,]),'type_enums':([223,274,],[273,310,]),'pairsx':([51,124,],[76,183,]),'type_members':([172,230,235,],[234,280,287,]),'statements_inner':([179,247,],[244,296,]),'enumeration':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,314,329,361,],[93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,93,336,93,93,]),'file':([0,],[6,]),'type_state':([231,283,],[283,283,]),'type_member':([172,230,235,],[235,235,235,]),'aexpr':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,329,361,],[87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,87,]),'param':([53,57,125,],[78,78,78,]),'literal':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,329,361,],[97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,97,]),'params':([53,57,125,],[79,106,184,]),'statement':([179,247,],[247,247,]),'var':([54,88,89,91,98,103,105,131,133,139,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,177,179,232,243,247,262,264,265,267,291,294,295,297,298,300,301,329,343,361,],[94,94,94,94,94,94,94,195,94,202,94,94,94,94,94,94,94,94,94,94,94,94,94,94,94,241,94,285,94,94,94,94,94,94,320,322,323,324,94,94,327,94,354,94,]),'if_statement':([179,247,363,],[248,248,370,]),'type':([0,1,28,36,38,41,42,53,54,57,71,86,88,89,91,98,103,105,114,125,129,133,137,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,167,172,179,224,225,230,235,243,247,262,264,265,267,278,298,300,311,329,340,342,361,],[11,11,44,58,60,63,64,81,95,81,121,132,95,95,95,95,95,95,171,185,191,95,200,95,95,95,95,95,95,95,95,95,95,95,95,95,95,95,227,11,95,227,227,11,11,95,95,95,95,95,95,11,95,95,11,95,351,353,95,]),'empty':([0,1,32,44,47,49,53,55,57,58,59,60,63,64,68,117,119,125,126,129,130,133,143,163,164,167,172,223,224,225,230,231,235,241,262,264,267,272,274,278,279,283,285,311,336,351,],[12,12,52,52,70,52,80,52,80,52,52,52,52,52,70,70,70,186,52,192,52,196,196,52,52,228,236,275,228,228,236,284,236,52,196,196,196,52,275,12,52,284,52,12,52,52,]),'declsx':([0,1,278,311,],[13,27,13,13,]),'func_decl':([0,1,172,230,235,278,311,],[7,7,237,237,237,7,7,]),'func_def':([0,1,172,230,235,278,311,],[15,15,238,238,238,15,15,]),'idents':([29,37,67,173,239,289,318,],[46,59,116,239,288,318,338,]),'void':([0,1,172,230,235,278,311,],[16,16,16,16,16,16,16,]),'identx':([47,68,117,119,],[69,118,175,176,]),'type_states':([231,283,],[282,316,]),'pair':([51,124,],[77,77,]),'type_enum':([223,274,],[274,274,]),'typestr':([0,1,28,36,38,41,42,53,54,57,71,86,88,89,91,98,103,105,114,125,129,133,137,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,167,172,179,224,225,230,235,243,247,262,264,265,267,278,298,300,311,329,340,342,361,],[20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,20,]),'types':([53,57,129,],[82,107,190,]),'pairs':([32,44,49,55,58,59,60,63,64,126,130,163,164,241,272,279,285,336,351,],[50,66,72,104,108,109,111,112,113,187,193,221,222,290,308,50,317,349,359,]),'ide
nt':([0,1,11,16,28,29,30,31,33,36,37,38,39,41,42,43,47,51,53,54,57,67,68,71,81,86,88,89,91,95,98,103,105,114,117,119,123,124,125,128,129,131,133,134,137,139,141,143,144,145,146,147,148,150,151,152,153,154,155,156,157,158,159,167,172,173,174,177,179,185,223,224,225,227,230,231,232,235,239,243,247,262,264,265,267,274,278,283,289,291,294,295,297,298,300,301,311,314,318,329,340,342,343,361,],[22,22,32,35,22,45,48,49,55,22,45,22,61,22,22,65,68,74,22,100,22,45,68,22,127,22,100,100,100,140,100,100,100,22,68,68,180,74,22,189,22,194,100,199,22,194,203,100,205,100,100,100,100,100,100,100,100,100,100,100,100,100,100,22,22,45,240,194,100,127,272,22,22,279,22,281,194,22,45,100,100,100,100,100,100,272,22,281,45,194,194,194,194,100,100,194,22,335,45,100,22,22,194,100,]),'obj_decls':([167,224,225,],[226,276,277,]),'expr':([54,88,89,91,98,103,105,133,143,145,146,147,148,150,151,152,153,154,155,156,157,158,159,179,243,247,262,264,265,267,298,300,329,361,],[101,135,136,138,142,160,162,197,197,206,207,208,209,210,211,212,213,214,215,216,217,218,219,251,292,251,197,197,306,197,325,326,347,367,]),'exprs':([133,143,262,264,267,],[198,204,303,304,307,]),'decls':([0,278,311,],[25,312,333,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
_lr_productions = [
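# Each production tuple below is (rule string, LHS nonterminal, RHS length,
# handler function name, source file, line number of the handler).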
("S' -> file","S'",1,None,None,None),
('file -> decls','file',1,'p_file','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',218),
('empty -> <empty>','empty',0,'p_empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',222),
('decls -> declsx','decls',1,'p_decls','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',225),
('declsx -> decl declsx','declsx',2,'p_declsx__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',229),
('declsx -> empty','declsx',1,'p_declsx__none','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',239),
('decl -> PROTOCOL STRING SEMI','decl',3,'p_decl__protocol','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',243),
('decl -> INCLUDE STRING SEMI','decl',3,'p_decl__include','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',252),
('decl -> MACHINE ( idents ) : obj_decls { decls }','decl',9,'p_decl__machine0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',261),
('decl -> MACHINE ( idents pairs ) : obj_decls { decls }','decl',10,'p_decl__machine1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',265),
('decl -> ACTION ( ident pairs ) statements','decl',6,'p_decl__action','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',269),
('decl -> IN_PORT ( ident , type , var pairs ) statements','decl',10,'p_decl__in_port','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',273),
('decl -> OUT_PORT ( ident , type , var pairs ) SEMI','decl',10,'p_decl__out_port','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',277),
('decl -> TRANS ( idents , idents , ident ) idents','decl',9,'p_decl__trans0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',281),
('decl -> TRANS ( idents , idents ) idents','decl',7,'p_decl__trans1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',285),
('decl -> TRANS ( idents , idents , ident ) idents idents','decl',10,'p_decl__trans2','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',289),
('decl -> TRANS ( idents , idents ) idents idents','decl',8,'p_decl__trans3','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',293),
('decl -> EXTERN_TYPE ( type pairs ) SEMI','decl',6,'p_decl__extern0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',297),
('decl -> GLOBAL ( type pairs ) { type_members }','decl',8,'p_decl__global','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',302),
('decl -> STRUCT ( type pairs ) { type_members }','decl',8,'p_decl__struct','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',307),
('decl -> ENUM ( type pairs ) { type_enums }','decl',8,'p_decl__enum','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',311),
('decl -> STATE_DECL ( type pairs ) { type_states }','decl',8,'p_decl__state_decl','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',316),
('obj_decls -> obj_decl obj_decls','obj_decls',2,'p_obj_decls__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',323),
('obj_decls -> empty','obj_decls',1,'p_obj_decls__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',327),
('type_members -> type_member type_members','type_members',2,'p_type_members__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',331),
('type_members -> empty','type_members',1,'p_type_members__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',335),
('type_member -> obj_decl','type_member',1,'p_type_member__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',339),
('type_member -> func_decl','type_member',1,'p_type_member__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',340),
('type_member -> func_def','type_member',1,'p_type_member__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',341),
('decl -> obj_decl','decl',1,'p_decl__obj_decl','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',346),
('obj_decl -> type ident pairs SEMI','obj_decl',4,'p_obj_decl__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',350),
('obj_decl -> type STAR ident pairs SEMI','obj_decl',5,'p_obj_decl__1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',354),
('obj_decl -> type ident ASSIGN expr SEMI','obj_decl',5,'p_obj_decl__2','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',358),
('obj_decl -> type STAR ident ASSIGN expr SEMI','obj_decl',6,'p_obj_decl__3','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',363),
('decl -> func_decl','decl',1,'p_decl__func_decl','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',369),
('func_decl -> void ident ( params ) pairs SEMI','func_decl',7,'p_func_decl__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',373),
('func_decl -> type ident ( params ) pairs SEMI','func_decl',7,'p_func_decl__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',374),
('func_decl -> void ident ( types ) pairs SEMI','func_decl',7,'p_func_decl__1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',378),
('func_decl -> type ident ( types ) pairs SEMI','func_decl',7,'p_func_decl__1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',379),
('decl -> func_def','decl',1,'p_decl__func_def','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',383),
('func_def -> void ident ( params ) pairs statements','func_def',7,'p_func_def__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',387),
('func_def -> type ident ( params ) pairs statements','func_def',7,'p_func_def__0','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',388),
('type_enums -> type_enum type_enums','type_enums',2,'p_type_enums__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',393),
('type_enums -> empty','type_enums',1,'p_type_enums__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',397),
('type_enum -> ident pairs SEMI','type_enum',3,'p_type_enum','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',401),
('type_states -> type_state type_states','type_states',2,'p_type_states__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',406),
('type_states -> empty','type_states',1,'p_type_states__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',410),
('type_state -> ident , enumeration pairs SEMI','type_state',5,'p_type_state','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',414),
('params -> param , params','params',3,'p_params__many','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',419),
('params -> param','params',1,'p_params__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',423),
('params -> empty','params',1,'p_params__none','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',427),
('param -> type ident','param',2,'p_param','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',431),
('param -> type STAR ident','param',3,'p_param__pointer','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',435),
('param -> type STAR ident ASSIGN STRING','param',5,'p_param__pointer_default','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',439),
('param -> type ident ASSIGN NUMBER','param',4,'p_param__default_number','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',443),
('param -> type ident ASSIGN LIT_BOOL','param',4,'p_param__default_bool','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',447),
('param -> type ident ASSIGN STRING','param',4,'p_param__default_string','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',451),
('types -> type , types','types',3,'p_types__multiple','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',456),
('types -> type','types',1,'p_types__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',460),
('types -> empty','types',1,'p_types__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',464),
('typestr -> typestr DOUBLE_COLON ident','typestr',3,'p_typestr__multi','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',468),
('typestr -> ident','typestr',1,'p_typestr__single','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',472),
('type -> typestr','type',1,'p_type__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',476),
('void -> VOID','void',1,'p_void','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',480),
('idents -> { identx }','idents',3,'p_idents__braced','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',485),
('idents -> ident','idents',1,'p_idents__bare','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',489),
('identx -> ident SEMI identx','identx',3,'p_identx__multiple_1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',493),
('identx -> ident , identx','identx',3,'p_identx__multiple_1','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',494),
('identx -> ident identx','identx',2,'p_identx__multiple_2','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',498),
('identx -> empty','identx',1,'p_identx__single','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',502),
('ident -> IDENT','ident',1,'p_ident','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',506),
('pairs -> , pairsx','pairs',2,'p_pairs__list','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',511),
('pairs -> empty','pairs',1,'p_pairs__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',515),
('pairsx -> pair , pairsx','pairsx',3,'p_pairsx__many','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',519),
('pairsx -> pair','pairsx',1,'p_pairsx__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',524),
('pair -> ident = STRING','pair',3,'p_pair__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',529),
('pair -> ident = ident','pair',3,'p_pair__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',530),
('pair -> ident = NUMBER','pair',3,'p_pair__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',531),
('pair -> STRING','pair',1,'p_pair__literal','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',535),
('statements -> { statements_inner }','statements',3,'p_statements__inner','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',540),
('statements -> { }','statements',2,'p_statements__none','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',544),
('statements_inner -> statement statements_inner','statements_inner',2,'p_statements_inner__many','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',548),
('statements_inner -> statement','statements_inner',1,'p_statements_inner__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',552),
('exprs -> expr , exprs','exprs',3,'p_exprs__multiple','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',556),
('exprs -> expr','exprs',1,'p_exprs__one','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',560),
('exprs -> empty','exprs',1,'p_exprs__empty','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',564),
('statement -> expr SEMI','statement',2,'p_statement__expression','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',568),
('statement -> expr ASSIGN expr SEMI','statement',4,'p_statement__assign','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',572),
('statement -> ENQUEUE ( var , type ) statements','statement',7,'p_statement__enqueue','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',576),
('statement -> ENQUEUE ( var , type , expr ) statements','statement',9,'p_statement__enqueue_latency','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',580),
('statement -> STALL_AND_WAIT ( var , var ) SEMI','statement',7,'p_statement__stall_and_wait','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',584),
('statement -> PEEK ( var , type pairs ) statements','statement',8,'p_statement__peek','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',588),
('statement -> CHECK_ALLOCATE ( var ) SEMI','statement',5,'p_statement__check_allocate','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',592),
('statement -> CHECK_STOP_SLOTS ( var , STRING , STRING ) SEMI','statement',9,'p_statement__check_stop','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',596),
('statement -> RETURN expr SEMI','statement',3,'p_statement__return','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',600),
('statement -> if_statement','statement',1,'p_statement__if','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',604),
('if_statement -> IF ( expr ) statements','if_statement',5,'p_if_statement__if','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',608),
('if_statement -> IF ( expr ) statements ELSE statements','if_statement',7,'p_if_statement__if_else','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',612),
('if_statement -> IF ( expr ) statements ELSE if_statement','if_statement',7,'p_statement__if_else_if','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',616),
('aexpr -> STATIC_CAST ( type , expr )','aexpr',6,'p_expr__static_cast','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',621),
('aexpr -> STATIC_CAST ( type , STRING , expr )','aexpr',8,'p_expr__static_cast_ptr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',625),
('aexpr -> var','aexpr',1,'p_expr__var','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',629),
('aexpr -> type ident','aexpr',2,'p_expr__localvar','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',633),
('aexpr -> literal','aexpr',1,'p_expr__literal','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',637),
('aexpr -> enumeration','aexpr',1,'p_expr__enumeration','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',641),
('aexpr -> ident ( exprs )','aexpr',4,'p_expr__func_call','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',645),
('aexpr -> NEW type','aexpr',2,'p_expr__new','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',649),
('aexpr -> OOD','aexpr',1,'p_expr__null','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',653),
('aexpr -> aexpr DOT ident','aexpr',3,'p_expr__member','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',657),
('aexpr -> aexpr DOT ident ( exprs )','aexpr',6,'p_expr__member_method_call','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',661),
('aexpr -> aexpr [ exprs ]','aexpr',4,'p_expr__member_method_call_lookup','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',665),
('aexpr -> type DOUBLE_COLON ident ( exprs )','aexpr',6,'p_expr__class_method_call','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',669),
('expr -> aexpr','expr',1,'p_expr__aexpr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',673),
('expr -> expr STAR expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',677),
('expr -> expr SLASH expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',678),
('expr -> expr PLUS expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',679),
('expr -> expr DASH expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',680),
('expr -> expr LT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',681),
('expr -> expr GT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',682),
('expr -> expr LE expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',683),
('expr -> expr GE expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',684),
('expr -> expr EQ expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',685),
('expr -> expr NE expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',686),
('expr -> expr AND expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',687),
('expr -> expr OR expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',688),
('expr -> expr RIGHTSHIFT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',689),
('expr -> expr LEFTSHIFT expr','expr',3,'p_expr__binary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',690),
('expr -> NOT expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',695),
('expr -> INCR expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',696),
('expr -> DECR expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',697),
('expr -> DASH expr','expr',2,'p_expr__unary_op','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',698),
('aexpr -> ( expr )','aexpr',3,'p_expr__parens','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',702),
('aexpr -> IS_VALID ( var )','aexpr',4,'p_expr__is_valid_ptr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',706),
('aexpr -> IS_INVALID ( var )','aexpr',4,'p_expr__is_invalid_ptr','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',710),
('literal -> STRING','literal',1,'p_literal__string','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',714),
('literal -> NUMBER','literal',1,'p_literal__number','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',718),
('literal -> FLOATNUMBER','literal',1,'p_literal__float','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',722),
('literal -> LIT_BOOL','literal',1,'p_literal__bool','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',726),
('enumeration -> ident : ident','enumeration',3,'p_enumeration','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',730),
('var -> ident','var',1,'p_var','/home/duan-xg/gem5-nvp/gem5/src/mem/slicc/parser.py',734),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
be98e688f595d130727ae63ee7025e42be5b6796
| 7,044
|
py
|
Python
|
src/clean.py
|
JohnlNguyen/Comment2Code
|
5875d0ce708f8d4965ef39cdae282f3995cdd7c0
|
[
"MIT"
] | null | null | null |
src/clean.py
|
JohnlNguyen/Comment2Code
|
5875d0ce708f8d4965ef39cdae282f3995cdd7c0
|
[
"MIT"
] | null | null | null |
src/clean.py
|
JohnlNguyen/Comment2Code
|
5875d0ce708f8d4965ef39cdae282f3995cdd7c0
|
[
"MIT"
] | null | null | null |
# import pygments
# from pygments.token import Comment, Text, Keyword, Name
# from bpe import Encoder
# from nltk import everygrams, ngrams
# from nltk.tokenize import word_tokenize
# from lexer import build_lexer
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("data", help="Path to training data")
args = ap.parse_args()
main_java(args.data)
| 29.974468
| 116
| 0.579784
|
from pdb import set_trace
import json
import itertools
# import pygments
import random
import csv
# from pygments.token import Comment, Text, Keyword, Name
# from bpe import Encoder
# from nltk import everygrams, ngrams
# from nltk.tokenize import word_tokenize
# from lexer import build_lexer
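# NOTE (editorial): comment_contains_code below references pygments.lex,
# Name, and ngrams from the imports commented out above and will raise
# NameError if called as-is; tokenize_code expects a bpe-style Encoder
# instance to be passed in by the caller.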
def remove_blank(code):
for i, line in enumerate(code):
if line == "\n":
code = code[:i]
break
return code
def split_string(s):
return list(itertools.chain(*[w.strip().split(' ') for w in s.split('\\n') if w]))
def is_different(a, b, n):
b = split_string(b)
a = split_string(a)
if abs(len(a) - len(b)) >= n:
return True
return sum([1 if x != y else 0 for x, y in zip(a, b)]) >= n
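# e.g. is_different("a b c", "a b d", 1) -> True  (one token differs)
#      is_different("a b",   "a b",   1) -> False (no token differs)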
def tokenize_code(line, encoder):
return set(encoder.tokenize(line)) - {encoder.EOW, encoder.SOW, encoder.UNK, encoder.PAD}
def comment_contains_code(comment, code, lexer, n=5):
t_cod = []
for line in code:
t_cod.extend([x[1]
for x in list(pygments.lex(line, lexer)) if x[0] in Name])
# t_cod = " ".join(code)
t_cod = " ".join(t_cod)
t_com = ["".join(x) for x in ngrams(comment.replace("\\n", " "), n)]
t_cod = ["".join(x) for x in ngrams(t_cod, n)]
intersection = set(t_com).intersection(set(t_cod))
return len(intersection) > 0
def clean_code(code):
code = [c.strip() for c in code]
return code
def clean_comment(comment):
comment = comment.split("\\n")
comment = [c.replace("//", "").strip() for c in comment if c]
return "\\n".join(comment)
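# e.g. clean_comment("// checks input\\n// and returns")
#      -> "checks input\\nand returns"
# (comments in this dataset store newlines as the literal two characters \n)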
def print_info(data):
print("-" * 100)
print("Both {}".format(sum([1 for row in data if row['type'] == "BOTH"])))
print("Comment {}".format(
sum([1 for row in data if row['type'] == "COMMENT"])))
print("Code {}".format(sum([1 for row in data if row['type'] == "CODE"])))
print("Total {}".format(len(data)))
def write(data, fname):
with open(fname, 'w') as f:
json.dump([row for row in data if row["type"] != "COMMENT"], f)
def read(fname):
with open(fname, 'r') as f:
data = json.load(f)
return data
def isEnglish(s):
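    # A string counts as English here iff its UTF-8 bytes decode as plain ASCII.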
try:
s.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
def main_python():
data = read('../data/Pairs/code_comment_102813.json')
print_info(data)
    dedup = set()
    cleaned = []
for row in data:
key = "{}#{}#{}#{}".format(
row['before_comment'], row['after_comment'], row['before_path'], row['after_path'])
if key not in dedup:
dedup.add(key)
if not isEnglish(row['before_comment']) or not isEnglish(row['after_comment']):
continue
if not row['before_comment'] or not row['after_comment'] or not row['before_code'] or not row['after_code']:
continue
if int(row['after_line']) < 10 or int(row['before_line']) < 10:
continue
for k in ['before_comment', 'after_comment']:
comment = clean_comment(row[k])
row[k] = comment
for k in ['before_code', 'after_code']:
code = clean_code(row[k])
row[k] = code
if row['before_comment'] != row['after_comment'] and row['before_code'][0] != row['after_code'][0]:
row['type'] = "BOTH"
if row['before_comment'] == row['after_comment'] or row['before_code'][0] == row['after_code'][0]:
row['type'] = "COMMENT"
cleaned.append(row)
# write(cleaned, '../data/Pairs/code_comment_10k.json')
print_info(cleaned)
def main_java(data_path):
data = read(data_path)
# lexer = build_lexer('java')
print_info(data)
dedup = set()
cleaned = []
for row in data:
key = "{}#{}#{}#{}".format(
row['before_comment'], row['after_comment'], row['before_path'], row['after_path'])
if key not in dedup:
dedup.add(key)
if not isEnglish(row['before_comment']) or not isEnglish(row['after_comment']):
continue
if not isEnglish("".join(row['before_code'])) or not isEnglish("".join(row['after_code'])):
continue
if not row['before_comment'] or not row['after_comment'] or not row['before_code'] or not row['after_code']:
continue
if int(row['after_line']) < 20 or int(row['before_line']) < 20:
continue
both_cond = row['before_comment'] != row['after_comment'] and "".join(
row['before_code']) != "".join(row['after_code'])
comment_cond = row['before_comment'] != row['after_comment'] and "".join(
row['before_code']) == "".join(row['after_code'])
code_cond = row['before_comment'] == row['after_comment'] and "".join(
row['before_code']) != "".join(row['after_code'])
if both_cond:
row['type'] = "BOTH"
elif comment_cond:
row['type'] = "COMMENT"
elif code_cond:
row['type'] = "CODE"
cleaned.append(row)
write(cleaned, data_path)
print_info(cleaned)
def merge_python():
code_changes = read('../data/Pairs/code_changes_6826.json')
# both_changes = read('../data/Pairs/code_comment_10k.json')
# lexer = build_lexer('python')
# for code, both in zip(code_changes, both_changes):
cleaned = []
for code in code_changes:
if code['before_code'] != code['after_code']:
cleaned.append(code)
print(len(cleaned))
def pprint_file_name(file_name, row):
org, project, commit, changed_file = file_name.split("#")
commit = row['commit']
changed_file = changed_file.split('__')[-1]
url = f'https://github.com/{org}/{project}/commit/{commit}'
line = row['after_line']
return url, changed_file, line
def split():
data = read('../data/Pairs/code_comment_738.json')
both = [row for row in data if row['type'] == "BOTH"]
code = [row for row in data if row['type'] == "CODE"]
to_inspect = []
for row in random.sample(both, 25):
keep = {
"id": row['after_path'] + "#" + row['after_line'],
"before_comment": row['before_comment'].split("\\n"),
"before_code": row['before_code'],
"after_code": row['after_code'],
"label": "1"
}
to_inspect.append(keep)
for row in random.sample(code, 25):
keep = {
"id": row['after_path'] + "#" + row['after_line'],
"before_comment": row['before_comment'].split("\\n"),
"before_code": row['before_code'],
"after_code": row['after_code'],
"label": "0"
}
to_inspect.append(keep)
with open('../data/john.json', 'w+') as f:
json.dump(to_inspect, f)
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("data", help="Path to training data")
args = ap.parse_args()
main_java(args.data)
| 0
| 0
| 0
| 0
| 0
| 6,169
| 0
| -30
| 478
|
fa54df8ef50460a558172de0f2f2c4bd5f3c25e7
| 8,100
|
py
|
Python
|
qa/rpc-tests/zmq_test.py
|
gandrewstone/bitcoin
|
05de381c02eb4bfca94957733acadfa217527f25
|
[
"MIT"
] | 535
|
2015-09-04T15:10:08.000Z
|
2022-03-17T20:51:05.000Z
|
qa/rpc-tests/zmq_test.py
|
gandrewstone/bitcoin
|
05de381c02eb4bfca94957733acadfa217527f25
|
[
"MIT"
] | 1,269
|
2016-01-31T20:21:24.000Z
|
2022-03-16T01:20:08.000Z
|
qa/rpc-tests/zmq_test.py
|
gandrewstone/bitcoin
|
05de381c02eb4bfca94957733acadfa217527f25
|
[
"MIT"
] | 295
|
2015-10-19T16:12:29.000Z
|
2021-08-02T20:05:17.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
if __name__ == '__main__':
ZMQTest ().main ()
| 37.850467
| 245
| 0.564691
|
#!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Copyright (c) 2015-2017 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
import time
import test_framework.loginit
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import zmq
import struct
import http.client
import urllib.parse
def ZmqReceive(socket, timeout=30):
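    # Receive a multipart message, retrying on EAGAIN until the timeout expires.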
start = time.time()
while True:
try:
return socket.recv_multipart()
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN or time.time() - start >= timeout:
raise
time.sleep(0.05)
class ZMQTest (BitcoinTestFramework):
port = 28340 # ZMQ ports of these test must be unique so multiple tests can be run simultaneously
def setup_nodes(self):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashds")
self.zmqSubSocket.RCVTIMEO = 30000
self.zmqSubSocket.linger = 500
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
return start_nodes(4, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashds=tcp://127.0.0.1:'+str(self.port), '-debug=respend', '-debug=dsproof', '-debug=mempool', '-debug=net', '-debug=zmq'],
['-debug=respend', '-debug=dsproof', '-debug=mempool', '-debug=net', '-debug=zmq'],
['-debug=respend', '-debug=dsproof', '-debug=mempool', '-debug=net', '-debug=zmq'],
[]
])
def run_test(self):
try:
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
print("listen...")
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
for x in range(0,n*2):
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
assert_equal(hashRPC, hashZMQ) #tx hash from generate must be equal to the hash received over zmq
# Send all coins to a single new address so that we can be sure that we
# try double spending a p2pkh output in the subsequent step.
wallet = self.nodes[0].listunspent()
inputs = []
num_coins = 0
for t in wallet:
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
num_coins += 1
outputs = { self.nodes[0].getnewaddress() : num_coins * 49.95 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
rawtx = self.nodes[0].signrawtransaction(rawtx)
try:
hashRPC = self.nodes[0].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
print(e.error['message'])
assert(False)
self.sync_all()
#check we received zmq notification
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
assert_equal(hashRPC, hashZMQ) #tx hash from generate must be equal to the hash received over zmq
hashRPC = self.nodes[1].generate(1)
self.sync_all()
#check we received zmq notification
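            # generate(1) queues several notifications; read them in turn and only
            # check the final message, which should be the new block hash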
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashblock":
hashZMQ = bytes_to_hex_str(body)
assert_equal(hashRPC[0], hashZMQ) #blockhash from generate must be equal to the hash received over zmq
            # Send 2 transactions that double spend one another
wallet = self.nodes[0].listunspent()
walletp2pkh = list(filter(lambda x : len(x["scriptPubKey"]) != 70, wallet)) # Find an input that is not P2PK
t = walletp2pkh.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = { self.nodes[1].getnewaddress() : t["amount"] }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
rawtx = self.nodes[0].signrawtransaction(rawtx)
try:
hashTxToDoubleSpend = self.nodes[1].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
print(e.error['message'])
assert(False)
self.sync_all()
#check we received zmq notification
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
assert_equal(hashTxToDoubleSpend, hashZMQ) #tx hash from generate must be equal to the hash received over zmq
outputs = { self.nodes[1].getnewaddress() : t["amount"] }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
rawtx = self.nodes[0].signrawtransaction(rawtx)
try:
hashtx = self.nodes[0].sendrawtransaction(rawtx['hex'])
except JSONRPCException as e:
assert("txn-mempool-conflict" in e.error['message'])
else:
assert(False)
self.sync_all()
# now we should receive a zmq ds msg because the tx was broadcast
msg = ZmqReceive(self.zmqSubSocket)
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashds":
hashZMQ = bytes_to_hex_str(body)
assert_equal(hashTxToDoubleSpend, hashZMQ) #double spent tx hash from generate must be equal to the hash received over zmq
finally:
self.zmqSubSocket.close()
self.zmqSubSocket = None
self.zmqContext.destroy()
self.zmqContext = None
if __name__ == '__main__':
ZMQTest ().main ()
def Test():
flags = standardFlags()
t = ZMQTest()
t.drop_to_pdb = True
t.main(flags)
| 0
| 0
| 0
| 7,130
| 0
| 346
| 0
| 27
| 247
|
083ca184ae9015b2a796f748eed530d0a8d956fc
| 1,204
|
py
|
Python
|
purifier_reader/hello.py
|
gittubbs/giuliot
|
5053d1355f3db8ce184f0e7a890228f261f3a27e
|
[
"MIT"
] | null | null | null |
purifier_reader/hello.py
|
gittubbs/giuliot
|
5053d1355f3db8ce184f0e7a890228f261f3a27e
|
[
"MIT"
] | null | null | null |
purifier_reader/hello.py
|
gittubbs/giuliot
|
5053d1355f3db8ce184f0e7a890228f261f3a27e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pipenv-shebang
import csv
import time
import configReader
from datetime import datetime
from miio import AirPurifierMiot
configFile = "config.yaml"
ip = configReader.fetchIpAddress(configFile,"airpurifier")
token = configReader.fetchToken(configFile,"airpurifier")
air = AirPurifierMiot(ip,token)
while(True):
try:
prop = air.get_properties()
dateTimeObj = datetime.now()
#print(prop)
temp = 0
hum = 0
aqi = 0
for e in prop:
if(e["did"] == "humidity"):
hum = e["value"]
if(e["did"] == "temperature"):
temp = e["value"]
if(e["did"] == "aqi"):
aqi = e["value"]
with open('../visualize/studies/data_3.csv', mode = 'a') as csv_file:
writer = csv.writer(csv_file, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
            #Timestamp, Temperature, Humidity, AQI
writer.writerow([round(time.time() * 1000),temp,hum,aqi])
print(dateTimeObj)
print(temp)
print(hum)
print(aqi)
time.sleep(15.0 * 60)
except Exception:
pass
| 26.173913
| 97
| 0.563123
|
#!/usr/bin/env pipenv-shebang
import csv
import time
import yaml
import configReader
from datetime import datetime
from miio import AirPurifierMiot
configFile = "config.yaml"
ip = configReader.fetchIpAddress(configFile,"airpurifier")
token = configReader.fetchToken(configFile,"airpurifier")
air = AirPurifierMiot(ip,token)
while(True):
try:
prop = air.get_properties()
dateTimeObj = datetime.now()
#print(prop)
temp = 0
hum = 0
aqi = 0
for e in prop:
if(e["did"] == "humidity"):
hum = e["value"]
if(e["did"] == "temperature"):
temp = e["value"]
if(e["did"] == "aqi"):
aqi = e["value"]
with open('../visualize/studies/data_3.csv', mode = 'a') as csv_file:
writer = csv.writer(csv_file, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)
            #Timestamp, Temperature, Humidity, AQI
writer.writerow([round(time.time() * 1000),temp,hum,aqi])
print(dateTimeObj)
print(temp)
print(hum)
print(aqi)
time.sleep(15.0 * 60)
except Exception:
pass
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| -10
| 22
|
289ddd19b53f8e810f7c2c16c9a4e2eb67fa678d
| 767
|
py
|
Python
|
devtools/conda-recipe-dev/manage_local_dev_version.py
|
uibcdf/NetLabTools
|
a61732c341fbb700dcacf8a66d3776e6dd7e4fa4
|
[
"MIT"
] | 1
|
2022-02-22T02:19:15.000Z
|
2022-02-22T02:19:15.000Z
|
devtools/conda-recipe-dev/manage_local_dev_version.py
|
uibcdf/NetLabTools
|
a61732c341fbb700dcacf8a66d3776e6dd7e4fa4
|
[
"MIT"
] | null | null | null |
devtools/conda-recipe-dev/manage_local_dev_version.py
|
uibcdf/NetLabTools
|
a61732c341fbb700dcacf8a66d3776e6dd7e4fa4
|
[
"MIT"
] | null | null | null |
import sys
if '--install' in sys.argv[1:]:
print('Building and installing local dev version via conda')
installing()
elif '--remove' in sys.argv[1:]:
print('Removing local dev package')
remove()
elif '--update' in sys.argv[1:]:
print('Updating local dev package')
update()
| 25.566667
| 71
| 0.681877
|
import os
import sys
from numpy.distutils.exec_command import exec_command
def installing():
status, output = exec_command('conda build . --no-anaconda-upload')
status, output = exec_command('conda build . --output')
status, output = exec_command('conda install --use-local '+output)
status, output = exec_command('conda build purge')
def remove():
status, output = exec_command('conda remove kinnetmt --yes')
def update():
remove()
installing()
if '--install' in sys.argv[1:]:
print('Building and installing local dev version via conda')
installing()
elif '--remove' in sys.argv[1:]:
print('Removing local dev package')
remove()
elif '--update' in sys.argv[1:]:
print('Updating local dev package')
update()
| 0
| 0
| 0
| 0
| 0
| 336
| 0
| 20
| 113
|
c03fa08f9a8575bf60141ba2ce0270b9fa23f1bc
| 911
|
py
|
Python
|
setup.py
|
awslabs/aws-sdk-api-changes
|
a3e6cbad729bb69f9d63373dc52c5e53faa349f2
|
[
"Apache-2.0"
] | 11
|
2020-04-27T22:53:01.000Z
|
2021-09-09T16:19:09.000Z
|
setup.py
|
awslabs/aws-sdk-api-changes
|
a3e6cbad729bb69f9d63373dc52c5e53faa349f2
|
[
"Apache-2.0"
] | 4
|
2020-03-05T17:41:25.000Z
|
2021-06-02T02:39:34.000Z
|
setup.py
|
awslabs/aws-sdk-api-changes
|
a3e6cbad729bb69f9d63373dc52c5e53faa349f2
|
[
"Apache-2.0"
] | 2
|
2020-03-12T10:23:51.000Z
|
2021-01-27T10:56:10.000Z
|
from setuptools import setup, find_packages
setup(
name="apichanges",
version='0.0.1',
description="AWS API Changes",
long_description=read('readme.md'),
long_description_content_type='text/markdown',
license="Apache-2.0",
packages=find_packages(),
entry_points={
'console_scripts': [
'apichanges = apichanges.cli:cli']},
install_requires=[
"botocore>=1.12.228",
"Click==7.0",
"docutils==0.15.2",
"Jinja2>=2.10.1",
"jmespath==0.9.4",
"MarkupSafe==1.1.1",
"pycparser==2.19",
"pygit2==0.28.2",
"python-dateutil==2.8.0",
"six==1.13.0",
"lxml==4.4.2",
"feedgen>=0.9.0",
"urllib3==1.25.7"
],
)
| 22.775
| 55
| 0.553238
|
import os
from io import open
from setuptools import setup, find_packages
def read(fname):
return open(
os.path.join(os.path.dirname(__file__), fname),
encoding='utf-8').read()
setup(
name="apichanges",
version='0.0.1',
description="AWS API Changes",
long_description=read('readme.md'),
long_description_content_type='text/markdown',
license="Apache-2.0",
packages=find_packages(),
entry_points={
'console_scripts': [
'apichanges = apichanges.cli:cli']},
install_requires=[
"botocore>=1.12.228",
"Click==7.0",
"docutils==0.15.2",
"Jinja2>=2.10.1",
"jmespath==0.9.4",
"MarkupSafe==1.1.1",
"pycparser==2.19",
"pygit2==0.28.2",
"python-dateutil==2.8.0",
"six==1.13.0",
"lxml==4.4.2",
"feedgen>=0.9.0",
"urllib3==1.25.7"
],
)
| 0
| 0
| 0
| 0
| 0
| 101
| 0
| -14
| 67
|
bfffe9a27da276a65e6f804ee59cd334d91df562
| 19,977
|
py
|
Python
|
core/utils.py
|
Special-K-s-Flightsim-Bots/DCSServerBot
|
7913a3ce684817d1033b7f45720d815668f71c34
|
[
"MIT"
] | 12
|
2021-05-05T05:21:57.000Z
|
2022-03-15T06:07:28.000Z
|
core/utils.py
|
Special-K-s-Flightsim-Bots/DCSServerBot
|
7913a3ce684817d1033b7f45720d815668f71c34
|
[
"MIT"
] | 5
|
2021-05-04T21:28:12.000Z
|
2022-01-11T21:29:59.000Z
|
core/utils.py
|
Special-K-s-Flightsim-Bots/DCSServerBot
|
7913a3ce684817d1033b7f45720d815668f71c34
|
[
"MIT"
] | 3
|
2021-05-15T17:57:23.000Z
|
2022-02-20T18:40:32.000Z
|
# utils.py
import os
import re
from configparser import ConfigParser
SAVED_GAMES = os.path.expandvars('%USERPROFILE%\\Saved Games')
REGEXP = {
'branch': re.compile(r'"branch": "(?P<branch>.*)"'),
'version': re.compile(r'"version": "(?P<version>.*)"')
}
PATCHNOTES_URL = 'https://www.digitalcombatsimulator.com/en/news/changelog/rss/'
config = ConfigParser()
config.read('config/dcsserverbot.ini')
| 44.892135
| 330
| 0.580818
|
# utils.py
import asyncio
import aiohttp
import discord
import math
import os
import psutil
import re
import socket
import subprocess
import psycopg2
import xmltodict
from core import const
from datetime import datetime, timedelta
from configparser import ConfigParser
from contextlib import closing, suppress
from discord.ext import commands
from typing import Union
SAVED_GAMES = os.path.expandvars('%USERPROFILE%\\Saved Games')
REGEXP = {
'branch': re.compile(r'"branch": "(?P<branch>.*)"'),
'version': re.compile(r'"version": "(?P<version>.*)"')
}
PATCHNOTES_URL = 'https://www.digitalcombatsimulator.com/en/news/changelog/rss/'
config = ConfigParser()
config.read('config/dcsserverbot.ini')
def findDCSInstallations(server_name=None):
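    # Scan the Saved Games folder for DCS configs; optionally keep only the
    # installation hosting the given server name.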
installations = []
for dirname in os.listdir(SAVED_GAMES):
if os.path.isdir(os.path.join(SAVED_GAMES, dirname)):
settings = os.path.join(SAVED_GAMES, dirname, 'Config\\serverSettings.lua')
if os.path.exists(settings):
if server_name:
with open(settings, encoding='utf8') as f:
if '["name"] = "{}"'.format(server_name) in f.read():
installations.append(dirname)
else:
installations.append(dirname)
return installations
def changeServerSettings(server_name, name, value):
assert name in ['listStartIndex', 'password', 'name', 'maxPlayers'], 'Value can\'t be changed.'
if isinstance(value, str):
value = '"' + value + '"'
installation = findDCSInstallations(server_name)[0]
serverSettings = os.path.join(SAVED_GAMES, installation, 'Config\\serverSettings.lua')
tmpSettings = os.path.join(SAVED_GAMES, installation, 'Config\\serverSettings.tmp')
with open(serverSettings, encoding='utf8') as infile:
inlines = infile.readlines()
outlines = []
for line in inlines:
if '["{}"]'.format(name) in line:
# outlines.append('["{}"] = {}\n'.format(name, value))
outlines.append(re.sub(' = ([^,]*)', ' = {}'.format(value), line))
else:
outlines.append(line)
with open(tmpSettings, 'w', encoding='utf8') as outfile:
outfile.writelines(outlines)
os.remove(serverSettings)
os.rename(tmpSettings, serverSettings)
def getInstalledVersion(path):
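    # Read the branch and version out of autoupdate.cfg in the given DCS installation.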
branch = version = None
with open(os.path.join(os.path.expandvars(path), 'autoupdate.cfg'), encoding='utf8') as cfg:
lines = cfg.readlines()
for line in lines:
if '"branch"' in line:
match = REGEXP['branch'].search(line)
if match:
branch = match.group('branch')
elif '"version"' in line:
match = REGEXP['version'].search(line)
if match:
version = match.group('version')
return branch, version
async def getLatestVersion(branch):
async with aiohttp.ClientSession() as session:
async with session.get(PATCHNOTES_URL) as response:
xpars = xmltodict.parse(await response.text())
for item in xpars['rss']['channel']['item']:
if branch in item['link']:
return item['link'].split('/')[-2]
def match(name1, name2):
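    # Fuzzy name overlap: strip tags, special characters and digits in stages and
    # return the length of the longest containment; 0 means no match.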
if name1 == name2:
return len(name1)
# remove any tags
    n1 = re.sub(r'^[\[<(=].*[=)>\]]', '', name1).strip()
if len(n1) == 0:
n1 = name1
    n2 = re.sub(r'^[\[<(=].*[=)>\]]', '', name2).strip()
if len(n2) == 0:
n2 = name2
# if the names are too short, return
if (len(n1) < 3 or len(n2) < 3) and (n1 != n2):
return 0
elif n1 in n2:
return len(n1)
elif n2 in n1:
return len(n2)
# remove any special characters
n1 = re.sub('[^a-zA-Z0-9 ]', '', n1).strip().lower()
n2 = re.sub('[^a-zA-Z0-9 ]', '', n2).strip().lower()
if (len(n1) == 0) or (len(n2) == 0):
return 0
# if the names are too short, return
if len(n1) < 3 or len(n2) < 3:
return 0
elif n1 in n2:
return len(n1)
elif n2 in n1:
return len(n2)
# remove any numbers
n1 = re.sub('[0-9 ]', '', n1).strip()
n2 = re.sub('[0-9 ]', '', n2).strip()
if (len(n1) == 0) or (len(n2) == 0):
return 0
# if the names are too short, return
if (len(n1) < 3 or len(n2) < 3) and (n1 != n2):
return 0
elif n1 in n2:
return len(n1)
elif n2 in n1:
return len(n2)
return 0
def match_user(self, data: Union[dict, discord.Member], rematch=False):
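    # Match a DCS player (dict) to a Discord member, or a Discord member to a
    # player UCID, using fuzzy name matching.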
conn = self.pool.getconn()
try:
with closing(conn.cursor()) as cursor:
# try to match a DCS user with a Discord member
if isinstance(data, dict):
if not rematch:
sql = 'SELECT discord_id FROM players WHERE ucid = %s AND discord_id != -1'
cursor.execute(sql, (data['ucid'], ))
result = cursor.fetchone()
if result and result[0] != -1:
return self.bot.guilds[0].get_member(result[0])
# we could not find the user, so try to match them
dcs_name = data['name']
max_weight = 0
best_fit = None
for member in self.bot.get_all_members():
if member.nick:
weight = max(match(dcs_name, member.nick), match(dcs_name, member.name))
else:
weight = match(dcs_name, member.name)
if weight > max_weight:
max_weight = weight
best_fit = member
return best_fit
# try to match a Discord member with a DCS user that played on the servers
else:
max_weight = 0
best_fit = None
sql = 'SELECT ucid, name from players'
if rematch is False:
sql += ' WHERE discord_id = -1'
cursor.execute(sql)
for row in cursor.fetchall():
weight = max(match(data.nick, row['name']), match(data.name, row['name']))
if weight > max_weight:
max_weight = weight
best_fit = row['ucid']
return best_fit
except (Exception, psycopg2.DatabaseError) as error:
self.log.exception(error)
finally:
self.pool.putconn(conn)
async def wait_for_single_reaction(self, ctx, message):
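    # Wait up to 120 s for the command author to add or remove a reaction on the message.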
def check_press(react, user):
return (react.message.channel == ctx.message.channel) & (user == ctx.message.author) & (react.message.id == message.id)
tasks = [self.bot.wait_for('reaction_add', check=check_press),
self.bot.wait_for('reaction_remove', check=check_press)]
try:
done, tasks = await asyncio.wait(tasks, timeout=120, return_when=asyncio.FIRST_COMPLETED)
if len(done) > 0:
react, _ = done.pop().result()
return react
else:
raise asyncio.TimeoutError
finally:
for task in tasks:
task.cancel()
async def selection_list(self, ctx, data, embed_formatter, num=5, marker=-1, marker_emoji='🔄'):
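    # Paginated, reaction-driven selection menu; returns the chosen index or -1 on stop/timeout.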
message = None
try:
j = 0
while len(data) > 0:
max_i = (len(data) % num) if (len(data) - j * num) < num else num
embed = embed_formatter(data[j * num:j * num + max_i], (marker - j * num) if marker in range(j * num, j * num + max_i + 1) else 0, marker_emoji)
message = await ctx.send(embed=embed)
if j > 0:
await message.add_reaction('◀️')
for i in range(1, max_i + 1):
if (j * num + i) != marker:
await message.add_reaction(chr(0x30 + i) + '\u20E3')
else:
await message.add_reaction(marker_emoji)
await message.add_reaction('⏹️')
if ((j + 1) * num) < len(data):
await message.add_reaction('▶️')
react = await wait_for_single_reaction(self, ctx, message)
await message.delete()
if react.emoji == '◀️':
j -= 1
message = None
elif react.emoji == '▶️':
j += 1
message = None
elif react.emoji == '⏹️':
return -1
elif react.emoji == marker_emoji:
return marker - j * num - 1
elif (len(react.emoji) > 1) and ord(react.emoji[0]) in range(0x31, 0x39):
return (ord(react.emoji[0]) - 0x31) + j * num
except asyncio.TimeoutError:
if message:
await message.delete()
return -1
async def yn_question(self, ctx, question, msg=None):
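    # Ask a yes/no question via reactions and return True if the user picked yes.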
yn_embed = discord.Embed(title=question, color=discord.Color.red())
if msg is not None:
yn_embed.add_field(name=msg, value='_ _')
yn_msg = await ctx.send(embed=yn_embed)
await yn_msg.add_reaction('🇾')
await yn_msg.add_reaction('🇳')
react = await wait_for_single_reaction(self, ctx, yn_msg)
await yn_msg.delete()
return react.emoji == '🇾'
async def get_server(self, ctx: Union[discord.ext.commands.context.Context, str]):
for server_name, server in self.bot.DCSServers.items():
if isinstance(ctx, discord.ext.commands.context.Context):
if server['status'] == 'Unknown':
continue
if (int(server['status_channel']) == ctx.channel.id) or (int(server['chat_channel']) == ctx.channel.id) or (int(server['admin_channel']) == ctx.channel.id):
return server
else:
if server_name == ctx:
return server
return None
def has_role(item: str):
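    # Command check that passes only when the author holds one of the roles configured for item.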
def predicate(ctx):
if ctx.guild is None:
raise commands.errors.NoPrivateMessage()
if 'ROLES' not in config or item not in config['ROLES']:
valid_roles = [item]
else:
valid_roles = [x.strip() for x in config['ROLES'][item].split(',')]
for role in ctx.author.roles:
if role.name in valid_roles:
return True
raise commands.errors.MissingRole(item)
return commands.check(predicate)
def isOpen(ip, port):
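    # True if a TCP connection to ip:port succeeds within 3 seconds.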
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.settimeout(3)
return s.connect_ex((ip, int(port))) == 0
async def get_external_ip():
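    # Fetch the public IP address of this host from api.ipify.org.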
async with aiohttp.ClientSession() as session:
async with session.get('https://api.ipify.org') as resp:
return await resp.text()
def findProcess(proc, installation):
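    # Return the process whose executable name matches proc and whose command line mentions the installation.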
for p in psutil.process_iter(['name', 'cmdline']):
if p.info['name'] == proc:
with suppress(Exception):
if installation in p.info['cmdline'][1]:
return p
return None
def DDtoDMS(dd):
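    # Convert decimal degrees into degrees, minutes, seconds and the remaining fraction.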
frac, degrees = math.modf(dd)
frac, minutes = math.modf(frac * 60)
frac, seconds = math.modf(frac * 60)
return degrees, minutes, seconds, frac
def getActiveRunways(runways, wind):
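    # A runway is active when its heading lies within 90° of the direction the wind blows towards.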
retval = []
for runway in runways:
heading = int(runway[:2]) * 10
winddir = (wind['dir'] + 180) % 360
diff = abs((winddir - heading + 180 + 360) % 360 - 180)
if diff <= 90:
retval.append(runway)
if len(retval) == 0:
retval = ['n/a']
return retval
def start_dcs(self, installation):
self.log.debug('Launching DCS server with: "{}\\bin\\dcs.exe" --server --norender -w {}'.format(
os.path.expandvars(self.config['DCS']['DCS_INSTALLATION']), installation))
return subprocess.Popen(['dcs.exe', '--server', '--norender', '-w', installation], executable=os.path.expandvars(self.config['DCS']['DCS_INSTALLATION']) + '\\bin\\dcs.exe')
def start_srs(self, installation):
self.log.debug('Launching SRS server with: "{}\\SR-Server.exe" -cfg="{}"'.format(
os.path.expandvars(self.config['DCS']['SRS_INSTALLATION']), os.path.expandvars(self.config[installation]['SRS_CONFIG'])))
return subprocess.Popen(['SR-Server.exe', '-cfg={}'.format(os.path.expandvars(self.config[installation]['SRS_CONFIG']))], executable=os.path.expandvars(self.config['DCS']['SRS_INSTALLATION']) + '\\SR-Server.exe')
def format_mission_embed(self, mission):
server = self.bot.DCSServers[mission['server_name']]
if 'serverSettings' not in server:
self.bot.log.error('Can\'t format mission embed due to incomplete server data.')
return None
plugins = []
embed = discord.Embed(title='{} [{}/{}]\n{}'.format(mission['server_name'],
mission['num_players'], server['serverSettings']['maxPlayers'],
('"' + mission['current_mission'] + '"') if server['status'] in ['Running', 'Paused'] else ('_' + server['status'] + '_')),
color=discord.Color.blue())
embed.set_thumbnail(url=self.STATUS_IMG[server['status']])
embed.add_field(name='Map', value=mission['current_map'])
embed.add_field(name='Server-IP / Port', value=self.bot.external_ip + ':' + str(server['serverSettings']['port']))
if len(server['serverSettings']['password']) > 0:
embed.add_field(name='Password', value=server['serverSettings']['password'])
else:
embed.add_field(name='Password', value='_ _')
uptime = int(mission['mission_time'])
embed.add_field(name='Runtime', value=str(timedelta(seconds=uptime)))
if 'start_time' in mission:
if mission['date']['Year'] >= 1970:
date = datetime(mission['date']['Year'], mission['date']['Month'],
mission['date']['Day'], 0, 0).timestamp()
real_time = date + mission['start_time'] + uptime
value = str(datetime.fromtimestamp(real_time))
else:
value = '{}-{:02d}-{:02d} {}'.format(mission['date']['Year'], mission['date']['Month'],
mission['date']['Day'], timedelta(seconds=mission['start_time'] + uptime))
else:
value = '-'
embed.add_field(name='Date/Time in Mission', value=value)
embed.add_field(name='Avail. Slots',
value='🔹 {} | {} 🔸'.format(mission['num_slots_blue'] if 'num_slots_blue' in mission else '-', mission['num_slots_red'] if 'num_slots_red' in mission else '-'))
embed.add_field(name='▬' * 25, value='_ _', inline=False)
if 'weather' in mission:
if 'clouds' in mission and 'preset' in mission['clouds']:
embed.add_field(name='Preset', value=mission['clouds']['preset']['readableNameShort'])
else:
embed.add_field(name='Weather', value='Dynamic')
weather = mission['weather']
embed.add_field(name='Temperature', value=str(int(weather['season']['temperature'])) + ' °C')
embed.add_field(name='QNH', value='{:.2f} inHg'.format(weather['qnh'] * const.MMHG_IN_INHG))
embed.add_field(name='Wind', value='\u2002Ground: {}° / {} kts\n\u20026600 ft: {}° / {} kts\n26000 ft: {}° / {} kts'.format(
int(weather['wind']['atGround']['dir'] + 180) % 360, int(weather['wind']['atGround']['speed']),
int(weather['wind']['at2000']['dir'] + 180) % 360, int(weather['wind']['at2000']['speed']),
int(weather['wind']['at8000']['dir'] + 180) % 360, int(weather['wind']['at8000']['speed'])))
if 'clouds' in mission:
if 'preset' in mission['clouds']:
embed.add_field(name='Cloudbase',
value=f'{int(mission["clouds"]["base"] * const.METER_IN_FEET):,} ft')
else:
embed.add_field(name='Clouds', value='Base:\u2002\u2002\u2002\u2002 {:,} ft\nDensity:\u2002\u2002 {}/10\nThickness: {:,} ft'.format(
int(mission['clouds']['base'] * const.METER_IN_FEET), mission['clouds']['density'], int(mission['clouds']['thickness'] * const.METER_IN_FEET)))
else:
embed.add_field(name='Clouds', value='n/a')
visibility = weather['visibility']['distance']
if weather['enable_fog'] is True:
visibility = weather['fog']['visibility'] * const.METER_IN_FEET
embed.add_field(name='Visibility', value=f'{int(visibility):,} ft')
embed.add_field(name='▬' * 25, value='_ _', inline=False)
if 'SRSSettings' in server:
plugins.append('SRS')
if 'EXTERNAL_AWACS_MODE' in server['SRSSettings'] and 'EXTERNAL_AWACS_MODE_BLUE_PASSWORD' in server['SRSSettings'] and 'EXTERNAL_AWACS_MODE_RED_PASSWORD' in server['SRSSettings'] and server['SRSSettings']['EXTERNAL_AWACS_MODE'] is True:
value = '🔹 Pass: {}\n🔸 Pass: {}'.format(
server['SRSSettings']['EXTERNAL_AWACS_MODE_BLUE_PASSWORD'],
server['SRSSettings']['EXTERNAL_AWACS_MODE_RED_PASSWORD'])
else:
value = '_ _'
embed.add_field(name='SRS [{}]'.format(
server['SRSSettings']['SERVER_SRS_PORT']), value=value)
if 'lotAtcSettings' in server:
plugins.append('LotAtc')
embed.add_field(name='LotAtc [{}]'.format(server['lotAtcSettings']['port']), value='🔹 Pass: {}\n🔸 Pass: {}'.format(
server['lotAtcSettings']['blue_password'], server['lotAtcSettings']['red_password']))
if 'Tacview' in server['options']['plugins']:
name = 'Tacview'
if ('tacviewModuleEnabled' in server['options']['plugins']['Tacview'] and server['options']['plugins']['Tacview']['tacviewModuleEnabled'] is False) or ('tacviewFlightDataRecordingEnabled' in server['options']['plugins']['Tacview'] and server['options']['plugins']['Tacview']['tacviewFlightDataRecordingEnabled'] is False):
value = 'disabled'
else:
plugins.append('Tacview')
value = ''
tacview = server['options']['plugins']['Tacview']
if 'tacviewRealTimeTelemetryEnabled' in tacview and tacview['tacviewRealTimeTelemetryEnabled'] is True:
name += ' RT'
if 'tacviewRealTimeTelemetryPassword' in tacview and len(tacview['tacviewRealTimeTelemetryPassword']) > 0:
value += 'Password: {}\n'.format(tacview['tacviewRealTimeTelemetryPassword'])
elif 'tacviewHostTelemetryPassword' in tacview and len(tacview['tacviewHostTelemetryPassword']) > 0:
value += 'Password: "{}"\n'.format(tacview['tacviewHostTelemetryPassword'])
if 'tacviewRealTimeTelemetryPort' in tacview and len(tacview['tacviewRealTimeTelemetryPort']) > 0:
name += ' [{}]'.format(tacview['tacviewRealTimeTelemetryPort'])
if 'tacviewRemoteControlEnabled' in tacview and tacview['tacviewRemoteControlEnabled'] is True:
value += '**Remote Ctrl [{}]**\n'.format(tacview['tacviewRemoteControlPort'])
if 'tacviewRemoteControlPassword' in tacview and len(tacview['tacviewRemoteControlPassword']) > 0:
value += 'Password: {}'.format(tacview['tacviewRemoteControlPassword'])
if len(value) == 0:
value = 'enabled'
embed.add_field(name=name, value=value)
footer = '- Server is running DCS {}\n'.format(server['dcs_version'])
if len(plugins) > 0:
footer += '- The IP address of '
if len(plugins) == 1:
footer += plugins[0]
else:
footer += ', '.join(plugins[0:len(plugins) - 1]) + ' and ' + plugins[len(plugins) - 1]
footer += ' is the same as the server.\n'
for listener in self.bot.eventListeners:
if (type(listener).__name__ == 'UserStatisticsEventListener') and \
(mission['server_name'] in listener.statistics):
footer += '- User statistics are enabled for this server.'
embed.set_footer(text=footer)
return embed
| 90
| 0
| 3,680
| 0
| 0
| 15,107
| 0
| -9
| 745
|
5f2b905591861c9888da34fd68f816a7a4aefabb
| 666
|
py
|
Python
|
lab/urls.py
|
kuipumu/pharma_test_project
|
52c7604fe0a1e484510a41be9fad3ca39f321c0a
|
[
"MIT"
] | null | null | null |
lab/urls.py
|
kuipumu/pharma_test_project
|
52c7604fe0a1e484510a41be9fad3ca39f321c0a
|
[
"MIT"
] | null | null | null |
lab/urls.py
|
kuipumu/pharma_test_project
|
52c7604fe0a1e484510a41be9fad3ca39f321c0a
|
[
"MIT"
] | null | null | null |
"""lab app URL configuration"""
from django.urls import path
from django.utils.translation import gettext_lazy as _
from .views import (SampleCreateView, SampleDeleteView, SampleListView, SampleUpdateView)
urlpatterns = [
path(
_(''),
SampleListView.as_view(),
name='sample_list'
),
path(
_('create'),
SampleCreateView.as_view(),
name='sample_create'
),
path(
_('update/<int:pk>'),
SampleUpdateView.as_view(),
name='sample_update'
),
path(
_('delete/<int:pk>'),
SampleDeleteView.as_view(),
name='sample_delete'
)
]
| 21.483871
| 71
| 0.578078
|
"""lab app URL configuration"""
from django.urls import path
from django.utils.translation import gettext_lazy as _
from .views import (SampleCreateView, SampleDeleteView, SampleListView,
SampleUpdateView)
urlpatterns = [
path(
_(''),
SampleListView.as_view(),
name='sample_list'
),
path(
_('create'),
SampleCreateView.as_view(),
name='sample_create'
),
path(
_('update/<int:pk>'),
SampleUpdateView.as_view(),
name='sample_update'
),
path(
_('delete/<int:pk>'),
SampleDeleteView.as_view(),
name='sample_delete'
)
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0
|
a26b8dd049891ebc316fd75851d679a7536ef505
| 2,614
|
py
|
Python
|
examples/reverse_regex/run.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 1
|
2021-02-04T12:41:08.000Z
|
2021-02-04T12:41:08.000Z
|
examples/reverse_regex/run.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 3
|
2017-07-08T16:28:52.000Z
|
2020-04-23T18:06:24.000Z
|
examples/reverse_regex/run.py
|
PatrikValkovic/grammpy
|
8308a1fd349bf9ea0d267360cc9a4ab20d1629e8
|
[
"MIT"
] | 1
|
2021-02-04T12:41:10.000Z
|
2021-02-04T12:41:10.000Z
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.03.2019 12:21
:Licence MIT
Part of grammpy
"""
from string import ascii_lowercase
from grammpy.transforms import ContextFree, InverseContextFree, InverseCommon
from grammpy.parsers import cyk
g = Grammar(terminals=list(ascii_lowercase + '()*+'),
nonterminals=[Symb, Concat, Or, Iterate],
rules=[SymbRule, Bracket, ConcatRewrite, ConcatRule, OrRewrite, OrRule, IterateRewrite, IterateRule],
start_symbol=Or)
if __name__ == '__main__':
gr = ContextFree.prepare_for_cyk(g)
while True:
read = input("Type regex or exit to quit: ").strip()
if read == "exit":
break
if len(read) == 0:
continue
root = cyk(gr, read)
root = InverseContextFree.reverse_cyk_transforms(root)
root = InverseCommon.splitted_rules(root)
for form in root.get:
print(form)
print("Quiting the application")
| 24.429907
| 113
| 0.620888
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.03.2019 12:21
:Licence MIT
Part of grammpy
"""
from string import ascii_lowercase
from grammpy import *
from grammpy.transforms import ContextFree, InverseContextFree, InverseCommon
from grammpy.parsers import cyk
class BaseNonterminal(Nonterminal):
@property
def get(self):
return self.to_rule.get()
class Symb(BaseNonterminal): pass
class Concat(BaseNonterminal): pass
class Iterate(BaseNonterminal): pass
class Or(BaseNonterminal): pass
class SymbRule(Rule):
rules = [([Symb], [ch]) for ch in ascii_lowercase]
def get(self):
yield self.to_symbols[0].s
class Bracket(Rule):
rule = ([Symb], ['(', Or, ')'])
def get(self):
return self.to_symbols[1].get
class IterateRewrite(Rule):
rule = ([Iterate], [Symb])
def get(self):
return self.to_symbols[0].get
class IterateRule(Rule):
MAX_ITERATIONS = 6
FILL_SYMBOL = ''
rule = ([Iterate], [Symb, '*'])
def get(self):
values = self.to_symbols[0].get
for v in values:
for i in range(IterateRule.MAX_ITERATIONS):
yield v * i
last_iter = str(v * int(IterateRule.MAX_ITERATIONS / 2))
yield last_iter + IterateRule.FILL_SYMBOL + last_iter
class ConcatRewrite(Rule):
rule = ([Concat], [Iterate])
def get(self):
return self.to_symbols[0].get
class ConcatRule(Rule):
rule = ([Concat], [Iterate, Concat])
def get(self):
for l in self.to_symbols[0].get:
for r in self.to_symbols[1].get:
yield l + r
class OrRewrite(Rule):
rule = ([Or], [Concat])
def get(self):
return self.to_symbols[0].get
class OrRule(Rule):
rule = ([Or], [Or, '+', Or])
def get(self):
yield from self.to_symbols[0].get
yield from self.to_symbols[2].get
g = Grammar(terminals=list(ascii_lowercase + '()*+'),
nonterminals=[Symb, Concat, Or, Iterate],
rules=[SymbRule, Bracket, ConcatRewrite, ConcatRule, OrRewrite, OrRule, IterateRewrite, IterateRule],
start_symbol=Or)
if __name__ == '__main__':
gr = ContextFree.prepare_for_cyk(g)
while True:
read = input("Type regex or exit to quit: ").strip()
if read == "exit":
break
if len(read) == 0:
continue
root = cyk(gr, read)
root = InverseContextFree.reverse_cyk_transforms(root)
root = InverseCommon.splitted_rules(root)
for form in root.get:
print(form)
print("Quiting the application")
| 0
| 41
| 0
| 1,237
| 0
| 0
| 0
| 0
| 344
|
49f3c7adc5bd9f88344a480098ff3f9f794adb46
| 5,625
|
py
|
Python
|
rivalcfg/devices/aerox3_wireless_wired.py
|
Clueninja/rivalcfg
|
f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346
|
[
"WTFPL"
] | 604
|
2016-03-31T12:22:26.000Z
|
2022-03-31T18:51:50.000Z
|
rivalcfg/devices/aerox3_wireless_wired.py
|
Clueninja/rivalcfg
|
f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346
|
[
"WTFPL"
] | 162
|
2016-04-17T10:58:08.000Z
|
2022-03-11T18:59:18.000Z
|
rivalcfg/devices/aerox3_wireless_wired.py
|
Clueninja/rivalcfg
|
f7e2a3480c5f0b9a0b992ba5af7ff2025b2af346
|
[
"WTFPL"
] | 89
|
2016-04-10T08:56:58.000Z
|
2022-03-18T21:04:10.000Z
|
from .. import usbhid
_BATTERY_CHARGING_FLAG = 0b10000000
profile = {
"name": "SteelSeries Aerox 3 Wireless",
"models": [
{
"name": "SteelSeries Aerox 3 Wireless (wired mode)",
"vendor_id": 0x1038,
"product_id": 0x183A,
"endpoint": 3,
},
],
"settings": {
"sensitivity": {
"label": "Sensibility presets",
"description": "Set sensitivity preset (DPI)",
"cli": ["-s", "--sensitivity"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x2D],
"value_type": "multidpi_range",
"input_range": [100, 18000, 100],
"output_range": [0x00, 0xD6, 1.2],
"dpi_length_byte": 1,
"first_preset": 0,
"count_mode": "number",
"max_preset_count": 5,
"default": "400, 800, 1200, 2400, 3200",
},
"polling_rate": {
"label": "Polling rate",
"description": "Set polling rate (Hz)",
"cli": ["-p", "--polling-rate"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x2B],
"value_type": "choice",
"choices": {
125: 0x03,
250: 0x02,
500: 0x01,
1000: 0x00,
},
"default": 1000,
},
"z1_color": {
"label": "Strip top LED color",
"description": "Set the color of the top LED",
"cli": ["--top-color", "--z1"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x21, 0x01, 0x00],
"value_type": "rgbcolor",
"default": "red",
},
"z2_color": {
"label": "Strip middle LED color",
"description": "Set the color of the middle LED",
"cli": ["--middle-color", "--z2"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x21, 0x01, 0x01],
"value_type": "rgbcolor",
"default": "lime",
},
"z3_color": {
"label": "Strip bottom LED color",
"description": "Set the color of the bottom LED",
"cli": ["--bottom-color", "--z3"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x21, 0x01, 0x02],
"value_type": "rgbcolor",
"default": "blue",
},
"reactive_color": {
"label": "Reactive color",
"description": "Set the color of the LEDs in reaction to a button click",
"cli": ["-a", "--reactive-color"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x26],
"value_type": "reactive_rgbcolor",
"default": "off",
},
"led_brightness": {
"label": "LED Brightness",
"description": "Set the brightness of the LEDs",
"cli": ["-l", "--led-brightness"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x23],
"command_suffix": [0x01, 0x01, 0x00, 0x30, 0x75, 0x00],
"value_type": "range",
"input_range": [0, 15, 1],
"output_range": [0x00, 0x0F, 1],
"default": 15,
},
"buttons_mapping": {
"label": "Buttons mapping",
"description": "Set the mapping of the buttons",
"cli": ["-b", "--buttons"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x2A],
"value_type": "buttons",
# fmt: off
"buttons": {
"Button1": {"id": 0x01, "offset": 0x00, "default": "button1"},
"Button2": {"id": 0x02, "offset": 0x05, "default": "button2"},
"Button3": {"id": 0x03, "offset": 0x0A, "default": "button3"},
"Button4": {"id": 0x04, "offset": 0x0F, "default": "button4"},
"Button5": {"id": 0x05, "offset": 0x14, "default": "button5"},
"Button6": {"id": 0x06, "offset": 0x19, "default": "dpi"},
"ScrollUp" : {"id": 0x31, "offset": 0x1E, "default": "scrollup"},
"ScrollDown": {"id": 0x32, "offset": 0x23, "default": "scrolldown"},
},
"button_field_length": 5,
"button_disable": 0x00,
"button_keyboard": 0x51,
"button_multimedia": 0x61,
"button_dpi_switch": 0x30,
"button_scroll_up": None,
"button_scroll_down": None,
# fmt: on
"default": "buttons(button1=button1; button2=button2; button3=button3; button4=button4; button5=button5; button6=dpi; scrollup=scrollup; scrolldown=scrolldown; layout=qwerty)",
},
"rainbow_effect": {
"label": "rainbow effect",
"description": "Enable the rainbow effect (can be disabled by setting a color)",
"cli": ["-e", "--rainbow-effect"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x22, 0xFF],
"value_type": "none",
},
},
"battery_level": {
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x92],
"response_length": 2,
"is_charging": lambda data: bool(data[1] & _BATTERY_CHARGING_FLAG),
"level": lambda data: ((data[1] & ~_BATTERY_CHARGING_FLAG) - 1) * 5,
},
"save_command": {
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x11, 0x00],
},
}
| 38.793103
| 188
| 0.487467
|
from .. import usbhid
_BATTERY_CHARGING_FLAG = 0b10000000
profile = {
"name": "SteelSeries Aerox 3 Wireless",
"models": [
{
"name": "SteelSeries Aerox 3 Wireless (wired mode)",
"vendor_id": 0x1038,
"product_id": 0x183A,
"endpoint": 3,
},
],
"settings": {
"sensitivity": {
"label": "Sensibility presets",
"description": "Set sensitivity preset (DPI)",
"cli": ["-s", "--sensitivity"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x2D],
"value_type": "multidpi_range",
"input_range": [100, 18000, 100],
"output_range": [0x00, 0xD6, 1.2],
"dpi_length_byte": 1,
"first_preset": 0,
"count_mode": "number",
"max_preset_count": 5,
"default": "400, 800, 1200, 2400, 3200",
},
"polling_rate": {
"label": "Polling rate",
"description": "Set polling rate (Hz)",
"cli": ["-p", "--polling-rate"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x2B],
"value_type": "choice",
"choices": {
125: 0x03,
250: 0x02,
500: 0x01,
1000: 0x00,
},
"default": 1000,
},
"z1_color": {
"label": "Strip top LED color",
"description": "Set the color of the top LED",
"cli": ["--top-color", "--z1"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x21, 0x01, 0x00],
"value_type": "rgbcolor",
"default": "red",
},
"z2_color": {
"label": "Strip middle LED color",
"description": "Set the color of the middle LED",
"cli": ["--middle-color", "--z2"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x21, 0x01, 0x01],
"value_type": "rgbcolor",
"default": "lime",
},
"z3_color": {
"label": "Strip bottom LED color",
"description": "Set the color of the bottom LED",
"cli": ["--bottom-color", "--z3"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x21, 0x01, 0x02],
"value_type": "rgbcolor",
"default": "blue",
},
"reactive_color": {
"label": "Reactive color",
"description": "Set the color of the LEDs in reaction to a button click",
"cli": ["-a", "--reactive-color"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x26],
"value_type": "reactive_rgbcolor",
"default": "off",
},
"led_brightness": {
"label": "LED Brightness",
"description": "Set the brightness of the LEDs",
"cli": ["-l", "--led-brightness"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x23],
"command_suffix": [0x01, 0x01, 0x00, 0x30, 0x75, 0x00],
"value_type": "range",
"input_range": [0, 15, 1],
"output_range": [0x00, 0x0F, 1],
"default": 15,
},
"buttons_mapping": {
"label": "Buttons mapping",
"description": "Set the mapping of the buttons",
"cli": ["-b", "--buttons"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x2A],
"value_type": "buttons",
# fmt: off
"buttons": {
"Button1": {"id": 0x01, "offset": 0x00, "default": "button1"},
"Button2": {"id": 0x02, "offset": 0x05, "default": "button2"},
"Button3": {"id": 0x03, "offset": 0x0A, "default": "button3"},
"Button4": {"id": 0x04, "offset": 0x0F, "default": "button4"},
"Button5": {"id": 0x05, "offset": 0x14, "default": "button5"},
"Button6": {"id": 0x06, "offset": 0x19, "default": "dpi"},
"ScrollUp" : {"id": 0x31, "offset": 0x1E, "default": "scrollup"},
"ScrollDown": {"id": 0x32, "offset": 0x23, "default": "scrolldown"},
},
"button_field_length": 5,
"button_disable": 0x00,
"button_keyboard": 0x51,
"button_multimedia": 0x61,
"button_dpi_switch": 0x30,
"button_scroll_up": None,
"button_scroll_down": None,
# fmt: on
"default": "buttons(button1=button1; button2=button2; button3=button3; button4=button4; button5=button5; button6=dpi; scrollup=scrollup; scrolldown=scrolldown; layout=qwerty)",
},
"rainbow_effect": {
"label": "rainbow effect",
"description": "Enable the rainbow effect (can be disabled by setting a color)",
"cli": ["-e", "--rainbow-effect"],
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x22, 0xFF],
"value_type": "none",
},
},
"battery_level": {
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x92],
"response_length": 2,
"is_charging": lambda data: bool(data[1] & _BATTERY_CHARGING_FLAG),
"level": lambda data: ((data[1] & ~_BATTERY_CHARGING_FLAG) - 1) * 5,
},
"save_command": {
"report_type": usbhid.HID_REPORT_TYPE_OUTPUT,
"command": [0x11, 0x00],
},
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ba1e29dc08a0b2d8d84f5c9d56cbf91a9679293a
| 2,921
|
py
|
Python
|
wtl/wtlib/tests/models.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | null | null | null |
wtl/wtlib/tests/models.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | 1
|
2017-09-19T17:06:49.000Z
|
2017-09-19T17:06:49.000Z
|
wtl/wtlib/tests/models.py
|
elegion/djangodash2013
|
3814123f9bff213a5d74db05db3caa83caea731c
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
| 38.434211
| 79
| 0.690175
|
from __future__ import unicode_literals
from django.test import TestCase
from wtl.wtlib.models import Library, LibraryVersion
from wtl.wtlib.tests.factories import (LibraryFactory, LibraryVersionFactory,
ProjectFactory)
class LibraryTestCase(TestCase):
def test_str(self):
x = LibraryFactory()
self.assertEqual(str(x), x.name)
class LibraryVersionTestCase(TestCase):
def test_str(self):
x = LibraryVersionFactory()
self.assertEqual(str(x), x.library.name + ' ' + x.version)
def test_update_totals(self):
l1 = LibraryFactory(name='l1')
l1v1 = LibraryVersionFactory(library=l1, version="1")
l1v2 = LibraryVersionFactory(library=l1, version="2")
l2 = LibraryFactory(name='l2')
l2v1 = LibraryVersionFactory(library=l2, version="1")
l2v2 = LibraryVersionFactory(library=l2, version="2")
p = ProjectFactory()
p.libraries.add(l1v1)
p.libraries.add(l1v2)
p.libraries.add(l2v1)
LibraryVersion.update_totals(project=p)
self.assertEqual(Library.objects.get(id=l1.id).total_users, 2)
self.assertEqual(Library.objects.get(id=l2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l1v1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l1v2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l2v1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l2v2.id).total_users, 0)
def test_often_used_with(self):
lib1 = LibraryFactory()
lib2 = LibraryFactory()
lib3 = LibraryFactory()
lib4 = LibraryFactory()
ver1 = LibraryVersionFactory(library=lib1)
project_1_2 = ProjectFactory()
project_1_2.libraries.add(ver1)
project_1_2.libraries.add(LibraryVersionFactory(library=lib2))
project_1_2__2 = ProjectFactory()
project_1_2__2.libraries.add(ver1)
project_1_2__2.libraries.add(LibraryVersionFactory(library=lib2))
project_1_3 = ProjectFactory()
project_1_3.libraries.add(LibraryVersionFactory(library=lib1))
project_1_3.libraries.add(LibraryVersionFactory(library=lib3))
project_2_3_4 = ProjectFactory()
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib2))
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib3))
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib4))
lib1_result = lib1.often_used_with()
self.assertEqual(lib2.name, lib1_result[0].name)
self.assertEqual(2, lib1_result[0].usage_count)
self.assertEqual(lib3.name, lib1_result[1].name)
self.assertEqual(1, lib1_result[1].usage_count)
class ProjectTestCase(TestCase):
def test_str(self):
x = ProjectFactory()
self.assertEqual(str(x), x.name)
| 0
| 0
| 0
| 2,588
| 0
| 0
| 0
| 153
| 137
|
2e4bea81e1ee83caa0412c214e95dd141dc84725
| 795
|
py
|
Python
|
setup.py
|
muchrooms/esp8266.py
|
f1a61ffd65b157b392b7e3fba2394db86167e0f6
|
[
"MIT"
] | 2
|
2017-06-17T13:57:39.000Z
|
2018-01-05T12:52:35.000Z
|
setup.py
|
muchrooms/esp8266.py
|
f1a61ffd65b157b392b7e3fba2394db86167e0f6
|
[
"MIT"
] | null | null | null |
setup.py
|
muchrooms/esp8266.py
|
f1a61ffd65b157b392b7e3fba2394db86167e0f6
|
[
"MIT"
] | 1
|
2022-01-16T13:32:11.000Z
|
2022-01-16T13:32:11.000Z
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="esp8266.py",
version="0.0.3",
author="letli",
author_email="[email protected]",
description="ESP8266 python library, a wrapper for AT commands (Hayes command set) using UART serial.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/muchrooms/esp8266.py",
#packages=setuptools.find_packages(),
packages=['esp8266'],
install_requires=[
'pySerial>=3.0'
],
classifiers=(
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| 29.444444
| 107
| 0.647799
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="esp8266.py",
version="0.0.3",
author="letli",
author_email="[email protected]",
description="ESP8266 python library, a wrapper for AT commands (Hayes command set) using UART serial.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/muchrooms/esp8266.py",
#packages=setuptools.find_packages(),
packages=['esp8266'],
install_requires=[
'pySerial>=3.0'
],
classifiers=(
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a4230e78f330762f47b06e0b4892c0e8814460aa
| 443
|
py
|
Python
|
PYTHON-SALA DE AULA/Exercicios condicionais/exercicio-09-2.py
|
JaumVitor/HOMEWORK-PYTHON
|
aff564ac61802c7417d7280a73c1ed4a98978ed3
|
[
"Apache-2.0"
] | null | null | null |
PYTHON-SALA DE AULA/Exercicios condicionais/exercicio-09-2.py
|
JaumVitor/HOMEWORK-PYTHON
|
aff564ac61802c7417d7280a73c1ed4a98978ed3
|
[
"Apache-2.0"
] | null | null | null |
PYTHON-SALA DE AULA/Exercicios condicionais/exercicio-09-2.py
|
JaumVitor/HOMEWORK-PYTHON
|
aff564ac61802c7417d7280a73c1ed4a98978ed3
|
[
"Apache-2.0"
] | null | null | null |
sal = float ( input ('Saldo mdio ? ' ))
if (sal <= 200) :
nsal1= (sal* 0.90)
print ('Valor do crdito 10% > R${}'.format(nsal1))
if (300 >= sal > 200) :
nsal2 = (sal* 0.80)
print ('Valor do crdito 20% > R${}'.format(nsal2))
if ( 400 >= sal > 300) :
nsal3= (sal* 0.85)
print ('Valor do crdito 25% > R${}'.format(nsal3))
if (sal > 400) :
nsal4 = (sal* 0.70)
print ('Valor do crdito 30% > R${}'.format(nsal4))
| 31.642857
| 55
| 0.534989
|
sal = float ( input ('Saldo médio ? ' ))
if (sal <= 200) :
nsal1= (sal* 0.90)
print ('Valor do crédito 10% > R${}'.format(nsal1))
if (300 >= sal > 200) :
nsal2 = (sal* 0.80)
print ('Valor do crédito 20% > R${}'.format(nsal2))
if ( 400 >= sal > 300) :
nsal3= (sal* 0.85)
print ('Valor do crédito 25% > R${}'.format(nsal3))
if (sal > 400) :
nsal4 = (sal* 0.70)
print ('Valor do crédito 30% > R${}'.format(nsal4))
| 10
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
283c050fe512d7f3367c0888bdda57bb2a71a4be
| 3,438
|
py
|
Python
|
extractors/TFIDF.py
|
Asichurter/APISeqFewShot
|
b4b7843da1f53cdc1d1711537c31305e7d5c6555
|
[
"MIT"
] | 8
|
2020-05-14T19:29:41.000Z
|
2022-03-09T03:29:51.000Z
|
extractors/TFIDF.py
|
Asichurter/APISeqFewShot
|
b4b7843da1f53cdc1d1711537c31305e7d5c6555
|
[
"MIT"
] | null | null | null |
extractors/TFIDF.py
|
Asichurter/APISeqFewShot
|
b4b7843da1f53cdc1d1711537c31305e7d5c6555
|
[
"MIT"
] | null | null | null |
##############################################
# Compute the TF-IDF value of each feature of
# an (ngram, api) sequence dataset
##############################################
if __name__ == '__main__':
calTFIDF(dataset_path='/home/asichurter/datasets/JSONs/virushare-10-3gram/all/',
dict_map_path='/home/asichurter/datasets/JSONs/virushare-10-3gram/data/wordMap.json',
is_class_dir=True,
level='item')
| 31.254545
| 106
| 0.575044
|
import os
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from tqdm import tqdm
from utils.file import loadJson, dumpJson
##############################################
# For a sequence dataset of (ngram, api) tokens, first count the per-sample
# element frequencies, then compute the TF-IDF value of each feature
##############################################
def calTFIDF(dataset_path,
             dict_map_path,  # path of the map converting sequence elements to integers, usually wordMap.json
is_class_dir=True,
             level='item',  # compute TF-IDF at the sample ('item') level or at the class level
tfidf_dump_path=None,
             top_k=2000):  # keep the k APIs with the highest tf-idf values
value_map = loadJson(dict_map_path)
value_min = min(value_map.values())
value_max = max(value_map.values())
value_size = value_max - value_min + 1
item_frq_mat = None
class_frq_mat = None
N = None
for i,folder in enumerate(tqdm(os.listdir(dataset_path))):
# if i==1000:
# return None
class_cache = None
if is_class_dir:
items = os.listdir(dataset_path + folder + '/')
else:
items = [folder+'.json']
for item in items:
data = loadJson(dataset_path + folder + '/' + item)
seqs = data['apis']
if len(seqs) < 10:
continue
            # map tokens to real values
            seqs = [value_map[s] for s in seqs]
            hist, bins_sep = np.histogram(seqs,
                                          range=(value_min-0.5, value_max+0.5),  # this split puts every int value in its own bin
                                          bins=value_size,
                                          density=True)  # 'normed' was removed from NumPy; 'density' is the replacement
# if frq_mat is None:
# frq_mat = np.expand_dims(hist, axis=0)
# else:
# frq_mat = np.concatenate((frq_mat,np.expand_dims(hist, axis=0)), axis=0)
            if item_frq_mat is None:
                item_frq_mat = np.expand_dims(hist, axis=0)
            else:
                item_frq_mat = np.concatenate((item_frq_mat, np.expand_dims(hist, axis=0)), axis=0)
            if class_cache is None:
                class_cache = np.expand_dims(hist, axis=0)
            else:
                class_cache = np.concatenate((class_cache, np.expand_dims(hist, axis=0)), axis=0)  # fixed: the original grew item_frq_mat here by mistake
        class_val = class_cache.sum(axis=0)
        if class_frq_mat is None:
            class_frq_mat = np.expand_dims(class_val, axis=0)
        else:
            class_frq_mat = np.concatenate((class_frq_mat, np.expand_dims(class_val, axis=0)), axis=0)  # fixed: axis=1 would stack classes along the feature axis
    # For class-level tf-idf, sum the per-sample element frequencies inside each
    # class into one frequency vector, then compute tf and idf at the class level
if level == 'class':
frq_mat = class_frq_mat
else:
frq_mat = item_frq_mat
transformer = TfidfTransformer()
transformer.fit(frq_mat)
tf = np.mean(frq_mat, axis=0)
tfidf = tf*transformer.idf_
    # indices of the k APIs with the largest tf-idf values
top_k_idxes = tfidf.argsort()[::-1][:top_k]
api_list = list(value_map.keys())
top_k_apis = [api_list[i] for i in top_k_idxes]
if tfidf_dump_path is not None:
api_tfidf = {api:val for api,val in zip(api_list,tfidf)}
dumpJson(api_tfidf, tfidf_dump_path)
print("- Done -")
return top_k_apis
# return tfidf, transformer
if __name__ == '__main__':
calTFIDF(dataset_path='/home/asichurter/datasets/JSONs/virushare-10-3gram/all/',
dict_map_path='/home/asichurter/datasets/JSONs/virushare-10-3gram/data/wordMap.json',
is_class_dir=True,
level='item')
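# --- Editor's sketch (illustrative only, not part of the original file) ---
# Minimal end-to-end version of the ranking step in calTFIDF: fit a
# TfidfTransformer on a toy frequency matrix, multiply the mean term
# frequency by idf_, and take the top-k feature indices.
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer

toy_frq = np.array([[3, 0, 1],
                    [2, 0, 0],
                    [3, 1, 2]])            # rows = samples, cols = features
transformer = TfidfTransformer()
transformer.fit(toy_frq)
scores = toy_frq.mean(axis=0) * transformer.idf_   # tf * idf, as in calTFIDF
print(scores.argsort()[::-1][:2])          # indices of the 2 highest-scoring features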
| 453
| 0
| 0
| 0
| 0
| 2,720
| 0
| 44
| 133
|
55fdabe59356478a8bbb3c2ada5b0253a4277315
| 4,722
|
py
|
Python
|
pijuice/main.py
|
hferentschik/balena-strand
|
09a905110d95e8bab2a65cb94e4a3babc6d2a9d3
|
[
"MIT"
] | null | null | null |
pijuice/main.py
|
hferentschik/balena-strand
|
09a905110d95e8bab2a65cb94e4a3babc6d2a9d3
|
[
"MIT"
] | null | null | null |
pijuice/main.py
|
hferentschik/balena-strand
|
09a905110d95e8bab2a65cb94e4a3babc6d2a9d3
|
[
"MIT"
] | null | null | null |
import datetime
import json
import os
import time
import paho.mqtt.client as mqtt
from pijuice import PiJuice
from balena import Balena
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from time import sleep
from w1thermsensor import W1ThermSensor
WAKEALARM = '/sys/class/rtc/rtc0/wakealarm'
BROKER_ADDRESS = os.environ.get('MQTT_BROKER') or "mqtt"
BROKER_PORT = int(os.environ.get('MQTT_BROKER_PORT') or 80)  # cast: env vars are strings
SLEEP_INTERVAL = os.environ.get('SLEEP_INTERVAL') or 60
STAY_ALIVE = os.environ.get('STAY_ALIVE') or False
def get_battery_parameters(pj):
"""Get all PiJuice parameters and return as a dictionary"""
juice = {}
charge = pj.status.GetChargeLevel()
juice['charge'] = charge['data'] if charge['error'] == 'NO_ERROR' else charge['error']
# Temperature [C]
temperature = pj.status.GetBatteryTemperature()
juice['temperature'] = temperature['data'] if temperature['error'] == 'NO_ERROR' else temperature['error']
# Battery voltage [V]
vbat = pj.status.GetBatteryVoltage()
juice['vbat'] = vbat['data'] / 1000 if vbat['error'] == 'NO_ERROR' else vbat['error']
# Battery current [A]
ibat = pj.status.GetBatteryCurrent()
juice['ibat'] = ibat['data'] / 1000 if ibat['error'] == 'NO_ERROR' else ibat['error']
# I/O voltage [V]
vio = pj.status.GetIoVoltage()
juice['vio'] = vio['data'] / 1000 if vio['error'] == 'NO_ERROR' else vio['error']
# I/O current [A]
iio = pj.status.GetIoCurrent()
juice['iio'] = iio['data'] / 1000 if iio['error'] == 'NO_ERROR' else iio['error']
# Get power input (if power connected to the PiJuice board)
status = pj.status.GetStatus()
juice['power_input'] = status['data']['powerInput'] if status['error'] == 'NO_ERROR' else status['error']
# Get power input (if power connected to the Raspberry Pi board)
status = pj.status.GetStatus()
juice['power_input_board'] = status['data']['powerInput5vIo'] if status['error'] == 'NO_ERROR' else status['error']
return juice
def update_tag(tag, variable):
"""Set a tag for the Balena device."""
balena.models.tag.device.set(os.environ['BALENA_DEVICE_UUID'], str(tag), str(variable))
def set_alarm(interval):
"""Set upcoming wakealarm."""
wakeup_time = datetime.now() + timedelta(minutes=int(interval))
timestamp = '{0:.0f}\n'.format(wakeup_time.timestamp())
try:
with open(WAKEALARM, 'w') as f:
f.write('0\n')
with open(WAKEALARM, 'w') as f:
f.write(timestamp)
print('Wakealarm set to: %s' % wakeup_time)
update_tag("WAKEUP_TIME", wakeup_time.strftime("%Y-%m-%d %H:%M:%S"))
except OSError as e:
print('Error setting wake alarm: %s' % e)
def record_temperature():
"""Record current temperature and send to MQTT broker."""
sensor = W1ThermSensor()
temperature = sensor.get_temperature()
update_tag("TEMPERATURE", temperature)
client = mqtt.Client(transport="websockets")
    client.connect(BROKER_ADDRESS, BROKER_PORT)  # use the configured port (was hardcoded to 80)
json_body = [
{
"time": str('{:%Y-%m-%dT%H:%M:%S}'.format(datetime.now(tzutc()))),
"measurement": "water-temperature",
"fields": {
"temperature": temperature,
"sensor": "DS18B20"
}
}
]
print("JSON body = " + str(json_body))
msg_info = client.publish("sensors", json.dumps(json_body))
if not msg_info.is_published():
msg_info.wait_for_publish()
client.disconnect()
def stay_alive(pj):
"""Enter endless loop recording temperature."""
while True:
record_temperature()
battery_data = get_battery_parameters(pj)
print(battery_data)
for key, value in battery_data.items():
update_tag(key, value)
sleep(60)
def shutdown(pj):
"""Shutdown Pi."""
shutdown_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
update_tag("SHUTDOWN_TIME", shutdown_time)
set_alarm(SLEEP_INTERVAL)
pj.power.SetPowerOff(60)
balena.models.supervisor.shutdown(device_uuid=os.environ['BALENA_DEVICE_UUID'],
app_id=os.environ['BALENA_APP_ID'])
# Start the SDK and record start tag
balena = Balena()
balena.auth.login_with_token(os.environ['BALENA_API_KEY'])
update_tag("START_TIME", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# Wait for device I2C device to start
while not os.path.exists('/dev/i2c-1'):
print("Waiting to identify PiJuice")
time.sleep(0.1)
# Initiate PiJuice and make sure watchdog is disabled
pi_juice = PiJuice(1, 0x14)
pi_juice.power.SetWatchdog(0)
if STAY_ALIVE == '1':
stay_alive(pi_juice)
record_temperature()
shutdown(pi_juice)
| 31.691275
| 119
| 0.653748
|
import datetime
import json
import os
import time
import paho.mqtt.client as mqtt
from pijuice import PiJuice
from balena import Balena
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from time import sleep
from w1thermsensor import W1ThermSensor
WAKEALARM = '/sys/class/rtc/rtc0/wakealarm'
BROKER_ADDRESS = os.environ.get('MQTT_BROKER') or "mqtt"
BROKER_PORT = int(os.environ.get('MQTT_BROKER_PORT') or 80)  # cast: env vars are strings
SLEEP_INTERVAL = os.environ.get('SLEEP_INTERVAL') or 60
STAY_ALIVE = os.environ.get('STAY_ALIVE') or False
def get_battery_parameters(pj):
"""Get all PiJuice parameters and return as a dictionary"""
juice = {}
charge = pj.status.GetChargeLevel()
juice['charge'] = charge['data'] if charge['error'] == 'NO_ERROR' else charge['error']
# Temperature [C]
temperature = pj.status.GetBatteryTemperature()
juice['temperature'] = temperature['data'] if temperature['error'] == 'NO_ERROR' else temperature['error']
# Battery voltage [V]
vbat = pj.status.GetBatteryVoltage()
juice['vbat'] = vbat['data'] / 1000 if vbat['error'] == 'NO_ERROR' else vbat['error']
# Battery current [A]
ibat = pj.status.GetBatteryCurrent()
juice['ibat'] = ibat['data'] / 1000 if ibat['error'] == 'NO_ERROR' else ibat['error']
# I/O voltage [V]
vio = pj.status.GetIoVoltage()
juice['vio'] = vio['data'] / 1000 if vio['error'] == 'NO_ERROR' else vio['error']
# I/O current [A]
iio = pj.status.GetIoCurrent()
juice['iio'] = iio['data'] / 1000 if iio['error'] == 'NO_ERROR' else iio['error']
# Get power input (if power connected to the PiJuice board)
status = pj.status.GetStatus()
juice['power_input'] = status['data']['powerInput'] if status['error'] == 'NO_ERROR' else status['error']
# Get power input (if power connected to the Raspberry Pi board)
status = pj.status.GetStatus()
juice['power_input_board'] = status['data']['powerInput5vIo'] if status['error'] == 'NO_ERROR' else status['error']
return juice
def update_tag(tag, variable):
"""Set a tag for the Balena device."""
balena.models.tag.device.set(os.environ['BALENA_DEVICE_UUID'], str(tag), str(variable))
def set_alarm(interval):
"""Set upcoming wakealarm."""
wakeup_time = datetime.now() + timedelta(minutes=int(interval))
timestamp = '{0:.0f}\n'.format(wakeup_time.timestamp())
try:
with open(WAKEALARM, 'w') as f:
f.write('0\n')
with open(WAKEALARM, 'w') as f:
f.write(timestamp)
print('Wakealarm set to: %s' % wakeup_time)
update_tag("WAKEUP_TIME", wakeup_time.strftime("%Y-%m-%d %H:%M:%S"))
except OSError as e:
print('Error setting wake alarm: %s' % e)
def record_temperature():
"""Record current temperature and send to MQTT broker."""
sensor = W1ThermSensor()
temperature = sensor.get_temperature()
update_tag("TEMPERATURE", temperature)
client = mqtt.Client(transport="websockets")
    client.connect(BROKER_ADDRESS, BROKER_PORT)  # use the configured port (was hardcoded to 80)
json_body = [
{
"time": str('{:%Y-%m-%dT%H:%M:%S}'.format(datetime.now(tzutc()))),
"measurement": "water-temperature",
"fields": {
"temperature": temperature,
"sensor": "DS18B20"
}
}
]
print("JSON body = " + str(json_body))
msg_info = client.publish("sensors", json.dumps(json_body))
if not msg_info.is_published():
msg_info.wait_for_publish()
client.disconnect()
def stay_alive(pj):
"""Enter endless loop recording temperature."""
while True:
record_temperature()
battery_data = get_battery_parameters(pj)
print(battery_data)
for key, value in battery_data.items():
update_tag(key, value)
sleep(60)
def shutdown(pj):
"""Shutdown Pi."""
shutdown_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
update_tag("SHUTDOWN_TIME", shutdown_time)
set_alarm(SLEEP_INTERVAL)
pj.power.SetPowerOff(60)
balena.models.supervisor.shutdown(device_uuid=os.environ['BALENA_DEVICE_UUID'],
app_id=os.environ['BALENA_APP_ID'])
# Start the SDK and record start tag
balena = Balena()
balena.auth.login_with_token(os.environ['BALENA_API_KEY'])
update_tag("START_TIME", datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
# Wait for device I2C device to start
while not os.path.exists('/dev/i2c-1'):
print("Waiting to identify PiJuice")
time.sleep(0.1)
# Initiate PiJuice and make sure watchdog is disabled
pi_juice = PiJuice(1, 0x14)
pi_juice.power.SetWatchdog(0)
if STAY_ALIVE == '1':
stay_alive(pi_juice)
record_temperature()
shutdown(pi_juice)
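# --- Editor's sketch (hypothetical helper, not part of the original file) ---
# get_battery_parameters repeats the "data if NO_ERROR else error" unwrapping
# for every PiJuice call; a small helper could collapse those branches.
# The name 'unwrap' and the 'scale' parameter are assumptions for illustration.
def unwrap(result, scale=1):
    """Return the payload of a PiJuice status call, or its error string."""
    if result['error'] == 'NO_ERROR':
        return result['data'] / scale if scale != 1 else result['data']
    return result['error']

# usage (inside get_battery_parameters):
# juice['vbat'] = unwrap(pj.status.GetBatteryVoltage(), scale=1000)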
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a89480e4be4df81910a15ef7357fd7cb7699984d
| 5,629
|
py
|
Python
|
v2ex_spider/rss_spider.py
|
yingziwu/v2ex_delete
|
8f1befcd43bd83c9531bf7180e23bbaf8b12b4ed
|
[
"Apache-2.0"
] | 82
|
2017-05-20T02:31:51.000Z
|
2021-12-29T08:08:10.000Z
|
v2ex_spider/rss_spider.py
|
yingziwu/v2ex_delete
|
8f1befcd43bd83c9531bf7180e23bbaf8b12b4ed
|
[
"Apache-2.0"
] | 8
|
2017-06-13T16:07:33.000Z
|
2018-04-01T07:35:07.000Z
|
v2ex_spider/rss_spider.py
|
yingziwu/v2ex_delete
|
8f1befcd43bd83c9531bf7180e23bbaf8b12b4ed
|
[
"Apache-2.0"
] | 4
|
2017-06-14T13:25:16.000Z
|
2020-12-15T09:52:44.000Z
|
'''
Created on May 9, 2017
@author: yingziwu
'''
if __name__ == '__main__':
Rss_spider()
print('Finish!')
| 34.746914
| 122
| 0.570794
|
'''
Created on May 9, 2017
@author: yingziwu
'''
import feedparser
import time
import re
import requests
from redis import Redis
from rq import Queue
import json
import os
import logging
from v2ex_spider import topic_spider
from v2ex_base.v2_sql import SQL
import settings
class Rss_spider(object):
    '''
    A spider for V2EX's RSS feeds.
    Fetches the latest and hot topics from the index, and uses the RSS
    feeds to build the list of topics that still need to be crawled.
    '''
def __init__(self):
'''
>>>from v2ex_spider import rss_spider
>>>rss_spider.Rss_spider()
'''
logging.info('start Rss spider')
self.v2ex_rss_url_list=['https://www.v2ex.com/index.xml',
'https://www.v2ex.com/feed/tab/qna.xml',
'https://www.v2ex.com/feed/tab/jobs.xml',
'https://www.v2ex.com/feed/tab/deals.xml',
'https://www.v2ex.com/feed/tab/city.xml',
'https://www.v2ex.com/feed/tab/play.xml',
'https://www.v2ex.com/feed/tab/apple.xml',
'https://www.v2ex.com/feed/tab/creative.xml',
'https://www.v2ex.com/feed/tab/tech.xml']
self.latest_hot_api=['https://www.v2ex.com/api/topics/latest.json','https://www.v2ex.com/api/topics/hot.json']
self.topic_sleep_time=10
logging.debug('open sql database')
self.SQ=SQL()
self.SQ.open_datebase()
self.redis_conn=Redis()
self.load_config()
#run
try:
self.latest_and_hot()
except APIError as e:
pass
self.gen_topic_queue()
#end
self.SQ.close_datebase()
logging.info('end the Rss spider')
def topics_id_rss(self):
logging.debug('fetch rss feeds')
topic_ids=list()
for v2ex_rss_url in self.v2ex_rss_url_list:
feed=feedparser.parse(v2ex_rss_url)
logging.debug('fetch rss feed: %s' % v2ex_rss_url)
items=feed["items"]
for item in items:
                author = item["author"]
                title = item["title"]
                link = item["link"]
                published = item["date"]
                summary = item["summary"]
topic_id=int(re.findall(r't\/(\d+)#?', link)[0])
topic_ids.append(topic_id)
topic_ids=set(topic_ids)
return topic_ids
def topics_id_sqlite(self):
logging.debug('SELECT ID FROM TOPIC')
sql='SELECT ID FROM TOPIC;'
self.SQ.cursor.execute(sql)
topics_ids=[x[0] for x in self.SQ.cursor.fetchall()]
return topics_ids
def latest_and_hot(self):
logging.debug('start latest_and_hot')
for url in self.latest_hot_api:
try:
resp=self.s.get(url, timeout=10)
except requests.exceptions.RequestException as e:
logging.error('latest_and_hot error')
logging.error('proxy_status: %s' % self.proxy_enable)
if self.proxy_enable is True:
logging.error('proxy: %s' % self.s.proxies)
logging.error(e)
raise e
if resp.status_code != 200:
logging.error('latest_and_hot error')
logging.error('proxy_status: %s' % self.proxy_enable)
if self.proxy_enable is True:
logging.error('proxy: %s' % self.s.proxies)
logging.error(APIError('latest_and_hot'))
raise APIError('latest_and_hot')
topics=resp.json()
for topic in topics:
t_id=topic["id"]
title=topic["title"]
author=topic["member"]["username"]
author_id=topic["member"]["id"]
content=topic["content"]
content_rendered=topic["content_rendered"]
replies=topic["replies"]
node=topic["node"]["id"]
created=topic["created"]
n_time=int(time.time())
self.SQ.write_to_db_base(t_id,title,author,author_id,content,content_rendered,replies,node,created,n_time)
self.SQ.conn.commit()
return
def gen_topic_queue(self):
logging.debug('start topic enqueue')
topics_sql=self.topics_id_sqlite()
if len(topics_sql) <= 2000:
return
topics_rss=self.topics_id_rss()
# load topics
if os.path.exists('.topics_all.json'):
with open('.topics_all.json','r') as f:
tmp_topics=json.load(f)
else:
tmp_topics=list()
t_queue=Queue('topic',connection=self.redis_conn)
# gen queue
for topic in topics_rss:
if topic not in topics_sql and topic not in tmp_topics:
topic_id=int(topic)
t_queue.enqueue(topic_spider.start,topic_id, self.topic_sleep_time)
#save topics
topics_all=list()
topics_all.extend(tmp_topics)
topics_all.extend(topics_rss)
topics_all.extend(topics_sql)
topics_all=list(set(topics_all))
with open('.topics_all.json','w') as f:
json.dump(topics_all, f)
return
def load_config(self):
logging.debug('load config')
self.proxy_enable=settings.i_proxy_enable
self.s=requests.session()
self.s.headers=settings.API_headers
if self.proxy_enable:
self.s.proxies=settings.i_proxies()
class APIError(ValueError):
pass
if __name__ == '__main__':
Rss_spider()
print('Finish!')
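# --- Editor's sketch (illustrative only, not part of the original file) ---
# topics_id_rss pulls the numeric topic id out of each feed link with the
# regex r't\/(\d+)#?'; the extraction step in isolation:
import re

link = 'https://www.v2ex.com/t/123456#reply7'
topic_id = int(re.findall(r't\/(\d+)#?', link)[0])
print(topic_id)  # -> 123456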
| 0
| 0
| 0
| 5,242
| 0
| 0
| 0
| -40
| 311
|
06b5e7449239a7083bbc31f928e2608669ee09f3
| 84
|
py
|
Python
|
alisu/custom_core/bot_raw_custom_methods_core/__init__.py
|
sh-himanshu/alisurobot
|
4d315acaf28156feb79864b56b64636c4217b4d4
|
[
"MIT"
] | 9
|
2021-08-17T18:30:13.000Z
|
2021-10-02T09:22:34.000Z
|
alisu/custom_core/bot_raw_custom_methods_core/__init__.py
|
sh-himanshu/alisurobot
|
4d315acaf28156feb79864b56b64636c4217b4d4
|
[
"MIT"
] | 1
|
2021-12-20T19:48:44.000Z
|
2021-12-20T19:48:44.000Z
|
alisu/custom_core/bot_raw_custom_methods_core/__init__.py
|
aksr-aashish/alisurobot
|
0b0c05ea74ba6126ca9b328de16c808c72be365d
|
[
"MIT"
] | 8
|
2021-08-17T21:14:13.000Z
|
2022-01-29T23:34:14.000Z
|
from typing import List
from . import langs
__all__: List[str] = [
"langs",
]
| 10.5
| 23
| 0.642857
|
from typing import List
from . import langs
__all__: List[str] = [
"langs",
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6c8d7db68476f02b98f3c577ee4055d5696455d9
| 2,657
|
py
|
Python
|
lxmls/sequences/extended_feature.py
|
khui/lxmls-toolkit
|
f1a2fb9aa46257b509c79e83182d7d5495061398
|
[
"MIT"
] | null | null | null |
lxmls/sequences/extended_feature.py
|
khui/lxmls-toolkit
|
f1a2fb9aa46257b509c79e83182d7d5495061398
|
[
"MIT"
] | null | null | null |
lxmls/sequences/extended_feature.py
|
khui/lxmls-toolkit
|
f1a2fb9aa46257b509c79e83182d7d5495061398
|
[
"MIT"
] | 1
|
2020-01-09T17:02:50.000Z
|
2020-01-09T17:02:50.000Z
|
# ----------
# Feature Class
# Extracts features from a labeled corpus (only supported features are extracted)
# ----------
| 33.632911
| 80
| 0.517877
|
from lxmls.sequences.id_feature import IDFeatures
# ----------
# Feature Class
# Extracts features from a labeled corpus (only supported features are extracted)
# ----------
class ExtendedFeatures(IDFeatures):
def add_emission_features(self, sequence, pos, y, features):
x = sequence.x[pos]
# Get tag name from ID.
y_name = self.dataset.y_dict.get_label_name(y)
# Get word name from ID.
x_name = self.dataset.x_dict.get_label_name(x)
        word = str(x_name)  # Python 3: 'unicode' is now 'str'
# Generate feature name.
feat_name = "id:%s::%s" % (word, y_name)
# Get feature ID from name.
feat_id = self.add_feature(feat_name)
# Append feature.
if feat_id != -1:
features.append(feat_id)
        if word.istitle():
# Generate feature name.
feat_name = "uppercased::%s" % y_name
# Get feature ID from name.
feat_id = self.add_feature(feat_name)
# Append feature.
if feat_id != -1:
features.append(feat_id)
        if word.isdigit():
# Generate feature name.
feat_name = "number::%s" % y_name
# Get feature ID from name.
feat_id = self.add_feature(feat_name)
# Append feature.
if feat_id != -1:
features.append(feat_id)
        if "-" in word:
# Generate feature name.
feat_name = "hyphen::%s" % y_name
# Get feature ID from name.
feat_id = self.add_feature(feat_name)
# Append feature.
if feat_id != -1:
features.append(feat_id)
# Suffixes
max_suffix = 3
        for i in range(max_suffix):  # Python 3: xrange -> range
if len(word) > i+1:
suffix = word[-(i+1):]
# Generate feature name.
feat_name = "suffix:%s::%s" % (suffix, y_name)
# Get feature ID from name.
feat_id = self.add_feature(feat_name)
# Append feature.
if feat_id != -1:
features.append(feat_id)
# Prefixes
max_prefix = 3
        for i in range(max_prefix):  # Python 3: xrange -> range
if len(word) > i+1:
prefix = word[:i+1]
# Generate feature name.
feat_name = "prefix:%s::%s" % (prefix, y_name)
# Get feature ID from name.
feat_id = self.add_feature(feat_name)
# Append feature.
if feat_id != -1:
features.append(feat_id)
return features
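# --- Editor's sketch (illustrative only, not part of the original file) ---
# The suffix/prefix loops above emit features such as "suffix:ing::VERB" for
# up to 3 trailing/leading characters; a standalone trace of that logic:
word, y_name = "running", "VERB"
feats = []
for i in range(3):
    if len(word) > i + 1:
        feats.append("suffix:%s::%s" % (word[-(i + 1):], y_name))
        feats.append("prefix:%s::%s" % (word[:i + 1], y_name))
print(feats)
# -> ['suffix:g::VERB', 'prefix:r::VERB', 'suffix:ng::VERB', 'prefix:ru::VERB',
#     'suffix:ing::VERB', 'prefix:run::VERB']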
| 0
| 0
| 0
| 2,460
| 0
| 0
| 0
| 28
| 44
|
e6c11cff9e76a688302b2d8a124c1cb1d590607b
| 13,081
|
py
|
Python
|
ml4a/dataset/dataset_utils.py
|
KushGabani/ml4a-guides
|
d71b61a99c417b9ace3404420b37d22f6da06153
|
[
"MIT"
] | 1,110
|
2016-06-02T23:58:41.000Z
|
2020-11-29T07:24:20.000Z
|
ml4a/dataset/dataset_utils.py
|
KushGabani/ml4a-guides
|
d71b61a99c417b9ace3404420b37d22f6da06153
|
[
"MIT"
] | 49
|
2016-08-14T22:58:41.000Z
|
2020-07-17T17:59:56.000Z
|
ml4a/dataset/dataset_utils.py
|
KushGabani/ml4a-guides
|
d71b61a99c417b9ace3404420b37d22f6da06153
|
[
"MIT"
] | 300
|
2016-06-13T23:06:55.000Z
|
2020-11-18T22:42:55.000Z
|
import sys
import argparse
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
allowable_actions = ['none', 'quantize', 'trace', 'hed', 'sketch', 'segment', 'simplify', 'face', 'upsample', 'sss']
# input, output
parser = argparse.ArgumentParser()
parser.add_argument("--input_src", help="input: directory of input images or movie file")
parser.add_argument("--max_num_images", type=int, help="maximum number of images to take (omit to use all)", default=None)
parser.add_argument("--shuffle", action="store_true", help="shuffle input images")
parser.add_argument("--min_dim", type=int, help="minimum width/height to allow for images", default=0)
parser.add_argument("--max_dim", type=int, help="maximum width/height to allow for images", default=1e8)
parser.add_argument("--output_dir", help="where to put output images (if \"None\" simply overwrite input)")
parser.add_argument("--pct_test", type=float, help="percentage that goes to test set (default 0)", default=0)
parser.add_argument("--save_mode", help="save output combined (pix2pix-style), split into directories, or just output", choices=['split','combined','output_only'], default='output_only')
parser.add_argument("--save_ext", help="image save extension (jpg/png)", choices=['jpg','png'], default='png')
# augmentation
parser.add_argument("--w", type=int, help="output image width (None means leave unchanged)", default=None)
parser.add_argument("--h", type=int, help="output image height (None means leave unchanged)", default=None)
parser.add_argument("--num_per", type=int, help="how many copies of original, augmented", default=1)
parser.add_argument("--frac", type=float, help="cropping ratio before resizing", default=1.0)
parser.add_argument("--frac_vary", type=float, help="cropping ratio vary", default=0.0)
parser.add_argument("--max_ang_rot", type=float, help="max rotation angle (degrees)", default=0)
parser.add_argument("--max_stretch", type=float, help="maximum stretching factor (0=none)", default=0)
parser.add_argument("--centered", action="store_true", help="to use centered crops instead of random ones")
# actions
parser.add_argument("--action", type=str, help="comma-separated: list of actions from {%s} to take, e.g. trace,hed" % ','.join(allowable_actions), required=True, default="")
parser.add_argument("--target_face_image", type=str, help="image of target face to extract (if None, extract first found one)", default=None)
parser.add_argument("--face_crop", type=float, help="crop around target face first, with face fitting this fraction of the crop (default None, don't crop)", default=None)
parser.add_argument("--face_crop_lerp", type=float, help="smoothing parameter for shifting around lerp (default 1, no lerp)", default=1.0)
# data files
parser.add_argument("--hed_model_path", type=str, default='../data/HED_reproduced.npz', help="model path for HED")
parser.add_argument("--landmarks_path", type=str, default='../data/shape_predictor_68_face_landmarks.dat', help="path to face landmarks file")
parser.add_argument("--photosketch_path", type=str, default='../tools/PhotoSketch', help="path to PhotoSketch (if using it)")
parser.add_argument("--photosketch_model_path", type=str, default='../tools/PhotoSketch/pretrained', help="path to PhotoSketch checkpoint directory (if using it)")
parser.add_argument("--esrgan_path", type=str, default='../tools/ESRGAN', help="path to ESRGAN (if using it)")
parser.add_argument("--esrgan_model_path", type=str, default='../tools/ESRGAN/models', help="path to ESRGAN checkpoint directory (if using it)")
parser.add_argument("--sss_path", type=str, default='../tools/SIGGRAPH18SSS', help="path to SSS (if using it)")
parser.add_argument("--sss_model_path", type=str, default='../tools/SIGGRAPH18SSS/model', help="path to SSS checkpoint directory (if using it)")
args = parser.parse_args()
# import additional helpers as needed
if 'hed' in args.action.split(',') or 'simplify' in args.action.split(','):
    import hed_processing
if 'sketch' in args.action.split(','):
    sys.path.append(args.photosketch_path)
    import photosketch_processing
if 'upsample' in args.action.split(','):
    sys.path.append(args.esrgan_path)
    import esrgan_processing
if 'face' in args.action.split(','):
    from face_processing import *
if 'sss' in args.action.split(','):
    sys.path.append(args.sss_path)
    import sss_processing
if __name__ == '__main__':
#args = parser.parse_args()
main(args)
| 45.578397
| 205
| 0.655684
|
import os
import sys
from random import random, sample
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageDraw, ImageFile
from imutils import video
import cv2
ImageFile.LOAD_TRUNCATED_IMAGES = True
allowable_actions = ['none', 'quantize', 'trace', 'hed', 'sketch', 'segment', 'simplify', 'face', 'upsample', 'sss']
# input, output
parser = argparse.ArgumentParser()
parser.add_argument("--input_src", help="input: directory of input images or movie file")
parser.add_argument("--max_num_images", type=int, help="maximum number of images to take (omit to use all)", default=None)
parser.add_argument("--shuffle", action="store_true", help="shuffle input images")
parser.add_argument("--min_dim", type=int, help="minimum width/height to allow for images", default=0)
parser.add_argument("--max_dim", type=int, help="maximum width/height to allow for images", default=1e8)
parser.add_argument("--output_dir", help="where to put output images (if \"None\" simply overwrite input)")
parser.add_argument("--pct_test", type=float, help="percentage that goes to test set (default 0)", default=0)
parser.add_argument("--save_mode", help="save output combined (pix2pix-style), split into directories, or just output", choices=['split','combined','output_only'], default='output_only')
parser.add_argument("--save_ext", help="image save extension (jpg/png)", choices=['jpg','png'], default='png')
# augmentation
parser.add_argument("--w", type=int, help="output image width (None means leave unchanged)", default=None)
parser.add_argument("--h", type=int, help="output image height (None means leave unchanged)", default=None)
parser.add_argument("--num_per", type=int, help="how many copies of original, augmented", default=1)
parser.add_argument("--frac", type=float, help="cropping ratio before resizing", default=1.0)
parser.add_argument("--frac_vary", type=float, help="cropping ratio vary", default=0.0)
parser.add_argument("--max_ang_rot", type=float, help="max rotation angle (degrees)", default=0)
parser.add_argument("--max_stretch", type=float, help="maximum stretching factor (0=none)", default=0)
parser.add_argument("--centered", action="store_true", help="to use centered crops instead of random ones")
# actions
parser.add_argument("--action", type=str, help="comma-separated: list of actions from {%s} to take, e.g. trace,hed" % ','.join(allowable_actions), required=True, default="")
parser.add_argument("--target_face_image", type=str, help="image of target face to extract (if None, extract first found one)", default=None)
parser.add_argument("--face_crop", type=float, help="crop around target face first, with face fitting this fraction of the crop (default None, don't crop)", default=None)
parser.add_argument("--face_crop_lerp", type=float, help="smoothing parameter for shifting around lerp (default 1, no lerp)", default=1.0)
# data files
parser.add_argument("--hed_model_path", type=str, default='../data/HED_reproduced.npz', help="model path for HED")
parser.add_argument("--landmarks_path", type=str, default='../data/shape_predictor_68_face_landmarks.dat', help="path to face landmarks file")
parser.add_argument("--photosketch_path", type=str, default='../tools/PhotoSketch', help="path to PhotoSketch (if using it)")
parser.add_argument("--photosketch_model_path", type=str, default='../tools/PhotoSketch/pretrained', help="path to PhotoSketch checkpoint directory (if using it)")
parser.add_argument("--esrgan_path", type=str, default='../tools/ESRGAN', help="path to ESRGAN (if using it)")
parser.add_argument("--esrgan_model_path", type=str, default='../tools/ESRGAN/models', help="path to ESRGAN checkpoint directory (if using it)")
parser.add_argument("--sss_path", type=str, default='../tools/SIGGRAPH18SSS', help="path to SSS (if using it)")
parser.add_argument("--sss_model_path", type=str, default='../tools/SIGGRAPH18SSS/model', help="path to SSS checkpoint directory (if using it)")
args = parser.parse_args()
# import additional helpers as needed
if 'hed' in args.action.split(',') or 'simplify' in args.action.split(','):
import hed_processing
if 'sketch' in args.action.split(','):
sys.path.append(args.photosketch_path)
import photosketch_processing
if 'upsample' in args.action.split(','):
sys.path.append(args.esrgan_path)
import esrgan_processing
if 'face' in args.action.split(','):
from face_processing import *
if 'sss' in args.action.split(','):
sys.path.append(args.sss_path)
import sss_processing
from processing import *
def try_make_dir(new_dir):
if not os.path.isdir(new_dir):
os.mkdir(new_dir)
def setup_output_dirs(output_dir, save_mode, include_test):
train_dir = os.path.join(output_dir, 'train')
test_dir = os.path.join(output_dir, 'test')
trainA_dir, trainB_dir, testA_dir, testB_dir = None, None, None, None
if include_test:
if save_mode == 'split':
trainA_dir = os.path.join(train_dir, 'train_A')
testA_dir = os.path.join(test_dir, 'test_A')
trainB_dir = os.path.join(train_dir, 'train_B')
testB_dir = os.path.join(test_dir, 'test_B')
else:
trainA_dir = train_dir
testA_dir = test_dir
trainB_dir = train_dir
testB_dir = test_dir
elif save_mode == 'split':
train_dir = output_dir
trainA_dir = os.path.join(output_dir, 'train_A')
trainB_dir = os.path.join(output_dir, 'train_B')
else:
train_dir = output_dir
trainA_dir = output_dir
trainB_dir = output_dir
try_make_dir(output_dir)
try_make_dir(train_dir)
try_make_dir(trainA_dir)
try_make_dir(trainB_dir)
if include_test:
try_make_dir(test_dir)
try_make_dir(testA_dir)
try_make_dir(testB_dir)
return trainA_dir, trainB_dir, testA_dir, testB_dir
def get_frame_indexes(max_num_images, num_images, shuffle):
num_samples = min(max_num_images if max_num_images is not None else 1e8, num_images)
sort_order = sample(range(num_images), num_samples) if shuffle else sorted(range(num_samples))
return sort_order
def augmentation(img, num_per, out_w, out_h, frac, frac_vary, max_ang_rot, max_stretch, centered):
imgs = []
for i in range(num_per):
ang = max_ang_rot * (-1.0 + 2.0 * random())
frac_amt = frac + frac_vary * (-1.0 + 2.0 * random())
stretch = max_stretch * (-1.0 + 2.0 * random())
newimg = crop_rot_resize(img, frac_amt, out_w, out_h, ang, stretch, centered)
imgs.append(newimg)
return imgs
def main(args):
input_src, shuffle, max_num_images, min_w, min_h, max_w, max_h = args.input_src, args.shuffle, args.max_num_images, args.min_dim, args.min_dim, args.max_dim, args.max_dim
output_dir, out_w, out_h, pct_test, save_mode, save_ext = args.output_dir, args.w, args.h, args.pct_test, args.save_mode, args.save_ext
num_per, frac, frac_vary, max_ang_rot, max_stretch, centered = args.num_per, args.frac, args.frac_vary, args.max_ang_rot, args.max_stretch, args.centered
action, target_face_image, face_crop, face_crop_lerp, landmarks_path, hed_model_path = args.action, args.target_face_image, args.face_crop, args.face_crop_lerp, args.landmarks_path, args.hed_model_path
#os.system('rm -rf %s'%output_dir)
# get list of actions
actions = action.split(',')
if False in [a in allowable_actions for a in actions]:
raise Exception('one of your actions does not exist')
# initialize face_processing if needed
if 'face' in actions:
initialize_face_processing(landmarks_path)
target_encodings = get_encodings(target_face_image) if target_face_image else None
# initialize photosketch if needed
if 'sketch' in actions:
photosketch_processing.setup(args.photosketch_model_path)
# initialize esrgan if needed
if 'upsample' in actions:
esrgan_processing.setup(args.esrgan_model_path)
# initialize SSS if needed
if 'sss' in actions:
sss_processing.setup(args.sss_model_path)
# setup output directories
if output_dir != 'None':
trainA_dir, trainB_dir, testA_dir, testB_dir = setup_output_dirs(output_dir, save_mode, pct_test>0)
# initialize input
ext = os.path.splitext(input_src)[1]
is_movie = ext.lower() in ['.mp4','.mov','.avi']
if is_movie:
cap = cv2.VideoCapture(input_src)
fps = video.FPS().start()
num_images = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pct_frames = list(np.linspace(0, 1, num_images))
all_frames = get_frame_indexes(max_num_images, num_images, shuffle)
else:
images = sorted([f for f in os.listdir(input_src) if os.path.isfile(os.path.join(input_src, f)) ])
num_images = len(images)
all_frames = get_frame_indexes(max_num_images, num_images, shuffle)
# training/test split
training = [1] * len(all_frames) * num_per
if pct_test > 0:
n_test = int(len(all_frames) * num_per * pct_test)
test_per = 1.0 / pct_test
test_idx = [int(test_per * (i+1) - 1) for i in range(n_test)]
for t in test_idx:
training[t] = 0
# iterate through each input
print("Iterating through %d input images" % len(all_frames))
for k, idx_frame in tqdm(enumerate(all_frames)):
if is_movie:
pct_frame = pct_frames[idx_frame]
frame = int(pct_frame * num_images)
cap.set(1, frame);
ret, img = cap.read()
frame_name = 'frame%06d' % frame
img = cv2pil(img)
else:
img_path = images[idx_frame]
frame_name = os.path.splitext(img_path)[0]
full_image_path = os.path.join(input_src, img_path)
img = Image.open(full_image_path).convert("RGB")
# skip images which are too small or too big
if img.width < min_w or img.height < min_h:
continue
if img.width > max_w or img.height > max_h:
continue
# first crop around face if requested
if face_crop is not None:
jx, jy, jw, jh = get_crop_around_face(img, target_encodings, out_w/out_h, face_crop, face_crop_lerp)
img = img.crop((jx, jy, jx + jw, jy + jh))
# preprocess/augment and produce input images
imgs0, imgs1 = augmentation(img, num_per, out_w, out_h, frac, frac_vary, max_ang_rot, max_stretch, centered), []
# process each input image to make output
for img0 in imgs0:
img = img0
for a in actions:
if a == 'segment':
img = segment(img)
                elif a == 'quantize':  # was 'colorize', which is not in allowable_actions
colors = [[255,255,255], [0,0,0], [127,0,0], [0, 0, 127], [0, 127, 0]]
img = quantize_colors(img, colors)
elif a == 'trace':
img = trace(img)
elif a == 'hed':
img = hed_processing.run_hed(img, hed_model_path)
elif a == 'sketch':
img = photosketch_processing.sketch(img)
elif a == 'simplify':
img = simplify(img, hed_model_path)
elif a == 'face':
img = extract_face(img, target_encodings)
elif a == 'sss':
img = sss_processing.run_sss(img)
elif a == 'upsample':
img = esrgan_processing.upsample(img)
img = img.resize((int(img.width/2), int(img.height/2)), resample=Image.BICUBIC) # go from 4x to 2x
elif a == 'none' or a == '':
pass
imgs1.append(img)
# save the images
for i, (img0, img1) in enumerate(zip(imgs0, imgs1)):
out_name = 'f%05d%s_%s.%s' % (idx_frame, '_%02d'%i if num_per>1 else '', frame_name, save_ext)
is_train = training[num_per * k + i]
if save_mode == 'combined':
output_dir = trainA_dir if is_train else testA_dir
img2 = Image.new('RGB', (out_w * 2, out_h))
img2.paste(img1.convert('RGB'), (0, 0))
img2.paste(img0.convert('RGB'), (out_w, 0))
img2.save(os.path.join(output_dir, out_name), quality=97)
else:
if output_dir == 'None':
img1.convert('RGB').save(full_image_path, quality=97)
else:
outputA_dir = trainA_dir if is_train else testA_dir
img1.convert('RGB').save(os.path.join(outputA_dir, out_name), quality=97)
if save_mode == 'split':
outputB_dir = trainB_dir if is_train else testB_dir
img0.convert('RGB').save(os.path.join(outputB_dir, out_name), quality=97)
#plt.figure(figsize=(20,10))
#plt.imshow(np.concatenate([img0, img1], axis=1))
if __name__ == '__main__':
#args = parser.parse_args()
main(args)
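# --- Editor's sketch (illustrative only, not part of the original file) ---
# get_frame_indexes (defined above) either draws max_num_images random frame
# indexes (shuffle=True) or takes the first N in order; assuming it is in scope:
from random import seed

seed(0)                                         # make the shuffled draw repeatable
print(get_frame_indexes(3, 10, shuffle=False))  # -> [0, 1, 2]
print(get_frame_indexes(3, 10, shuffle=True))   # three distinct indexes from 0..9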
| 0
| 0
| 0
| 0
| 0
| 8,306
| 0
| 40
| 421
|
af4b747e08e26a56b9c717b8ed199674ed5f60b8
| 480
|
py
|
Python
|
python/coursera_python/SEC/sc.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16
|
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/coursera_python/SEC/sc.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8
|
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/coursera_python/SEC/sc.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5
|
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# to decrypt a msg
msg = input("Enter the encrypted message :")
en_msg = msg.upper()
#print(en_msg)
key = 1
for i in range(26):             # 26 letters -> 26 distinct shifts; 27 would repeat shift 1
    str1 = ""
    for letter in en_msg:
        if not letter.isalpha():    # leave spaces and punctuation unshifted
            str1 = str1 + letter
            continue
        l = ord(letter) + key
        if l > 90:
            l = l - 90 + 64     # wrap past 'Z' back around to 'A'
        str1 = str1 + chr(l)
    key = key + 1
    print(str1)
key = 1
'''for i in range(27):
str1 = ""
for letter in en_msg:
l = ord(letter) - key
if l < 65:
l = 91 - (65 - l) #- 65 + 64
str1 = str1 + chr(l)
key = key + 1
print(str1)
'''
| 12.972973
| 44
| 0.5375
|
# to decrypt a msg
msg = input("Enter the encrypted message :")
en_msg = msg.upper()
#print(en_msg)
key = 1
for i in range(26):             # 26 letters -> 26 distinct shifts; 27 would repeat shift 1
    str1 = ""
    for letter in en_msg:
        if not letter.isalpha():    # leave spaces and punctuation unshifted
            str1 = str1 + letter
            continue
        l = ord(letter) + key
        if l > 90:
            l = l - 90 + 64     # wrap past 'Z' back around to 'A'
        str1 = str1 + chr(l)
    key = key + 1
    print(str1)
key = 1
'''for i in range(27):
str1 = ""
for letter in en_msg:
l = ord(letter) - key
if l < 65:
l = 91 - (65 - l) #- 65 + 64
str1 = str1 + chr(l)
key = key + 1
print(str1)
'''
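# --- Editor's sketch (illustrative only, not part of the original file) ---
# The wrap-around "l = l - 90 + 64" above is modular arithmetic on 'A'..'Z'
# (codes 65..90); the same shift written with an explicit mod 26:
def shift(text, key):
    return "".join(chr((ord(c) - 65 + key) % 26 + 65) if c.isalpha() else c
                   for c in text.upper())

print(shift("KHOOR", 23))  # -> 'HELLO' (shifting by 23 undoes a Caesar +3)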
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cb800acabcc465078e1b3bddd2513be5b3b3a472
| 22,294
|
py
|
Python
|
code/map_graph_Custom.py
|
thedrdos/covid-map
|
745b1ea679da52638abd586e5357aec2cda4cb09
|
[
"MIT"
] | null | null | null |
code/map_graph_Custom.py
|
thedrdos/covid-map
|
745b1ea679da52638abd586e5357aec2cda4cb09
|
[
"MIT"
] | null | null | null |
code/map_graph_Custom.py
|
thedrdos/covid-map
|
745b1ea679da52638abd586e5357aec2cda4cb09
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Jul 2020
@author: TheDrDOS
"""
# # Clear the Spyder console and variables
# try:
# from IPython import get_ipython
# get_ipython().magic('clear')
# get_ipython().magic('reset -f')
# except:
# pass
from bokeh.io import save, output_file
from bokeh.models import ColorBar, LogTicker, LinearColorMapper, NumeralTickFormatter
from bokeh.models import BasicTickFormatter, FuncTickFormatter
from bokeh.palettes import Magma11 as palette
from bokeh.palettes import Turbo256
from bokeh.plotting import figure
from bokeh.models import Div
from bokeh.models import ColumnDataSource, DateRangeSlider, Select, Spinner
from bokeh.models.tools import HoverTool, BoxZoomTool # for showing the hover tool
from bokeh_template_external_js import template as template_ext_js
from bokeh.models import CustomJS, TapTool, Toggle, TextInput, RadioButtonGroup
from bokeh.tile_providers import CARTODBPOSITRON_RETINA, get_provider
import os
import sys
import numpy as np
from bokeh.models import NumeralTickFormatter
# for adding the second axis
from bokeh.models import Range1d, DataRange1d
# for formatting the dates on the x axis
from bokeh.models import DatetimeTickFormatter
from bokeh.layouts import layout # For show multiple figures
# For adding spans (vertical/horizontal lines without end points)
from bokeh.models import Span
import pandas as pd
import json
import gzip
import sys
"""
# Assign output file
________________________________________________________________________________
"""
def filename(fullname):
""" Return the name of a file without its path or extension"""
return os.path.splitext(os.path.split(fullname)[1])[0]
this_filename = filename(os.path.basename(os.path.splitext(__file__)[0]))
javascript_path = './' + this_filename + '_js/'
localhost_path = './plots/'
# name the output file/s after the script file
output_filename = "./../site/plots/" + this_filename
output_file(output_filename + ".html",
title="Interactive Custom Map of World COVID19 Data with Time History") # title=filename(output_filename))
"""
# Support functions
________________________________________________________________________________
"""


def dic_nan_decode(d, nan_code):
    # restored from the original file below: the filtered copy calls this
    # helper when decoding init_datafile['data']
    for k in d:
        d[k] = array_element_replace(d[k], nan_code, float('NaN'))
    return d


def array_element_replace(arr, old_value, new_value):
    for i in range(0, len(arr)):
        if isinstance(arr[i], list):
            arr[i] = array_element_replace(arr[i], old_value, new_value)
        else:
            if arr[i] == old_value:
                arr[i] = new_value
    return arr
"""
# Load key_to_filename
________________________________________________________________________________
"""
ext_datafiles = {
'path': "../site/plots/data/",
'rel_path': "./data/",
}
with gzip.GzipFile(ext_datafiles['path'] + 'filename_to_location.json.gz', 'r') as fin:
ext_datafiles['filename_to_location'] = json.loads(
fin.read().decode('utf-8'))
with gzip.GzipFile(ext_datafiles['path'] + 'location_to_filename.json.gz', 'r') as fin:
ext_datafiles['location_to_filename'] = json.loads(
fin.read().decode('utf-8'))
with gzip.GzipFile(ext_datafiles['path'] + 'location_to_mapfilename.json.gz', 'r') as fin:
ext_datafiles['location_to_mapfilename'] = json.loads(
fin.read().decode('utf-8'))
"""
# %% Load json file for initialization
________________________________________________________________________________
"""
init_location = 'New York, US' # get location
with gzip.GzipFile(ext_datafiles['path'] + ext_datafiles['location_to_filename'][init_location] + '.json.gz', 'r') as fin:
init_datafile = json.loads(fin.read().decode('utf-8'))
init_data = dic_nan_decode(init_datafile['data'], init_datafile['nan_code'])
init_data['date'] = pd.to_datetime(init_data['date'])
latest_data_date = pd.to_datetime('today') #max(init_data['date'])
oldest_date_date = pd.to_datetime('20191101',format='%Y%m%d') #min(init_data['date'])
init_location = 'Earth' # get location
with gzip.GzipFile(ext_datafiles['path'] + ext_datafiles['location_to_filename'][init_location] + '_map.json.gz', 'r') as fin:
init_mapfile = json.loads(fin.read().decode('utf-8'))
init_map = init_mapfile['data']
# Create source data structure and initialize state map
source_graph = ColumnDataSource(init_data)
source_map = ColumnDataSource(init_map)
# Erase the underlying data to reduce the html filesize (will be loaded upon user tap feedback)
source_graph.data = {k: source_graph.data[k][-2:-1] for k in source_graph.data}
"""
# %% Make State graph for COVID data
________________________________________________________________________________
"""
# Set Soft Axis limits
ax_limits = {
'x': (
pd.Timestamp.now() - pd.DateOffset(months=4),
pd.Timestamp.now()
),
}
# Create figure
p_graph = figure(x_axis_type='datetime', y_axis_type="linear",
title='(Tap a state on the map above to show the corresponding COVID data here)',
# plot_width=800, plot_height=600,
tools="ypan,xpan,ywheel_zoom,xwheel_zoom,ybox_zoom,xbox_zoom,box_zoom,reset", active_scroll=None, active_drag='ypan',
toolbar_location='left',
sizing_mode='scale_width',
aspect_ratio=2,
visible=True,
y_axis_location='right',
) # Assign tools and make wheel_zoom default on
# Make the bar and line plots
glyphs = []
graph_init = {
'x': 'date',
'source': source_graph,
'y': [
'positiveIncreaseMAVPerMil',
'positiveActiveIncreaseMAVPerMil',
'deathIncreaseMAV10PerMil'
],
'legend_label': [
'Positive Increase (week avg)',
'Positive Active Increase (week avg)',
'Deaths Increase x10 (week avg)'
],
'line_color': [
'red', # mapper,
'yellowgreen', # color_mapper,
'blue'],
'line_width': [
4, 4, 4
],
'line_dash': [
'solid', 'solid', 'solid'],
'name': ['' for i in range(0, 3)],
}
for n, y in enumerate(graph_init['y']):
graph_init['name'][n] = y
for n, y in enumerate(graph_init['y']):
glyphs.append(
p_graph.line(
source=graph_init['source'],
x=graph_init['x'],
y=graph_init['y'][n],
# legend_label=graph_init['legend_label'][n],
line_color=graph_init['line_color'][n],
color=graph_init['line_color'][n],
line_width=graph_init['line_width'][n],
line_dash=graph_init['line_dash'][n],
name=graph_init['name'][n],
)
)
p_graph.yaxis[0].formatter = NumeralTickFormatter(format="0,0.0")
# Horizontal right axis zero span
zero_span = Span(
location=0, # Span the 0 line of the right y axis
dimension='width', line_color='gray',
line_dash='solid', line_width=3, line_alpha=0.4,
)
p_graph.add_layout(zero_span)
# Weekly span marks
duration = np.timedelta64(4, 'm')
ds = np.arange(ax_limits['x'][0], ax_limits['x'][1], dtype='datetime64[D]')
# use of timezones was depricated, before timezone=None was needed
day = np.timedelta64(1, 'D')
for d in ds:
if ((np.timedelta64(ds.max() - d) / day) % 7) == 0:
ts = (np.datetime64(d) - np.datetime64('1970-01-01T00:00:00')) / \
np.timedelta64(1, 's')
wloc = ts * 1000 # get the week mark location in a format compatible with annotations
p_graph.add_layout(
Span(location=wloc,
dimension='height', line_color='gray',
line_dash='dashed', line_width=2, line_alpha=0.5,
))
span_play_position = Span(location=latest_data_date,
dimension='height', line_color='gray',
line_dash='solid', line_width=2, line_alpha=0.5,
name='span_play_position')
p_graph.add_layout(span_play_position)
# # X axis formatting:
p_graph.x_range = Range1d(ax_limits['x'][0], ax_limits['x'][1])
p_graph.xaxis.major_label_orientation = -np.pi / 3 # slant the labels
dtformat = "%b-%d"
p_graph.xaxis.formatter = formatter = DatetimeTickFormatter( # Always show the same date formatting regardless of zoom
days=dtformat,
months=dtformat,
hours=dtformat,
minutes=dtformat)
# Add legend
#p_graph.legend.location = "top_left"
# Add a hover tool
hover = HoverTool()
hover.tooltips = [
#('Type', "$name"),
('', '$name: @$name{0,0.} on @date{%a-%b-%d}'),
]
# hover.mode = 'vline'
hover.formatters = {
'@date': 'datetime', # use 'datetime' formatter for '@date' field
'$name': 'printf' # use 'printf' formatter for the name of the column
}
hover.renderers = glyphs
p_graph.add_tools(hover)
p_graph_glyphs = glyphs
"""
%% Setup color map
________________________________________________________________________________
"""
palette = Turbo256[128:-1:5]
color_mapper = LinearColorMapper(
palette=palette, low=0, high=20 * len(palette))
color_bar = ColorBar(
color_mapper=color_mapper,
label_standoff=2, border_line_color=None, location=(0, 0),
bar_line_alpha=0.5,
major_label_text_align='left',
)
"""
# Make the map
________________________________________________________________________________
"""
p_map = figure(
title=latest_data_date.strftime('%Y-%m-%d'),
# x_range=minmax(DS_worlds_map.data['xc']), y_range=minmax(DS_worlds_map.data['yc']),
# x_range=(-1.4e7,-7.4e6),
# y_range=(2.88e6,6.28e6),
# sizing_mode='stretch_width',
tools="tap,pan,wheel_zoom,reset,save", active_tap='tap',
toolbar_location='left',
x_axis_location=None, y_axis_location=None,
x_axis_type="mercator", y_axis_type="mercator",
sizing_mode='scale_width',
aspect_ratio=2,
match_aspect=True,
)
p_map.grid.grid_line_color = None
bztool_s = BoxZoomTool(match_aspect=True)
p_map.add_tools(bztool_s)
p_map.toolbar.active_drag = None # bztool_s
# Add the map tiles
tile_provider = get_provider(CARTODBPOSITRON_RETINA)
p_map.add_tile(tile_provider)
# Add the states1
p_map_mpoly = p_map.multi_polygons(
xs='x', ys='y', source=source_map,
fill_color={'field': 'positiveIncreaseMAVPerMil',
'transform': color_mapper},
fill_alpha=0.6,
line_color="white",
line_width=1,
)
p_map.add_layout(color_bar, 'right')
# Add the hover tool to show the state name and number of counties
hoverm = HoverTool()
hoverm.tooltips = [
('Name', "@name"),
("Population", "@population{0,0.}"),
#("Current COVID Statistics","{}".format('-'*15)),
('Positive Cases', "@positive{0,0.}"),
('Recovered Cases', "@recovered{0,0.}"),
('Positive Active Cases', "@positiveActive{0,0.}"),
('Deaths', "@death{0,0.}"),
]
p_map.add_tools(hoverm)
# Add taptool to select from which state to show all the counties
with open(javascript_path + 'callback_map.js', 'r') as f:
callback_world_map = f.read()
callbacktap = CustomJS(args={'ext_datafiles': ext_datafiles,
'p_graph_glyphs': p_graph_glyphs,
'p_graph': p_graph,
},
code=callback_world_map)
taptool = p_map.select(type=TapTool)
taptool.callback = callbacktap
# Explicitly initialize x range
p_map.x_range = DataRange1d()
# %% Make data graphs reset on doubletap
p_graph.js_on_event('doubletap', CustomJS(args={'p': p_graph, }, code="""
p.reset.emit()
"""))
"""
# Map widgets
------------------------------------------------------------------------------------------------
"""
# Get the callback script used for many of the widgets
with open(javascript_path + 'callback_map_widgets.js', 'r') as f:
callback_widgets = f.read()
# Level radio buttons
radio_labels = ["Play \u25B6", "Step \u23ef", "Pause \u23f8"]
radioGroup_play_controls = RadioButtonGroup(
labels=radio_labels, active=2, name='radioGroup_play_controls')
radioGroup_play_controls.js_on_click(CustomJS(args={
'event': 'radioGroup_play_controls',
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
'source_map': source_map,
'p_map': p_map,
},
code=callback_widgets))
# %% Make date range slider
date_range_slider = DateRangeSlider(value=((latest_data_date-pd.DateOffset(months=1)), (latest_data_date)),
start=(oldest_date_date), end=(latest_data_date),
name='date_range_slider')
date_range_slider.js_on_change("value", CustomJS(args={
'event': 'date_range_slider',
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
'source_map': source_map,
'p_map': p_map,
},
code=callback_widgets
))
# Minumum time between animations on play, Spinner
spinner_minStepTime = Spinner(title="",
low=0, high=5, step=0.25, value=0.25, width=100,format=FuncTickFormatter(code="""
return tick.toString()+" sec"
"""),
name='spinner_minStepTime')
# Respond to taps on the graph
p_graph.js_on_event('tap', CustomJS(args={
'event': 'graph_tap',
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
'source_map': source_map,
'p_map': p_map,
'source_graph': source_graph,
'p_graph': p_graph,
},
code=callback_widgets
))
# Level radio buttons
radio_labels = ["World Level", "States Level", "Counties Level"]
radioGroup_level_select = RadioButtonGroup(
labels=radio_labels, active=0, name='radioGroup_level_select')
radioGroup_level_select.js_on_click(CustomJS(args={
'event': 'level_select',
'mpoly': p_map_mpoly,
'ext_datafiles': ext_datafiles,
'source_map': source_map,
'p_map': p_map,
},
code=callback_widgets))
# Choose to only see continental US
continental_states = [
'Alabama, US', 'Arizona, US', 'Arkansas, US', 'California, US', 'Colorado, US', 'Connecticut, US', 'Delaware, US', 'District of Columbia, US', 'Florida, US', 'Georgia, US', 'Idaho, US', 'Illinois, US', 'Indiana, US', 'Iowa, US', 'Kansas, US', 'Kentucky, US', 'Louisiana, US', 'Maine, US', 'Maryland, US', 'Massachusetts, US', 'Michigan, US', 'Minnesota, US', 'Mississippi, US', 'Missouri, US', 'Montana, US', 'Nebraska, US', 'Nevada, US', 'New Hampshire, US', 'New Jersey, US', 'New Mexico, US', 'New York, US', 'North Carolina, US', 'North Dakota, US', 'Ohio, US', 'Oklahoma, US', 'Oregon, US', 'Pennsylvania, US', 'Rhode Island, US', 'South Carolina, US', 'South Dakota, US', 'Tennessee, US', 'Texas, US', 'Utah, US', 'Vermont, US', 'Virginia, US', 'Washington, US', 'West Virginia, US', 'Wisconsin, US', 'Wyoming, US']
button_continental_us_only = Toggle(label="Continental US Only",
visible=True,
button_type='default',
name='button_continental_us_only')
button_continental_us_only.js_on_change('active', CustomJS(args={
'event':'button_continental_us_only',
'mpoly': p_map_mpoly,
'ext_datafiles': ext_datafiles,
'source_map': source_map,
'p_map': p_map,
'continental_states': continental_states,
}, code=callback_widgets))
# Selectors for the map
selectors_map = []
opts = [k for k in init_data.keys() if isinstance(init_data[k][0], int)
or isinstance(init_data[k][0], float)]
opts = sorted(opts)
select = Select(title="Data For Map Coloring:",
value=p_map_mpoly.glyph.fill_color['field'],
options=opts)
select.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
}, code="""
//console.log('select: value=' + this.value, this.toString())
mpoly.glyph.fill_color.field = this.value
mpoly.data_source.change.emit()
"""))
selectors_map.append(select)
# Range setting for map
map_range_widgets = []
text_input = TextInput(value=str(color_mapper.high), title="High Color")
text_input.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'color_mapper': color_mapper,
}, code="""
color_mapper.high = Number(this.value)
"""))
map_range_widgets.append(text_input)
text_input = TextInput(value=str(color_mapper.low), title="Low Color")
text_input.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'color_mapper': color_mapper,
}, code="""
color_mapper.low = Number(this.value)
"""))
map_range_widgets.append(text_input)
"""
# Line graph widgets
------------------------------------------------------------------------------------------------
"""
# Selectors for the line graphs
selectors_graph = []
opts = [k for k in init_data.keys() if isinstance(init_data[k][0], int)
or isinstance(init_data[k][0], float)]
opts = sorted(opts)
for n, g in enumerate(p_graph_glyphs):
select = Select(title=" ", # title="Data For Line "+str(n+1)+":",
value=g.glyph.y,
options=opts,
background=g.glyph.line_color,)
select.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'line': g,
}, code="""
//console.log('select: value=' + this.value, this.toString())
line.glyph.y.field = this.value
line.data_source.change.emit()
"""))
selectors_graph.append(select)
# %% Make heading for the whole thing
"""
# %% Make heading for the whole thing
"""
heading = Div(text="""
<h1> World Map Of COVID Data With Population Normalized Time History </h1>
<p>Shows all the countries colored according to last week's average number of new COVID-19 cases per day, normalized by country population (number of people per million).</p>
<ul>
<li>Higher color number corresponds to faster spread of the virus.</li>
<li>On the left of each graph there are tools to zoom/pan/reset/save.</li>
<li>On Mobile: Use two fingers to scroll the page.</li>
<li>Data last updated on: {data_update} </li>
<li>Graphs generated on: {graph_update} </li>
<li>Recovery data for countries is unavailable. Using estimates of approx. 15 days to recovery for those that don't die.</li>
</ul>
<h3> Tap on any country to show the COVID19 data time history graph below. </h3>
""".format(
data_update=pd.to_datetime(latest_data_date).strftime('%Y-%m-%d'),
graph_update=pd.Timestamp.now().strftime('%Y-%m-%d'),
))
footer = Div(text="""
<h3> Sources </h3>
<ul>
<li>GitHub repository for this project: <a href="https://github.com/thedrdos/covid-map"> https://github.com/thedrdos/covid-map </a>. </li>
<li>Produced using Python with Bokeh and other modules.</li>
<li>Country geographical Data from <a href="http://www.naturalearthdata.com/">Natural Earth</a>.</li>
<li>COVID-19 Data on Countries from <a href="https://coronavirus.jhu.edu">The John Hopkins University Coronavirus Resource Center</a>
or on <a href="https://github.com/CSSEGISandData/COVID-19">GitHub</a>.</li>
</ul>
""")
data_notes = Div(text="""
<h4> Data Definitions: </h4>
<ul>
<li>Compatible with the <a href="https://covidtracking.com/about-data/data-definitions"> COVID Tracking Project data definitions</a>. </li>
<li>Countries may have `positive`, `death`, `recovered`, and their derivatives available.</li>
<li>USA states have most/all the data available.</li>
<li>USA counties may have `positive`, `death`, `recovered`, and their derivatives available.</li>
<li>`recovered` is estimated where not available as `positive`-`death` after 15 days.</li>
<li>USA counties do not have `recovered` reported data, they are estimates.</li>
<li>`positiveActive` denotes `positive`-`recovered`-`death`.</li>
<li>Suffix of `MAV` denotes a one week moving average.</li>
<li>Suffix of `10` denotes multiplied by 10.</li>
<li>Suffix of `PerMil` denotes population normalization (persons per million).</li>
</ul>
""")
# %% Combine all the graphs
"""
# %% Combine all the graphs
________________________________________________________________________________
"""
# Layout the figures and show them
p_map.sizing_mode = 'scale_width'
p_graph.sizing_mode = 'scale_width'
if len(sys.argv)==1:
print('Making non-mobile output version')
lout = layout([heading,
[selectors_map + map_range_widgets + [radioGroup_level_select] +
[button_continental_us_only]
#+[Spacer(background='black',height=2)]
+[Div(text="<center><i> Time History Animation </i></center>")]
#+[Spacer(background='black',height=2)]
+[[spinner_minStepTime,radioGroup_play_controls] ,date_range_slider], p_map],
[selectors_graph+[data_notes], p_graph],
footer
])
lout.margin = (4, 20, 4, 20) # top, right, bottom, left
lout.sizing_mode = 'scale_width'
save(lout, template=template_ext_js(['jquary', 'pako']))
# view(output_filename+'.html')
# view('http://localhost:7800/'+localhost_path+this_filename+'.html')
elif sys.argv[1]=='mobile':
print('Making mobile output version')
lout_mobile = layout([
heading,
[selectors_map]+map_range_widgets,
[radioGroup_level_select,
button_continental_us_only],
p_map,
[spinner_minStepTime,radioGroup_play_controls ,date_range_slider],
p_graph,
[[[selectors_graph]],data_notes],
footer
])
lout_mobile.margin = (4, 20, 4, 20) # top, right, bottom, left
lout_mobile.sizing_mode = 'scale_width'
save(lout_mobile,filename=output_filename+'_mobile.html',template=template_ext_js(['jquary', 'pako']))
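# --- Editor's sketch (illustrative only, not part of the original file) ---
# Every Select widget above is wired the same way: a CustomJS callback points
# a glyph at a different data column and forces a redraw. The pattern in
# isolation, with toy data standing in for the COVID columns:
from bokeh.models import ColumnDataSource, CustomJS, Select
from bokeh.plotting import figure

src = ColumnDataSource(dict(date=[0, 1], positive=[5, 9], death=[0, 1]))
fig = figure()
r = fig.line(x='date', y='positive', source=src)
select = Select(title="Data:", value="positive", options=["positive", "death"])
select.js_on_change("value", CustomJS(args={'line': r}, code="""
    line.glyph.y.field = this.value   // point the glyph at the chosen column
    line.data_source.change.emit()    // force a redraw
"""))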
| 37.280936
| 825
| 0.661344
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Jul 2020
@author: TheDrDOS
"""
# # Clear the Spyder console and variables
# try:
# from IPython import get_ipython
# get_ipython().magic('clear')
# get_ipython().magic('reset -f')
# except:
# pass
from bokeh.io import show, save, output_file
from bokeh.util.browser import view
from bokeh.models import ColorBar, ColorMapper, LogColorMapper, LogTicker, LinearColorMapper, NumeralTickFormatter
from bokeh.models import BasicTicker as LinearTicker
from bokeh.models import BasicTickFormatter, FixedTicker, FuncTickFormatter
from bokeh.palettes import Magma11 as palette
from bokeh.palettes import Inferno256, Turbo256
from bokeh.plotting import figure
from bokeh.models import Div
from bokeh.models import ColumnDataSource, DateRangeSlider, Select, Spinner
from bokeh.models.tools import HoverTool, BoxZoomTool # for showing the hover tool
from bokeh_template_external_js import template as template_ext_js
from bokeh import events
from bokeh.models import Label, LabelSet, CustomJS, TapTool, Toggle, Button, Spacer, TextInput, RadioButtonGroup
from bokeh.tile_providers import CARTODBPOSITRON_RETINA, get_provider
import os
import sys
import numpy as np
import datetime
from datetime import date, timedelta
# for adding the second axis
from bokeh.models import LogAxis, LinearAxis, Range1d, DataRange1d
# for formatting the dates on the x axis
from bokeh.models import DatetimeTickFormatter
from bokeh.layouts import column, row, gridplot, layout # For show multiple figures
# For adding spans (vertical/horizontal lines without end points)
from bokeh.models import Span
import pandas as pd
import pickle
import progress_bar as pbar
import json
import gzip
"""
# Assign output file
________________________________________________________________________________
"""
def filename(fullname):
""" Return the name of a file without its path or extension"""
return os.path.splitext(os.path.split(fullname)[1])[0]
this_filename = filename(os.path.basename(os.path.splitext(__file__)[0]))
javascript_path = './' + this_filename + '_js/'
localhost_path = './plots/'
# name the output file/s after the script file
output_filename = "./../site/plots/" + this_filename
output_file(output_filename + ".html",
title="Interactive Custom Map of World COVID19 Data with Time History") # title=filename(output_filename))
"""
# Support functions
________________________________________________________________________________
"""
def dic_nan_decode(d, nan_code):
    """Replace every occurrence of the nan_code sentinel in the dict's values with NaN."""
    for k in d:
d[k] = array_element_replace(d[k], nan_code, float('NaN'))
return d
def array_element_replace(arr, old_value, new_value):
    """Recursively replace old_value with new_value inside a (possibly nested) list."""
for i in range(0, len(arr)):
if isinstance(arr[i], list):
arr[i] = array_element_replace(arr[i], old_value, new_value)
else:
if arr[i] == old_value:
arr[i] = new_value
return arr
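# Example (illustrative only; the sentinel value -9999 below is an assumption
# for demonstration, the real code is read from each data file's 'nan_code'):
#
#   demo = {'positive': [1.0, -9999, 3.0], 'death': [[0.0, -9999]]}
#   decoded = dic_nan_decode(demo, -9999)
#   # decoded == {'positive': [1.0, nan, 3.0], 'death': [[0.0, nan]]}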
"""
# Load key_to_filename
________________________________________________________________________________
"""
ext_datafiles = {
'path': "../site/plots/data/",
'rel_path': "./data/",
}
with gzip.GzipFile(ext_datafiles['path'] + 'filename_to_location.json.gz', 'r') as fin:
ext_datafiles['filename_to_location'] = json.loads(
fin.read().decode('utf-8'))
with gzip.GzipFile(ext_datafiles['path'] + 'location_to_filename.json.gz', 'r') as fin:
ext_datafiles['location_to_filename'] = json.loads(
fin.read().decode('utf-8'))
with gzip.GzipFile(ext_datafiles['path'] + 'location_to_mapfilename.json.gz', 'r') as fin:
ext_datafiles['location_to_mapfilename'] = json.loads(
fin.read().decode('utf-8'))
"""
# %% Load json file for initialization
________________________________________________________________________________
"""
init_location = 'New York, US' # get location
with gzip.GzipFile(ext_datafiles['path'] + ext_datafiles['location_to_filename'][init_location] + '.json.gz', 'r') as fin:
init_datafile = json.loads(fin.read().decode('utf-8'))
init_data = dic_nan_decode(init_datafile['data'], init_datafile['nan_code'])
init_data['date'] = pd.to_datetime(init_data['date'])
latest_data_date = pd.to_datetime('today') #max(init_data['date'])
oldest_date_date = pd.to_datetime('20191101',format='%Y%m%d') #min(init_data['date'])
init_location = 'Earth' # get location
with gzip.GzipFile(ext_datafiles['path'] + ext_datafiles['location_to_filename'][init_location] + '_map.json.gz', 'r') as fin:
init_mapfile = json.loads(fin.read().decode('utf-8'))
init_map = init_mapfile['data']
# Create source data structure and initialize state map
source_graph = ColumnDataSource(init_data)
source_map = ColumnDataSource(init_map)
# Erase the underlying data to reduce the html filesize (will be loaded upon user tap feedback)
source_graph.data = {k: source_graph.data[k][-2:-1] for k in source_graph.data}
"""
# %% Make State graph for COVID data
________________________________________________________________________________
"""
# Set Soft Axis limits
ax_limits = {
'x': (
pd.Timestamp.now() - pd.DateOffset(months=4),
pd.Timestamp.now()
),
}
# Create figure
p_graph = figure(x_axis_type='datetime', y_axis_type="linear",
                 title='(Tap a country on the map above to show the corresponding COVID data here)',
# plot_width=800, plot_height=600,
tools="ypan,xpan,ywheel_zoom,xwheel_zoom,ybox_zoom,xbox_zoom,box_zoom,reset", active_scroll=None, active_drag='ypan',
toolbar_location='left',
sizing_mode='scale_width',
aspect_ratio=2,
visible=True,
y_axis_location='right',
) # Assign tools and make wheel_zoom default on
# Make the bar and line plots
glyphs = []
graph_init = {
'x': 'date',
'source': source_graph,
'y': [
'positiveIncreaseMAVPerMil',
'positiveActiveIncreaseMAVPerMil',
'deathIncreaseMAV10PerMil'
],
'legend_label': [
'Positive Increase (week avg)',
'Positive Active Increase (week avg)',
'Deaths Increase x10 (week avg)'
],
'line_color': [
'red', # mapper,
'yellowgreen', # color_mapper,
'blue'],
'line_width': [
4, 4, 4
],
'line_dash': [
'solid', 'solid', 'solid'],
'name': ['' for i in range(0, 3)],
}
for n, y in enumerate(graph_init['y']):
graph_init['name'][n] = y
for n, y in enumerate(graph_init['y']):
glyphs.append(
p_graph.line(
source=graph_init['source'],
x=graph_init['x'],
y=graph_init['y'][n],
# legend_label=graph_init['legend_label'][n],
line_color=graph_init['line_color'][n],
color=graph_init['line_color'][n],
line_width=graph_init['line_width'][n],
line_dash=graph_init['line_dash'][n],
name=graph_init['name'][n],
)
)
p_graph.yaxis[0].formatter = NumeralTickFormatter(format="0,0.0")
# Horizontal right axis zero span
zero_span = Span(
location=0, # Span the 0 line of the right y axis
dimension='width', line_color='gray',
line_dash='solid', line_width=3, line_alpha=0.4,
)
p_graph.add_layout(zero_span)
# Weekly span marks
duration = np.timedelta64(4, 'm')
ds = np.arange(ax_limits['x'][0], ax_limits['x'][1], dtype='datetime64[D]')
# use of timezones was deprecated; previously timezone=None was needed
day = np.timedelta64(1, 'D')
for d in ds:
if ((np.timedelta64(ds.max() - d) / day) % 7) == 0:
ts = (np.datetime64(d) - np.datetime64('1970-01-01T00:00:00')) / \
np.timedelta64(1, 's')
wloc = ts * 1000 # get the week mark location in a format compatible with annotations
p_graph.add_layout(
Span(location=wloc,
dimension='height', line_color='gray',
line_dash='dashed', line_width=2, line_alpha=0.5,
))
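# For reference, a sketch of the conversion used above: Bokeh Span locations on
# a datetime axis are POSIX epoch *milliseconds*, so a numpy datetime64 is first
# turned into epoch seconds and then scaled by 1000, e.g.:
#
#   ts = (np.datetime64('2020-07-01') - np.datetime64('1970-01-01T00:00:00')) / np.timedelta64(1, 's')
#   wloc = ts * 1000  # -> 1593561600000.0, usable as Span(location=wloc)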
span_play_position = Span(location=latest_data_date,
dimension='height', line_color='gray',
line_dash='solid', line_width=2, line_alpha=0.5,
name='span_play_position')
p_graph.add_layout(span_play_position)
# # X axis formatting:
p_graph.x_range = Range1d(ax_limits['x'][0], ax_limits['x'][1])
p_graph.xaxis.major_label_orientation = -np.pi / 3 # slant the labels
dtformat = "%b-%d"
p_graph.xaxis.formatter = DatetimeTickFormatter(  # Always show the same date formatting regardless of zoom
days=dtformat,
months=dtformat,
hours=dtformat,
minutes=dtformat)
# Add legend
#p_graph.legend.location = "top_left"
# Add a hover tool
hover = HoverTool()
hover.tooltips = [
#('Type', "$name"),
('', '$name: @$name{0,0.} on @date{%a-%b-%d}'),
]
# hover.mode = 'vline'
hover.formatters = {
'@date': 'datetime', # use 'datetime' formatter for '@date' field
'$name': 'printf' # use 'printf' formatter for the name of the column
}
hover.renderers = glyphs
p_graph.add_tools(hover)
p_graph_glyphs = glyphs
"""
%% Setup color map
________________________________________________________________________________
"""
palette = Turbo256[128:-1:5]
color_mapper = LinearColorMapper(
palette=palette, low=0, high=20 * len(palette))
color_bar = ColorBar(
color_mapper=color_mapper,
label_standoff=2, border_line_color=None, location=(0, 0),
bar_line_alpha=0.5,
major_label_text_align='left',
)
"""
# Make the map
________________________________________________________________________________
"""
p_map = figure(
title=latest_data_date.strftime('%Y-%m-%d'),
# x_range=minmax(DS_worlds_map.data['xc']), y_range=minmax(DS_worlds_map.data['yc']),
# x_range=(-1.4e7,-7.4e6),
# y_range=(2.88e6,6.28e6),
# sizing_mode='stretch_width',
tools="tap,pan,wheel_zoom,reset,save", active_tap='tap',
toolbar_location='left',
x_axis_location=None, y_axis_location=None,
x_axis_type="mercator", y_axis_type="mercator",
sizing_mode='scale_width',
aspect_ratio=2,
match_aspect=True,
)
p_map.grid.grid_line_color = None
bztool_s = BoxZoomTool(match_aspect=True)
p_map.add_tools(bztool_s)
p_map.toolbar.active_drag = None # bztool_s
# Add the map tiles
tile_provider = get_provider(CARTODBPOSITRON_RETINA)
p_map.add_tile(tile_provider)
# Add the country polygons
p_map_mpoly = p_map.multi_polygons(
xs='x', ys='y', source=source_map,
fill_color={'field': 'positiveIncreaseMAVPerMil',
'transform': color_mapper},
fill_alpha=0.6,
line_color="white",
line_width=1,
)
p_map.add_layout(color_bar, 'right')
# Add the hover tool to show the region name, population, and COVID statistics
hoverm = HoverTool()
hoverm.tooltips = [
('Name', "@name"),
("Population", "@population{0,0.}"),
#("Current COVID Statistics","{}".format('-'*15)),
('Positive Cases', "@positive{0,0.}"),
('Recovered Cases', "@recovered{0,0.}"),
('Positive Active Cases', "@positiveActive{0,0.}"),
('Deaths', "@death{0,0.}"),
]
p_map.add_tools(hoverm)
# Add taptool to select from which state to show all the counties
with open(javascript_path + 'callback_map.js', 'r') as f:
callback_world_map = f.read()
callbacktap = CustomJS(args={'ext_datafiles': ext_datafiles,
'p_graph_glyphs': p_graph_glyphs,
'p_graph': p_graph,
},
code=callback_world_map)
taptool = p_map.select(type=TapTool)
taptool.callback = callbacktap
# Explicitly initialize x range
p_map.x_range = DataRange1d()
# %% Make data graphs reset on doubletap
p_graph.js_on_event('doubletap', CustomJS(args={'p': p_graph, }, code="""
p.reset.emit()
"""))
"""
# Map widgets
------------------------------------------------------------------------------------------------
"""
# Get the callback script used for many of the widgets
with open(javascript_path + 'callback_map_widgets.js', 'r') as f:
callback_widgets = f.read()
# Level radio buttons
radio_labels = ["Play \u25B6", "Step \u23ef", "Pause \u23f8"]
radioGroup_play_controls = RadioButtonGroup(
labels=radio_labels, active=2, name='radioGroup_play_controls')
radioGroup_play_controls.js_on_click(CustomJS(args={
'event': 'radioGroup_play_controls',
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
'source_map': source_map,
'p_map': p_map,
},
code=callback_widgets))
# %% Make date range slider
date_range_slider = DateRangeSlider(value=((latest_data_date-pd.DateOffset(months=1)), (latest_data_date)),
start=(oldest_date_date), end=(latest_data_date),
name='date_range_slider')
date_range_slider.js_on_change("value", CustomJS(args={
'event': 'date_range_slider',
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
'source_map': source_map,
'p_map': p_map,
},
code=callback_widgets
))
# Minimum time between animations on play, Spinner
spinner_minStepTime = Spinner(title="",
low=0, high=5, step=0.25, value=0.25, width=100,format=FuncTickFormatter(code="""
return tick.toString()+" sec"
"""),
name='spinner_minStepTime')
# Respond to taps on the graph
p_graph.js_on_event('tap', CustomJS(args={
'event': 'graph_tap',
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
'source_map': source_map,
'p_map': p_map,
'source_graph': source_graph,
'p_graph': p_graph,
},
code=callback_widgets
))
# Level radio buttons
radio_labels = ["World Level", "States Level", "Counties Level"]
radioGroup_level_select = RadioButtonGroup(
labels=radio_labels, active=0, name='radioGroup_level_select')
radioGroup_level_select.js_on_click(CustomJS(args={
'event': 'level_select',
'mpoly': p_map_mpoly,
'ext_datafiles': ext_datafiles,
'source_map': source_map,
'p_map': p_map,
},
code=callback_widgets))
# Choose to only see continental US
continental_states = [
'Alabama, US', 'Arizona, US', 'Arkansas, US', 'California, US', 'Colorado, US', 'Connecticut, US', 'Delaware, US', 'District of Columbia, US', 'Florida, US', 'Georgia, US', 'Idaho, US', 'Illinois, US', 'Indiana, US', 'Iowa, US', 'Kansas, US', 'Kentucky, US', 'Louisiana, US', 'Maine, US', 'Maryland, US', 'Massachusetts, US', 'Michigan, US', 'Minnesota, US', 'Mississippi, US', 'Missouri, US', 'Montana, US', 'Nebraska, US', 'Nevada, US', 'New Hampshire, US', 'New Jersey, US', 'New Mexico, US', 'New York, US', 'North Carolina, US', 'North Dakota, US', 'Ohio, US', 'Oklahoma, US', 'Oregon, US', 'Pennsylvania, US', 'Rhode Island, US', 'South Carolina, US', 'South Dakota, US', 'Tennessee, US', 'Texas, US', 'Utah, US', 'Vermont, US', 'Virginia, US', 'Washington, US', 'West Virginia, US', 'Wisconsin, US', 'Wyoming, US']
button_continental_us_only = Toggle(label="Continental US Only",
visible=True,
button_type='default',
name='button_continental_us_only')
button_continental_us_only.js_on_change('active', CustomJS(args={
'event':'button_continental_us_only',
'mpoly': p_map_mpoly,
'ext_datafiles': ext_datafiles,
'source_map': source_map,
'p_map': p_map,
'continental_states': continental_states,
}, code=callback_widgets))
# Selectors for the map
selectors_map = []
opts = [k for k in init_data.keys() if isinstance(init_data[k][0], int)
or isinstance(init_data[k][0], float)]
opts = sorted(opts)
select = Select(title="Data For Map Coloring:",
value=p_map_mpoly.glyph.fill_color['field'],
options=opts)
select.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'mpoly': p_map_mpoly,
}, code="""
//console.log('select: value=' + this.value, this.toString())
mpoly.glyph.fill_color.field = this.value
mpoly.data_source.change.emit()
"""))
selectors_map.append(select)
# Range setting for map
map_range_widgets = []
text_input = TextInput(value=str(color_mapper.high), title="High Color")
text_input.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'color_mapper': color_mapper,
}, code="""
color_mapper.high = Number(this.value)
"""))
map_range_widgets.append(text_input)
text_input = TextInput(value=str(color_mapper.low), title="Low Color")
text_input.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'color_mapper': color_mapper,
}, code="""
color_mapper.low = Number(this.value)
"""))
map_range_widgets.append(text_input)
"""
# Line graph widgets
------------------------------------------------------------------------------------------------
"""
# Selectors for the line graphs
selectors_graph = []
opts = [k for k in init_data.keys() if isinstance(init_data[k][0], int)
or isinstance(init_data[k][0], float)]
opts = sorted(opts)
for n, g in enumerate(p_graph_glyphs):
select = Select(title=" ", # title="Data For Line "+str(n+1)+":",
value=g.glyph.y,
options=opts,
background=g.glyph.line_color,)
select.js_on_change("value", CustomJS(args={
'ext_datafiles': ext_datafiles,
'line': g,
}, code="""
//console.log('select: value=' + this.value, this.toString())
line.glyph.y.field = this.value
line.data_source.change.emit()
"""))
selectors_graph.append(select)
# %% Make heading for the whole thing
"""
# %% Make heading for the whole thing
"""
heading = Div(text="""
<h1> World Map of COVID Data with Population-Normalized Time History </h1>
<p>Shows all countries colored according to last week's average number of new COVID-19 cases per day, normalized by country population (persons per million).</p>
<ul>
<li>Higher color values correspond to faster spread of the virus.</li>
<li>On the left of each graph there are tools to zoom/pan/reset/save.</li>
<li>On Mobile: Use two fingers to scroll the page.</li>
<li>Data last updated on: {data_update} </li>
<li>Graphs generated on: {graph_update} </li>
<li>Recovery data for countries is unavailable. Recovery is estimated at approximately 15 days for cases that do not result in death.</li>
</ul>
<h3> Tap on any country to show the COVID19 data time history graph below. </h3>
""".format(
data_update=pd.to_datetime(latest_data_date).strftime('%Y-%m-%d'),
graph_update=pd.Timestamp.now().strftime('%Y-%m-%d'),
))
footer = Div(text="""
<h3> Sources </h3>
<ul>
<li>GitHub repository for this project: <a href="https://github.com/thedrdos/covid-map"> https://github.com/thedrdos/covid-map </a>. </li>
<li>Produced using Python with Bokeh and other modules.</li>
<li>Country geographical Data from <a href="http://www.naturalearthdata.com/">Natural Earth</a>.</li>
<li>COVID-19 Data on Countries from <a href="https://coronavirus.jhu.edu">The Johns Hopkins University Coronavirus Resource Center</a>
or on <a href="https://github.com/CSSEGISandData/COVID-19">GitHub</a>.</li>
</ul>
""")
data_notes = Div(text="""
<h4> Data Definitions: </h4>
<ul>
<li>Compatible with the <a href="https://covidtracking.com/about-data/data-definitions"> COVID Tracking Project data definitions</a>. </li>
<li>Countries may have `positive`, `death`, `recovered`, and their derivatives available.</li>
<li>USA states have most/all the data available.</li>
<li>USA counties may have `positive`, `death`, `recovered`, and their derivatives available.</li>
<li>`recovered` is estimated where not available as `positive`-`death` after 15 days.</li>
<li>USA counties do not report `recovered` data; those values are estimates.</li>
<li>`positiveActive` denotes `positive`-`recovered`-`death`.</li>
<li>Suffix of `MAV` denotes a one week moving average.</li>
<li>Suffix of `10` denotes multiplied by 10.</li>
<li>Suffix of `PerMil` denotes population normalization (persons per million).</li>
</ul>
""")
# %% Combine all the graphs
"""
# %% Combine all the graphs
________________________________________________________________________________
"""
# Layout the figures and show them
p_map.sizing_mode = 'scale_width'
p_graph.sizing_mode = 'scale_width'
if len(sys.argv)==1:
print('Making non-mobile output version')
lout = layout([heading,
[selectors_map + map_range_widgets + [radioGroup_level_select] +
[button_continental_us_only]
#+[Spacer(background='black',height=2)]
+[Div(text="<center><i> Time History Animation </i></center>")]
#+[Spacer(background='black',height=2)]
+[[spinner_minStepTime,radioGroup_play_controls] ,date_range_slider], p_map],
[selectors_graph+[data_notes], p_graph],
footer
])
lout.margin = (4, 20, 4, 20) # top, right, bottom, left
lout.sizing_mode = 'scale_width'
save(lout, template=template_ext_js(['jquary', 'pako']))
# view(output_filename+'.html')
# view('http://localhost:7800/'+localhost_path+this_filename+'.html')
elif sys.argv[1]=='mobile':
print('Making mobile output version')
lout_mobile = layout([
heading,
[selectors_map]+map_range_widgets,
[radioGroup_level_select,
button_continental_us_only],
p_map,
[spinner_minStepTime,radioGroup_play_controls ,date_range_slider],
p_graph,
[[[selectors_graph]],data_notes],
footer
])
lout_mobile.margin = (4, 20, 4, 20) # top, right, bottom, left
lout_mobile.sizing_mode = 'scale_width'
save(lout_mobile,filename=output_filename+'_mobile.html',template=template_ext_js(['jquary', 'pako']))
| 0
| 0
| 0
| 0
| 0
| 382
| 0
| 223
| 202
|
fceabbb74d3274f16916b7935a7ec5684e3f5c21
| 193
|
py
|
Python
|
blog/urls.py
|
sz-lhl/myblogtest
|
21467b36287a7e07e089ec0e8495eb598783e31c
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
sz-lhl/myblogtest
|
21467b36287a7e07e089ec0e8495eb598783e31c
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
sz-lhl/myblogtest
|
21467b36287a7e07e089ec0e8495eb598783e31c
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from . import views
app_name = 'blog'
urlpatterns=[
url(r'^$',views.index,name='index'),
url(r'^post/(?P<pk>[0-9]+)/$',views.detail,name='detail'),
]
| 19.3
| 62
| 0.632124
|
from django.conf.urls import url
from . import views
app_name = 'blog'
urlpatterns=[
url(r'^$',views.index,name='index'),
url(r'^post/(?P<pk>[0-9]+)/$',views.detail,name='detail'),
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
275a93d01f327c22e46db6bb02fd9ad490838980
| 1,033
|
py
|
Python
|
reversi/strategies/coordinator/selector.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 10
|
2020-07-24T22:04:51.000Z
|
2022-03-25T06:09:48.000Z
|
reversi/strategies/coordinator/selector.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 12
|
2021-04-30T09:53:18.000Z
|
2022-02-25T04:16:02.000Z
|
reversi/strategies/coordinator/selector.py
|
y-tetsu/othello
|
73eabfe22d6b44bbfa0b436e6287e3e7356620f4
|
[
"MIT"
] | 1
|
2021-11-25T13:12:32.000Z
|
2021-11-25T13:12:32.000Z
|
"""Selector
"""
| 25.825
| 86
| 0.592449
|
"""Selector
"""
from reversi.strategies.common import AbstractSelector
class Selector(AbstractSelector):
"""Selector
"""
def select_moves(self, color, board, moves, scores, depth):
"""select_moves
"""
return moves
class Selector_W(Selector):
"""Selector_W
ワースト値に基づいて手を絞る
"""
def __init__(self, depth=3, limit=3):
self.depth = depth
self.limit = limit
def select_moves(self, color, board, moves, scores, depth):
"""select_moves
"""
moves = super().select_moves(color, board, moves, scores, depth)
        if depth >= self.depth:  # only prune once the search is deep enough
            worst_score = min([score for score in scores.values()])
            worst_moves = [key for key in scores.keys() if scores[key] == worst_score]
            # prune only while the number of remaining candidates stays at or above the limit
            if len(moves) - len(worst_moves) >= self.limit:
                for worst_move in worst_moves:
                    moves.remove(worst_move)  # drop the lowest-scoring moves
return moves
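# Usage sketch (hypothetical values for illustration; `color`, `board`, and
# `depth` would come from the surrounding search):
#
#   selector = Selector_W(depth=3, limit=3)
#   moves = ['c4', 'e6', 'f5', 'd3']
#   scores = {'c4': 10, 'e6': -5, 'f5': 2, 'd3': 7}
#   selector.select_moves('black', board, moves, scores, depth=4)
#   # -> ['c4', 'f5', 'd3']: at depth >= 3 the worst-scoring move 'e6' is pruned,
#   #    since 3 candidates (>= limit) would still remain.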
| 165
| 0
| 0
| 858
| 0
| 0
| 0
| 33
| 69
|
de65064f55beb0b25a599d489a5b2d753e5481b3
| 3,105
|
py
|
Python
|
turnovertools/video/output.py
|
morganwl/turnovertools
|
ea911853033ed5087b40852b5adc3b8f5d0a903d
|
[
"MIT"
] | null | null | null |
turnovertools/video/output.py
|
morganwl/turnovertools
|
ea911853033ed5087b40852b5adc3b8f5d0a903d
|
[
"MIT"
] | 3
|
2021-03-22T00:44:24.000Z
|
2021-06-26T19:32:31.000Z
|
turnovertools/video/output.py
|
morganwl/turnovertools
|
ea911853033ed5087b40852b5adc3b8f5d0a903d
|
[
"MIT"
] | null | null | null |
"""Video output tools for turnovertools."""
| 34.5
| 74
| 0.589372
|
"""Video output tools for turnovertools."""
import ffmpeg
from timecode import Timecode
class VideoFile(object):
"""A videofile which can either be imported or exported from."""
def __init__(self, filepath, **kwargs):
self.filepath = filepath
self._probe()
def _probe(self):
probe = ffmpeg.probe(self.filepath)
vid_stream = next(stream for stream in probe['streams'] if
stream['codec_type'] == 'video')
self.framerate = vid_stream['r_frame_rate']
self.duration = Timecode(self.framerate,
start_seconds=float(probe['format']
['duration']))
        if 'timecode' in probe['format'].get('tags', {}):
            self.src_start_tc = Timecode(
                self.framerate, probe['format']['tags']['timecode'])
        else:
            self.src_start_tc = Timecode(self.framerate, '00:00:00:00')
        self.src_end_tc = self.src_start_tc + self.duration
width = vid_stream['width']
height = vid_stream['height']
self.scale = (int(width), int(height))
aw, ah = vid_stream['display_aspect_ratio'].split(':')
self.aspect_ratio = float(aw) / float(ah)
self.bitrate = int(probe['format']['bit_rate'])
def stream_frames(self, frames):
for frame in frames:
if not isinstance(frame, Timecode):
frame = Timecode(self.framerate, frame)
job = JPEG(
self.get_ffmpeg_input(self.ss_at(frame),
self.frames_to_seconds(1))).output()
yield capture_out(job)
def get_ffmpeg_input(self, ss=None, t=None, **kwargs):
kwargs = dict()
if ss is not None:
kwargs['ss'] = ss
if t is not None:
kwargs['t'] = t
return ffmpeg.input(self.filepath, **kwargs)
def ss_at(self, tc):
return range_to_real_offset(self.src_start_tc, tc,
self.framerate)
def frames_to_seconds(self, frames):
return frames / framerate_to_float(self.framerate)
class OutputPreset(object):
kwargs = {}
    def __init__(self, input, filepath='pipe:', **kwargs):
        self.input = input
        self.filepath = filepath
        # Merge per-call overrides into the preset's default ffmpeg kwargs.
        self.kwargs = {**type(self).kwargs, **kwargs}
def output(self):
return self.input.output(self.filepath, **self.kwargs)
class JPEG(OutputPreset):
kwargs = {'format': 'image2pipe', 'vcodec': 'mjpeg', 'q':1, 'vsync':0}
def range_to_real_offset(start_tc, end_tc, framerate):
frames = (end_tc - start_tc).frames
framerate = framerate_to_float(framerate)
return frames / framerate
def framerate_to_float(framerate):
try:
framerate = float(framerate)
except ValueError:
dividend, divisor = framerate.split('/')
framerate = int(dividend) / int(divisor)
else:
if framerate == 23.98:
framerate = 23.976
return framerate
def capture_out(stream):
vid, _ = ffmpeg.run(stream, capture_stdout=True, capture_stderr=False)
return vid
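# Usage sketch (the media path below is a placeholder assumption):
#
#   clip = VideoFile('/path/to/clip.mov')
#   print(clip.framerate, clip.src_start_tc, clip.duration)
#
#   # framerate_to_float accepts both rational strings and plain floats:
#   #   framerate_to_float('24000/1001')  -> 23.976...
#   #   framerate_to_float('23.98')       -> 23.976 (normalized)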
| 0
| 0
| 0
| 2,362
| 0
| 515
| 0
| 0
| 183
|
de9ec2fa7a690686c868ff79459cba2ad12aba35
| 325
|
py
|
Python
|
app/stops/migrations/0005_auto_20190622_2032.py
|
IvanBodnar/subway-api
|
36d17533995394fc5a5e6e1707ef312778296869
|
[
"MIT"
] | null | null | null |
app/stops/migrations/0005_auto_20190622_2032.py
|
IvanBodnar/subway-api
|
36d17533995394fc5a5e6e1707ef312778296869
|
[
"MIT"
] | 9
|
2019-12-04T23:23:07.000Z
|
2022-02-10T08:12:30.000Z
|
app/stops/migrations/0005_auto_20190622_2032.py
|
IvanBodnar/subway-api
|
36d17533995394fc5a5e6e1707ef312778296869
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-22 20:32
| 18.055556
| 47
| 0.587692
|
# Generated by Django 2.2.2 on 2019-06-22 20:32
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stops', '0004_auto_20190622_1914'),
]
operations = [
migrations.RenameModel(
old_name='Stops',
new_name='Stop',
),
]
| 0
| 0
| 0
| 219
| 0
| 0
| 0
| 11
| 46
|
6d211e6dff89896320320bc5f8bea4a1d0f85732
| 544
|
py
|
Python
|
webapp/news/views.py
|
Varlok009/tabletop-games-bboard
|
41037d4c1bcbbf563051979b630e104006254c4f
|
[
"MIT"
] | null | null | null |
webapp/news/views.py
|
Varlok009/tabletop-games-bboard
|
41037d4c1bcbbf563051979b630e104006254c4f
|
[
"MIT"
] | 1
|
2022-03-20T07:47:04.000Z
|
2022-03-20T07:47:04.000Z
|
webapp/news/views.py
|
Varlok009/tabletop-games-bboard
|
41037d4c1bcbbf563051979b630e104006254c4f
|
[
"MIT"
] | 1
|
2022-03-01T18:09:26.000Z
|
2022-03-01T18:09:26.000Z
|
from flask import Blueprint
blueprint = Blueprint('news', __name__, url_prefix='/news')
| 32
| 78
| 0.674632
|
from flask import Blueprint, render_template, request
from webapp.methods import get_news
blueprint = Blueprint('news', __name__, url_prefix='/news')
@blueprint.route('/news', methods=['GET'])
@blueprint.route('/news/<int:id>', methods=['GET'])
def news(id=None):
    # Accept the id either from the URL rule or from the query string.
    news_id = id if id is not None else int(request.args['id'])
    news_data = list(filter(lambda n: n['id'] == news_id, get_news()))[0]
    if news_data['title']:
        title = news_data['title']
    else:
        title = 'News'
    return render_template('news.html', news_data=news_data, page_title=title)
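# Usage sketch (assumes a typical application factory; the import path is
# hypothetical):
#
#   from webapp.news.views import blueprint as news_blueprint
#   app.register_blueprint(news_blueprint)
#   # With url_prefix='/news', the view answers at /news/news and /news/news/<id>.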
| 14
| 362
| 0
| 0
| 0
| 0
| 0
| 40
| 45
|
1c2aebb86482ed8ba85003c7cb15999950ea331e
| 5,125
|
py
|
Python
|
ape_trezor/client.py
|
unparalleled-js/ape-trezor
|
61cf94e3d49089a3c9f753ce18bc9b7323e82e6a
|
[
"Apache-2.0"
] | null | null | null |
ape_trezor/client.py
|
unparalleled-js/ape-trezor
|
61cf94e3d49089a3c9f753ce18bc9b7323e82e6a
|
[
"Apache-2.0"
] | null | null | null |
ape_trezor/client.py
|
unparalleled-js/ape-trezor
|
61cf94e3d49089a3c9f753ce18bc9b7323e82e6a
|
[
"Apache-2.0"
] | null | null | null |
from typing import Tuple
from ape_trezor.exceptions import TrezorClientError
def extract_signature_vrs_bytes(signature_bytes: bytes) -> Tuple[int, bytes, bytes]:
"""
Breaks `signature_bytes` into 3 chunks vrs, where `v` is 1 byte, `r` is 32
bytes, and `s` is 32 bytes.
"""
if signature_bytes is None:
raise TrezorClientError("No data in signature bytes.")
return signature_bytes[-1], signature_bytes[:32], signature_bytes[32:64]
__all__ = [
"TrezorClient",
"TrezorAccountClient",
]
| 34.166667
| 96
| 0.636098
|
from typing import Any, Dict, Tuple
from eth_typing.evm import ChecksumAddress
from trezorlib import ethereum # type: ignore
from trezorlib.client import get_default_client # type: ignore
from trezorlib.exceptions import PinException, TrezorFailure # type: ignore
from trezorlib.messages import TransactionType # type: ignore
from trezorlib.tools import parse_path as parse_hdpath # type: ignore
from trezorlib.transport import TransportException # type: ignore
from ape_trezor.exceptions import (
TrezorAccountException,
TrezorClientConnectionError,
TrezorClientError,
)
from ape_trezor.hdpath import HDBasePath, HDPath
class TrezorClient:
"""
This class is a client for the Trezor device.
"""
def __init__(self, hd_root_path: HDBasePath):
try:
self.client = get_default_client()
except TransportException:
raise TrezorClientConnectionError()
# Handles an unhandled usb exception in Trezor transport
except Exception as exc:
raise TrezorClientError(f"Error: {exc}")
self._hd_root_path = hd_root_path
def get_account_path(self, account_id: int) -> str:
account_path = str(self._hd_root_path.get_account_path(account_id))
try:
return ethereum.get_address(self.client, parse_hdpath(account_path))
except (PinException, TrezorFailure) as exc:
message = "You have entered an invalid PIN."
raise TrezorAccountException(message) from exc
def extract_signature_vrs_bytes(signature_bytes: bytes) -> Tuple[int, bytes, bytes]:
"""
Breaks `signature_bytes` into 3 chunks vrs, where `v` is 1 byte, `r` is 32
bytes, and `s` is 32 bytes.
"""
if signature_bytes is None:
raise TrezorClientError("No data in signature bytes.")
return signature_bytes[-1], signature_bytes[:32], signature_bytes[32:64]
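# Example (illustrative): a 65-byte Ethereum signature laid out as
# r (32 bytes) | s (32 bytes) | v (1 byte) decomposes as follows:
#
#   sig = bytes(range(64)) + b'\x1b'  # dummy r||s||v for demonstration
#   v, r, s = extract_signature_vrs_bytes(sig)
#   # v == 0x1b, r == sig[:32], s == sig[32:64]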
class TrezorAccountClient:
"""
This class represents an account on the Trezor device when you know the full
account HD path.
"""
def __init__(
self,
address: ChecksumAddress,
account_hd_path: HDPath,
):
try:
self.client = get_default_client()
except TransportException:
raise TrezorClientConnectionError()
self._address = address
self._account_hd_path = account_hd_path
def __str__(self):
return self._address
@property
def address(self) -> str:
return self._address
def sign_personal_message(self, message: bytes) -> Tuple[int, bytes, bytes]:
"""
Sign an Ethereum message only following the EIP 191 specification and
using your Trezor device. You will need to follow the prompts on the device
to validate the message data.
"""
ethereum_message_signature = ethereum.sign_message(
self.client, parse_hdpath(self._account_hd_path.path), message
)
return extract_signature_vrs_bytes(signature_bytes=ethereum_message_signature.signature)
# TODO: Uncomment when Trezor has released the EIP 712 update
# def sign_typed_data(self, domain_hash: bytes, message_hash: bytes)
# -> Tuple[int, bytes, bytes]:
# """
# Sign an Ethereum message following the EIP 712 specification.
# """
# ethereum_typed_data_signature = ethereum.sign_typed_data_hash(
# self.client, parse_hdpath(self._account_hd_path.path), domain_hash, message_hash
# )
# return extract_signature_vrs_bytes(
# signature_bytes=ethereum_typed_data_signature.signature)
def sign_transaction(self, txn: Dict[Any, Any]) -> Tuple[int, bytes, bytes]:
tx_type = txn["type"]
        if tx_type == TransactionType.STATIC:
tuple_reply = ethereum.sign_tx(
self.client,
parse_hdpath(self._account_hd_path.path),
nonce=txn["nonce"],
gas_price=txn["gas_price"],
gas_limit=txn["gas_limit"],
to=txn["receiver"],
value=txn["value"],
data=txn.get("data"),
chain_id=txn.get("chain_id"),
tx_type=tx_type,
)
        elif tx_type == TransactionType.DYNAMIC:
tuple_reply = ethereum.sign_tx_eip1559(
self.client,
parse_hdpath(self._account_hd_path.path),
nonce=txn["nonce"],
gas_limit=txn["gas_limit"],
to=txn["receiver"],
value=txn["value"],
data=txn.get("data"),
chain_id=txn["chain_id"],
max_gas_fee=txn["max_fee"],
max_priority_fee=txn["max_priority_fee"],
access_list=txn.get("access_list"),
)
else:
raise TrezorAccountException(f"Message type {tx_type} is not supported.")
return (
tuple_reply[0],
tuple_reply[1],
tuple_reply[2],
)
__all__ = [
"TrezorClient",
"TrezorAccountClient",
]
| 0
| 47
| 0
| 3,939
| 0
| 0
| 0
| 288
| 319
|