Dataset columns (the rows below list their fields in this order):

| column | dtype | stats |
|---|---|---|
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 6 – 1.04M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 – 247 |
| max_stars_repo_name | stringlengths | 4 – 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 78 |
| max_stars_repo_licenses | listlengths | 1 – 10 |
| max_stars_count | int64 | 1 – 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 – 247 |
| max_issues_repo_name | stringlengths | 4 – 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 78 |
| max_issues_repo_licenses | listlengths | 1 – 10 |
| max_issues_count | int64 | 1 – 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 – 247 |
| max_forks_repo_name | stringlengths | 4 – 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 78 |
| max_forks_repo_licenses | listlengths | 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 – 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 – 24 ⌀ |
| content | stringlengths | 1 – 1.04M |
| avg_line_length | float64 | 1.53 – 618k |
| max_line_length | int64 | 1 – 1.02M |
| alphanum_fraction | float64 | 0 – 1 |
| original_content | stringlengths | 6 – 1.04M |
| filtered:remove_non_ascii | int64 | 0 – 538k |
| filtered:remove_decorators | int64 | 0 – 917k |
| filtered:remove_async | int64 | 0 – 722k |
| filtered:remove_classes | int64 | -45 – 1M |
| filtered:remove_generators | int64 | 0 – 814k |
| filtered:remove_function_no_docstring | int64 | -102 – 850k |
| filtered:remove_class_no_docstring | int64 | -3 – 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 – 52.4k |
| filtered:remove_delete_markers | int64 | 0 – 59.6k |
0f1243eea047ef5906118e7ffee57622af60818f
| 719
|
py
|
Python
|
examples/read_eeprom.py
|
Galch/pysoem
|
7dbc6c2aec9f26c7cff911c3aeef0fc73f6c71cc
|
[
"MIT"
] | 48
|
2018-05-17T09:25:59.000Z
|
2022-03-18T08:54:36.000Z
|
examples/read_eeprom.py
|
Galch/pysoem
|
7dbc6c2aec9f26c7cff911c3aeef0fc73f6c71cc
|
[
"MIT"
] | 52
|
2019-07-26T06:54:55.000Z
|
2022-03-31T09:42:20.000Z
|
examples/read_eeprom.py
|
Galch/pysoem
|
7dbc6c2aec9f26c7cff911c3aeef0fc73f6c71cc
|
[
"MIT"
] | 23
|
2019-03-07T02:37:47.000Z
|
2022-03-18T08:53:45.000Z
|
"""Prints name and description of available network adapters."""
import sys
if __name__ == '__main__':
print('script started')
if len(sys.argv) > 1:
read_eeprom_of_first_slave(sys.argv[1])
else:
print('give ifname as script argument')
| 21.147059
| 83
| 0.585535
|
"""Prints name and description of available network adapters."""
import sys
import pysoem
def read_eeprom_of_first_slave(ifname):
master = pysoem.Master()
master.open(ifname)
if master.config_init() > 0:
first_slave = master.slaves[0]
for i in range(0, 0x80, 2):
print('{:04x}:'.format(i), end='')
print('|'.join('{:02x}'.format(x) for x in first_slave.eeprom_read(i)))
else:
print('no slave available')
master.close()
if __name__ == '__main__':
print('script started')
if len(sys.argv) > 1:
read_eeprom_of_first_slave(sys.argv[1])
else:
print('give ifname as script argument')
| 0
| 0
| 0
| 0
| 0
| 413
| 0
| -8
| 45
|
8129a772f7f24252ee1ca4c2af3fd32a8236f8a3
| 380
|
py
|
Python
|
scan_lib/neros.py
|
Lannix/ScanNet
|
3a2783017ff7b9d53fd3610b28f46370c01b9145
|
[
"Apache-2.0"
] | null | null | null |
scan_lib/neros.py
|
Lannix/ScanNet
|
3a2783017ff7b9d53fd3610b28f46370c01b9145
|
[
"Apache-2.0"
] | null | null | null |
scan_lib/neros.py
|
Lannix/ScanNet
|
3a2783017ff7b9d53fd3610b28f46370c01b9145
|
[
"Apache-2.0"
] | null | null | null |
CONSTANT_EPOH = 100
CONSTANT_EPOH_1 = 10
| 22.352941
| 80
| 0.715789
|
from scan_lib import nero as n
CONSTANT_EPOH = 100
CONSTANT_EPOH_1 = 10
def go_to_HELL():
n.save_model(n.trening_model(0, CONSTANT_EPOH)) # предобучение и сохранение
for i in range(1,2000):
n.save_model(n.trening_model(1, CONSTANT_EPOH_1))
n.save_model(n.trening_model(2, CONSTANT_EPOH_1))
n.save_model(n.trening_model(3, CONSTANT_EPOH_1))
| 46
| 0
| 0
| 0
| 0
| 256
| 0
| 9
| 45
|
9a8c6517db70701e563affe64146a10b0e74c08e
| 2,034
|
py
|
Python
|
tklife/__main__.py
|
Aesonus/TkLife
|
8e8f585be7f522134b9a5746b22185c2394d3d8d
|
[
"MIT"
] | null | null | null |
tklife/__main__.py
|
Aesonus/TkLife
|
8e8f585be7f522134b9a5746b22185c2394d3d8d
|
[
"MIT"
] | 4
|
2021-05-02T17:33:19.000Z
|
2021-05-16T18:53:51.000Z
|
tklife/__main__.py
|
Aesonus/TkLife
|
8e8f585be7f522134b9a5746b22185c2394d3d8d
|
[
"MIT"
] | null | null | null |
"""Shows a sample tklife application"""
from .constants import PADX, PADY
PADDING = {
PADX: 6,
PADY: 6
}
App().mainloop()
| 33.9
| 150
| 0.657325
|
"""Shows a sample tklife application"""
from tkinter import StringVar, Widget
from tkinter.constants import END
from tkinter.ttk import Button, Entry, Label
from .widgets import NewTable
from .constants import COLUMNSPAN, PADX, PADY
from .mixins import generate_event_for
from . import Main
from .arrange import Autogrid
PADDING = {
PADX: 6,
PADY: 6
}
@generate_event_for
def show_dialog_for(widget):
"""Example of event generation decorator for a function"""
return '<<ShowDialog>>'
class App(Main):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.title('Sample Application')
def _create_vars(self):
self.entry_var = StringVar(value="Test Value")
def _create_widgets(self):
Label(self, text='Example Entry:')
Entry(self, textvariable=self.entry_var)
button = Button(self, text='Show Dialog')
button.configure(command=self.show_dialog_for(button))
button = Button(self, text='Show For This Button',)
button.configure(command=show_dialog_for(button))
def get_widget_text(widget: Entry):
return widget.get()
table = NewTable(self, headers=(('Test', get_widget_text), ('The', get_widget_text), ('Table', get_widget_text), ))
widgets_ = []
for row in range(4):
for col in range(3):
widgets_.append(Entry(table.table))
widgets_[-1].insert(0, 'Row {}; Col {}'.format(row, col))
table.cell_widgets = widgets_
def _layout_widgets(self):
for widget, grid_coords in Autogrid((2, 1), 1).zip_dicts(self.winfo_children(), grid_kwargs_list=({}, {},), fill_grid_kwargs={COLUMNSPAN: 2}):
widget.grid(**grid_coords, **PADDING)
def _create_events(self):
self.bind('<<ShowDialog>>', lambda e: print(e.widget))
@generate_event_for
def show_dialog_for(self, widget):
"""Example of event generation decorator for an instance method"""
return '<<ShowDialog>>'
App().mainloop()
| 0
| 262
| 0
| 1,346
| 0
| 0
| 0
| 93
| 201
|
8cc875fd00221a9cb482a6ff240319c6dd13b16b
| 9,868
|
py
|
Python
|
aiida/tools/dbexporters/tcod_plugins/__init__.py
|
astamminger/aiida_core
|
b01ad8236f21804f273c9d2a0365ecee62255cbb
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/tools/dbexporters/tcod_plugins/__init__.py
|
astamminger/aiida_core
|
b01ad8236f21804f273c9d2a0365ecee62255cbb
|
[
"BSD-2-Clause"
] | null | null | null |
aiida/tools/dbexporters/tcod_plugins/__init__.py
|
astamminger/aiida_core
|
b01ad8236f21804f273c9d2a0365ecee62255cbb
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
def TcodExporterFactory(module):
"""
Return a suitable BaseTcodtranslator subclass.
"""
from aiida.common.pluginloader import BaseFactory
return BaseFactory(module, BaseTcodtranslator, 'aiida.tools.dbexporters.tcod_plugins')
| 34.027586
| 90
| 0.631334
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
def TcodExporterFactory(module):
"""
Return a suitable BaseTcodtranslator subclass.
"""
from aiida.common.pluginloader import BaseFactory
return BaseFactory(module, BaseTcodtranslator, 'aiida.tools.dbexporters.tcod_plugins')
class BaseTcodtranslator(object):
"""
Base translator from calculation-specific input and output parameters
to TCOD CIF dictionary tags.
"""
_plugin_type_string = None
@classmethod
def get_software_package(cls,calc,**kwargs):
"""
Returns the package or program name that was used to produce
the structure. Only package or program name should be used,
e.g. 'VASP', 'psi3', 'Abinit', etc.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_software_package_version(cls,calc,**kwargs):
"""
Returns software package version used to compute and produce
the computed structure file. Only version designator should be
used, e.g. '3.4.0', '2.1rc3'.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_software_package_compilation_timestamp(cls,calc,**kwargs):
"""
Returns the timestamp of package/program compilation in ISO 8601
format.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_software_executable_path(cls,calc,**kwargs):
"""
Returns the file-system path to the executable that was run for
this computation.
"""
try:
code = calc.inp.code
if not code.is_local():
return code.get_attr('remote_exec_path')
except Exception:
return None
return None
@classmethod
def get_total_energy(cls,calc,**kwargs):
"""
Returns the total energy in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_one_electron_energy(cls,calc,**kwargs):
"""
Returns one electron energy in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_exchange_correlation_energy(cls,calc,**kwargs):
"""
Returns exchange correlation (XC) energy in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_ewald_energy(cls,calc,**kwargs):
"""
Returns Ewald energy in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_hartree_energy(cls,calc,**kwargs):
"""
Returns Hartree energy in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_fermi_energy(cls,calc,**kwargs):
"""
Returns Fermi energy in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_number_of_electrons(cls,calc,**kwargs):
"""
Returns the number of electrons.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_computation_wallclock_time(cls,calc,**kwargs):
"""
Returns the computation wallclock time in seconds.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_atom_type_symbol(cls,calc,**kwargs):
"""
Returns a list of atom types. Each atom site MUST occur only
once in this list. List MUST be sorted.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_atom_type_valence_configuration(cls,calc,**kwargs):
"""
Returns valence configuration of each atom type. The list order
MUST be the same as of get_atom_type_symbol().
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_atom_type_basisset(cls,calc,**kwargs):
"""
Returns a list of basisset names for each atom type. The list
order MUST be the same as of get_atom_type_symbol().
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_atom_site_residual_force_Cartesian_x(cls,calc,**kwargs):
"""
Returns a list of x components for Cartesian coordinates of
residual force for atom. The list order MUST be the same as in
the resulting structure.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_atom_site_residual_force_Cartesian_y(cls,calc,**kwargs):
"""
Returns a list of y components for Cartesian coordinates of
residual force for atom. The list order MUST be the same as in
the resulting structure.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_atom_site_residual_force_Cartesian_z(cls,calc,**kwargs):
"""
Returns a list of z components for Cartesian coordinates of
residual force for atom. The list order MUST be the same as in
the resulting structure.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_BZ_integration_grid_X(cls,calc,**kwargs):
"""
Returns a number of points in the Brillouin zone along reciprocal
lattice vector X.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_BZ_integration_grid_Y(cls,calc,**kwargs):
"""
Returns a number of points in the Brillouin zone along reciprocal
lattice vector Y.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_BZ_integration_grid_Z(cls,calc,**kwargs):
"""
Returns a number of points in the Brillouin zone along reciprocal
lattice vector Z.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_BZ_integration_grid_shift_X(cls,calc,**kwargs):
"""
Returns the shift of the Brillouin zone points along reciprocal
lattice vector X.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_BZ_integration_grid_shift_Y(cls,calc,**kwargs):
"""
Returns the shift of the Brillouin zone points along reciprocal
lattice vector Y.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_BZ_integration_grid_shift_Z(cls,calc,**kwargs):
"""
Returns the shift of the Brillouin zone points along reciprocal
lattice vector Z.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_integration_smearing_method(cls,calc,**kwargs):
"""
Returns the smearing method name as string.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_integration_smearing_method_other(cls,calc,**kwargs):
"""
Returns the smearing method name as string if the name is different
from specified in cif_dft.dic.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_integration_Methfessel_Paxton_order(cls,calc,**kwargs):
"""
Returns the order of Methfessel-Paxton approximation if used.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_kinetic_energy_cutoff_wavefunctions(cls,calc,**kwargs):
"""
Returns kinetic energy cutoff for wavefunctions in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_kinetic_energy_cutoff_charge_density(cls,calc,**kwargs):
"""
Returns kinetic energy cutoff for charge density in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_kinetic_energy_cutoff_EEX(cls,calc,**kwargs):
"""
Returns kinetic energy cutoff for exact exchange (EEX)
operator in eV.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_pseudopotential_atom_type(cls,calc,**kwargs):
"""
Returns a list of atom types. Each atom type MUST occur only
once in this list. List MUST be sorted.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_pseudopotential_type(cls,calc,**kwargs):
"""
Returns a list of pseudopotential types. List MUST be sorted
by atom types.
"""
raise NotImplementedError("not implemented in base class")
@classmethod
def get_pseudopotential_type_other_name(cls,calc,**kwargs):
"""
Returns a list of other pseudopotential type names. List MUST be
sorted by atom types.
"""
raise NotImplementedError("not implemented in base class")
| 0
| 7,907
| 0
| 1,057
| 0
| 0
| 0
| 0
| 23
|
d14b244e69a897030f5533e366ea8b8c35a26b69
| 11,778
|
py
|
Python
|
edgedb/asyncio_client.py
|
ambv/edgedb-python
|
28de03948f1281110f8d11884683e08e2b593e91
|
[
"Apache-2.0"
] | null | null | null |
edgedb/asyncio_client.py
|
ambv/edgedb-python
|
28de03948f1281110f8d11884683e08e2b593e91
|
[
"Apache-2.0"
] | null | null | null |
edgedb/asyncio_client.py
|
ambv/edgedb-python
|
28de03948f1281110f8d11884683e08e2b593e91
|
[
"Apache-2.0"
] | null | null | null |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
__all__ = (
'create_async_client', 'AsyncIOClient'
)
logger = logging.getLogger(__name__)
| 30.434109
| 79
| 0.616403
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import logging
import socket
import ssl
import typing
from . import abstract
from . import base_client
from . import compat
from . import con_utils
from . import errors
from . import transaction
from .protocol import asyncio_proto
__all__ = (
'create_async_client', 'AsyncIOClient'
)
logger = logging.getLogger(__name__)
class AsyncIOConnection(base_client.BaseConnection):
__slots__ = ("_loop",)
_close_exceptions = (Exception, asyncio.CancelledError)
def __init__(self, loop, *args, **kwargs):
super().__init__(*args, **kwargs)
self._loop = loop
def is_closed(self):
protocol = self._protocol
return protocol is None or not protocol.connected
async def connect_addr(self, addr, timeout):
try:
await compat.wait_for(self._connect_addr(addr), timeout)
except asyncio.TimeoutError as e:
raise TimeoutError from e
async def sleep(self, seconds):
await asyncio.sleep(seconds)
def _protocol_factory(self):
return asyncio_proto.AsyncIOProtocol(self._params, self._loop)
async def _connect_addr(self, addr):
tr = None
try:
if isinstance(addr, str):
# UNIX socket
tr, pr = await self._loop.create_unix_connection(
self._protocol_factory, addr
)
else:
try:
tr, pr = await self._loop.create_connection(
self._protocol_factory, *addr, ssl=self._params.ssl_ctx
)
except ssl.CertificateError as e:
raise con_utils.wrap_error(e) from e
except ssl.SSLError as e:
raise con_utils.wrap_error(e) from e
else:
con_utils.check_alpn_protocol(
tr.get_extra_info('ssl_object')
)
except socket.gaierror as e:
# All name resolution errors are considered temporary
raise errors.ClientConnectionFailedTemporarilyError(str(e)) from e
except OSError as e:
raise con_utils.wrap_error(e) from e
except Exception:
if tr is not None:
tr.close()
raise
pr.set_connection(self)
try:
await pr.connect()
except OSError as e:
if tr is not None:
tr.close()
raise con_utils.wrap_error(e) from e
except Exception:
if tr is not None:
tr.close()
raise
self._protocol = pr
self._addr = addr
def _dispatch_log_message(self, msg):
for cb in self._log_listeners:
self._loop.call_soon(cb, self, msg)
class _PoolConnectionHolder(base_client.PoolConnectionHolder):
__slots__ = ()
_event_class = asyncio.Event
async def close(self, *, wait=True):
if self._con is None:
return
if wait:
await self._con.close()
else:
self._pool._loop.create_task(self._con.close())
async def wait_until_released(self, timeout=None):
await self._release_event.wait()
class _AsyncIOPoolImpl(base_client.BasePoolImpl):
__slots__ = ('_loop',)
_holder_class = _PoolConnectionHolder
def __init__(
self,
connect_args,
*,
max_concurrency: typing.Optional[int],
connection_class,
):
if not issubclass(connection_class, AsyncIOConnection):
raise TypeError(
f'connection_class is expected to be a subclass of '
f'edgedb.asyncio_client.AsyncIOConnection, '
f'got {connection_class}')
self._loop = None
super().__init__(
connect_args,
lambda *args: connection_class(self._loop, *args),
max_concurrency=max_concurrency,
)
def _ensure_initialized(self):
if self._loop is None:
self._loop = asyncio.get_event_loop()
self._queue = asyncio.LifoQueue(maxsize=self._max_concurrency)
self._first_connect_lock = asyncio.Lock()
self._resize_holder_pool()
def _set_queue_maxsize(self, maxsize):
self._queue._maxsize = maxsize
async def _maybe_get_first_connection(self):
async with self._first_connect_lock:
if self._working_addr is None:
return await self._get_first_connection()
async def acquire(self, timeout=None):
self._ensure_initialized()
async def _acquire_impl():
ch = await self._queue.get() # type: _PoolConnectionHolder
try:
proxy = await ch.acquire() # type: AsyncIOConnection
except (Exception, asyncio.CancelledError):
self._queue.put_nowait(ch)
raise
else:
# Record the timeout, as we will apply it by default
# in release().
ch._timeout = timeout
return proxy
if self._closing:
raise errors.InterfaceError('pool is closing')
if timeout is None:
return await _acquire_impl()
else:
return await compat.wait_for(
_acquire_impl(), timeout=timeout)
async def _release(self, holder):
if not isinstance(holder._con, AsyncIOConnection):
raise errors.InterfaceError(
f'release() received invalid connection: '
f'{holder._con!r} does not belong to any connection pool'
)
timeout = None
# Use asyncio.shield() to guarantee that task cancellation
# does not prevent the connection from being returned to the
# pool properly.
return await asyncio.shield(holder.release(timeout))
async def aclose(self):
"""Attempt to gracefully close all connections in the pool.
Wait until all pool connections are released, close them and
shut down the pool. If any error (including cancellation) occurs
in ``close()`` the pool will terminate by calling
_AsyncIOPoolImpl.terminate() .
It is advisable to use :func:`python:asyncio.wait_for` to set
a timeout.
"""
if self._closed:
return
if not self._loop:
self._closed = True
return
self._closing = True
try:
warning_callback = self._loop.call_later(
60, self._warn_on_long_close)
release_coros = [
ch.wait_until_released() for ch in self._holders]
await asyncio.gather(*release_coros)
close_coros = [
ch.close() for ch in self._holders]
await asyncio.gather(*close_coros)
except (Exception, asyncio.CancelledError):
self.terminate()
raise
finally:
warning_callback.cancel()
self._closed = True
self._closing = False
def _warn_on_long_close(self):
logger.warning(
'AsyncIOClient.aclose() is taking over 60 seconds to complete. '
'Check if you have any unreleased connections left. '
'Use asyncio.wait_for() to set a timeout for '
'AsyncIOClient.aclose().')
class AsyncIOIteration(transaction.BaseTransaction, abstract.AsyncIOExecutor):
__slots__ = ("_managed",)
def __init__(self, retry, client, iteration):
super().__init__(retry, client, iteration)
self._managed = False
async def __aenter__(self):
if self._managed:
raise errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
return self
async def __aexit__(self, extype, ex, tb):
self._managed = False
return await self._exit(extype, ex)
async def _ensure_transaction(self):
if not self._managed:
raise errors.InterfaceError(
"Only managed retriable transactions are supported. "
"Use `async with transaction:`"
)
await super()._ensure_transaction()
class AsyncIORetry(transaction.BaseRetry):
def __aiter__(self):
return self
async def __anext__(self):
# Note: when changing this code consider also
# updating Retry.__next__.
if self._done:
raise StopAsyncIteration
if self._next_backoff:
await asyncio.sleep(self._next_backoff)
self._done = True
iteration = AsyncIOIteration(self, self._owner, self._iteration)
self._iteration += 1
return iteration
class AsyncIOClient(base_client.BaseClient, abstract.AsyncIOExecutor):
"""A lazy connection pool.
A Client can be used to manage a set of connections to the database.
Connections are first acquired from the pool, then used, and then released
back to the pool. Once a connection is released, it's reset to close all
open cursors and other resources *except* prepared statements.
Clients are created by calling
:func:`~edgedb.asyncio_client.create_async_client`.
"""
__slots__ = ()
_impl_class = _AsyncIOPoolImpl
async def ensure_connected(self):
await self._impl.ensure_connected()
return self
async def aclose(self):
"""Attempt to gracefully close all connections in the pool.
Wait until all pool connections are released, close them and
shut down the pool. If any error (including cancellation) occurs
in ``aclose()`` the pool will terminate by calling
AsyncIOClient.terminate() .
It is advisable to use :func:`python:asyncio.wait_for` to set
a timeout.
"""
await self._impl.aclose()
def transaction(self) -> AsyncIORetry:
return AsyncIORetry(self)
async def __aenter__(self):
return await self.ensure_connected()
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.aclose()
def create_async_client(
dsn=None,
*,
max_concurrency=None,
host: str = None,
port: int = None,
credentials: str = None,
credentials_file: str = None,
user: str = None,
password: str = None,
database: str = None,
tls_ca: str = None,
tls_ca_file: str = None,
tls_security: str = None,
wait_until_available: int = 30,
timeout: int = 10,
):
return AsyncIOClient(
connection_class=AsyncIOConnection,
max_concurrency=max_concurrency,
# connect arguments
dsn=dsn,
host=host,
port=port,
credentials=credentials,
credentials_file=credentials_file,
user=user,
password=password,
database=database,
tls_ca=tls_ca,
tls_ca_file=tls_ca_file,
tls_security=tls_security,
wait_until_available=wait_until_available,
timeout=timeout,
)
| 0
| 0
| 6,280
| 3,411
| 0
| 893
| 0
| -11
| 405
|
86965314682fe859926efa3c62312c76b8803870
| 1,918
|
py
|
Python
|
examples/test_trading.py
|
testnet-exchange/python-viabtc-api
|
be7be15944004498be8da9e4435020d97408a352
|
[
"Apache-2.0"
] | 33
|
2018-07-31T07:50:15.000Z
|
2020-08-04T13:51:40.000Z
|
examples/test_trading.py
|
urantialife/python-viabtc-api
|
be7be15944004498be8da9e4435020d97408a352
|
[
"Apache-2.0"
] | 7
|
2018-08-23T14:48:11.000Z
|
2019-07-15T02:17:12.000Z
|
examples/test_trading.py
|
urantialife/python-viabtc-api
|
be7be15944004498be8da9e4435020d97408a352
|
[
"Apache-2.0"
] | 20
|
2018-08-08T16:29:41.000Z
|
2020-08-04T13:51:43.000Z
|
import sys
from ViaBTCAPI.ViaBTCAPI import ViaBTCAPI
EXCHANGE_URL = "http://localhost:8080/"
USER_ID = 1
USER_ID_2 = 2
UPDATE_MONEY = 0.1
ORDER_PRICE = 0.1
if len(sys.argv) > 1:
EXCHANGE_URL = sys.argv[1]
api = ViaBTCAPI(EXCHANGE_URL)
# get consts from exchange
resp = api.market_list()
m = resp["result"][0]
market, stock, money = m["name"], m["stock"], m["money"]
# balance change
r = api.balance_query(user_id=USER_ID, asset=money)
balance_before = float(r["result"][money]["available"])
_ = api.balance_update(user_id=USER_ID, asset=money, amount=UPDATE_MONEY)
r = api.balance_query(user_id=USER_ID, asset=money)
balance_after = float(r["result"][money]["available"])
assert(balance_after == balance_before + UPDATE_MONEY)
# limit order creation
r = api.order_put_limit(
user_id=USER_ID, market=market, side='BUY', amount=UPDATE_MONEY, price=ORDER_PRICE,
taker_fee_rate=0, maker_fee_rate=0)
r = api.order_depth(market=market, limit=10)
bid_prices = [float(b[0]) for b in r["result"]["bids"]]
assert(ORDER_PRICE in bid_prices)
bid_volume = [float(b[1]) for b in r["result"]["bids"] if float(b[0]) == ORDER_PRICE][0]
# create the second user and execute the order
_ = api.balance_update(user_id=USER_ID_2, asset=stock, amount=bid_volume)
r = api.order_put_limit(
user_id=USER_ID_2, market=market, side='SELL', amount=bid_volume, price=ORDER_PRICE,
taker_fee_rate=0, maker_fee_rate=0)
r = api.order_depth(market=market, limit=10)
prices = [float(b[0]) for b in r["result"]["bids"] + r["result"]["asks"]]
assert(ORDER_PRICE not in prices)
# reset balances
for user_id in [USER_ID, USER_ID_2]:
for asset in [money, stock]:
r = api.balance_query(user_id=user_id, asset=asset)
balance_current = float(r["result"][asset]["available"])
r = api.balance_update(user_id=user_id, asset=asset, amount=(-1) * balance_current)
print("All tests have been passed!")
| 31.442623
| 91
| 0.717935
|
import sys
from ViaBTCAPI.ViaBTCAPI import ViaBTCAPI
EXCHANGE_URL = "http://localhost:8080/"
USER_ID = 1
USER_ID_2 = 2
UPDATE_MONEY = 0.1
ORDER_PRICE = 0.1
if len(sys.argv) > 1:
EXCHANGE_URL = sys.argv[1]
api = ViaBTCAPI(EXCHANGE_URL)
# get consts from exchange
resp = api.market_list()
m = resp["result"][0]
market, stock, money = m["name"], m["stock"], m["money"]
# balance change
r = api.balance_query(user_id=USER_ID, asset=money)
balance_before = float(r["result"][money]["available"])
_ = api.balance_update(user_id=USER_ID, asset=money, amount=UPDATE_MONEY)
r = api.balance_query(user_id=USER_ID, asset=money)
balance_after = float(r["result"][money]["available"])
assert(balance_after == balance_before + UPDATE_MONEY)
# limit order creation
r = api.order_put_limit(
user_id=USER_ID, market=market, side='BUY', amount=UPDATE_MONEY, price=ORDER_PRICE,
taker_fee_rate=0, maker_fee_rate=0)
r = api.order_depth(market=market, limit=10)
bid_prices = [float(b[0]) for b in r["result"]["bids"]]
assert(ORDER_PRICE in bid_prices)
bid_volume = [float(b[1]) for b in r["result"]["bids"] if float(b[0]) == ORDER_PRICE][0]
# create the second user and execute the order
_ = api.balance_update(user_id=USER_ID_2, asset=stock, amount=bid_volume)
r = api.order_put_limit(
user_id=USER_ID_2, market=market, side='SELL', amount=bid_volume, price=ORDER_PRICE,
taker_fee_rate=0, maker_fee_rate=0)
r = api.order_depth(market=market, limit=10)
prices = [float(b[0]) for b in r["result"]["bids"] + r["result"]["asks"]]
assert(ORDER_PRICE not in prices)
# reset balances
for user_id in [USER_ID, USER_ID_2]:
for asset in [money, stock]:
r = api.balance_query(user_id=user_id, asset=asset)
balance_current = float(r["result"][asset]["available"])
r = api.balance_update(user_id=user_id, asset=asset, amount=(-1) * balance_current)
print("All tests have been passed!")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ea5282e356e5e5fd276625a31e5b9590f02122ed
| 1,066
|
py
|
Python
|
Speech Synthesis/SS.py
|
saber1malek/HelpMate
|
d69ee6cc1e67fca3e87fa6ea132a0569e4667994
|
[
"MIT"
] | 3
|
2020-06-07T08:56:40.000Z
|
2020-06-07T13:00:21.000Z
|
Speech Synthesis/SS.py
|
saber1malek/HelpMate
|
d69ee6cc1e67fca3e87fa6ea132a0569e4667994
|
[
"MIT"
] | 1
|
2020-06-07T09:06:12.000Z
|
2020-06-07T09:06:12.000Z
|
Speech Synthesis/SS.py
|
saber1malek/HelpMate
|
d69ee6cc1e67fca3e87fa6ea132a0569e4667994
|
[
"MIT"
] | 1
|
2020-06-07T07:13:55.000Z
|
2020-06-07T07:13:55.000Z
|
# HelpMate Speech Synthesis Connection 2020
from google.cloud import texttospeech
# Instantiates a client
client = texttospeech.TextToSpeechClient()
# Set the text input to be synthesized
synthesis_input = texttospeech.types.SynthesisInput(text="Hello, World!")
# Build the voice request, select the language code ("en-US") and the ssml
# voice gender ("neutral")
voice = texttospeech.types.VoiceSelectionParams(
language_code='en-US',
ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
# Select the type of audio file you want returned
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3)
# Perform the text-to-speech request on the text input with the selected
# voice parameters and audio file type
response = client.synthesize_speech(synthesis_input, voice, audio_config)
# The response's audio_content is binary.
with open('output.mp3', 'wb') as out:
# Write the response to the output file.
out.write(response.audio_content)
print('Audio content written to file "output.mp3"')
| 35.533333
| 74
| 0.77955
|
# HelpMate Speech Synthesis Connection 2020
from google.cloud import texttospeech
# Instantiates a client
client = texttospeech.TextToSpeechClient()
# Set the text input to be synthesized
synthesis_input = texttospeech.types.SynthesisInput(text="Hello, World!")
# Build the voice request, select the language code ("en-US") and the ssml
# voice gender ("neutral")
voice = texttospeech.types.VoiceSelectionParams(
language_code='en-US',
ssml_gender=texttospeech.enums.SsmlVoiceGender.NEUTRAL)
# Select the type of audio file you want returned
audio_config = texttospeech.types.AudioConfig(
audio_encoding=texttospeech.enums.AudioEncoding.MP3)
# Perform the text-to-speech request on the text input with the selected
# voice parameters and audio file type
response = client.synthesize_speech(synthesis_input, voice, audio_config)
# The response's audio_content is binary.
with open('output.mp3', 'wb') as out:
# Write the response to the output file.
out.write(response.audio_content)
print('Audio content written to file "output.mp3"')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
53010981e0eedacff4461e5e4a9beb5fada6a61c
| 7,649
|
py
|
Python
|
arbitrage/private_markets/anxpro.py
|
queenvictoria/bitcoin-arbitrage
|
12b6304684416fa2a7b2bb61c19e87b4572577c1
|
[
"Unlicense"
] | 2
|
2016-02-27T03:52:01.000Z
|
2017-10-23T18:47:31.000Z
|
arbitrage/private_markets/anxpro.py
|
queenvictoria/bitcoin-arbitrage
|
12b6304684416fa2a7b2bb61c19e87b4572577c1
|
[
"Unlicense"
] | null | null | null |
arbitrage/private_markets/anxpro.py
|
queenvictoria/bitcoin-arbitrage
|
12b6304684416fa2a7b2bb61c19e87b4572577c1
|
[
"Unlicense"
] | null | null | null |
# Copyright (C) 2013, Maxime Biais <[email protected]>
import urllib.request
import urllib.parse
import urllib.error
import urllib.request
import urllib.error
| 39.225641
| 95
| 0.577723
|
# Copyright (C) 2013, Maxime Biais <[email protected]>
from .market import Market
import time
import base64
import hmac
import urllib.request
import urllib.parse
import urllib.error
import urllib.request
import urllib.error
import urllib.parse
import hashlib
import sys
import json
import re
import logging
import config
class PrivateANXPro(Market):
def __init__(self):
super().__init__()
self.base_url = "https://anxpro.com/api/2/"
self.order_path = {"method": "POST", "path": "generic/private/order/result"}
self.open_orders_path = {"method": "POST", "path": "generic/private/orders"}
self.info_path = {"method": "POST", "path": "money/info"}
self.withdraw_path = {"method": "POST", "path": "generic/bitcoin/send_simple"}
self.deposit_path = {"method": "POST", "path": "generic/bitcoin/address"}
self.key = config.anxpro_api_key
self.secret = config.anxpro_api_secret
self.get_info()
def _create_nonce(self):
return int(time.time() * 1000000)
def _change_currency_url(self, url, currency):
return re.sub(r'BTC\w{3}', r'BTC' + currency, url)
def _to_int_price(self, price, currency):
ret_price = None
if currency in ["USD", "EUR", "GBP", "PLN", "CAD", "AUD", "CHF", "CNY",
"NZD", "RUB", "DKK", "HKD", "SGD", "THB"]:
ret_price = price
ret_price = int(price * 100000)
elif currency in ["JPY", "SEK"]:
ret_price = price
ret_price = int(price * 1000)
return ret_price
def _to_int_amount(self, amount):
amount = amount
return int(amount * 100000000)
def _from_int_amount(self, amount):
return amount / 100000000.
def _from_int_price(self, amount):
# FIXME: should take JPY and SEK into account
return amount / 100000.
def _send_request(self, url, params, extra_headers=None):
urlparams = urllib.parse.urlencode(dict(params))
secret_message = url["path"] + chr(0) + urlparams
secret_from_b64 = base64.b64decode(bytes(self.secret, "UTF-8"))
hmac_secret = hmac.new(secret_from_b64, secret_message.encode("UTF-8"), hashlib.sha512)
hmac_sign = base64.b64encode(hmac_secret.digest())
headers = {
'Rest-Key': self.key,
'Rest-Sign': hmac_sign.decode("UTF-8"),
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
}
if extra_headers is not None:
for k, v in extra_headers.items():
headers[k] = v
try:
req = urllib.request.Request(self.base_url + url['path'],
bytes(urlparams,
"UTF-8"), headers)
response = urllib.request.urlopen(req)
if response.getcode() == 200:
jsonstr = response.read()
return json.loads(str(jsonstr, "UTF-8"))
except Exception as err:
logging.error('Can\'t request ANXPro, %s' % err)
return None
def trade(self, amount, ttype, price=None):
if price:
price = self._to_int_price(price, self.currency)
amount = self._to_int_amount(amount)
self.buy_path["path"] = self._change_currency_url(
self.buy_path["path"], self.currency)
params = [("nonce", self._create_nonce()),
("amount_int", str(amount)),
("type", ttype)]
if price:
params.append(("price_int", str(price)))
response = self._send_request(self.buy_path, params)
if response and "result" in response and \
response["result"] == "success":
return response["return"]
return None
def _buy(self, amount, price):
return self.trade(amount, "bid", price)
def _sell(self, amount, price):
return self.trade(amount, "ask", price)
def withdraw(self, amount, address):
params = [("nonce", self._create_nonce()),
("amount_int", str(self._to_int_amount(amount))),
("address", address)]
response = self._send_request(self.withdraw_path, params)
if response and "result" in response and \
response["result"] == "success":
return response["return"]
return None
def deposit(self):
params = [("nonce", self._create_nonce())]
response = self._send_request(self.deposit_path, params)
if response and "result" in response and \
response["result"] == "success":
return response["return"]
return None
class PrivateANXProAUD(PrivateANXPro):
def __init__(self):
super().__init__()
self.ticker_path = {"method": "GET", "path": "BTCAUD/public/ticker"}
self.buy_path = {"method": "POST", "path": "BTCAUD/private/order/add"}
self.sell_path = {"method": "POST", "path": "BTCAUD/private/order/add"}
self.currency = "AUD"
def get_info(self):
params = [("nonce", self._create_nonce())]
response = self._send_request(self.info_path, params)
if response and "result" in response and response["result"] == "success":
self.btc_balance = self._from_int_amount(int(
response["data"]["Wallets"]["BTC"]["Balance"]["value_int"]))
self.aud_balance = self._from_int_price(int(
response["data"]["Wallets"]["AUD"]["Balance"]["value_int"]))
self.usd_balance = self.fc.convert(self.aud_balance, "AUD", "USD")
self.eur_balance = self.fc.convert(self.aud_balance, "AUD", "EUR")
funds = response["data"]["Wallets"]
if self.pair1_name in funds:
self.pair1_balance = self._from_int_amount(
int(funds[self.pair1_name]["Balance"]["value_int"])
)
if self.pair2_name in funds:
self.pair2_balance = self._from_int_amount(
int(funds[self.pair2_name]["Balance"]["value_int"])
)
return 1
return None
class PrivateANXProUSD(PrivateANXPro):
def __init__(self):
super().__init__()
self.ticker_path = {"method": "GET", "path": "BTCUSD/public/ticker"}
self.buy_path = {"method": "POST", "path": "BTCUSD/private/order/add"}
self.sell_path = {"method": "POST", "path": "BTCUSD/private/order/add"}
self.currency = "USD"
def get_info(self):
params = [("nonce", self._create_nonce())]
response = self._send_request(self.info_path, params)
if response and "result" in response and response["result"] == "success":
self.btc_balance = self._from_int_amount(int(
response["data"]["Wallets"]["BTC"]["Balance"]["value_int"]))
self.usd_balance = self._from_int_price(int(
response["data"]["Wallets"]["USD"]["Balance"]["value_int"]))
funds = response["data"]["Wallets"]
if self.pair1_name in funds:
self.pair1_balance = self._from_int_amount(
int(funds[self.pair1_name]["Balance"]["value_int"])
)
if self.pair2_name in funds:
self.pair2_balance = self._from_int_amount(
int(funds[self.pair2_name]["Balance"]["value_int"])
)
return 1
return None
| 0
| 0
| 0
| 7,258
| 0
| 0
| 0
| -80
| 312
|
f3f1716e28d3670b76dc0d585daaea68d8eeab2d
| 2,442
|
py
|
Python
|
zeus/migrations/0f81e9efc84a_add_pending_artifact.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 221
|
2017-07-03T17:29:21.000Z
|
2021-12-07T19:56:59.000Z
|
zeus/migrations/0f81e9efc84a_add_pending_artifact.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 298
|
2017-07-04T18:08:14.000Z
|
2022-03-03T22:24:51.000Z
|
zeus/migrations/0f81e9efc84a_add_pending_artifact.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 24
|
2017-07-15T13:46:45.000Z
|
2020-08-16T16:14:45.000Z
|
"""add_pending_artifact
Revision ID: 0f81e9efc84a
Revises: 61a1763b9c8d
Create Date: 2019-10-24 15:13:36.705288
"""
# revision identifiers, used by Alembic.
revision = "0f81e9efc84a"
down_revision = "61a1763b9c8d"
branch_labels = ()
depends_on = None
| 32.56
| 85
| 0.638411
|
"""add_pending_artifact
Revision ID: 0f81e9efc84a
Revises: 61a1763b9c8d
Create Date: 2019-10-24 15:13:36.705288
"""
import zeus.db.types
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "0f81e9efc84a"
down_revision = "61a1763b9c8d"
branch_labels = ()
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"pending_artifact",
sa.Column("provider", sa.String(), nullable=False),
sa.Column("external_job_id", sa.String(length=64), nullable=False),
sa.Column("external_build_id", sa.String(length=64), nullable=False),
sa.Column("hook_id", zeus.db.types.guid.GUID(), nullable=False),
sa.Column("name", sa.String(length=256), nullable=False),
sa.Column("type", sa.String(length=64), nullable=True),
sa.Column("file", zeus.db.types.file.File(), nullable=False),
sa.Column("repository_id", zeus.db.types.guid.GUID(), nullable=False),
sa.Column("id", zeus.db.types.guid.GUID(), nullable=False),
sa.Column(
"date_created",
sa.TIMESTAMP(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.ForeignKeyConstraint(["hook_id"], ["hook.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(
["repository_id"], ["repository.id"], ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"idx_pending_artifact",
"pending_artifact",
["repository_id", "provider", "external_job_id", "external_build_id"],
unique=False,
)
op.create_index(
op.f("ix_pending_artifact_hook_id"),
"pending_artifact",
["hook_id"],
unique=False,
)
op.create_index(
op.f("ix_pending_artifact_repository_id"),
"pending_artifact",
["repository_id"],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(
op.f("ix_pending_artifact_repository_id"), table_name="pending_artifact"
)
op.drop_index(op.f("ix_pending_artifact_hook_id"), table_name="pending_artifact")
op.drop_index("idx_pending_artifact", table_name="pending_artifact")
op.drop_table("pending_artifact")
# ### end Alembic commands ###
| 0
| 0
| 0
| 0
| 0
| 2,071
| 0
| 2
| 112
|
04fc4ab0e7caeb7ba6bcedac53f83e0735e59840
| 5,148
|
py
|
Python
|
fixed_width.py
|
supakeen/fixed-width
|
57e9ae06ee5d77f2390f8377a9b7842f3596b04e
|
[
"MIT"
] | null | null | null |
fixed_width.py
|
supakeen/fixed-width
|
57e9ae06ee5d77f2390f8377a9b7842f3596b04e
|
[
"MIT"
] | null | null | null |
fixed_width.py
|
supakeen/fixed-width
|
57e9ae06ee5d77f2390f8377a9b7842f3596b04e
|
[
"MIT"
] | null | null | null |
"""
Copyright 2021 supakeen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Exporting the `default` context for short imports.
i8 = default.i8
i16 = default.i16
i32 = default.i32
i64 = default.i64
u8 = default.u8
u16 = default.u16
u32 = default.u32
u64 = default.u64
| 27.677419
| 80
| 0.698718
|
"""
Copyright 2021 supakeen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import enum
from typing import Optional, Any
class _storage(enum.Enum):
TWOS_COMPLEMENT = enum.auto()
class _behavior_overflow(enum.Enum):
EXCEPTION = enum.auto()
TRUNCATE = enum.auto()
class _behavior_promotion(enum.Enum):
EXCEPTION = enum.auto()
class _behavior:
overflow: _behavior_overflow
promotion: _behavior_promotion
def __init__(self, overflow, promotion):
self.overflow = overflow
self.promotion = promotion
def __repr__(self) -> str:
return f"_behavior({self.overflow=}, {self.promotion=})"
class _context:
"""Contexts are for storing various flags about what happened to types used
within them."""
overflow: bool
promotion: bool
def __init__(self):
self.overflow = False
self.promotion = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
pass
class _value:
"""A value, we store the raw 'python' integer and only go into/out of bits
and fixed width on certain operations. This allows for easier conversion
during calculation.
"""
def __init__(self, rule, integer: int, context: Optional[_context] = None):
self._rule = rule
self._integer = integer
self._context = context
def __int__(self) -> int:
if self._rule._behavior.overflow == _behavior_overflow.TRUNCATE:
return self._integer & (2 ** self._rule._width - 1)
else:
self._integer & (2 ** self._rule._width - 1)
def __repr__(self) -> str:
return f"_value({self._integer=}, {self._rule=})"
def __add__(self, other: Any):
if isinstance(other, int):
other = self._rule(other)
class _type:
_context: Optional[_context]
_width: int
_behavior: _behavior
_storage: _storage
def __init__(
self,
width: int,
behavior: _behavior,
context: Optional[_context] = None,
) -> None:
self._width = width
self._behavior = behavior
self._context = context
self._storage = _storage.TWOS_COMPLEMENT
def __call__(self, integer: int):
return _value(self, integer, self._context)
def __repr__(self) -> str:
return f"_{self.__class__.__name__}({self._width=}, {self._behavior=})"
class _unsigned(_type):
pass
class _signed(_type):
pass
class c_stdint(_context):
_signed_behavior = _behavior(
overflow=_behavior_overflow.TRUNCATE,
promotion=_behavior_promotion.EXCEPTION,
)
int8_t = _signed(8, _signed_behavior)
int16_t = _signed(16, _signed_behavior)
int32_t = _signed(32, _signed_behavior)
int64_t = _signed(64, _signed_behavior)
_unsigned_behavior = _behavior(
overflow=_behavior_overflow.TRUNCATE,
promotion=_behavior_promotion.EXCEPTION,
)
uint8_t = _unsigned(8, _unsigned_behavior)
uint16_t = _unsigned(16, _unsigned_behavior)
uint32_t = _unsigned(32, _unsigned_behavior)
uint64_t = _unsigned(64, _unsigned_behavior)
class default(_context):
"""Some default values that do generally what people expect, these mimic the
`c_stdint` context and are exported on the module level directly for short
imports."""
_signed_behavior = _behavior(
overflow=_behavior_overflow.TRUNCATE,
promotion=_behavior_promotion.EXCEPTION,
)
i8 = _signed(8, _signed_behavior)
i16 = _signed(16, _signed_behavior)
i32 = _signed(32, _signed_behavior)
i64 = _signed(64, _signed_behavior)
_unsigned_behavior = _behavior(
overflow=_behavior_overflow.TRUNCATE,
promotion=_behavior_promotion.EXCEPTION,
)
u8 = _unsigned(8, _unsigned_behavior)
u16 = _unsigned(16, _unsigned_behavior)
u32 = _unsigned(32, _unsigned_behavior)
u64 = _unsigned(64, _unsigned_behavior)
# Exporting the `default` context for short imports.
i8 = default.i8
i16 = default.i16
i32 = default.i32
i64 = default.i64
u8 = default.u8
u16 = default.u16
u32 = default.u32
u64 = default.u64
| 0
| 0
| 0
| 3,585
| 0
| 0
| 0
| 1
| 299
|
67cbdb578387f31a0a05cd7f49b7dda4d7c9a62c
| 172
|
py
|
Python
|
16-coroutine/example_16_1.py
|
hua372494277/fluent_python_example-code
|
07577e3e3ca822ab0e769bbaa22477fd988edb36
|
[
"MIT"
] | null | null | null |
16-coroutine/example_16_1.py
|
hua372494277/fluent_python_example-code
|
07577e3e3ca822ab0e769bbaa22477fd988edb36
|
[
"MIT"
] | null | null | null |
16-coroutine/example_16_1.py
|
hua372494277/fluent_python_example-code
|
07577e3e3ca822ab0e769bbaa22477fd988edb36
|
[
"MIT"
] | null | null | null |
my_coro = simple_coroutine()
next(my_coro)
my_coro.send(42)
| 21.5
| 39
| 0.674419
|
def simple_coroutine():
print('-> coroutine started')
x = yield
print('-> coroutine received: ', x)
my_coro = simple_coroutine()
next(my_coro)
my_coro.send(42)
| 0
| 0
| 0
| 0
| 90
| 0
| 0
| 0
| 22
|
a6bb3f10026fd14c5e3cb38dc7884cfc0cad2c28
| 193
|
py
|
Python
|
example_project/urls.py
|
justquick/django-sentry
|
07988759144524ba49bc63b308663244d1a69d04
|
[
"BSD-3-Clause"
] | 1
|
2016-03-21T18:56:31.000Z
|
2016-03-21T18:56:31.000Z
|
example_project/urls.py
|
justquick/django-sentry
|
07988759144524ba49bc63b308663244d1a69d04
|
[
"BSD-3-Clause"
] | null | null | null |
example_project/urls.py
|
justquick/django-sentry
|
07988759144524ba49bc63b308663244d1a69d04
|
[
"BSD-3-Clause"
] | null | null | null |
urlpatterns = patterns('',
url(r'^trigger-500$', 'sentry.tests.views.raise_exc', name='sentry-raise-exc'),
url(r'^', include('sentry.urls')),
)
| 27.571429
| 83
| 0.668394
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
url(r'^trigger-500$', 'sentry.tests.views.raise_exc', name='sentry-raise-exc'),
url(r'^', include('sentry.urls')),
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 22
|
6ffdeaa033eaf21fe518e29893f1e57af448e9e1
| 2,012
|
py
|
Python
|
cruft/_commands/utils/diff.py
|
appunni-dishq/cruft
|
afa89a2856008bec49b31a90b6e3dd3c1eee24a9
|
[
"MIT"
] | null | null | null |
cruft/_commands/utils/diff.py
|
appunni-dishq/cruft
|
afa89a2856008bec49b31a90b6e3dd3c1eee24a9
|
[
"MIT"
] | null | null | null |
cruft/_commands/utils/diff.py
|
appunni-dishq/cruft
|
afa89a2856008bec49b31a90b6e3dd3c1eee24a9
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from subprocess import PIPE, run # nosec
from cruft import exceptions
def get_diff(repo0: Path, repo1: Path) -> str:
"""Compute the raw diff between two repositories."""
try:
diff = run(
_git_diff("--no-ext-diff", "--no-color", str(repo0), str(repo1)),
cwd=str(repo0),
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
except UnicodeDecodeError:
raise exceptions.ChangesetUnicodeError()
# By default, git diff --no-index will output full paths like so:
# --- a/tmp/tmpmp34g21y/remote/.coveragerc
# +++ b/tmp/tmpmp34g21y/local/.coveragerc
# We don't want this as we may need to apply the diff later on.
# Note that diff headers contain repo0 and repo1 with both "a" and "b"
# prefixes: headers for new files have a/repo1, headers for deleted files
# have b/repo0.
for repo in [repo0, repo1]:
diff = diff.replace("a" + str(repo), "a").replace("b" + str(repo), "b")
# This replacement is needed for renamed/moved files to be recognized properly
# Renamed files in the diff don't have the "a" or "b" prefix and instead look like
# /tmp/tmpmp34g21y/remote/.coveragerc
# If we replace repo paths which are like /tmp/tmpmp34g21y/remote
# we would end up with /.coveragerc which doesn't work.
# We also need to replace the trailing slash. As a result, we only do
# this after the above replacement is made as the trailing slash is needed there.
diff = diff.replace(str(repo0) + "/", "").replace(str(repo1) + "/", "")
return diff
def display_diff(repo0: Path, repo1: Path):
"""Displays the diff between two repositories."""
run(_git_diff(str(repo0), str(repo1)))
| 42.808511
| 100
| 0.656064
|
from pathlib import Path
from subprocess import PIPE, run # nosec
from typing import List
from cruft import exceptions
def _git_diff(*args: str) -> List[str]:
# https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---binary support for binary patch
return ["git", "-c", "diff.noprefix=", "diff", "--no-index", "--relative", "--binary", *args]
def get_diff(repo0: Path, repo1: Path) -> str:
"""Compute the raw diff between two repositories."""
try:
diff = run(
_git_diff("--no-ext-diff", "--no-color", str(repo0), str(repo1)),
cwd=str(repo0),
stdout=PIPE,
stderr=PIPE,
).stdout.decode()
except UnicodeDecodeError:
raise exceptions.ChangesetUnicodeError()
# By default, git diff --no-index will output full paths like so:
# --- a/tmp/tmpmp34g21y/remote/.coveragerc
# +++ b/tmp/tmpmp34g21y/local/.coveragerc
# We don't want this as we may need to apply the diff later on.
# Note that diff headers contain repo0 and repo1 with both "a" and "b"
# prefixes: headers for new files have a/repo1, headers for deleted files
# have b/repo0.
for repo in [repo0, repo1]:
diff = diff.replace("a" + str(repo), "a").replace("b" + str(repo), "b")
# This replacement is needed for renamed/moved files to be recognized properly
# Renamed files in the diff don't have the "a" or "b" prefix and instead look like
# /tmp/tmpmp34g21y/remote/.coveragerc
# If we replace repo paths which are like /tmp/tmpmp34g21y/remote
# we would end up with /.coveragerc which doesn't work.
# We also need to replace the trailing slash. As a result, we only do
# this after the above replacement is made as the trailing slash is needed there.
diff = diff.replace(str(repo0) + "/", "").replace(str(repo1) + "/", "")
return diff
def display_diff(repo0: Path, repo1: Path):
"""Displays the diff between two repositories."""
run(_git_diff(str(repo0), str(repo1)))
| 0
| 0
| 0
| 0
| 0
| 217
| 0
| 2
| 45
|
fe150d05608dcd1ad34cb5b7bf40af7c9cb0ade8
| 6,990
|
py
|
Python
|
octopus/modules/doaj/models.py
|
CottageLabs/magnificent-octopus-oacwellcome-fork
|
b1c8c412cf9a3fe66fca1c8e92ed074c9821663e
|
[
"Apache-2.0"
] | 2
|
2016-02-22T04:31:30.000Z
|
2021-08-03T23:58:36.000Z
|
octopus/modules/doaj/models.py
|
CottageLabs/magnificent-octopus-oacwellcome-fork
|
b1c8c412cf9a3fe66fca1c8e92ed074c9821663e
|
[
"Apache-2.0"
] | 9
|
2015-01-04T14:00:05.000Z
|
2021-12-13T19:35:07.000Z
|
octopus/modules/doaj/models.py
|
CottageLabs/magnificent-octopus-oacwellcome-fork
|
b1c8c412cf9a3fe66fca1c8e92ed074c9821663e
|
[
"Apache-2.0"
] | 3
|
2016-09-09T13:39:45.000Z
|
2018-02-19T14:23:12.000Z
|
BASE_ARTICLE_STRUCT = {
"fields": {
"id": {"coerce": "unicode"}, # Note that we'll leave these in for ease of use by the
"created_date": {"coerce": "utcdatetime"}, # caller, but we'll need to ignore them on the conversion
"last_updated": {"coerce": "utcdatetime"} # to the real object
},
"objects": ["admin", "bibjson"],
"structs": {
"admin": {
"fields": {
"in_doaj": {"coerce": "bool", "get__default": False},
"publisher_record_id": {"coerce": "unicode"},
"upload_id": {"coerce": "unicode"}
}
},
"bibjson": {
"fields": {
"title": {"coerce": "unicode", "set__ignore_none": True},
"year": {"coerce": "unicode", "set__ignore_none": True},
"month": {"coerce": "unicode", "set__ignore_none": True},
"abstract": {"coerce": "unicode", "set__ignore_none": True}
},
"lists": {
"identifier": {"contains": "object"},
"link": {"contains": "object"},
"author": {"contains": "object"},
"keywords": {"coerce": "unicode", "contains": "field"},
"subject": {"contains": "object"},
},
"objects": [
"journal",
],
"structs": {
"identifier": {
"fields": {
"type": {"coerce": "unicode"},
"id": {"coerce": "unicode"}
}
},
"link": {
"fields": {
"type": {"coerce": "unicode"},
"url": {"coerce": "url"},
"content_type": {"coerce": "unicde"}
}
},
"author": {
"fields": {
"name": {"coerce": "unicode"},
"email": {"coerce": "unicode"},
"affiliation": {"coerce": "unicode"}
}
},
"journal": {
"fields": {
"start_page": {"coerce": "unicode", "set__ignore_none": True},
"end_page": {"coerce": "unicode", "set__ignore_none": True},
"volume": {"coerce": "unicode", "set__ignore_none": True},
"number": {"coerce": "unicode", "set__ignore_none": True},
"publisher": {"coerce": "unicode", "set__ignore_none": True},
"title": {"coerce": "unicode", "set__ignore_none": True},
"country": {"coerce": "unicode", "set__ignore_none": True}
},
"lists": {
"license": {"contains": "object"},
"language": {"coerce": "unicode", "contains": "field", "set__ignore_none": True}
},
"structs": {
"license": {
"fields": {
"title": {"coerce": "unicode"},
"type": {"coerce": "unicode"},
"url": {"coerce": "unicode"},
"version": {"coerce": "unicode"},
"open_access": {"coerce": "bool"},
}
}
}
},
"subject": {
"fields": {
"scheme": {"coerce": "unicode"},
"term": {"coerce": "unicode"},
"code": {"coerce": "unicode"}
}
},
}
}
}
}
ARTICLE_REQUIRED = {
"required": ["bibjson"],
"structs": {
"bibjson": {
"required": [
"title",
"author", # One author required
"identifier" # One type of identifier is required
],
"structs": {
"identifier": {
"required": ["type", "id"]
},
"link": {
"required": ["type", "url"]
},
"author": {
"required": ["name"]
},
}
}
}
}
| 34.097561
| 109
| 0.425322
|
from octopus.lib import dataobj
BASE_ARTICLE_STRUCT = {
"fields": {
"id": {"coerce": "unicode"}, # Note that we'll leave these in for ease of use by the
"created_date": {"coerce": "utcdatetime"}, # caller, but we'll need to ignore them on the conversion
"last_updated": {"coerce": "utcdatetime"} # to the real object
},
"objects": ["admin", "bibjson"],
"structs": {
"admin": {
"fields": {
"in_doaj": {"coerce": "bool", "get__default": False},
"publisher_record_id": {"coerce": "unicode"},
"upload_id": {"coerce": "unicode"}
}
},
"bibjson": {
"fields": {
"title": {"coerce": "unicode", "set__ignore_none": True},
"year": {"coerce": "unicode", "set__ignore_none": True},
"month": {"coerce": "unicode", "set__ignore_none": True},
"abstract": {"coerce": "unicode", "set__ignore_none": True}
},
"lists": {
"identifier": {"contains": "object"},
"link": {"contains": "object"},
"author": {"contains": "object"},
"keywords": {"coerce": "unicode", "contains": "field"},
"subject": {"contains": "object"},
},
"objects": [
"journal",
],
"structs": {
"identifier": {
"fields": {
"type": {"coerce": "unicode"},
"id": {"coerce": "unicode"}
}
},
"link": {
"fields": {
"type": {"coerce": "unicode"},
"url": {"coerce": "url"},
"content_type": {"coerce": "unicde"}
}
},
"author": {
"fields": {
"name": {"coerce": "unicode"},
"email": {"coerce": "unicode"},
"affiliation": {"coerce": "unicode"}
}
},
"journal": {
"fields": {
"start_page": {"coerce": "unicode", "set__ignore_none": True},
"end_page": {"coerce": "unicode", "set__ignore_none": True},
"volume": {"coerce": "unicode", "set__ignore_none": True},
"number": {"coerce": "unicode", "set__ignore_none": True},
"publisher": {"coerce": "unicode", "set__ignore_none": True},
"title": {"coerce": "unicode", "set__ignore_none": True},
"country": {"coerce": "unicode", "set__ignore_none": True}
},
"lists": {
"license": {"contains": "object"},
"language": {"coerce": "unicode", "contains": "field", "set__ignore_none": True}
},
"structs": {
"license": {
"fields": {
"title": {"coerce": "unicode"},
"type": {"coerce": "unicode"},
"url": {"coerce": "unicode"},
"version": {"coerce": "unicode"},
"open_access": {"coerce": "bool"},
}
}
}
},
"subject": {
"fields": {
"scheme": {"coerce": "unicode"},
"term": {"coerce": "unicode"},
"code": {"coerce": "unicode"}
}
},
}
}
}
}
ARTICLE_REQUIRED = {
"required": ["bibjson"],
"structs": {
"bibjson": {
"required": [
"title",
"author", # One author required
"identifier" # One type of identifier is required
],
"structs": {
"identifier": {
"required": ["type", "id"]
},
"link": {
"required": ["type", "url"]
},
"author": {
"required": ["name"]
},
}
}
}
}
class Journal(dataobj.DataObj):
def __init__(self, raw=None):
super(Journal, self).__init__(raw, expose_data=True)
def all_issns(self):
issns = []
# get the issns from the identifiers record
idents = self.bibjson.identifier
if idents is not None:
for ident in idents:
if ident.type in ["pissn", "eissn"]:
issns.append(ident.id)
# FIXME: this could be made better by having the Journal struct here too, but for
# the time being a catch on an AttributeError will suffice
try:
hist = self.bibjson.history
except AttributeError:
hist = None
if hist is not None:
for h in hist:
idents = h.bibjson.identifier
if idents is not None:
for ident in idents:
if ident.type in ["pissn", "eissn"]:
issns.append(ident.id)
return issns
class Article(dataobj.DataObj):
def __init__(self, raw=None):
self._add_struct(BASE_ARTICLE_STRUCT)
super(Article, self).__init__(raw, expose_data=True)
def add_identifier(self, type, id):
if type is None or id is None:
return
self._add_to_list("bibjson.identifier", {"type" : type, "id" : id})
def get_identifier(self, type):
for id in self._get_list("bibjson.identifier"):
if id.get("type") == type:
return id.get("id")
return None
def add_link(self, type, url):
if type is None or url is None:
return
self._add_to_list("bibjson.link", {"type" : type, "url" : url})
def get_link(self, type):
for link in self._get_list("bibjson.link"):
if link.get("type") == type:
return link.get("url")
return None
def add_author(self, name):
if name is None:
return
self._add_to_list("bibjson.author", {"name" : name})
def is_api_valid(self):
try:
a = ArticleValidator(self.data)
except Exception as e:
return False
return True
class ArticleValidator(dataobj.DataObj):
def __init__(self, raw=None):
self._add_struct(BASE_ARTICLE_STRUCT)
self._add_struct(ARTICLE_REQUIRED)
super(ArticleValidator, self).__init__(raw, expose_data=True)
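# Hedged usage sketch (added, not part of the original module); the DOI and author
# name below are invented placeholders:
#
#     article = Article()
#     article.add_identifier("doi", "10.1234/example")
#     article.add_author("A. Author")
#     article.get_identifier("doi")    # -> "10.1234/example"
#     article.is_api_valid()           # validates against ARTICLE_REQUIRED above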
| 0
| 0
| 0
| 2,363
| 0
| 0
| 0
| 10
| 91
|
e33d9c4b1e510869e7210b59656c9fa8baeead13
| 959
|
py
|
Python
|
h3m/rmg/dispatch_gen.py
|
dmitrystril/homm3tools
|
5687f581a4eb5e7b0e8f48794d7be4e3b0a8cc8b
|
[
"MIT"
] | 113
|
2015-08-09T08:36:55.000Z
|
2022-03-21T10:42:46.000Z
|
h3m/rmg/dispatch_gen.py
|
dmitrystril/homm3tools
|
5687f581a4eb5e7b0e8f48794d7be4e3b0a8cc8b
|
[
"MIT"
] | 40
|
2015-08-23T06:36:34.000Z
|
2022-01-03T21:19:40.000Z
|
h3m/rmg/dispatch_gen.py
|
dmitrystril/homm3tools
|
5687f581a4eb5e7b0e8f48794d7be4e3b0a8cc8b
|
[
"MIT"
] | 27
|
2015-08-09T08:40:39.000Z
|
2022-03-28T08:03:12.000Z
|
def dispatch_gen(settings, rmgseed = 0):
"""
Short function that prepares map generation
and then dispatches to generation modules
"""
# Seed with a specific seed if one was provided
if rmgseed != 0:
print("seed:" + str(rmgseed))
seed(rmgseed)
module_name = "modules.%s.__init__" % settings.module_name
rmg_module = __import__(module_name, fromlist=[module_name])
max_attempts = 5
for i in range(max_attempts):
hmap = None
print("[i]At attempt %d of %d for for module %s to generate given these settings" % (i+1, max_attempts, settings.module_name))
hmap = rmg_module.gen(settings)
if hmap != None:
print("[+]Successful at attempt %d/%d for module %s to generate given these settings" % (i+1, max_attempts, settings.module_name))
break
else:
print("[!]Failed at attempt %d/%d for module %s to generate given these settings" % (i+1, max_attempts, settings.module_name))
return hmap
| 31.966667
| 134
| 0.713243
|
import sys
from random import *
def dispatch_gen(settings, rmgseed = 0):
"""
Short function that prepares map generation
and then dispatches to generation modules
"""
# Seed with a specific seed if one was provided
if rmgseed != 0:
print("seed:" + str(rmgseed))
seed(rmgseed)
module_name = "modules.%s.__init__" % settings.module_name
rmg_module = __import__(module_name, fromlist=[module_name])
max_attempts = 5
for i in range(max_attempts):
hmap = None
print("[i]At attempt %d of %d for for module %s to generate given these settings" % (i+1, max_attempts, settings.module_name))
hmap = rmg_module.gen(settings)
if hmap != None:
print("[+]Successful at attempt %d/%d for module %s to generate given these settings" % (i+1, max_attempts, settings.module_name))
break
else:
print("[!]Failed at attempt %d/%d for module %s to generate given these settings" % (i+1, max_attempts, settings.module_name))
return hmap
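# Hedged usage sketch (added, not part of the original module): `settings` only needs
# a `module_name` attribute naming a package under `modules/`; the name and seed below
# are invented placeholders.
#
#     class Settings:
#         module_name = "example_module"
#
#     hmap = dispatch_gen(Settings(), rmgseed=42)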
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -12
| 44
|
82c564532f62f466d26f2f89f9095a93136b4581
| 1,793
|
py
|
Python
|
openff/benchmark/analysis/metrics.py
|
openforcefield/openff-benchmark
|
e27320922e7583e313eaf32119de863d23842217
|
[
"MIT"
] | 6
|
2021-01-27T14:06:57.000Z
|
2022-03-01T12:54:42.000Z
|
openff/benchmark/analysis/metrics.py
|
openforcefield/openff-benchmark
|
e27320922e7583e313eaf32119de863d23842217
|
[
"MIT"
] | 83
|
2020-07-31T18:10:47.000Z
|
2022-03-07T22:59:24.000Z
|
openff/benchmark/analysis/metrics.py
|
openforcefield/openff-benchmark
|
e27320922e7583e313eaf32119de863d23842217
|
[
"MIT"
] | 1
|
2022-03-03T04:06:32.000Z
|
2022-03-03T04:06:32.000Z
|
#!/usr/bin/env python
"""
metrics.py
Metrics calculation for the analysis/report part of the openff-benchmark workflow
By: David F. Hahn
Version: Nov 25 2020
"""
import numpy as np
from rdkit import Chem
from rdkit.Chem import TorsionFingerprints
def calc_tfd(ref_mol, query_mol):
"""
Calculate Torsion Fingerprint Deviation between two molecular structures.
RDKit is required for TFD calculation.
References
----------
Modified from the following code:
https://github.com/MobleyLab/benchmarkff/03_analysis/compare_ffs.py
TFD reference:
https://pubs.acs.org/doi/10.1021/ci2002318
Parameters
----------
ref_mol : RDKit RDMol
query_mol : RDKit RDMol
Returns
-------
tfd : float
Torsion Fingerprint Deviation between ref and query molecules
"""
# check if the molecules are the same
    # TFD requires that the two molecules be instances of the same molecule
rsmiles = Chem.MolToSmiles(ref_mol)
qsmiles = Chem.MolToSmiles(query_mol)
if rsmiles != qsmiles:
print(
f"- WARNING: The reference mol {ref_mol.GetProp('_Name')} and "
f"query mol {query_mol.GetProp('_Name')} do NOT have the same "
f"SMILES strings as determined by RDKit MolToSmiles. "
f"\n {rsmiles}\n {qsmiles}"
)
tfd = np.nan
# calculate the TFD
else:
try:
tfd = TorsionFingerprints.GetTFDBetweenMolecules(ref_mol, query_mol)
# triggered for molecules such as urea
except IndexError:
print(
f"- Error calculating TFD on molecule {ref_mol.GetProp('_Name')}."
" Possibly no non-terminal rotatable bonds found."
)
tfd = np.nan
return tfd
| 26.761194
| 82
| 0.636921
|
#!/usr/bin/env python
"""
metrics.py
Metrics calculation for the analysis/report part of the openff-benchmark workflow
By: David F. Hahn
Version: Nov 25 2020
"""
import numpy as np
from rdkit import Chem
from rdkit.Chem import TorsionFingerprints
def calc_tfd(ref_mol, query_mol):
"""
Calculate Torsion Fingerprint Deviation between two molecular structures.
RDKit is required for TFD calculation.
References
----------
Modified from the following code:
https://github.com/MobleyLab/benchmarkff/03_analysis/compare_ffs.py
TFD reference:
https://pubs.acs.org/doi/10.1021/ci2002318
Parameters
----------
ref_mol : RDKit RDMol
query_mol : RDKit RDMol
Returns
-------
tfd : float
Torsion Fingerprint Deviation between ref and query molecules
"""
# check if the molecules are the same
    # TFD requires that the two molecules be instances of the same molecule
rsmiles = Chem.MolToSmiles(ref_mol)
qsmiles = Chem.MolToSmiles(query_mol)
if rsmiles != qsmiles:
print(
f"- WARNING: The reference mol {ref_mol.GetProp('_Name')} and "
f"query mol {query_mol.GetProp('_Name')} do NOT have the same "
f"SMILES strings as determined by RDKit MolToSmiles. "
f"\n {rsmiles}\n {qsmiles}"
)
tfd = np.nan
# calculate the TFD
else:
try:
tfd = TorsionFingerprints.GetTFDBetweenMolecules(ref_mol, query_mol)
# triggered for molecules such as urea
except IndexError:
print(
f"- Error calculating TFD on molecule {ref_mol.GetProp('_Name')}."
" Possibly no non-terminal rotatable bonds found."
)
tfd = np.nan
return tfd
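if __name__ == "__main__":
    # Hedged example (added, not part of the original module): compare two embedded
    # conformers of the same molecule; the SMILES string is an arbitrary choice.
    from rdkit.Chem import AllChem
    ref = Chem.AddHs(Chem.MolFromSmiles("CCCCc1ccccc1"))
    qry = Chem.AddHs(Chem.MolFromSmiles("CCCCc1ccccc1"))
    AllChem.EmbedMolecule(ref, randomSeed=1)
    AllChem.EmbedMolecule(qry, randomSeed=2)
    print(calc_tfd(ref, qry))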
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
54f7bd38aa77922afb64699b585c50bd6c3ab6db
| 2,429
|
py
|
Python
|
simple_rest_client_example/resources.py
|
allisson/pythonbrasil-2018-exemplos
|
e106ba6d4833185566d4d306e8c108c97987350c
|
[
"MIT"
] | 3
|
2018-10-17T12:34:12.000Z
|
2021-06-18T01:00:33.000Z
|
simple_rest_client_example/resources.py
|
allisson/pythonbrasil-2018-exemplos
|
e106ba6d4833185566d4d306e8c108c97987350c
|
[
"MIT"
] | null | null | null |
simple_rest_client_example/resources.py
|
allisson/pythonbrasil-2018-exemplos
|
e106ba6d4833185566d4d306e8c108c97987350c
|
[
"MIT"
] | 1
|
2018-10-22T12:52:34.000Z
|
2018-10-22T12:52:34.000Z
|
films_actions = {
'list': {
'method': 'GET',
'url': 'films'
},
'retrieve': {
'method': 'GET',
'url': 'films/{}',
},
'schema': {
'method': 'GET',
'url': 'films/schema',
},
}
people_actions = {
'list': {
'method': 'GET',
'url': 'people'
},
'retrieve': {
'method': 'GET',
'url': 'people/{}',
},
'schema': {
'method': 'GET',
'url': 'people/schema',
},
}
planets_actions = {
'list': {
'method': 'GET',
'url': 'planets'
},
'retrieve': {
'method': 'GET',
'url': 'planets/{}',
},
'schema': {
'method': 'GET',
'url': 'planets/schema',
},
}
species_actions = {
'list': {
'method': 'GET',
'url': 'species'
},
'retrieve': {
'method': 'GET',
'url': 'species/{}',
},
'schema': {
'method': 'GET',
'url': 'species/schema',
},
}
starships_actions = {
'list': {
'method': 'GET',
'url': 'starships'
},
'retrieve': {
'method': 'GET',
'url': 'starships/{}',
},
'schema': {
'method': 'GET',
'url': 'starships/schema',
},
}
vehicles_actions = {
'list': {
'method': 'GET',
'url': 'vehicles'
},
'retrieve': {
'method': 'GET',
'url': 'vehicles/{}',
},
'schema': {
'method': 'GET',
'url': 'vehicles/schema',
},
}
| 17.22695
| 63
| 0.532318
|
from simple_rest_client.resource import AsyncResource, Resource
films_actions = {
'list': {
'method': 'GET',
'url': 'films'
},
'retrieve': {
'method': 'GET',
'url': 'films/{}',
},
'schema': {
'method': 'GET',
'url': 'films/schema',
},
}
people_actions = {
'list': {
'method': 'GET',
'url': 'people'
},
'retrieve': {
'method': 'GET',
'url': 'people/{}',
},
'schema': {
'method': 'GET',
'url': 'people/schema',
},
}
planets_actions = {
'list': {
'method': 'GET',
'url': 'planets'
},
'retrieve': {
'method': 'GET',
'url': 'planets/{}',
},
'schema': {
'method': 'GET',
'url': 'planets/schema',
},
}
species_actions = {
'list': {
'method': 'GET',
'url': 'species'
},
'retrieve': {
'method': 'GET',
'url': 'species/{}',
},
'schema': {
'method': 'GET',
'url': 'species/schema',
},
}
starships_actions = {
'list': {
'method': 'GET',
'url': 'starships'
},
'retrieve': {
'method': 'GET',
'url': 'starships/{}',
},
'schema': {
'method': 'GET',
'url': 'starships/schema',
},
}
vehicles_actions = {
'list': {
'method': 'GET',
'url': 'vehicles'
},
'retrieve': {
'method': 'GET',
'url': 'vehicles/{}',
},
'schema': {
'method': 'GET',
'url': 'vehicles/schema',
},
}
class FilmsAsyncResource(AsyncResource):
actions = films_actions
class FilmsResource(Resource):
actions = films_actions
class PeopleAsyncResource(AsyncResource):
actions = people_actions
class PeopleResource(Resource):
actions = people_actions
class PlanetsAsyncResource(AsyncResource):
actions = planets_actions
class PlanetsResource(Resource):
actions = planets_actions
class SpeciesAsyncResource(AsyncResource):
actions = species_actions
class SpeciesResource(Resource):
actions = species_actions
class StarshipsAsyncResource(AsyncResource):
actions = starships_actions
class StarshipsResource(Resource):
actions = starships_actions
class VehiclesAsyncResource(AsyncResource):
actions = vehicles_actions
class VehiclesResource(Resource):
actions = vehicles_actions
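# Hedged usage sketch (added, not part of the original module), assuming
# simple_rest_client's API class and the public SWAPI endpoint:
#
#     from simple_rest_client.api import API
#     api = API(api_root_url="https://swapi.co/api/", json_encode_body=True)
#     api.add_resource(resource_name="films", resource_class=FilmsResource)
#     response = api.films.list()
#     print(response.body)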
| 0
| 0
| 0
| 552
| 0
| 0
| 0
| 42
| 298
|
2924a841e050929bf5200ad6aa6ce38ce0e83e43
| 2,035
|
py
|
Python
|
examples/OLX/gen.py
|
godber/banky
|
a3c41b810b16cce737f0ca7e2f1a10a32c3a5d66
|
[
"MIT"
] | null | null | null |
examples/OLX/gen.py
|
godber/banky
|
a3c41b810b16cce737f0ca7e2f1a10a32c3a5d66
|
[
"MIT"
] | null | null | null |
examples/OLX/gen.py
|
godber/banky
|
a3c41b810b16cce737f0ca7e2f1a10a32c3a5d66
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Generates an EdX style XML question from the provided YAML question file.
'''
if __name__ == '__main__':
gen()
| 31.796875
| 85
| 0.69828
|
#!/usr/bin/env python
'''
Generates an EdX style XML question from the provided YAML question file.
'''
from pathlib import Path
from random import sample
import click
import yaml
from lxml import etree
@click.command()
@click.argument('file_name')
def gen(file_name):
# Read and parse the YAML file containing the question
try:
f = open(file_name)
question = yaml.safe_load(f)
f.close()
except Exception as e:
raise e
problem = etree.Element('problem')
multiplechoiceresponse = etree.Element('multiplechoiceresponse')
problem.append(multiplechoiceresponse)
# Adding the label to multiplechoiceresponse
label = etree.Element('label')
label.text = question['label']
multiplechoiceresponse.append(label)
# Adding the choicegroup to multiplechoiceresponse
choicegroup = etree.Element('choicegroup', type='MultipleChoice')
multiplechoiceresponse.append(choicegroup) # FIXME: Do I have to move this down?
# Create a list of tuples like (choice, True or False)
choice_list = [
(choice_value, truth_value)
for truth_value in question['choices']
for choice_value in question['choices'][truth_value]
]
# Randomize the choice_list using `random.sample` then create the choice
# elements and add them to choicegroup. Note that since the truthiness
# values are boolean True or False, the correct value will be True or False
# (with first character capitalized), unlike the example EdX XML
for (choice_text, truthiness) in sample(choice_list, k=len(choice_list)):
choice = etree.Element("choice", correct=str(truthiness))
choice.text = choice_text
choicegroup.append(choice)
outfile = Path(Path(file_name).stem + '.xml')
if outfile.exists():
print('WARNING: File exists, overwriting %s.' % outfile)
# Write out the XML to a file.
et = etree.ElementTree(problem)
et.write(str(outfile), pretty_print=True)
if __name__ == '__main__':
gen()
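# Hedged example (added, not part of the original script) of a question YAML file
# this script could consume; the wording is invented:
#
#     label: "Which planet is known as the Red Planet?"
#     choices:
#       true:
#         - "Mars"
#       false:
#         - "Venus"
#         - "Jupiter"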
| 0
| 1,767
| 0
| 0
| 0
| 0
| 0
| -11
| 134
|
e31990aebb31cb9c87884c60bbda3ef72156d0f7
| 695
|
py
|
Python
|
AveriguaNBisiestosRangoTiempo.py
|
brown9804/Python_DiversosAlgortimos
|
e9ff0fbe761f24a49a30a513d50824ca56cafaa3
|
[
"Apache-2.0"
] | 3
|
2018-06-28T21:06:53.000Z
|
2018-07-01T20:39:30.000Z
|
AveriguaNBisiestosRangoTiempo.py
|
brown9804/Python_DiversosAlgortimos
|
e9ff0fbe761f24a49a30a513d50824ca56cafaa3
|
[
"Apache-2.0"
] | null | null | null |
AveriguaNBisiestosRangoTiempo.py
|
brown9804/Python_DiversosAlgortimos
|
e9ff0fbe761f24a49a30a513d50824ca56cafaa3
|
[
"Apache-2.0"
] | null | null | null |
#Python3
#Permite calcular cuantos años han sido bisiestos en un rango de años
###### DEFINICIONES ######
###### IMPLEMENTACION ######
an1 = int(input("Digite el año mayor que desea comparar "))
an2 = int(input("Digite el año menor "))
dif = an1 - an2
cont = 0
for indice in range (an2, an1+1):
if bisiesto(indice) == True:
cont = cont + 1
print ("La diferencia entre ", an1, " y ", an2, " es de ", dif)
if (cont >0):
print ("Entre este intervalo de aos hay ", cont, " aos bisiestos ")
else:
print("No hay aos bisiestos en este intervalo ")
| 26.730769
| 71
| 0.617266
|
#Python3
#Permite calcular cuantos años han sido bisiestos en un rango de años
###### DEFINICIONES ######
def bisiesto(year):
if (year % 400 == 0) or ((year % 4 == 0) and (year % 100 != 0)):
return True
else:
return False
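# Illustrative examples (added): bisiesto(2000) -> True (divisible by 400),
# bisiesto(1900) -> False (divisible by 100 but not by 400), bisiesto(2024) -> True.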
###### IMPLEMENTACION ######
an1 = int(input("Digite el año mayor que desea comparar "))
an2 = int(input("Digite el año menor "))
dif = an1 - an2
cont = 0
for indice in range (an2, an1+1):
if bisiesto(indice) == True:
cont = cont + 1
print ("La diferencia entre ", an1, " y ", an2, " es de ", dif)
if (cont >0):
print ("Entre este intervalo de años hay ", cont, " años bisiestos ")
else:
print("No hay años bisiestos en este intervalo ")
| 14
| 0
| 0
| 0
| 0
| 100
| 0
| 0
| 22
|
db4f23ef884783a4a230a932eac3a80a44e8dffd
| 4,569
|
py
|
Python
|
code/tasks/REGEX/trainers/executor.py
|
khanhptnk/iliad
|
3eb4f11c1d3cdb6784fd2f78a83ce07f984d3825
|
[
"MIT"
] | 7
|
2021-06-10T22:17:13.000Z
|
2022-03-03T05:58:55.000Z
|
code/tasks/REGEX/trainers/executor.py
|
khanhptnk/iliad
|
3eb4f11c1d3cdb6784fd2f78a83ce07f984d3825
|
[
"MIT"
] | null | null | null |
code/tasks/REGEX/trainers/executor.py
|
khanhptnk/iliad
|
3eb4f11c1d3cdb6784fd2f78a83ce07f984d3825
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
| 29.101911
| 78
| 0.516743
|
import logging
import os
import sys
sys.path.append('..')
import itertools
import json
import random
import torch
from misc import util
class ExecutorTrainer(object):
def __init__(self, config):
self.config = config
self.random = random.Random(self.config.seed)
def do_rollout(self, batch, executor, is_eval):
src_words = []
tgt_words = []
instructions = []
batch_size = len(batch)
for item in batch:
src_words.append(item['src_word'])
tgt_words.append(item['tgt_word'])
instructions.append(item['instruction'])
executor.init(src_words, instructions, is_eval)
t = 0
golds = [None] * batch_size
while not executor.has_terminated():
for i in range(batch_size):
if t + 1 < len(tgt_words[i]):
golds[i] = tgt_words[i][t + 1]
else:
golds[i] = '<PAD>'
executor.act(gold_actions=golds)
t += 1
def train(self, datasets, executor):
max_iters = self.config.trainer.max_iters
log_every = self.config.trainer.log_every
i_iter = 0
total_loss = 0
best_eval_loss = 1e9
best_eval_acc = -1e9
for batch in datasets['train'].iterate_batches():
i_iter += 1
self.do_rollout(batch, executor, False)
loss = executor.learn()
total_loss += loss
if i_iter % log_every == 0:
avg_loss = total_loss / log_every
total_loss = 0
log_str = 'Train iter %d (%d%%): ' % \
(i_iter, i_iter / max_iters * 100)
log_str += 'loss = %.4f' % avg_loss
logging.info('')
logging.info(log_str)
# Save last model
executor.save('last')
# Save best model
eval_info = self.evaluate(datasets['val'], executor)
eval_loss = eval_info['loss']
eval_acc = eval_info['acc']
eval_preds = eval_info['pred']
if eval_acc > best_eval_acc:
logging.info('New best acc: %.1f' % eval_acc)
best_eval_acc = eval_acc
executor.save('best_dev')
self.save_preds('best_dev', eval_preds)
self.save_preds('last', eval_preds)
if i_iter >= max_iters:
break
def evaluate(self, dataset, executor, save_pred=False):
losses = []
all_preds = []
is_match = []
for i, batch in enumerate(dataset.iterate_batches()):
with torch.no_grad():
# Compute loss on unseen data
self.do_rollout(batch, executor, False)
loss = executor.compute_loss().item()
losses.append(loss)
# Make predictions
src_words = [item['src_word'] for item in batch]
instructions = [item['instruction'] for item in batch]
preds = executor.predict(src_words, instructions)
for item, pred in zip(batch, preds):
new_item = {}
new_item.update(item)
pred = ''.join(pred)
gold = ''.join(new_item['tgt_word'])
new_item['pred'] = pred
is_match.append(gold == pred)
new_item['is_match'] = is_match[-1]
new_item['src_word'] = ''.join(new_item['src_word'])
new_item['tgt_word'] = ''.join(new_item['tgt_word'])
new_item['instruction'] = ' '.join(new_item['instruction'])
all_preds.append(new_item)
avg_loss = sum(losses) / len(losses)
acc = sum(is_match) / len(is_match) * 100
log_str = 'Evaluation on %s: ' % dataset.split
log_str += 'loss = %.1f' % avg_loss
log_str += ', acc = %.1f' % acc
logging.info(log_str)
if save_pred:
self.save_preds(dataset.split, all_preds)
eval_info = {
'acc' : acc,
'loss': avg_loss,
'pred': all_preds,
}
return eval_info
def save_preds(self, filename, all_preds):
file_path = '%s/%s' % (self.config.experiment_dir, filename + '.pred')
with open(file_path, 'w') as f:
json.dump(all_preds, f, indent=2)
logging.info('Saved eval info to %s' % file_path)
| 0
| 0
| 0
| 4,406
| 0
| 0
| 0
| -51
| 179
|
d5d37258f5db0c94fe2805ed35fc6221e3fda39b
| 11,999
|
py
|
Python
|
DICOM/ParametricMapsDictionary.py
|
QIB-Sheffield/WEASEL
|
e4dad345fd6f347cfac990708252844a7cbcd025
|
[
"Apache-2.0"
] | 2
|
2021-02-10T09:07:15.000Z
|
2021-03-16T17:05:24.000Z
|
DICOM/ParametricMapsDictionary.py
|
QIB-Sheffield/WEASEL
|
e4dad345fd6f347cfac990708252844a7cbcd025
|
[
"Apache-2.0"
] | 102
|
2021-01-20T11:14:21.000Z
|
2021-12-12T17:34:42.000Z
|
DICOM/ParametricMapsDictionary.py
|
QIB-Sheffield/WEASEL
|
e4dad345fd6f347cfac990708252844a7cbcd025
|
[
"Apache-2.0"
] | 1
|
2021-01-29T09:28:05.000Z
|
2021-01-29T09:28:05.000Z
|
"""
This module is used in `SaveDICOM_Image.py`, and its function names serve as values of the optional argument `parametric_map` in the write functions of `Classes.py` and in "writeNewPixelArray" of `DeveloperTools.py`.
The functions in this module capture special versions of DICOM with unique parameters/attributes/values.
**How to use:** provide the parametric_map in the functions mentioned previously as a string. This string is the name of one of the functions in the ParametricClass (e.g. parametric_map="RGB" or parametric_map="SEG").
"""
# Could insert a method regarding ROI colours, like in ITK-SNAP???
# rwv_sequence = Sequence()
# dicom.RealWorldValueMappingSequence = rwv_sequence
# rwv_slope = Dataset()
# rwv_slope.RealWorldValueSlope = 1
# rwv_sequence.append(rwv_slope)
# quantity_def = Dataset()
# quantity_def_sequence = Sequence()
# quantity_def.QuantityDefinitionSequence = quantity_def_sequence
# value_type = Dataset()
# value_type.ValueType = "CODE"
# quantity_def_sequence.append(value_type)
# concept_code = Dataset()
# concept_code_sequence = Sequence()
# concept_code.ConceptCodeSequence = concept_code_sequence
# code_code = Dataset()
# code_code.CodeValue = "113041"
# code_code.CodingSchemeDesignator = "DCM"
# code_code.CodeMeaning = "Apparent Diffusion Coefficient"
# concept_code_sequence.append(code_code)
# rwv_sequence.append(quantity_def)
# measure_units = Dataset()
# measure_units_sequence = Sequence()
# measure_units.MeasurementUnitsCodeSequence = measure_units_sequence
# measure_code = Dataset()
# measure_code.CodeValue = "um2/s"
# measure_code.CodingSchemeDesignator = "UCUM"
# measure_code.CodeMeaning = "um2/s"
# measure_units_sequence.append(measure_code)
# rwv_sequence.append(measure_units)
| 51.497854
| 215
| 0.685557
|
"""
This module is used in `SaveDICOM_Image.py`, and its function names serve as values of the optional argument `parametric_map` in the write functions of `Classes.py` and in "writeNewPixelArray" of `DeveloperTools.py`.
The functions in this module capture special versions of DICOM with unique parameters/attributes/values.
**How to use:** provide the parametric_map in the functions mentioned previously as a string. This string is the name of one of the functions in the ParametricClass (e.g. parametric_map="RGB" or parametric_map="SEG").
"""
import pydicom
from pydicom.dataset import Dataset
from pydicom.sequence import Sequence
import numpy as np
import datetime
import struct
def editDicom(newDicom, imageArray, parametricMap):
callCase = ParametricClass()
callCase.selectParametricMap(newDicom, imageArray, parametricMap)
dt = datetime.datetime.now()
timeStr = dt.strftime('%H%M%S') # long format with micro seconds
newDicom.PerformedProcedureStepStartDate = dt.strftime('%Y%m%d')
newDicom.PerformedProcedureStepStartTime = timeStr
newDicom.PerformedProcedureStepDescription = "Post-processing application"
return newDicom
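# Hedged usage sketch (added, not part of the original module): a caller such as
# SaveDICOM_Image.py passes a pydicom Dataset and a numpy pixel array together with
# one of the map names below; `new_ds` and `pixel_array` are placeholder names.
#
#     edited = editDicom(new_ds, pixel_array, "RGB")   # or "SEG", "ADC", ...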
class ParametricClass(object):
def selectParametricMap(self, dicom, imageArray, argument):
methodName = argument
method = getattr(self, methodName, lambda: "No valid Parametric Map chosen")
return method(dicom, imageArray)
def RGB(self, dicom, imageArray):
dicom.PhotometricInterpretation = 'RGB'
dicom.SamplesPerPixel = 3
dicom.BitsAllocated = 8
dicom.BitsStored = 8
dicom.HighBit = 7
dicom.add_new(0x00280006, 'US', 0) # Planar Configuration
dicom.RescaleSlope = 1
dicom.RescaleIntercept = 0
pixelArray = imageArray.astype(np.uint8) # Should we multiply by 255?
dicom.WindowCenter = int((np.amax(imageArray) - np.amin(imageArray)) / 2)
dicom.WindowWidth = np.absolute(int(np.amax(imageArray) - np.amin(imageArray)))
dicom.PixelData = pixelArray.tobytes()
return
def ADC(self, dicom, imageArray):
# The commented parts are to apply when we decide to include Parametric Map IOD. No readers can deal with this yet
# dicom.SOPClassUID = '1.2.840.10008.5.1.4.1.1.67'
dicom.SeriesDescription = "Apparent Diffusion Coefficient (um2/s)"
dicom.Modality = "RWV"
dicom.FrameLaterality = "U"
dicom.DerivedPixelContrast = "ADC"
dicom.BitsAllocated = 32
dicom.PixelRepresentation = 1
dicom.PhotometricInterpretation = "MONOCHROME2"
dicom.PixelAspectRatio = ["1", "1"] # Need to have a better look at this
dicom.RescaleSlope = 1
dicom.RescaleIntercept = 0
# Rotate the image back to the original orientation
imageArray = np.transpose(imageArray)
dicom.Rows = np.shape(imageArray)[-2]
dicom.Columns = np.shape(imageArray)[-1]
dicom.WindowCenter = int((np.amax(imageArray) - np.amin(imageArray)) / 2)
dicom.WindowWidth = np.absolute(int(np.amax(imageArray) - np.amin(imageArray)))
dicom.FloatPixelData = bytes(imageArray.astype(np.float32).flatten())
del dicom.PixelData, dicom.BitsStored, dicom.HighBit
dicom.RealWorldValueMappingSequence = [Dataset(), Dataset(), Dataset(), Dataset()]
dicom.RealWorldValueMappingSequence[0].QuantityDefinitionSequence = [Dataset(), Dataset()]
dicom.RealWorldValueMappingSequence[0].QuantityDefinitionSequence[0].ValueType = "CODE"
dicom.RealWorldValueMappingSequence[0].QuantityDefinitionSequence[1].ConceptCodeSequence = [Dataset(), Dataset(), Dataset()]
dicom.RealWorldValueMappingSequence[0].QuantityDefinitionSequence[1].ConceptCodeSequence[0].CodeValue = "113041"
dicom.RealWorldValueMappingSequence[0].QuantityDefinitionSequence[1].ConceptCodeSequence[1].CodingSchemeDesignator = "DCM"
dicom.RealWorldValueMappingSequence[0].QuantityDefinitionSequence[1].ConceptCodeSequence[2].CodeMeaning = "Apparent Diffusion Coefficient"
dicom.RealWorldValueMappingSequence[1].MeasurementUnitsCodeSequence = [Dataset(), Dataset(), Dataset()]
dicom.RealWorldValueMappingSequence[1].MeasurementUnitsCodeSequence[0].CodeValue = "um2/s"
dicom.RealWorldValueMappingSequence[1].MeasurementUnitsCodeSequence[1].CodingSchemeDesignator = "UCUM"
dicom.RealWorldValueMappingSequence[1].MeasurementUnitsCodeSequence[2].CodeMeaning = "um2/s"
dicom.RealWorldValueMappingSequence[2].RealWorldValueSlope = 1
anatomyString = dicom.BodyPartExamined
saveAnatomicalInfo(anatomyString, dicom.RealWorldValueMappingSequence[3])
return
def T2Star(self, dicom, imageArray):
dicom.PixelSpacing = [3, 3] # find a mechanism to pass reconstruct pixel here
return
def SEG(self, dicom, imageArray):
#dicom.SOPClassUID = '1.2.840.10008.5.1.4.1.1.66.4' # WILL NOT BE USED HERE - This is for PACS. There will be another one for DICOM Standard
# The commented parts are to apply when we decide to include SEG IOD. No readers can deal with this yet
dicom.BitsAllocated = 8 # According to Federov DICOM Standard this should be 1-bit
dicom.BitsStored = 8
dicom.HighBit = 7
#dicom.SmallestImagePixelValue = 0
#dicom.LargestImagePixelValue = int(np.amax(imageArray)) # max 255
dicom.add_new('0x00280106', 'US', 0) # Minimum
dicom.add_new('0x00280107', 'US', int(np.amax(imageArray))) # Maximum
dicom.PixelRepresentation = 0
dicom.SamplesPerPixel = 1
#dicom.WindowCenter = 0.5
#dicom.WindowWidth = 1.1
dicom.add_new('0x00281050', 'DS', 0.5) # WindowCenter
dicom.add_new('0x00281051', 'DS', 1.1) # WindowWidth
#dicom.RescaleIntercept = 0
#dicom.RescaleSlope = 1
dicom.add_new('0x00281052', 'DS', 0) # RescaleIntercept
dicom.add_new('0x00281053', 'DS', 1) # RescaleSlope
dicom.LossyImageCompression = '00'
pixelArray = np.transpose(imageArray.astype(np.uint8)) # Should we multiply by 255?
dicom.PixelData = pixelArray.tobytes()
dicom.Modality = 'SEG'
dicom.SegmentationType = 'FRACTIONAL'
dicom.MaximumFractionalValue = int(np.amax(imageArray)) # max 255
dicom.SegmentationFractionalType = 'OCCUPANCY'
# Segment Labels
if hasattr(dicom, "ImageComments"):
dicom.ContentDescription = dicom.ImageComments.split('_')[-1] # 'Image segmentation'
segment_numbers = np.unique(pixelArray)
segment_dictionary = dict(list(enumerate(segment_numbers)))
segment_label = dicom.ImageComments.split('_')[-1]
segment_dictionary[0] = 'Background'
segment_dictionary[1] = segment_label
for key in segment_dictionary:
dicom.SegmentSequence = [Dataset(), Dataset(), Dataset(), Dataset(), Dataset(), Dataset()]
dicom.SegmentSequence[0].SegmentAlgorithmType = 'MANUAL'
dicom.SegmentSequence[1].SegmentNumber = key
dicom.SegmentSequence[2].SegmentDescription = str(segment_dictionary[key])
dicom.SegmentSequence[3].SegmentLabel = str(segment_dictionary[key])
dicom.SegmentSequence[4].SegmentAlgorithmName = "Weasel"
if hasattr(dicom, "BodyPartExamined"):
anatomyString = dicom.BodyPartExamined
saveAnatomicalInfo(anatomyString, dicom.SegmentSequence[5])
else:
dicom.ContentDescription = "Mask with no label"
return
def Registration(self, dicom, imageArray):
dicom.Modality = "REG"
return
def Signal(self, dicom, imageArray):
dicom.Modality = "RWV"
dicom.DerivedPixelContrast = "GraphPlot"
dicom.PhotometricInterpretation = "MONOCHROME2"
dicom.RescaleSlope = 1
dicom.RescaleIntercept = 0
imageArray = np.transpose(imageArray.astype(np.float32))
center = (np.amax(imageArray) + np.amin(imageArray)) / 2
width = np.amax(imageArray) - np.amin(imageArray)
dicom.add_new('0x00281050', 'DS', center)
dicom.add_new('0x00281051', 'DS', width)
dicom.BitsAllocated = 32
dicom.Rows = np.shape(imageArray)[0]
dicom.Columns = np.shape(imageArray)[1]
dicom.FloatPixelData = bytes(imageArray.flatten())
del dicom.PixelData, dicom.BitsStored, dicom.HighBit
return
# Could insert a method regarding ROI colours, like in ITK-SNAP???
def saveAnatomicalInfo(anatomyString, dicom):
try:
# FOR NOW, THE PRIORITY WILL BE ON KIDNEY
if "KIDNEY" or "ABDOMEN" in anatomyString.upper():
dicom.AnatomicRegionSequence = [Dataset(), Dataset(), Dataset()]
dicom.AnatomicRegionSequence[0].CodeValue = "T-71000"
dicom.AnatomicRegionSequence[1].CodingSchemeDesignator = "SRT"
dicom.AnatomicRegionSequence[2].CodeMeaning = "Kidney"
elif "LIVER" in anatomyString.upper():
dicom.AnatomicRegionSequence = [Dataset(), Dataset(), Dataset()]
dicom.AnatomicRegionSequence[0].CodeValue = "T-62000"
dicom.AnatomicRegionSequence[1].CodingSchemeDesignator = "SRT"
dicom.AnatomicRegionSequence[2].CodeMeaning = "Liver"
elif "PROSTATE" in anatomyString.upper():
dicom.AnatomicRegionSequence = [Dataset(), Dataset(), Dataset()]
dicom.AnatomicRegionSequence[0].CodeValue = "T-9200B"
dicom.AnatomicRegionSequence[1].CodingSchemeDesignator = "SRT"
dicom.AnatomicRegionSequence[2].CodeMeaning = "Prostate"
elif "BODY" in anatomyString.upper():
dicom.AnatomicRegionSequence = [Dataset(), Dataset(), Dataset()]
dicom.AnatomicRegionSequence[0].CodeValue = "P5-0905E"
dicom.AnatomicRegionSequence[1].CodingSchemeDesignator = "LN"
dicom.AnatomicRegionSequence[2].CodeMeaning = "MRI whole body"
except:
pass
return
# Series, Instance and Class for Reference
#newDicom.ReferencedSeriesSequence = [Dataset(), Dataset()]
#newDicom.ReferencedSeriesSequence[0].SeriesInstanceUID = dicom_data.SeriesInstanceUID
#newDicom.ReferencedSeriesSequence[1].ReferencedInstanceSequence = [Dataset(), Dataset()]
#newDicom.ReferencedSeriesSequence[1].ReferencedInstanceSequence[0].ReferencedSOPClassUID = dicom_data.SOPClassUID
#newDicom.ReferencedSeriesSequence[1].ReferencedInstanceSequence[1].ReferencedSOPInstanceUID = dicom_data.SOPInstanceUID
# rwv_sequence = Sequence()
# dicom.RealWorldValueMappingSequence = rwv_sequence
# rwv_slope = Dataset()
# rwv_slope.RealWorldValueSlope = 1
# rwv_sequence.append(rwv_slope)
# quantity_def = Dataset()
# quantity_def_sequence = Sequence()
# quantity_def.QuantityDefinitionSequence = quantity_def_sequence
# value_type = Dataset()
# value_type.ValueType = "CODE"
# quantity_def_sequence.append(value_type)
# concept_code = Dataset()
# concept_code_sequence = Sequence()
# concept_code.ConceptCodeSequence = concept_code_sequence
# code_code = Dataset()
# code_code.CodeValue = "113041"
# code_code.CodingSchemeDesignator = "DCM"
# code_code.CodeMeaning = "Apparent Diffusion Coefficient"
# concept_code_sequence.append(code_code)
# rwv_sequence.append(quantity_def)
# measure_units = Dataset()
# measure_units_sequence = Sequence()
# measure_units.MeasurementUnitsCodeSequence = measure_units_sequence
# measure_code = Dataset()
# measure_code.CodeValue = "um2/s"
# measure_code.CodingSchemeDesignator = "UCUM"
# measure_code.CodeMeaning = "um2/s"
# measure_units_sequence.append(measure_code)
# rwv_sequence.append(measure_units)
| 0
| 0
| 0
| 7,375
| 0
| 2,479
| 0
| 6
| 203
|
4f5dc02bbb6371303a2f5b9a9f2436c462178b54
| 670
|
py
|
Python
|
templates/dags/example_dag.py
|
dfedde/terraform-aws-ecs-airflow
|
e928c9272d8735809341e1225551e00cb83270de
|
[
"MIT"
] | 42
|
2020-10-30T11:54:08.000Z
|
2022-03-21T11:02:32.000Z
|
templates/dags/example_dag.py
|
dfedde/terraform-aws-ecs-airflow
|
e928c9272d8735809341e1225551e00cb83270de
|
[
"MIT"
] | 20
|
2020-11-18T15:12:08.000Z
|
2022-02-07T15:36:54.000Z
|
templates/dags/example_dag.py
|
dfedde/terraform-aws-ecs-airflow
|
e928c9272d8735809341e1225551e00cb83270de
|
[
"MIT"
] | 24
|
2020-11-19T21:00:57.000Z
|
2022-03-21T11:02:36.000Z
|
from datetime import datetime
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
args = {
"owner": "dataroots",
"start_date": datetime(2020, 10, 12),
}
with DAG(
dag_id="example_dag",
catchup=False,
max_active_runs=1,
default_args=args,
schedule_interval="*/5 * * * *"
) as dag:
task_a = DummyOperator(
task_id="task_a"
)
task_b = DummyOperator(
task_id="task_b"
)
task_c = DummyOperator(
task_id="task_c"
)
task_d = DummyOperator(
task_id="task_d"
)
task_a >> [task_b, task_c] >> task_d
| 19.142857
| 58
| 0.638806
|
from datetime import timedelta, datetime
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow import AirflowException
args = {
"owner": "dataroots",
"start_date": datetime(2020, 10, 12),
}
with DAG(
dag_id="example_dag",
catchup=False,
max_active_runs=1,
default_args=args,
schedule_interval="*/5 * * * *"
) as dag:
task_a = DummyOperator(
task_id="task_a"
)
task_b = DummyOperator(
task_id="task_b"
)
task_c = DummyOperator(
task_id="task_c"
)
task_d = DummyOperator(
task_id="task_d"
)
task_a >> [task_b, task_c] >> task_d
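# Illustrative note (added): the bit-shift operators define the dependency graph, so
# task_a runs first, task_b and task_c can run in parallel, and task_d runs only after
# both task_b and task_c have finished.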
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 22
|
d2d300c5b67883d38f70b2c1b58d33ec2b7b239c
| 6,778
|
py
|
Python
|
solver.py
|
aelkouk/rainfall_runoff
|
7ab984c77abbef38c768fea9993b0cfecaca3e67
|
[
"MIT"
] | null | null | null |
solver.py
|
aelkouk/rainfall_runoff
|
7ab984c77abbef38c768fea9993b0cfecaca3e67
|
[
"MIT"
] | null | null | null |
solver.py
|
aelkouk/rainfall_runoff
|
7ab984c77abbef38c768fea9993b0cfecaca3e67
|
[
"MIT"
] | null | null | null |
# Purpose: Implement time stepping scheme to solve individual and coupled state equations
# Record of revisions:
# Date Programmer Description of change
# ======== ============= =====================
# 09-2020 A. Elkouk Original code
import numpy as np
# ----------------------------------------------------------------------------------------------------------------------
# Explicit (forward) Euler method
# ----------------------------------------------------------------------------------------------------------------------
def explicitEuler(f_S, S0, T, dt, **kwargs):
""" Solve dS/dt = f_S(S, t), S(0)=S0, for n=T/dt steps
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
S0 : float
State initial condition at t=0
T : float or int
Time period [days]
dt : float or int
Time step [days]
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : ndarray
Integrated state for n=T/dt time steps
t : ndarray
Time steps [days]
"""
n = int(T / dt)
t = np.zeros(n + 1)
dS = np.zeros(n + 1)
dS[0] = S0
t[0] = 0
for k in range(n):
t[k + 1] = t[k] + dt
dS[k + 1] = dS[k] + (dt * f_S(dS[k], **kwargs))
if dS[k + 1] < 0.0:
dS[k + 1] = 0.0
return dS, t
def explicitEuler_coupled_states(f_S, S0, T, dt, precip, **kwargs):
""" Solve dS/dt = f_S(S, t), S(0)=S0, for n=T/dt steps
Parameters
----------
f_S : function
Coupled state function of the model sub-domains (e.g. canopy, unsaturated zone, and saturated zone)
S0 : array_like
State initial conditions at t=0 (e.g. [canopy, unsaturated zone, and saturated zone])
T : float or int
Time period [days]
dt : float or int
Time step [days]
precip : array_like
Precipitation flux [mm days^-1] at n=T/dt time step
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : ndarray
Integrated states for n=T/dt time steps with shape (n, nbr_states)
RO : ndarray
Total runoff [mm day^-1] for n=T/dt time steps
t : ndarray
Time steps [days]
"""
n = int(T / dt)
nS = len(S0)
t = np.zeros(n + 1)
dS = np.zeros((n + 1, nS))
RO = np.zeros(n + 1)
dS[0, :] = S0
t[0] = 0
for k in range(n):
t[k + 1] = t[k] + dt
Sk, RO_k = f_S(dS[k], precip[k], **kwargs)
RO[k] = RO_k
dS[k + 1, :] = dS[k, :] + (dt * np.array(Sk))
dS = np.where(dS < 0.0, 0.0, dS)
return dS, RO, t
# ----------------------------------------------------------------------------------------------------------------------
# Heun's method
# ----------------------------------------------------------------------------------------------------------------------
def Heun(f_S, S0, T, dt, **kwargs):
""" Solve dS/dt = f_S(S, t), S(0)=S0, for n=T/dt steps using the explicit Heun's method
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
S0 : float
State initial condition at t=0
T : float or int
Time period [days]
dt : float or int
Time step [days]
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : ndarray
Integrated state for n=T/dt time steps
t : ndarray
Time steps [days]
"""
n = int(T / dt)
t = np.zeros(n + 1)
dS = np.zeros(n + 1)
dS[0] = S0
t[0] = 0
for k in range(n):
t[k + 1] = t[k] + dt
K1 = f_S(dS[k], **kwargs)
K2 = f_S((K1 * dt) + dS[k], **kwargs)
dS[k + 1] = dS[k] + (0.5 * dt * (K1 + K2))
if dS[k + 1] < 0.0:
dS[k + 1] = 0.0
return dS, t
def Heun_ndt(f_S, Si, dt, T, **kwargs):
""" Solve dS/dt = f_S(S, t), S(t=t_i)=Si, at t=T with n steps (n=T/dt), using the explicit Heun's method
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
Si : float
State at time t=t_i
dt : float or int
Time step [days]
T : float or int
Time period [days]
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : float
Integrated state at t=ndt with n=T/dt
"""
n = int(T / dt)
dS = Si
for _ in range(n):
K1 = f_S(dS, **kwargs)
K2 = f_S((K1 * dt) + dS, **kwargs)
dS = dS + (0.5 * dt * (K1 + K2))
return dS
def Heun_adaptive_substep(f_S, Si, dt, T, tau_r, tau_abs, s=0.9, rmin=0.1, rmax=4.0, EPS=10 ** (-10), **kwargs):
""" Solve dS/dt = f_S(S, t), S(t=t_i)=Si, using the explicit Heun's method with numerical
error control and adaptive sub stepping
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
Si : float
State at time t=t_i
dt : float or int
Time step [days]
T : float or int
Time period [days]
tau_r : float
Relative truncation error tolerance
tau_abs : float
Absolute truncation error tolerance
s : float
Safety factor
rmin, rmax : float
Step size multiplier constraints
EPS : float
Machine constant
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : list
Integrated state
time : list
Time steps [days]
"""
t = 0
dS = [Si]
time = [t]
while t < T:
t += dt
y1 = Heun_ndt(f_S, Si, dt, dt, **kwargs)
y2 = Heun_ndt(f_S, Si, dt/2, dt, **kwargs)
err = abs(y1 - y2)
diff = err - ((tau_r * abs(y2)) + tau_abs)
if diff < 0:
Si = y2
dS.append(Si)
time.append(dt)
dt = dt * min(s * np.sqrt((tau_r * abs(y2) + tau_abs) / (max(err, EPS))), rmax)
elif diff > 0:
t -= dt
dt = dt * max(s * np.sqrt((tau_r * abs(y2) + tau_abs) / (max(err, EPS))), rmin)
return dS, time
| 30.669683
| 121
| 0.482001
|
# Purpose: Implement time stepping scheme to solve individual and coupled state equations
# Record of revisions:
# Date Programmer Description of change
# ======== ============= =====================
# 09-2020 A. Elkouk Original code
import numpy as np
# ----------------------------------------------------------------------------------------------------------------------
# Explicit (forward) Euler method
# ----------------------------------------------------------------------------------------------------------------------
def explicitEuler(f_S, S0, T, dt, **kwargs):
""" Solve dS/dt = f_S(S, t), S(0)=S0, for n=T/dt steps
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
S0 : float
State initial condition at t=0
T : float or int
Time period [days]
dt : float or int
Time step [days]
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : ndarray
Integrated state for n=T/dt time steps
t : ndarray
Time steps [days]
"""
n = int(T / dt)
t = np.zeros(n + 1)
dS = np.zeros(n + 1)
dS[0] = S0
t[0] = 0
for k in range(n):
t[k + 1] = t[k] + dt
dS[k + 1] = dS[k] + (dt * f_S(dS[k], **kwargs))
if dS[k + 1] < 0.0:
dS[k + 1] = 0.0
return dS, t
def explicitEuler_coupled_states(f_S, S0, T, dt, precip, **kwargs):
""" Solve dS/dt = f_S(S, t), S(0)=S0, for n=T/dt steps
Parameters
----------
f_S : function
Coupled state function of the model sub-domains (e.g. canopy, unsaturated zone, and saturated zone)
S0 : array_like
State initial conditions at t=0 (e.g. [canopy, unsaturated zone, and saturated zone])
T : float or int
Time period [days]
dt : float or int
Time step [days]
precip : array_like
Precipitation flux [mm days^-1] at n=T/dt time step
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : ndarray
Integrated states for n=T/dt time steps with shape (n, nbr_states)
RO : ndarray
Total runoff [mm day^-1] for n=T/dt time steps
t : ndarray
Time steps [days]
"""
n = int(T / dt)
nS = len(S0)
t = np.zeros(n + 1)
dS = np.zeros((n + 1, nS))
RO = np.zeros(n + 1)
dS[0, :] = S0
t[0] = 0
for k in range(n):
t[k + 1] = t[k] + dt
Sk, RO_k = f_S(dS[k], precip[k], **kwargs)
RO[k] = RO_k
dS[k + 1, :] = dS[k, :] + (dt * np.array(Sk))
dS = np.where(dS < 0.0, 0.0, dS)
return dS, RO, t
# ----------------------------------------------------------------------------------------------------------------------
# Heun's method
# ----------------------------------------------------------------------------------------------------------------------
def Heun(f_S, S0, T, dt, **kwargs):
""" Solve dS/dt = f_S(S, t), S(0)=S0, for n=T/dt steps using the explicit Heun's method
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
S0 : float
State initial condition at t=0
T : float or int
Time period [days]
dt : float or int
Time step [days]
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : ndarray
Integrated state for n=T/dt time steps
t : ndarray
Time steps [days]
"""
n = int(T / dt)
t = np.zeros(n + 1)
dS = np.zeros(n + 1)
dS[0] = S0
t[0] = 0
for k in range(n):
t[k + 1] = t[k] + dt
K1 = f_S(dS[k], **kwargs)
K2 = f_S((K1 * dt) + dS[k], **kwargs)
dS[k + 1] = dS[k] + (0.5 * dt * (K1 + K2))
if dS[k + 1] < 0.0:
dS[k + 1] = 0.0
return dS, t
def Heun_ndt(f_S, Si, dt, T, **kwargs):
""" Solve dS/dt = f_S(S, t), S(t=t_i)=Si, at t=T with n steps (n=T/dt), using the explicit Heun's method
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
Si : float
State at time t=t_i
dt : float or int
Time step [days]
T : float or int
Time period [days]
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : float
Integrated state at t=ndt with n=T/dt
"""
n = int(T / dt)
dS = Si
for _ in range(n):
K1 = f_S(dS, **kwargs)
K2 = f_S((K1 * dt) + dS, **kwargs)
dS = dS + (0.5 * dt * (K1 + K2))
return dS
def Heun_adaptive_substep(f_S, Si, dt, T, tau_r, tau_abs, s=0.9, rmin=0.1, rmax=4.0, EPS=10 ** (-10), **kwargs):
""" Solve dS/dt = f_S(S, t), S(t=t_i)=Si, using the explicit Heun's method with numerical
error control and adaptive sub stepping
Parameters
----------
f_S : function
State function for given model sub-domain (canopy, unsaturated zone, saturated zone)
Si : float
State at time t=t_i
dt : float or int
Time step [days]
T : float or int
Time period [days]
tau_r : float
Relative truncation error tolerance
tau_abs : float
Absolute truncation error tolerance
s : float
Safety factor
rmin, rmax : float
Step size multiplier constraints
EPS : float
Machine constant
kwargs : dict
        *kwargs* are used to specify the additional parameters used by the state function (f_S)
Returns
-------
dS : list
Integrated state
time : list
Time steps [days]
"""
t = 0
dS = [Si]
time = [t]
while t < T:
t += dt
y1 = Heun_ndt(f_S, Si, dt, dt, **kwargs)
y2 = Heun_ndt(f_S, Si, dt/2, dt, **kwargs)
err = abs(y1 - y2)
diff = err - ((tau_r * abs(y2)) + tau_abs)
if diff < 0:
Si = y2
dS.append(Si)
time.append(dt)
dt = dt * min(s * np.sqrt((tau_r * abs(y2) + tau_abs) / (max(err, EPS))), rmax)
elif diff > 0:
t -= dt
dt = dt * max(s * np.sqrt((tau_r * abs(y2) + tau_abs) / (max(err, EPS))), rmin)
return dS, time
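if __name__ == "__main__":
    # Hedged usage sketch (added, not part of the original module): a linear reservoir
    # dS/dt = P - k*S integrated with explicitEuler; P and k are invented illustration
    # values, not names used elsewhere in this code.
    def linear_reservoir(S, P=2.0, k=0.1):
        return P - k * S

    S, t = explicitEuler(linear_reservoir, S0=10.0, T=100, dt=0.5, P=2.0, k=0.1)
    print(S[-1])  # approaches the steady state P / k = 20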
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e7600c142bd7e27a405fac85062163bfc718090f
| 134
|
py
|
Python
|
mybitbank/libs/jsonrpc/authproxy.py
|
zonedoutspace/mybitbank
|
85d28726117a3c1ca76be5772d30c9edae1df7f4
|
[
"MIT"
] | 15
|
2015-08-29T12:35:59.000Z
|
2018-02-06T06:26:26.000Z
|
mybitbank/libs/jsonrpc/authproxy.py
|
FireWalkerX/mybitbank
|
945e604e5fee3914c7c98a25c2c34831ba0ad946
|
[
"MIT"
] | null | null | null |
mybitbank/libs/jsonrpc/authproxy.py
|
FireWalkerX/mybitbank
|
945e604e5fee3914c7c98a25c2c34831ba0ad946
|
[
"MIT"
] | 19
|
2015-02-03T21:32:51.000Z
|
2021-11-06T12:08:26.000Z
|
from mybitbank.libs.bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
__all__ = ['AuthServiceProxy', 'JSONRPCException']
| 44.666667
| 82
| 0.843284
|
from mybitbank.libs.bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
__all__ = ['AuthServiceProxy', 'JSONRPCException']
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c316d4e0585be0a5ee8136f55d032f0e3e48ad80
| 51,673
|
py
|
Python
|
pineboolib/plugins/sql/flmysql_myisam2.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 2
|
2015-09-19T16:54:49.000Z
|
2016-09-12T08:06:29.000Z
|
pineboolib/plugins/sql/flmysql_myisam2.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 1
|
2017-08-14T17:07:14.000Z
|
2017-08-15T00:22:47.000Z
|
pineboolib/plugins/sql/flmysql_myisam2.py
|
deavid/pineboo
|
acc96ab6d5b8bb182990af6dea4bf0986af15549
|
[
"MIT"
] | 9
|
2015-01-15T18:15:42.000Z
|
2019-05-05T18:53:00.000Z
|
"""
Module for MYISAM2 driver.
"""
from pineboolib import logging
logger = logging.getLogger(__name__)
| 33.751143
| 128
| 0.482147
|
"""
Module for MYISAM2 driver.
"""
from PyQt5.Qt import qWarning, QApplication, QRegExp # type: ignore
from PyQt5.QtXml import QDomDocument # type: ignore
from PyQt5.QtWidgets import QMessageBox, QWidget # type: ignore
from pineboolib.core.utils.utils_base import auto_qt_translate_text
from pineboolib.application.utils.check_dependencies import check_dependencies
from pineboolib.application.database.pnsqlquery import PNSqlQuery
from pineboolib.application.database.pnsqlcursor import PNSqlCursor
from pineboolib.core.utils.utils_base import text2bool
from pineboolib.application.metadata.pnfieldmetadata import PNFieldMetaData
from pineboolib.fllegacy import flapplication
from pineboolib.fllegacy.flutil import FLUtil
from pineboolib.application import project
import traceback
from pineboolib import logging
from PyQt5.QtCore import QTime, QDate, QDateTime, Qt # type: ignore
from typing import Any, Iterable, Optional, Union, List, Dict, cast
logger = logging.getLogger(__name__)
class FLMYSQL_MYISAM2(object):
"""MYISAM2 Driver class."""
version_: str
conn_ = None
name_: str
alias_: str
lastError_: Optional[str]
cursorsArray_: Dict[str, Any] # IApiCursor
noInnoDB: bool
mobile_: bool
pure_python_: bool
defaultPort_: int
cursor_ = None
db_ = None
engine_ = None
session_ = None
declarative_base_ = None
def __init__(self):
"""Create empty driver."""
self.version_ = "0.8"
self.conn_ = None
self.name_ = "FLMYSQL_MyISAM2"
self.open_ = False
self.alias_ = "MySQL MyISAM (PyMySQL)"
self.cursorsArray_ = {}
self.noInnoDB = True
self._dbname = None
self.mobile_ = True
self.pure_python_ = True
self.defaultPort_ = 3306
self.rowsFetched: Dict[str, int] = {}
self.active_create_index = True
self.db_ = None
self.engine_ = None
self.session_ = None
self.declarative_base_ = None
self.lastError_ = None
def version(self) -> str:
"""Get driver version."""
return self.version_
def driverName(self) -> str:
"""Get driver name."""
return self.name_
def pure_python(self) -> bool:
"""Return if this driver is pure python."""
return self.pure_python_
def safe_load(self) -> Any:
"""Check dependencies for this driver."""
return check_dependencies({"pymysql": "PyMySQL", "sqlalchemy": "sqlAlchemy"}, False)
def mobile(self) -> bool:
"""Check if is suitable for mobile platform."""
return self.mobile_
def isOpen(self) -> bool:
"""Return if driver has an open connection."""
return self.open_
def DBName(self) -> Any:
"""Return database name."""
return self._dbname
def connect(self, db_name, db_host, db_port, db_userName, db_password) -> Any:
"""Connect to a database."""
self._dbname = db_name
check_dependencies({"pymysql": "PyMySQL", "sqlalchemy": "sqlAlchemy"})
from sqlalchemy import create_engine # type: ignore
import pymysql
try:
self.conn_ = pymysql.connect(
host=db_host,
user=db_userName,
password=db_password,
db=db_name,
charset="utf8",
autocommit=True,
)
self.engine_ = create_engine(
"mysql+mysqldb://%s:%s@%s:%s/%s"
% (db_userName, db_password, db_host, db_port, db_name)
)
except pymysql.Error as e:
if project._splash:
project._splash.hide()
if "Unknown database" in str(e):
if project._DGI and not project.DGI.localDesktop():
return False
ret = QMessageBox.warning(
QWidget(),
"Pineboo",
"La base de datos %s no existe.\n¿Desea crearla?" % db_name,
cast(QMessageBox, QMessageBox.Ok | QMessageBox.No),
)
if ret == QMessageBox.No:
return False
else:
try:
tmpConn = pymysql.connect(
host=db_host,
user=db_userName,
password=db_password,
charset="utf8",
autocommit=True,
)
cursor = tmpConn.cursor()
try:
cursor.execute("CREATE DATABASE %s" % db_name)
except Exception:
print("ERROR: FLMYSQL2.connect", traceback.format_exc())
cursor.execute("ROLLBACK")
cursor.close()
return False
cursor.close()
return self.connect(db_name, db_host, db_port, db_userName, db_password)
except Exception:
qWarning(traceback.format_exc())
QMessageBox.information(
QWidget(),
"Pineboo",
"ERROR: No se ha podido crear la Base de Datos %s" % db_name,
QMessageBox.Ok,
)
print("ERROR: No se ha podido crear la Base de Datos %s" % db_name)
return False
else:
QMessageBox.information(
QWidget(), "Pineboo", "Error de conexión\n%s" % str(e), QMessageBox.Ok
)
return False
if self.conn_:
self.open_ = True
# self.conn_.autocommit(True)
# self.conn_.set_character_set('utf8')
return self.conn_
def cursor(self) -> Any:
"""Get current cursor for db."""
if not self.conn_:
raise Exception("Not connected")
if not self.cursor_:
self.cursor_ = self.conn_.cursor()
return self.cursor_
def engine(self) -> Any:
"""Get current driver engine."""
return self.engine_
def session(self) -> None:
"""Get sqlAlchemy session."""
if self.session_ is None:
from sqlalchemy.orm import sessionmaker # type: ignore
# from sqlalchemy import event
# from pineboolib.pnobjectsfactory import before_commit, after_commit
Session = sessionmaker(bind=self.engine())
self.session_ = Session()
# event.listen(Session, 'before_commit', before_commit, self.session_)
# event.listen(Session, 'after_commit', after_commit, self.session_)
def declarative_base(self) -> Any:
"""Get sqlAlchemy declarative base."""
if self.declarative_base_ is None:
from sqlalchemy.ext.declarative import declarative_base # type: ignore
self.declarative_base_ = declarative_base()
return self.declarative_base_
def formatValueLike(self, type_, v: Any, upper) -> str:
"""Format value for database LIKE expression."""
res = "IS NULL"
if v:
if type_ == "bool":
s = str(v[0]).upper()
if s == flapplication.aqApp.tr("Sí")[0].upper():
res = "=1"
                elif s == flapplication.aqApp.tr("No")[0].upper():
res = "=0"
elif type_ == "date":
from pineboolib.application.utils.date_conversion import date_dma_to_amd
date_amd = date_dma_to_amd(str(v))
if date_amd:
res = " LIKE '%" + date_amd + "'"
else:
logger.warning("formatValueLike: failed to convert %s to ISO date format", v)
elif type_ == "time":
t = v.toTime()
res = " LIKE '" + t.toString(Qt.ISODate) + "%'"
else:
res = str(v)
if upper:
res = res.upper()
res = " LIKE '" + res + "%'"
return res
def formatValue(self, type_, v: Any, upper) -> Union[str, bool, None]:
"""Format value for database WHERE comparison."""
# util = FLUtil()
s: Union[str, bool, None] = None
# if v == None:
# v = ""
# TODO: psycopg2.mogrify ???
if v is None:
return "NULL"
if type_ == "bool" or type_ == "unlock":
s = text2bool(v)
elif type_ == "date":
# val = util.dateDMAtoAMD(v)
val = v
if val is None:
s = "Null"
else:
s = "'%s'" % val
elif type_ == "time":
s = "'%s'" % v
elif type_ in ("uint", "int", "double", "serial"):
if s == "Null":
s = "0"
else:
s = v
elif type_ in ("string", "stringlist"):
if v == "":
s = "Null"
else:
if type_ == "string":
v = auto_qt_translate_text(v)
if upper and type_ == "string":
v = v.upper()
s = "'%s'" % v
elif type_ == "pixmap":
if v.find("'") > -1:
v = self.normalizeValue(v)
s = "'%s'" % v
else:
s = v
# print ("PNSqlDriver(%s).formatValue(%s, %s) = %s" % (self.name_, type_, v, s))
return s
    def canOverPartition(self) -> bool:
        """Return if this driver supports OVER/PARTITION clauses."""
        return True
def tables(self, type_name=None) -> list:
"""Introspect tables in database."""
tl: List[str] = []
if not self.isOpen():
return tl
q_tables = PNSqlQuery()
q_tables.exec_("show tables")
while q_tables.next():
tl.append(q_tables.value(0))
return tl
def nextSerialVal(self, table, field) -> Any:
"""Get next serial value for given table and field."""
if not self.isOpen():
logger.warning("%s::beginTransaction: Database not open", self.name_)
return None
# if not self.transaction():
# self.setLastError("No se puede iniciar la transacción", "BEGIN WORK")
# return None
max = 0
cur_max = 0
updateQry = False
ret = None
q = PNSqlQuery()
q.setSelect("max(%s)" % field)
q.setFrom(table)
q.setWhere("1 = 1")
if not q.exec_():
logger.warning("not exec sequence")
return None
if q.first() and q.value(0) is not None:
max = q.value(0)
if not self.conn_:
raise Exception("must be connected")
cursor = self.conn_.cursor()
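        # flseqs emulates sequences: it stores the last value handed out per
        # (tabla, campo). If a stored value exists it is incremented and updated;
        # otherwise max(field) + 1 is used and a new flseqs row is inserted.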
strQry: Optional[str] = "SELECT seq FROM flseqs WHERE tabla = '%s' AND campo ='%s'" % (
table,
field,
)
try:
cur_max = 0
cursor.execute(strQry)
line = cursor.fetchone()
if line:
cur_max = line[0]
except Exception:
            logger.warning(
                "%s:: La consulta a la base de datos ha fallado\n %s",
                self.name_,
                traceback.format_exc(),
            )
self.rollbackTransaction()
return
if cur_max > 0:
updateQry = True
ret = cur_max
else:
ret = max
ret += 1
strQry = None
if updateQry:
if ret > cur_max:
strQry = "UPDATE flseqs SET seq=%s WHERE tabla = '%s' AND campo = '%s'" % (
ret,
table,
field,
)
else:
strQry = "INSERT INTO flseqs (tabla,campo,seq) VALUES('%s','%s',%s)" % (
table,
field,
ret,
)
if strQry is not None:
try:
cursor.execute(strQry)
except Exception:
logger.warning(
"%s:: La consulta a la base de datos ha fallado\n %s",
self.name_,
traceback.format_exc(),
)
self.rollbackTransaction()
return
# if not self.commitTransaction():
# qWarning("%s:: No se puede aceptar la transacción" % self.name_)
# return None
return ret
def queryUpdate(self, name, update, filter) -> str:
"""Return a templates UPDATE sql."""
sql = "UPDATE %s SET %s WHERE %s" % (name, update, filter)
return sql
def savePoint(self, n) -> bool:
"""Perform a transaction savepoint."""
if n == 0:
return True
if not self.isOpen():
logger.warning("%s::savePoint: Database not open", self.name_)
return False
cursor = self.cursor()
try:
cursor.execute("SAVEPOINT sv_%s" % n)
except Exception:
self.setLastError("No se pudo crear punto de salvaguarda", "SAVEPOINT sv_%s" % n)
logger.warning(
"MySQLDriver:: No se pudo crear punto de salvaguarda SAVEPOINT sv_%s \n %s ",
n,
traceback.format_exc(),
)
return False
return True
def canSavePoint(self) -> bool:
"""Retrieve if this driver can perform savepoints."""
return False
def canTransaction(self) -> bool:
"""Retrieve if this driver can perform transactions."""
return False
def rollbackSavePoint(self, n) -> bool:
"""Rollback transaction to last savepoint."""
if n == 0:
return True
if not self.isOpen():
logger.warning("%s::rollbackSavePoint: Database not open", self.name_)
return False
cursor = self.cursor()
try:
cursor.execute("ROLLBACK TO SAVEPOINT sv_%s" % n)
except Exception:
            self.setLastError(
                "No se pudo rollback a punto de salvaguarda", "ROLLBACK TO SAVEPOINT sv_%s" % n
            )
logger.warning(
"%s:: No se pudo rollback a punto de salvaguarda ROLLBACK TO SAVEPOINT sv_%s\n %s",
self.name_,
n,
traceback.format_exc(),
)
return False
return True
def setLastError(self, text, command) -> None:
"""Set last error from database."""
self.lastError_ = "%s (%s)" % (text, command)
def lastError(self) -> Optional[str]:
"""Get last error happened on database."""
return self.lastError_
def commitTransaction(self) -> bool:
"""Commit database transaction."""
if not self.isOpen():
logger.warning("%s::commitTransaction: Database not open", self.name_)
cursor = self.cursor()
try:
cursor.execute("COMMIT")
except Exception:
self.setLastError("No se pudo aceptar la transacción", "COMMIT")
logger.warning(
"%s:: No se pudo aceptar la transacción COMMIT\n %s",
self.name_,
traceback.format_exc(),
)
return False
return True
def rollbackTransaction(self) -> bool:
"""Rollback database transaction."""
if not self.isOpen():
logger.warning("%s::rollbackTransaction: Database not open", self.name_)
cursor = self.cursor()
if self.canSavePoint():
try:
cursor.execute("ROLLBACK")
except Exception:
self.setLastError("No se pudo deshacer la transacción", "ROLLBACK")
logger.warning(
"%s:: No se pudo deshacer la transacción ROLLBACK\n %s",
self.name_,
traceback.format_exc(),
)
return False
else:
qWarning(
"%s:: No se pudo deshacer la transacción ROLLBACK\n %s"
% (self.name_, traceback.format_exc())
)
return True
def transaction(self) -> bool:
"""Start new database transaction."""
if not self.isOpen():
logger.warning("%s::transaction: Database not open", self.name_)
cursor = self.cursor()
try:
cursor.execute("START TRANSACTION")
except Exception:
self.setLastError("No se pudo crear la transacción", "BEGIN WORK")
logger.warning(
"%s:: No se pudo crear la transacción BEGIN\n %s",
self.name_,
traceback.format_exc(),
)
return False
return True
def releaseSavePoint(self, n) -> bool:
"""Remove named savepoint from database."""
if n == 0:
return True
if not self.isOpen():
qWarning("%s::releaseSavePoint: Database not open" % self.name_)
return False
cursor = self.cursor()
try:
cursor.execute("RELEASE SAVEPOINT sv_%s" % n)
except Exception:
self.setLastError(
"No se pudo release a punto de salvaguarda", "RELEASE SAVEPOINT sv_%s" % n
)
qWarning(
"MySQLDriver:: No se pudo release a punto de salvaguarda RELEASE SAVEPOINT sv_%s\n %s"
% (n, traceback.format_exc())
)
return False
return True
def setType(self, type_, leng=None) -> str:
"""Template a SQL data type."""
if leng:
return "::%s(%s)" % (type_, leng)
else:
return "::%s" % type_
def refreshQuery(self, curname, fields, table, where, cursor, conn) -> None:
"""Perform a query."""
if curname not in self.cursorsArray_.keys():
self.cursorsArray_[curname] = cursor
sql = "SELECT %s FROM %s WHERE %s " % (fields, table, where)
sql = self.fix_query(sql)
try:
self.cursorsArray_[curname].execute(sql)
except Exception:
print("*", sql)
qWarning("CursorTableModel.Refresh\n %s" % traceback.format_exc())
def fix_query(self, val: str) -> str:
"""Fix values on SQL."""
ret_ = val.replace("'true'", "1")
ret_ = ret_.replace("'false'", "0")
ret_ = ret_.replace("'0'", "0")
ret_ = ret_.replace("'1'", "1")
# ret_ = ret_.replace(";", "")
return ret_
    def refreshFetch(self, number, curname, table, cursor, fields, where_filter) -> None:
        """Fetch more rows for a named cursor (no-op for this driver)."""
        pass
# try:
# self.cursorsArray_[curname].fetchmany(number)
# except Exception:
# qWarning("%s.refreshFetch\n %s" %(self.name_, traceback.format_exc()))
def useThreads(self) -> bool:
"""Return if this driver supports threads."""
return False
def useTimer(self) -> bool:
"""Return if this driver supports timer."""
return True
def fetchAll(self, cursor, tablename, where_filter, fields, curname) -> List[Any]:
"""Fetch all pending rows on cursor."""
if curname not in self.rowsFetched.keys():
self.rowsFetched[curname] = 0
rowsF = []
try:
rows = list(self.cursorsArray_[curname])
if self.rowsFetched[curname] < len(rows):
i = 0
for row in rows:
i += 1
if i > self.rowsFetched[curname]:
rowsF.append(row)
self.rowsFetched[curname] = i
except Exception:
logger.error("%s:: fetchAll:%s", self.name_, traceback.format_exc())
return rowsF
def existsTable(self, name) -> bool:
"""Return if table exists."""
if not self.isOpen():
return False
t = PNSqlQuery()
t.setForwardOnly(True)
ok = t.exec_("SHOW TABLES LIKE '%s'" % name)
if ok:
ok = t.next()
return ok
def sqlCreateTable(self, tmd) -> Optional[str]:
"""Create a table from given MTD."""
# util = FLUtil()
if not tmd:
return None
primaryKey = None
sql = "CREATE TABLE %s (" % tmd.name()
# seq = None
fieldList = tmd.fieldList()
unlocks = 0
for field in fieldList:
if field.type() == "unlock":
unlocks += 1
if unlocks > 1:
qWarning(u"%s : No se ha podido crear la tabla %s" % (self.name_, tmd.name()))
qWarning(u"%s : Hay mas de un campo tipo unlock. Solo puede haber uno." % self.name_)
return None
i = 1
for field in fieldList:
sql = sql + field.name()
if field.type() == "int":
sql += " INT"
elif field.type() in ["uint", "serial"]:
sql += " INT UNSIGNED"
elif field.type() in ("bool", "unlock"):
sql += " BOOL"
elif field.type() == "double":
sql += " DECIMAL(%s,%s)" % (
field.partInteger() + field.partDecimal() + 5,
field.partDecimal() + 5,
)
elif field.type() == "time":
sql += " TIME"
elif field.type() == "date":
sql += " DATE"
elif field.type() in ["pixmap", "stringlist"]:
sql += " MEDIUMTEXT"
elif field.type() == "string":
if field.length() > 0:
if field.length() > 255:
sql += " VARCHAR"
else:
sql += " CHAR"
sql += "(%s)" % field.length()
else:
sql += " CHAR(255)"
elif field.type() == "bytearray":
sql = sql + " LONGBLOB"
if field.isPrimaryKey():
if primaryKey is None:
sql += " PRIMARY KEY"
primaryKey = field.name()
else:
qWarning(
QApplication.tr("FLManager : Tabla-> ")
+ tmd.name()
+ QApplication.tr(
" . Se ha intentado poner una segunda clave primaria para el campo "
)
+ field.name()
+ QApplication.tr(" , pero el campo ")
+ primaryKey
+ QApplication.tr(
" ya es clave primaria. Sólo puede existir una clave primaria en FLTableMetaData,"
" use FLCompoundKey para crear claves compuestas."
)
)
return None
else:
if field.isUnique():
sql += " UNIQUE"
if not field.allowNull():
sql += " NOT NULL"
else:
sql += " NULL"
if not i == len(fieldList):
sql += ","
i = i + 1
engine = ") ENGINE=INNODB" if not self.noInnoDB else ") ENGINE=MyISAM"
sql += engine
sql += " DEFAULT CHARACTER SET = utf8 COLLATE = utf8_bin"
qWarning("NOTICE: CREATE TABLE (%s%s)" % (tmd.name(), engine))
return sql
def Mr_Proper(self) -> None:
"""Cleanup database like mr.proper."""
util = FLUtil()
if not self.db_:
raise Exception("must be connected")
self.db_.dbAux().transaction()
qry = PNSqlQuery(None, "dbAux")
qry2 = PNSqlQuery(None, "dbAux")
qry3 = PNSqlQuery(None, "dbAux")
# qry4 = PNSqlQuery(None, "dbAux")
# qry5 = PNSqlQuery(None, "dbAux")
steps = 0
self.active_create_index = False
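        # Pass 1: drop stale backups (old flfiles records and *alteredtable* copies),
        # reset the metadata caches, then regenerate any table whose structure no
        # longer matches its .mtd definition.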
rx = QRegExp("^.*\\d{6,9}$")
        # Collect backup tables whose name matches the pattern above
        listOldBks = [table for table in self.tables() if rx.exactMatch(table)]
        qry.exec_(
            "select nombre from flfiles where nombre regexp "
            "'.*[[:digit:]][[:digit:]][[:digit:]][[:digit:]]-[[:digit:]][[:digit:]].*:[[:digit:]][[:digit:]]$' or nombre regexp "
            "'.*alteredtable[[:digit:]][[:digit:]][[:digit:]][[:digit:]].*' or (bloqueo=0 and nombre like '%.mtd')"
        )
util.createProgressDialog(util.tr("Borrando backups"), len(listOldBks) + qry.size() + 2)
while qry.next():
item = qry.value(0)
util.setLabelText(util.tr("Borrando registro %s") % item)
qry2.exec_("DELETE FROM flfiles WHERE nombre ='%s'" % item)
if item.find("alteredtable") > -1:
if self.existsTable(item.replace(".mtd", "")):
util.setLabelText(util.tr("Borrando tabla %s" % item))
qry2.exec_("DROP TABLE %s CASCADE" % item.replace(".mtd", ""))
steps = steps + 1
util.setProgress(steps)
for item in listOldBks:
if self.existsTable(item):
util.setLabelText(util.tr("Borrando tabla %s" % item))
qry2.exec_("DROP TABLE %s CASCADE" % item)
steps = steps + 1
util.setProgress(steps)
util.setLabelText(util.tr("Inicializando cachés"))
steps = steps + 1
util.setProgress(steps)
qry.exec_("DELETE FROM flmetadata")
qry.exec_("DELETE FROM flvar")
self.db_.manager().cleanupMetaData()
# self.db_.driver().commit()
util.destroyProgressDialog()
steps = 0
qry3.exec_("SHOW TABLES")
util.createProgressDialog(util.tr("Comprobando base de datos"), qry3.size())
while qry3.next():
item = qry3.value(0)
# print("Comprobando", item)
# qry2.exec_("alter table %s convert to character set utf8 collate utf8_bin" % item)
mustAlter = self.mismatchedTable(item, item)
if mustAlter:
conte = self.db_.managerModules().content("%s.mtd" % item)
if conte:
msg = util.tr(
"La estructura de los metadatos de la tabla '%s' y su "
"estructura interna en la base de datos no coinciden. "
"Intentando regenerarla." % item
)
logger.warning("%s", msg)
self.alterTable2(conte, conte, None, True)
steps = steps + 1
util.setProgress(steps)
self.db_.dbAux().driver().transaction()
self.active_create_index = True
steps = 0
# sqlCursor = PNSqlCursor(None, True, self.db_.dbAux())
engine = "MyISAM" if self.noInnoDB else "INNODB"
convert_engine = False
do_ques = True
sqlQuery = PNSqlQuery(None, self.db_.dbAux())
sql_query2 = PNSqlQuery(None, self.db_.dbAux())
if sqlQuery.exec_("SHOW TABLES"):
util.setTotalSteps(sqlQuery.size())
while sqlQuery.next():
item = sqlQuery.value(0)
steps = steps + 1
util.setProgress(steps)
util.setLabelText(util.tr("Creando índices para %s" % item))
mtd = self.db_.manager().metadata(item, True)
if not mtd:
continue
fL = mtd.fieldList()
if not fL:
continue
for it in fL:
if not it or not it.type() == "pixmap":
continue
cur = PNSqlCursor(item, True, self.db_.dbAux())
cur.select(it.name() + " not like 'RK@%'")
while cur.next():
v = cur.value(it.name())
if v is None:
continue
v = self.db_.manager().storeLargeValue(mtd, v)
if v:
buf = cur.primeUpdate()
buf.setValue(it.name(), v)
cur.update(False)
# sqlCursor.setName(item, True)
# self.db_.dbAux().driver().commit()
sql_query2.exec_(
"show table status where Engine='%s' and Name='%s'" % (engine, item)
)
if not sql_query2.next():
if do_ques:
res = QMessageBox.question(
None,
util.tr("Mr. Proper"),
util.tr(
"Existen tablas que no son del tipo %s utilizado por el driver de la conexión actual.\n"
"Ahora es posible convertirlas, pero asegurése de tener una COPIA DE SEGURIDAD,\n"
"se pueden peder datos en la conversión de forma definitiva.\n\n"
"¿ Quiere convertirlas ?" % (engine)
),
QMessageBox.Yes,
QMessageBox.No,
)
if res == QMessageBox.Yes:
convert_engine = True
do_ques = False
if convert_engine:
conte = self.db_.managerModules().content("%s.mtd" % item)
self.alterTable2(conte, conte, None, True)
self.active_create_index = False
util.destroyProgressDialog()
def alterTable(self, mtd1, mtd2, key: Optional[str], force=False) -> bool:
"""Alter a table following mtd instructions."""
return self.alterTable2(mtd1, mtd2, key, force)
def hasCheckColumn(self, mtd) -> bool:
"""Retrieve if MTD has a check column."""
field_list = mtd.fieldList()
if not field_list:
return False
for field in field_list:
if field.isCheck() or field.name().endswith("_check_column"):
return True
return False
def alterTable2(self, mtd1, mtd2, key: Optional[str], force=False) -> bool:
"""Alter a table following mtd instructions."""
if not self.db_:
raise Exception("must be connected")
util = FLUtil()
oldMTD = None
newMTD = None
doc = QDomDocument("doc")
docElem = None
if not util.domDocumentSetContent(doc, mtd1):
print("FLManager::alterTable : " + util.tr("Error al cargar los metadatos."))
else:
docElem = doc.documentElement()
oldMTD = self.db_.manager().metadata(docElem, True)
if oldMTD and oldMTD.isQuery():
return True
if oldMTD and self.hasCheckColumn(oldMTD):
return False
if not util.domDocumentSetContent(doc, mtd2):
print("FLManager::alterTable : " + util.tr("Error al cargar los metadatos."))
return False
else:
docElem = doc.documentElement()
newMTD = self.db_.manager().metadata(docElem, True)
if not oldMTD:
oldMTD = newMTD
if not oldMTD.name() == newMTD.name():
print(
"FLManager::alterTable : "
+ util.tr("Los nombres de las tablas nueva y vieja difieren.")
)
if oldMTD and not oldMTD == newMTD:
del oldMTD
if newMTD:
del newMTD
return False
oldPK = oldMTD.primaryKey()
newPK = newMTD.primaryKey()
if not oldPK == newPK:
print(
"FLManager::alterTable : "
+ util.tr("Los nombres de las claves primarias difieren.")
)
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
if not force and self.db_.manager().checkMetaData(oldMTD, newMTD):
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return True
if not self.db_.manager().existsTable(oldMTD.name()):
print(
"FLManager::alterTable : "
+ util.tr("La tabla %1 antigua de donde importar los registros no existe.").arg(
oldMTD.name()
)
)
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
fieldList = oldMTD.fieldList()
# oldField = None
if not fieldList:
print("FLManager::alterTable : " + util.tr("Los antiguos metadatos no tienen campos."))
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
fieldNamesOld = []
if not force:
for it in fieldList:
if newMTD.field(it.name()) is not None:
fieldNamesOld.append(it.name())
renameOld = "%salteredtable%s" % (
oldMTD.name()[0:5],
QDateTime().currentDateTime().toString("ddhhssz"),
)
if not self.db_.dbAux():
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
# self.db_.dbAux().transaction()
fieldList = newMTD.fieldList()
if not fieldList:
qWarning("FLManager::alterTable : " + util.tr("Los nuevos metadatos no tienen campos"))
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
q = PNSqlQuery(None, "dbAux")
in_sql = "ALTER TABLE %s RENAME TO %s" % (oldMTD.name(), renameOld)
logger.warning(in_sql)
if not q.exec_(in_sql):
qWarning(
"FLManager::alterTable : " + util.tr("No se ha podido renombrar la tabla antigua.")
)
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
if not self.db_.manager().createTable(newMTD):
self.db_.dbAux().rollbackTransaction()
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
self.db_.dbAux().transaction()
if not force and key and len(key) == 40:
c = PNSqlCursor("flfiles", True, self.db_.dbAux())
# oldCursor.setModeAccess(oldCursor.Browse)
c.setForwardOnly(True)
c.setFilter("nombre='%s.mtd'" % renameOld)
c.select()
if not c.next():
# c.setModeAccess(c.Insert)
# c.refreshBuffer()
# c.setValueBuffer("nombre","%s.mtd" % renameOld)
# c.setValueBuffer("contenido", mtd1)
# c.setValueBuffer("sha", key)
# c.commitBuffer()
in_sql = (
"INSERT INTO flfiles(nombre,contenido,idmodulo,sha) VALUES ('%s.mtd','%s','%s','%s')"
% (
renameOld,
mtd1,
self.db_.managerModules().idModuleOfFile("%s.mtd" % oldMTD.name()),
key,
)
)
logger.warning(in_sql)
q.exec_(in_sql)
ok = False
if force and fieldNamesOld:
# sel = fieldNamesOld.join(",")
# in_sql = "INSERT INTO %s(%s) SELECT %s FROM %s" % (newMTD.name(), sel, sel, renameOld)
# logger.warning(in_sql)
# ok = q.exec_(in_sql)
if not ok:
self.db_.dbAux().rollbackTransaction()
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return self.alterTable2(mtd1, mtd2, key, True)
if not ok:
from pymysql.cursors import DictCursor
oldCursor = self.conn_.cursor(DictCursor)
# print("Lanzando!!", "SELECT * FROM %s WHERE 1 = 1" % (renameOld))
oldCursor.execute("SELECT * FROM %s WHERE 1 = 1" % (renameOld))
result_set = oldCursor.fetchall()
totalSteps = len(result_set)
# oldCursor = PNSqlCursor(renameOld, True, "dbAux")
# oldCursor.setModeAccess(oldCursor.Browse)
# oldCursor.setForwardOnly(True)
# oldCursor.select()
# totalSteps = oldCursor.size()
util.createProgressDialog(
util.tr("Reestructurando registros para %s...") % newMTD.alias(), totalSteps
)
util.setLabelText(util.tr("Tabla modificada"))
step = 0
newBuffer = None
newField = None
listRecords = []
newBufferInfo = self.recordInfo2(newMTD.name())
vector_fields = {}
default_values = {}
v = None
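        # vector_fields holds alternating (new field, old field) entries keyed by a
        # running index; for columns missing in the old table, the new field's
        # default value is stored in default_values just before its pair so the
        # copy loop below can fill it in.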
for it2 in fieldList:
oldField = oldMTD.field(it2.name())
if (
oldField is None
or not result_set
or oldField.name() not in result_set[0].keys()
):
if oldField is None:
oldField = it2
if it2.type() != PNFieldMetaData.Serial:
v = it2.defaultValue()
step += 1
default_values[str(step)] = v
step += 1
vector_fields[str(step)] = it2
step += 1
vector_fields[str(step)] = oldField
# step2 = 0
ok = True
x = 0
for row in result_set:
x += 1
newBuffer = newBufferInfo
i = 0
while i < step:
v = None
if str(i + 1) in default_values.keys():
i += 1
v = default_values[str(i)]
i += 1
newField = vector_fields[str(i)]
i += 1
oldField = vector_fields[str(i)]
else:
i += 1
newField = vector_fields[str(i)]
i += 1
oldField = vector_fields[str(i)]
v = row[newField.name()]
if (
(not oldField.allowNull() or not newField.allowNull())
and (v is None)
and newField.type() != PNFieldMetaData.Serial
):
defVal = newField.defaultValue()
if defVal is not None:
v = defVal
if v is not None and newField.type() == "string" and newField.length() > 0:
v = v[: newField.length()]
if (not oldField.allowNull() or not newField.allowNull()) and v is None:
if oldField.type() == PNFieldMetaData.Serial:
v = int(self.nextSerialVal(newMTD.name(), newField.name()))
elif oldField.type() in ["int", "uint", "bool", "unlock"]:
v = 0
elif oldField.type() == "double":
v = 0.0
elif oldField.type() == "time":
v = QTime.currentTime()
elif oldField.type() == "date":
v = QDate.currentDate()
else:
v = "NULL"[: newField.length()]
# new_b = []
for buffer in newBuffer:
if buffer[0] == newField.name():
new_buffer = []
new_buffer.append(buffer[0])
new_buffer.append(buffer[1])
new_buffer.append(newField.allowNull())
new_buffer.append(buffer[3])
new_buffer.append(buffer[4])
new_buffer.append(v)
new_buffer.append(buffer[6])
listRecords.append(new_buffer)
break
# newBuffer.setValue(newField.name(), v)
if listRecords:
if not self.insertMulti(newMTD.name(), listRecords):
ok = False
listRecords = []
util.setProgress(totalSteps)
util.destroyProgressDialog()
if ok:
self.db_.dbAux().commit()
if force:
q.exec_("DROP TABLE %s CASCADE" % renameOld)
else:
self.db_.dbAux().rollbackTransaction()
q.exec_("DROP TABLE %s CASCADE" % oldMTD.name())
q.exec_("ALTER TABLE %s RENAME TO %s" % (renameOld, oldMTD.name()))
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return False
if oldMTD and oldMTD != newMTD:
del oldMTD
if newMTD:
del newMTD
return True
def insertMulti(self, table_name, records: Iterable) -> bool:
"""Insert several rows at once."""
if not records:
return False
if not self.db_:
raise Exception("must be connected")
mtd = self.db_.manager().metadata(table_name)
fList = []
vList = []
cursor_ = self.cursor()
for f in records:
field = mtd.field(f[0])
if field.generated():
fList.append(field.name())
value = f[5]
if field.type() in ("string", "stringlist"):
value = self.db_.normalizeValue(value)
value = self.formatValue(field.type(), value, False)
vList.append(value)
sql = """INSERT INTO %s(%s) values (%s)""" % (
table_name,
", ".join(fList),
", ".join(map(str, vList)),
)
if not fList:
return False
try:
cursor_.execute(sql)
except Exception as exc:
print(sql, "\n", exc)
return False
return True
def mismatchedTable(self, table1, tmd_or_table2: str, db_=None) -> bool:
"""Check if table does not match MTD with database schema."""
if db_ is None:
db_ = self.db_
if isinstance(tmd_or_table2, str):
mtd = db_.manager().metadata(tmd_or_table2, True)
if not mtd:
return False
mismatch = False
processed_fields = []
try:
recMtd = self.recordInfo(tmd_or_table2)
recBd = self.recordInfo2(table1)
if recMtd is None:
raise Exception("Error obtaining recordInfo for %s" % tmd_or_table2)
# fieldBd = None
for fieldMtd in recMtd:
# fieldBd = None
found = False
for field in recBd:
if field[0] == fieldMtd[0]:
processed_fields.append(field[0])
found = True
if self.notEqualsFields(field, fieldMtd):
mismatch = True
recBd.remove(field)
break
if not found:
if fieldMtd[0] not in processed_fields:
mismatch = True
break
if len(recBd) > 0:
mismatch = True
except Exception:
logger.exception("mismatchedTable: Unexpected error")
return mismatch
else:
return self.mismatchedTable(table1, tmd_or_table2.name(), db_)
def recordInfo2(self, tablename) -> List[list]:
"""Obtain current cursor information on columns."""
if not self.isOpen():
raise Exception("MYISAM2: conn not opened")
if not self.conn_:
raise Exception("must be connected")
info = []
cursor = self.conn_.cursor()
cursor.execute("SHOW FIELDS FROM %s" % tablename)
# print("Campos", tablename)
for field in cursor.fetchall():
col_name = field[0]
            not_null_ = field[2] == "NO"  # SHOW FIELDS "Null" column: "NO" means NOT NULL
tipo_ = field[1]
if field[1].find("(") > -1:
tipo_ = field[1][: field[1].find("(")]
# len_
len_ = "0"
if field[1].find("(") > -1:
len_ = field[1][field[1].find("(") + 1 : field[1].find(")")]
precision_ = 0
tipo_ = self.decodeSqlType(tipo_)
if tipo_ in ["uint", "int", "double"]:
len_ = "0"
# print("****", tipo_, field)
else:
if len_.find(",") > -1:
precision_ = int(len_[len_.find(",") :])
len_ = len_[: len_.find(",")]
len_n = int(len_)
if len_n == 255 and tipo_ == "string":
len_n = 0
default_value_ = field[4]
primary_key_ = True if field[3] == "PRI" else False
# print("***", field)
# print("Nombre:", col_name)
# print("Tipo:", tipo_)
# print("Nulo:", allow_null)
# print("longitud:", len_)
# print("Precision:", precision_)
# print("Defecto:", default_value_)
            info.append(
                [col_name, tipo_, not_null_, len_n, precision_, default_value_, primary_key_]
            )
# info.append(desc[0], desc[1], not desc[6], , part_decimal, default_value, is_primary_key)
return info
def decodeSqlType(self, t: str) -> str:
"""Translate types."""
ret = t
if t in ["char", "varchar", "text"]:
ret = "string"
elif t == "int":
ret = "uint"
elif t == "date":
ret = "date"
elif t == "mediumtext":
ret = "stringlist"
elif t == "tinyint":
ret = "bool"
elif t in ["decimal", "double"]:
ret = "double"
elif t == "longblob":
ret = "bytearray"
elif t == "time":
ret = "time"
else:
logger.warning("formato desconocido %s", ret)
return ret
def recordInfo(self, tablename_or_query: str) -> Optional[List[list]]:
"""Obtain current cursor information on columns."""
if not self.isOpen():
return None
if not self.db_:
raise Exception("Must be connected")
info = []
if isinstance(tablename_or_query, str):
tablename = tablename_or_query
doc = QDomDocument(tablename)
stream = self.db_.managerModules().contentCached("%s.mtd" % tablename)
util = FLUtil()
if not util.domDocumentSetContent(doc, stream):
                print(
                    "FLManager : "
                    + QApplication.tr("Error al cargar los metadatos para la tabla")
                    + " "
                    + tablename
                )
return self.recordInfo2(tablename)
# docElem = doc.documentElement()
mtd = self.db_.manager().metadata(tablename, True)
if not mtd:
return self.recordInfo2(tablename)
fL = mtd.fieldList()
if not fL:
del mtd
return self.recordInfo2(tablename)
for f in mtd.fieldNames():
field = mtd.field(f)
info.append(
[
field.name(),
field.type(),
not field.allowNull(),
field.length(),
field.partDecimal(),
field.defaultValue(),
field.isPrimaryKey(),
]
)
del mtd
return info
def notEqualsFields(self, field1: List[Any], field2: List[Any]) -> bool:
"""Check if two field definitions are equal."""
# print("comparando", field1, field1[1], field2, field2[1])
ret = False
try:
if not field1[2] == field2[2] and not field2[6]:
ret = True
if field1[1] == "stringlist" and not field2[1] in ("stringlist", "pixmap"):
ret = True
elif field1[1] == "string" and (
not field2[1] in ("string", "time", "date") or not field1[3] == field2[3]
):
if field1[3] == 0 and field2[3] == 255:
pass
else:
ret = True
elif field1[1] == "uint" and not field2[1] in ("int", "uint", "serial"):
ret = True
elif field1[1] == "bool" and not field2[1] in ("bool", "unlock"):
ret = True
elif field1[1] == "double" and not field2[1] == "double":
ret = True
except Exception:
print(traceback.format_exc())
return ret
def normalizeValue(self, text) -> Optional[str]:
"""Escape values, suitable to prevent sql injection."""
if text is None:
return None
import pymysql
return pymysql.escape_string(text)
# text = text.replace("'", "''")
# text = text.replace('\\"', '\\\\"')
# text = text.replace("\\n", "\\\\n")
# text = text.replace("\\r", "\\\\r")
# return text
def cascadeSupport(self) -> bool:
"""Check if database supports CASCADE."""
return True
def canDetectLocks(self) -> bool:
"""Check if driver can detect locks."""
return True
def desktopFile(self) -> bool:
"""Return if this database is a file."""
return False
def execute_query(self, q: str) -> Any:
"""Execute a SQL query."""
if not self.isOpen():
logger.warning("MySQLDriver::execute_query. DB is closed")
return False
cursor = self.cursor()
try:
q = self.fix_query(q)
cursor.execute(q)
except Exception:
self.setLastError("No se puedo ejecutar la siguiente query %s" % q, q)
logger.warning(
"MySQLDriver:: No se puedo ejecutar la siguiente query %s\n %s",
q,
traceback.format_exc(),
)
return cursor
| 38
| 0
| 0
| 50,631
| 0
| 0
| 0
| 494
| 422
|
93d72bc6802337a6844119c8802b8bf664d50346
| 2,501
|
py
|
Python
|
doit/support/cmd/errors.py
|
i386x/doit
|
61892904940b7ecf29af8ea815b731e242a945f7
|
[
"MIT"
] | null | null | null |
doit/support/cmd/errors.py
|
i386x/doit
|
61892904940b7ecf29af8ea815b731e242a945f7
|
[
"MIT"
] | null | null | null |
doit/support/cmd/errors.py
|
i386x/doit
|
61892904940b7ecf29af8ea815b731e242a945f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#! \file ./doit/support/cmd/errors.py
#! \author Ji Kuera, <[email protected]>
#! \stamp 2016-02-15 13:19:12 (UTC+01:00, DST+00:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
Command processor's errors.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Ji Kuera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
from doit.support.errors import DoItError
ERROR_COMMAND_PROCESSOR = DoItError.alloc_codes(1)
ERROR_COMMAND = DoItError.alloc_codes(1)
#-class
#-class
| 29.081395
| 79
| 0.652139
|
# -*- coding: utf-8 -*-
#! \file ./doit/support/cmd/errors.py
#! \author Jiří Kučera, <[email protected]>
#! \stamp 2016-02-15 13:19:12 (UTC+01:00, DST+00:00)
#! \project DoIt!: Tools and Libraries for Building DSLs
#! \license MIT
#! \version 0.0.0
#! \fdesc @pyfile.docstr
#
"""\
Command processor's errors.\
"""
__license__ = """\
Copyright (c) 2014 - 2017 Jiří Kučera.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.\
"""
from doit.support.errors import DoItError
ERROR_COMMAND_PROCESSOR = DoItError.alloc_codes(1)
ERROR_COMMAND = DoItError.alloc_codes(1)
class CommandProcessorError(DoItError):
"""
"""
__slots__ = [ 'traceback' ]
def __init__(self, tb, emsg):
"""
"""
DoItError.__init__(self, ERROR_COMMAND_PROCESSOR, emsg)
self.traceback = tb
#-def
def __str__(self):
"""
"""
if self.traceback is not None:
return "%s %s" % (self.traceback, DoItError.__str__(self))
return DoItError.__str__(self)
#-def
#-class
class CommandError(DoItError):
"""
"""
__slots__ = [ 'ecls', 'emsg', 'tb' ]
def __init__(self, ecls, emsg, tb):
"""
"""
DoItError.__init__(self, ERROR_COMMAND, "%s: %s" % (ecls, emsg))
self.ecls = ecls
self.emsg = emsg
self.tb = tb
#-def
def __repr__(self):
"""
"""
return "%s(\"%s\")" % (self.ecls, self.emsg)
#-def
#-class
| 12
| 0
| 0
| 835
| 0
| 0
| 0
| 0
| 46
|
bc4091829f8438988183491e40582e30250e0759
| 4,364
|
py
|
Python
|
nototools/notoconfig.py
|
dedbbs1/nototools
|
428f149d7c235ac45fb9255414c77a3529e0c8bf
|
[
"Apache-2.0"
] | 156
|
2015-06-11T00:03:49.000Z
|
2019-03-12T10:05:14.000Z
|
nototools/notoconfig.py
|
dedbbs1/nototools
|
428f149d7c235ac45fb9255414c77a3529e0c8bf
|
[
"Apache-2.0"
] | 323
|
2015-06-09T21:26:40.000Z
|
2019-04-09T11:09:52.000Z
|
nototools/notoconfig.py
|
twardoch/nototools
|
546beddb96e8eb4d93fa0f4c60793bee56e346ac
|
[
"Apache-2.0"
] | 63
|
2015-06-09T19:21:58.000Z
|
2019-03-27T21:52:30.000Z
|
#!/usr/bin/env python
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read config file for noto tools. One could also just define some
environment variables, but using Python for this lets you keep your
environment and shell prefs clean.
This expects a file named '.notoconfig' in the users home directory.
It should contain lines consisting of a name, '=' and a path. The
expected names are 'noto_tools', 'noto_fonts', 'noto_cjk',
'noto_emoji', and 'noto_source'. The values are absolute paths
to the base directories of these noto repositories.
Formerly these were a single repository so the paths could all be reached
from a single root, but that is no longer the case.
"""
from os import path
_ERR_MSG = """
Could not find ~/.notoconfig or /usr/local/share/noto/config.
Nototools uses this file to locate resources it uses, since many resources
such as fonts and sample_texts are not installed in locations relative
to the nototools python files and scripts.
Please create one of the above config files containing a line like the
following, where the absolute path to the root of the git repo on your
machine follows the '=' character:
noto_tools=/path/to/root/of/nototools
If you use any of the other noto repos, add similar lines for 'noto_emoji',
'noto_fonts', 'noto_cjk', 'noto_source', or 'noto_fonts_alpha'.
"""
_values = {}
_config_path = None # so we know
def _setup():
"""The config consists of lines of the form <name> = <value>.
values will hold a mapping from the <name> to value.
Blank lines and lines starting with '#' are ignored."""
global _config_path
paths = [path.expanduser("~/.notoconfig"), "/usr/local/share/noto/config"]
for configfile in paths:
if path.exists(configfile):
with open(configfile, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
k, v = line.split("=", 1)
_values[k.strip()] = v.strip()
_config_path = configfile
break
# This needs to be silent. It causes a makefile error in noto-emoji,
# which expects stdout to consist only of the output of a python
# script it runs.
_setup()
# convenience for names we expect.
# By default we allow running without a config, since many small tools don't
# require it. But if you run code that calls noto_tools and provides no
# default, we assume you do require it and raise an exception.
def noto_tools(default=""):
"""Local path to nototools git repo. If this is called, we require config
to be set up."""
result = _values.get("noto_tools", default)
if result:
return result
raise Exception(_ERR_MSG)
def noto_fonts(default=""):
"""Local path to noto-font git repo"""
return _values.get("noto_fonts", default)
def noto_cjk(default=""):
"""Local path to noto-cjk git repo"""
return _values.get("noto_cjk", default)
def noto_emoji(default=""):
"""Local path to noto-emoji git repo"""
return _values.get("noto_emoji", default)
def noto_source(default=""):
"""Local path to noto-source git repo"""
return _values.get("noto_source", default)
def noto_fonts_alpha(default=""):
"""Local path to noto-fonts-alpha git repo"""
return _values.get("noto_fonts_alpha", default)
if __name__ == "__main__":
keyset = set(_values.keys())
if not keyset:
print("no keys defined, probably no notoconfig file was found.")
else:
wid = max(len(k) for k in keyset)
fmt = "%%%ds: %%s" % wid
for k in sorted(keyset):
print(fmt % (k, get(k)))
print("config: %s" % _config_path)
| 32.81203
| 78
| 0.681714
|
#!/usr/bin/env python
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read config file for noto tools. One could also just define some
environment variables, but using Python for this lets you keep your
environment and shell prefs clean.
This expects a file named '.notoconfig' in the users home directory.
It should contain lines consisting of a name, '=' and a path. The
expected names are 'noto_tools', 'noto_fonts', 'noto_cjk',
'noto_emoji', and 'noto_source'. The values are absolute paths
to the base directories of these noto repositories.
Formerly these were a single repository so the paths could all be reached
from a single root, but that is no longer the case.
"""
from os import path
_ERR_MSG = """
Could not find ~/.notoconfig or /usr/local/share/noto/config.
Nototools uses this file to locate resources it uses, since many resources
such as fonts and sample_texts are not installed in locations relative
to the nototools python files and scripts.
Please create one of the above config files containing a line like the
following, where the absolute path to the root of the git repo on your
machine follows the '=' character:
noto_tools=/path/to/root/of/nototools
If you use any of the other noto repos, add similar lines for 'noto_emoji',
'noto_fonts', 'noto_cjk', 'noto_source', or 'noto_fonts_alpha'.
"""
_values = {}
_config_path = None # so we know
def _setup():
"""The config consists of lines of the form <name> = <value>.
values will hold a mapping from the <name> to value.
Blank lines and lines starting with '#' are ignored."""
global _config_path
paths = [path.expanduser("~/.notoconfig"), "/usr/local/share/noto/config"]
for configfile in paths:
if path.exists(configfile):
with open(configfile, "r") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
k, v = line.split("=", 1)
_values[k.strip()] = v.strip()
_config_path = configfile
break
# This needs to be silent. It causes a makefile error in noto-emoji,
# which expects stdout to consist only of the output of a python
# script it runs.
_setup()
# convenience for names we expect.
# By default we allow running without a config, since many small tools don't
# require it. But if you run code that calls noto_tools and provides no
# default, we assume you do require it and raise an exception.
def noto_tools(default=""):
"""Local path to nototools git repo. If this is called, we require config
to be set up."""
result = _values.get("noto_tools", default)
if result:
return result
raise Exception(_ERR_MSG)
def noto_fonts(default=""):
"""Local path to noto-font git repo"""
return _values.get("noto_fonts", default)
def noto_cjk(default=""):
"""Local path to noto-cjk git repo"""
return _values.get("noto_cjk", default)
def noto_emoji(default=""):
"""Local path to noto-emoji git repo"""
return _values.get("noto_emoji", default)
def noto_source(default=""):
"""Local path to noto-source git repo"""
return _values.get("noto_source", default)
def noto_fonts_alpha(default=""):
"""Local path to noto-fonts-alpha git repo"""
return _values.get("noto_fonts_alpha", default)
def get(key, default=""):
return _values.get(key, default)
if __name__ == "__main__":
keyset = set(_values.keys())
if not keyset:
print("no keys defined, probably no notoconfig file was found.")
else:
wid = max(len(k) for k in keyset)
fmt = "%%%ds: %%s" % wid
for k in sorted(keyset):
print(fmt % (k, get(k)))
print("config: %s" % _config_path)
| 0
| 0
| 0
| 0
| 0
| 41
| 0
| 0
| 23
|
e0b252fafdf6487f16fd80918000aa7a03e2b15b
| 541
|
py
|
Python
|
src/assets/svg/clear_fill.py
|
Pure-Peace/vue-adaptive-template
|
a3f39258ac748eff84c894510cce0f8227358b88
|
[
"MIT"
] | 7
|
2020-10-27T15:16:05.000Z
|
2022-02-15T08:01:21.000Z
|
src/assets/svg/clear_fill.py
|
Pure-Peace/vue3-vite-ssr
|
650b4aa24e4684b6a1d93fcf27bf744dfae60215
|
[
"MIT"
] | 3
|
2021-03-10T19:59:12.000Z
|
2021-08-31T19:46:45.000Z
|
src/assets/svg/clear_fill.py
|
Pure-Peace/vue-adaptive-template
|
a3f39258ac748eff84c894510cce0f8227358b88
|
[
"MIT"
] | 1
|
2021-12-21T14:14:51.000Z
|
2021-12-21T14:14:51.000Z
|
import os
import re
for svg in [file for file in os.listdir(os.getcwd()) if '.svg' in file]:
file_path = '{}/{}'.format(os.getcwd(), svg)
with open (file_path, 'r', encoding='utf-8') as svg_file:
content = svg_file.read()
target = re.search('fill=".*?"', content)
if target != None:
fixed_content = content.replace(target.group(), '')
with open (file_path, 'w', encoding='utf-8') as svg_file:
svg_file.write(fixed_content)
print('done 1')
print('all done')
| 38.642857
| 72
| 0.578558
|
import os
import re
for svg in [file for file in os.listdir(os.getcwd()) if '.svg' in file]:
file_path = '{}/{}'.format(os.getcwd(), svg)
with open (file_path, 'r', encoding='utf-8') as svg_file:
content = svg_file.read()
target = re.search('fill=".*?"', content)
if target != None:
fixed_content = content.replace(target.group(), '')
with open (file_path, 'w', encoding='utf-8') as svg_file:
svg_file.write(fixed_content)
print('done 1')
print('all done')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cf2818a4a563783db22a4d4eba9f7f379db5d084
| 419
|
py
|
Python
|
decorators.py
|
oramirezperera/closures
|
17aa68642448eaa607872e871d8785ecdfe23d8b
|
[
"MIT"
] | null | null | null |
decorators.py
|
oramirezperera/closures
|
17aa68642448eaa607872e871d8785ecdfe23d8b
|
[
"MIT"
] | null | null | null |
decorators.py
|
oramirezperera/closures
|
17aa68642448eaa607872e871d8785ecdfe23d8b
|
[
"MIT"
] | null | null | null |
random_func()
| 22.052632
| 88
| 0.653938
|
from datetime import datetime
def execution_time(func):
def wrapper():
initial_time = datetime.now()
func()
final_time = datetime.now()
time_elapsed = final_time - initial_time
print('The execution time was ' + str(time_elapsed.total_seconds()) + 'seconds')
return wrapper
@execution_time
def random_func():
for _ in range(1,10000000):
pass
random_func()
| 0
| 58
| 0
| 0
| 0
| 269
| 0
| 8
| 68
|
44016d8febc11f65a98a21222676ac994b234bb7
| 5,863
|
py
|
Python
|
pyzoo/zoo/chronos/model/prophet.py
|
cabuliwallah/analytics-zoo
|
5e662bd01c5fc7eed412973119594cf2ecea8b11
|
[
"Apache-2.0"
] | 1
|
2021-06-16T11:42:32.000Z
|
2021-06-16T11:42:32.000Z
|
pyzoo/zoo/chronos/model/prophet.py
|
cabuliwallah/analytics-zoo
|
5e662bd01c5fc7eed412973119594cf2ecea8b11
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/zoo/chronos/model/prophet.py
|
cabuliwallah/analytics-zoo
|
5e662bd01c5fc7eed412973119594cf2ecea8b11
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 40.157534
| 99
| 0.656149
|
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import pandas as pd
from prophet import Prophet
from prophet.serialize import model_to_json, model_from_json
from zoo.automl.common.metrics import Evaluator
from zoo.automl.model.abstract import BaseModel, ModelBuilder
class ProphetModel(BaseModel):
def __init__(self):
"""
Initialize Model
"""
self.metric = 'mse'
self.model = None
self.model_init = False
def _build(self, **config):
"""
build the model and initialize.
:param config: hyperparameters for the model
"""
changepoint_prior_scale = config.get('changepoint_prior_scale', 0.05)
seasonality_prior_scale = config.get('seasonality_prior_scale', 10.0)
holidays_prior_scale = config.get('holidays_prior_scale', 10.0)
seasonality_mode = config.get('seasonality_mode', 'additive')
changepoint_range = config.get('changepoint_range', 0.8)
self.metric = config.get('metric', self.metric)
self.model = Prophet(changepoint_prior_scale=changepoint_prior_scale,
seasonality_prior_scale=seasonality_prior_scale,
holidays_prior_scale=holidays_prior_scale,
changepoint_range=changepoint_range,
seasonality_mode=seasonality_mode)
def fit_eval(self, data, validation_data, **config):
"""
Fit on the training data from scratch.
:param data: training data, an dataframe with Td rows,
and 2 columns, with column 'ds' indicating date and column 'y' indicating target
and Td is the time dimension
:param validation_data: validation data, should be the same type as data.
:return: the evaluation metric value
"""
if not self.model_init:
self._build(**config)
self.model_init = True
self.model.fit(data)
val_metric = self.evaluate(target=validation_data,
metrics=[self.metric])[0].item()
return {self.metric: val_metric}
def predict(self, data=None, horizon=24):
"""
Predict horizon time-points ahead the input data in fit_eval
:param data: Prophet predicts the horizon steps foreward from the training data.
So data should be None as it is not used.
:param horizon: horizon length to predict
:return: predicted result of length horizon
"""
if data is not None:
raise ValueError("We don't support input data currently")
if self.model is None:
raise Exception("Needs to call fit_eval or restore first before calling predict")
future = self.model.make_future_dataframe(periods=horizon)
out = self.model.predict(future)[-horizon:]
return out
def evaluate(self, target, data=None, metrics=['mse']):
"""
Evaluate on the prediction results. We predict horizon time-points ahead the input data
in fit_eval before evaluation, where the horizon length equals the second dimension size of
target.
:param data: Prophet predicts the horizon steps foreward from the training data.
So data should be None as it is not used.
:param target: target for evaluation.
:param metrics: a list of metrics in string format
:return: a list of metric evaluation results
"""
if data is not None:
raise ValueError("We don't support input data currently")
if target is None:
raise ValueError("Input invalid target of None")
if self.model is None:
raise Exception("Needs to call fit_eval or restore first before calling evaluate")
horizon = len(target)
future = self.model.make_future_dataframe(periods=horizon)
target_pred = self.predict(horizon=horizon)[['yhat']]
return [Evaluator.evaluate(m, target[['y']].values, target_pred.values) for m in metrics]
def save(self, checkpoint):
if self.model is None:
raise Exception("Needs to call fit_eval or restore first before calling save")
with open(checkpoint, 'w') as fout:
json.dump(model_to_json(self.model), fout)
def restore(self, checkpoint):
with open(checkpoint, 'r') as fin:
self.model = model_from_json(json.load(fin))
self.model_init = True
class ProphetBuilder(ModelBuilder):
def __init__(self, **prophet_config):
"""
Initialize Prophet Model
:param prophet_config: Other prophet hyperparameters. You may refer to
https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning
for the parameter names to specify.
"""
self.model_config = prophet_config.copy()
def build(self, config):
"""
Build Prophet Model
:param config: Other prophet hyperparameters. You may refer to
https://facebook.github.io/prophet/docs/diagnostics.html#hyperparameter-tuning
for the parameter names to specify.
"""
from zoo.chronos.model.prophet import ProphetModel
model = ProphetModel()
model._build(**config)
return model
| 0
| 0
| 0
| 4,994
| 0
| 0
| 0
| 99
| 180
|
450ca3987d19aeeb31e66048ea9f05f4707a6a4e
| 2,182
|
py
|
Python
|
mdptools/utils/highlight.py
|
mholdg16/py-mdptools
|
ae986edc2097e97cb73331d66f0051ca9f5bd15c
|
[
"MIT"
] | 1
|
2021-12-15T13:22:48.000Z
|
2021-12-15T13:22:48.000Z
|
mdptools/utils/highlight.py
|
mholdg16/py-mdptools
|
ae986edc2097e97cb73331d66f0051ca9f5bd15c
|
[
"MIT"
] | 2
|
2021-11-09T23:43:48.000Z
|
2021-11-13T20:41:12.000Z
|
mdptools/utils/highlight.py
|
mholdg16/py-mdptools
|
ae986edc2097e97cb73331d66f0051ca9f5bd15c
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-few-public-methods,missing-docstring
highlight = Highlight()
| 22.968421
| 58
| 0.579285
|
# pylint: disable=too-few-public-methods,missing-docstring
class COLORS_ENABLED:
RESET = "\033[0m"
BOLD = "\033[01m"
BLACK = "\033[30m"
RED = "\033[31m"
GREEN = "\033[32m"
ORANGE = "\033[33m"
BLUE = "\033[34m"
PURPLE = "\033[35m"
CYAN = "\033[36m"
LIGHTGREY = "\033[37m"
DARKGREY = "\033[90m"
LIGHTRED = "\033[91m"
LIGHTGREEN = "\033[92m"
YELLOW = "\033[93m"
LIGHTBLUE = "\033[94m"
PINK = "\033[95m"
LIGHTCYAN = "\033[96m"
class COLORS_DISABLED:
RESET = ""
BOLD = ""
BLACK = ""
RED = ""
GREEN = ""
ORANGE = ""
BLUE = ""
PURPLE = ""
CYAN = ""
LIGHTGREY = ""
DARKGREY = ""
LIGHTRED = ""
LIGHTGREEN = ""
YELLOW = ""
LIGHTBLUE = ""
PINK = ""
LIGHTCYAN = ""
class Highlight:
def __init__(self):
self.bc = COLORS_DISABLED
def __call__(self, color: str, text: str) -> str:
return color + f"{text}" + self.bc.RESET
def state(self, text: str):
return self.__call__(self.bc.LIGHTCYAN, text)
def action(self, text: str):
return self.__call__(self.bc.LIGHTGREEN, text)
def function(self, text: str):
return self.__call__(self.bc.LIGHTBLUE, text)
def variable(self, text: str):
return self.__call__(self.bc.PINK, text)
def string(self, text: str):
return self.__call__(self.bc.YELLOW, text)
def comment(self, text: str):
return self.__call__(self.bc.LIGHTGREY, text)
def ok(self, text: str):
return self.__call__(self.bc.LIGHTGREEN, text)
def fail(self, text: str):
return self.__call__(self.bc.LIGHTRED, text)
def error(self, text: str):
return self.__call__(self.bc.RED, text)
def numeral(self, text: str):
return self.__call__(self.bc.ORANGE, text)
def types(self, text: str):
return self.__call__(self.bc.GREEN, text)
def note(self, text: str):
return self.__call__(self.bc.PURPLE, text)
def use_colors(self, value: bool = True):
if value:
self.bc = COLORS_ENABLED
else:
self.bc = COLORS_DISABLED
highlight = Highlight()
| 0
| 0
| 0
| 2,027
| 0
| 0
| 0
| 0
| 68
|
ef71623e2adac2944868d6fb2d5158e9f125310e
| 35,571
|
py
|
Python
|
interpro7dw/ebi/interpro/ftp/xmlfiles.py
|
ProteinsWebTeam/interpro7-dw
|
5ff1886bb767964658574bd39b812d822a894163
|
[
"Apache-2.0"
] | null | null | null |
interpro7dw/ebi/interpro/ftp/xmlfiles.py
|
ProteinsWebTeam/interpro7-dw
|
5ff1886bb767964658574bd39b812d822a894163
|
[
"Apache-2.0"
] | 1
|
2020-08-14T23:15:24.000Z
|
2020-08-14T23:15:24.000Z
|
interpro7dw/ebi/interpro/ftp/xmlfiles.py
|
ProteinsWebTeam/interpro7-dw
|
5ff1886bb767964658574bd39b812d822a894163
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import MySQLdb
from interpro7dw.ebi.interpro import utils
_DC_STATUSES = {v: k for k, v in utils.DC_STATUSES.items()}
_TAGS = {
"cazy": "CAZY",
"cog": "COG",
"genprop": "GENPROP",
"ec": "EC",
"intenz": "EC",
"interpro": "INTERPRO",
"pfam": "PFAM",
"pdbe": "PDBE",
"pirsf": "PIRSF",
"prosite": "PROSITE",
"prositedoc": "PROSITEDOC",
"superfamily": "SSF",
"swissprot": "SWISSPROT",
"tigrfams": "TIGRFAMs"
}
| 39.132013
| 79
| 0.519103
|
# -*- coding: utf-8 -*-
import json
import gzip
import math
import multiprocessing as mp
import os
import re
import shutil
from tempfile import mkstemp
from typing import Optional, Sequence
from xml.dom.minidom import getDOMImplementation, parseString
from xml.parsers.expat import ExpatError
import cx_Oracle
import MySQLdb
import MySQLdb.cursors
from interpro7dw import logger
from interpro7dw.ebi import pdbe
from interpro7dw.ebi.interpro import production as ippro, utils
from interpro7dw.utils import DumpFile, KVdb, Store, loadobj, url2dict
_DC_STATUSES = {v: k for k, v in utils.DC_STATUSES.items()}
_TAGS = {
"cazy": "CAZY",
"cog": "COG",
"genprop": "GENPROP",
"ec": "EC",
"intenz": "EC",
"interpro": "INTERPRO",
"pfam": "PFAM",
"pdbe": "PDBE",
"pirsf": "PIRSF",
"prosite": "PROSITE",
"prositedoc": "PROSITEDOC",
"superfamily": "SSF",
"swissprot": "SWISSPROT",
"tigrfams": "TIGRFAMs"
}
def _restore_tags(match: re.Match) -> str:
tag, key = match.groups()
tag = tag.lower()
if tag == "cite":
return f'<cite idref="{key}"/>'
elif tag in _TAGS:
return f'<db_xref db="{_TAGS[tag]}" dbkey="{key}"/>'
elif tag not in ["mim", "pmid", "pubmed"]:
logger.warning(match.group(0))
def _restore_abstract(data: str) -> str:
return re.sub(pattern=r"\[([a-z]+):([a-z0-9_.:]+)\]",
repl=_restore_tags,
string=data,
flags=re.I)
def export_interpro(url: str, p_entries: str, p_entry2xrefs: str,
p_interpro2taxonomy: str, outdir: str,
tmpdir: Optional[str] = None):
shutil.copy(os.path.join(os.path.dirname(__file__), "interpro.dtd"),
outdir)
logger.info("loading entries")
entries = loadobj(p_entries)
interpro_entries = []
deleted_entries = []
for e in entries.values():
if e.database != "interpro":
continue
elif e.is_deleted:
deleted_entries.append(e.accession)
else:
interpro_entries.append(e.accession)
logger.info("creating entry-taxon database")
fd, taxdb = mkstemp(dir=tmpdir)
os.close(fd)
os.remove(taxdb)
with DumpFile(p_interpro2taxonomy) as interpro2taxonomy:
with KVdb(taxdb, writeback=True) as kvdb:
i = 0
for entry_acc, taxon_id, counts in interpro2taxonomy:
kvdb[f"{entry_acc}-{taxon_id}"] = str(counts)
i += 1
if not i % 1000000:
kvdb.sync()
logger.info("loading protein counts")
con = MySQLdb.connect(**url2dict(url), charset="utf8mb4")
cur = MySQLdb.cursors.SSCursor(con)
cur.execute(
"""
SELECT accession, counts
FROM webfront_entry
"""
)
num_proteins = {}
for entry_acc, counts in cur:
num_proteins[entry_acc] = str(json.loads(counts)["proteins"])
output = os.path.join(outdir, "interpro.xml.gz")
with gzip.open(output, "wt", encoding="utf-8") as fh:
fh.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fh.write('<!DOCTYPE interprodb SYSTEM "interpro.dtd">\n')
fh.write("<interprodb>\n")
doc = getDOMImplementation().createDocument(None, None, None)
# writing <release> section (do not log progress, < 1 sec)
elem = doc.createElement("release")
databases = {}
cur.execute(
"""
SELECT name, name_alt, type, num_entries, version, release_date
FROM webfront_database
ORDER BY name_long
"""
)
for name, name_alt, db_type, entry_count, version, date in cur:
databases[name] = name_alt
if db_type in ("entry", "protein"):
dbinfo = doc.createElement("dbinfo")
dbinfo.setAttribute("version", version)
dbinfo.setAttribute("dbname", name_alt)
dbinfo.setAttribute("entry_count", str(entry_count))
dbinfo.setAttribute("file_date",
date.strftime("%d-%b-%y").upper())
elem.appendChild(dbinfo)
elem.writexml(fh, addindent=" ", newl="\n")
logger.info("loading taxonomic data")
key_species = {
"3702", # Arabidopsis thaliana
"6239", # Caenorhabditis elegans
"7955", # Danio rerio
"7227", # Drosophila melanogaster
"9606", # Homo sapiens
"10090", # Mus musculus
"367110", # Neurospora crassa
"10116", # Rattus norvegicus
"559292", # Saccharomyces cerevisiae
"284812", # Schizosaccharomyces pombe
"4577", # Zea mays
}
superkingdoms = {
"Archaea": None,
"Bacteria": None,
"Eukaryota": None,
"Viruses": None
}
cur.execute(
"""
SELECT accession, scientific_name, full_name, lineage
FROM webfront_taxonomy
"""
)
taxa = {}
for tax_id, sci_name, full_name, lineage in cur:
"""
lineage is stored as a string with leading/trailing whitespace,
and a single whitespace between taxa
"""
taxa[tax_id] = (full_name, lineage.strip().split())
if sci_name in superkingdoms:
superkingdoms[sci_name] = tax_id
cur.close()
con.close()
# Raise if a superkingdom is not in the table
for sci_name, tax_id in superkingdoms.items():
if tax_id is None:
raise ValueError(f"{sci_name}: missing taxon ID")
superkingdoms = {tax_id for tax_id in superkingdoms.values()}
logger.info("writing entries")
with DumpFile(p_entry2xrefs) as entry2xrefs, KVdb(taxdb) as kvdb:
for entry_acc, xrefs in entry2xrefs:
entry = entries[entry_acc]
if entry.database != "interpro" or entry.is_deleted:
continue
elem = doc.createElement("interpro")
elem.setAttribute("id", entry.accession)
elem.setAttribute("protein_count", num_proteins[entry_acc])
elem.setAttribute("short_name", entry.short_name)
elem.setAttribute("type", entry.type)
name = doc.createElement("name")
name.appendChild(doc.createTextNode(entry.name))
elem.appendChild(name)
text = _restore_abstract('\n'.join(entry.description))
try:
_doc = parseString(f"<abstract>{text}</abstract>")
except ExpatError as exc:
# TODO: use CDATA section for all entries
logger.warning(f"{entry_acc}: {exc}")
# abstract = doc.createElement("abstract")
# abstract.appendChild(doc.createCDATASection(text))
else:
abstract = _doc.documentElement
elem.appendChild(abstract)
if entry.go_terms:
go_list = doc.createElement("class_list")
for term in entry.go_terms:
go_elem = doc.createElement("classification")
go_elem.setAttribute("id", term["identifier"])
go_elem.setAttribute("class_type", "GO")
_elem = doc.createElement("category")
_elem.appendChild(
doc.createTextNode(term["category"]["name"])
)
go_elem.appendChild(_elem)
_elem = doc.createElement("description")
_elem.appendChild(
doc.createTextNode(term["name"])
)
go_elem.appendChild(_elem)
go_list.appendChild(go_elem)
elem.appendChild(go_list)
if entry.literature:
pub_list = doc.createElement("pub_list")
for pub_id in sorted(entry.literature):
pub = entry.literature[pub_id]
pub_elem = doc.createElement("publication")
pub_elem.setAttribute("id", pub_id)
_elem = doc.createElement("author_list")
if pub["authors"]:
_elem.appendChild(
doc.createTextNode(", ".join(pub['authors']))
)
else:
_elem.appendChild(doc.createTextNode("Unknown"))
pub_elem.appendChild(_elem)
if pub["title"]:
_elem = doc.createElement("title")
_elem.appendChild(
doc.createTextNode(pub["title"])
)
pub_elem.appendChild(_elem)
if pub["URL"]:
_elem = doc.createElement("url")
_elem.appendChild(doc.createTextNode(pub["URL"]))
pub_elem.appendChild(_elem)
_elem = doc.createElement("db_xref")
if pub["PMID"]:
_elem.setAttribute("db", "PUBMED")
_elem.setAttribute("dbkey", str(pub["PMID"]))
else:
_elem.setAttribute("db", "MEDLINE")
_elem.setAttribute("dbkey", "MEDLINE")
pub_elem.appendChild(_elem)
if pub["ISO_journal"]:
_elem = doc.createElement("journal")
_elem.appendChild(
doc.createTextNode(pub["ISO_journal"])
)
pub_elem.appendChild(_elem)
if pub["ISBN"]:
_elem = doc.createElement("book_title")
isbn = f"ISBN:{pub['ISBN']}"
_elem.appendChild(doc.createTextNode(isbn))
pub_elem.appendChild(_elem)
if pub["volume"] or pub["issue"] or pub["raw_pages"]:
_elem = doc.createElement("location")
if pub["volume"]:
_elem.setAttribute("volume", pub["volume"])
if pub["issue"]:
_elem.setAttribute("issue", pub["issue"])
if pub["raw_pages"]:
_elem.setAttribute("pages", pub["raw_pages"])
pub_elem.appendChild(_elem)
if pub["year"]:
_elem = doc.createElement("year")
_elem.appendChild(
doc.createTextNode(str(pub["year"]))
)
pub_elem.appendChild(_elem)
pub_list.appendChild(pub_elem)
elem.appendChild(pub_list)
parent, children = entry.relations
if parent:
par_elem = doc.createElement("parent_list")
_elem = doc.createElement("rel_ref")
_elem.setAttribute("ipr_ref", parent)
par_elem.appendChild(_elem)
elem.appendChild(par_elem)
if children:
child_list = doc.createElement("child_list")
for child in children:
_elem = doc.createElement("rel_ref")
_elem.setAttribute("ipr_ref", child)
child_list.appendChild(_elem)
elem.appendChild(child_list)
members = []
for database, signatures in entry.integrates.items():
for signature_acc in signatures:
members.append((
signature_acc,
entries[signature_acc].short_name,
database,
num_proteins[signature_acc],
))
mem_list = doc.createElement("member_list")
for member in sorted(members):
_elem = doc.createElement("db_xref")
_elem.setAttribute("protein_count", member[3])
_elem.setAttribute("db", databases[member[2]])
_elem.setAttribute("dbkey", member[0])
_elem.setAttribute("name", member[1])
mem_list.appendChild(_elem)
elem.appendChild(mem_list)
# Merge cross-references and pathways
cross_refs = {}
for key, values in entry.cross_references.items():
cross_refs[databases[key]] = values
for key, values in entry.pathways.items():
cross_refs[databases[key]] = [val["id"] for val in values]
if cross_refs:
xref_list = doc.createElement("external_doc_list")
for ref_db in sorted(cross_refs):
for ref_id in sorted(cross_refs[ref_db]):
_elem = doc.createElement("db_xref")
_elem.setAttribute("db", ref_db)
_elem.setAttribute("dbkey", ref_id)
xref_list.appendChild(_elem)
elem.appendChild(xref_list)
if xrefs["structures"]:
xref_list = doc.createElement("structure_db_links")
for pdb_id in sorted(xrefs["structures"]):
_elem = doc.createElement("db_xref")
_elem.setAttribute("db", "PDB")
_elem.setAttribute("dbkey", pdb_id)
xref_list.appendChild(_elem)
elem.appendChild(xref_list)
# Find key species and taxonomic distribution
entry_key_species = []
entry_superkingdoms = {}
for tax_id in xrefs["taxa"]:
full_name, lineage = taxa[tax_id]
if tax_id in key_species:
entry_key_species.append((full_name, tax_id))
# Find the superkingdom containing this taxon
for superkingdom_id in superkingdoms:
if superkingdom_id in lineage:
break
else:
continue
try:
other_lineage = entry_superkingdoms[superkingdom_id]
except KeyError:
entry_superkingdoms[superkingdom_id] = lineage
else:
# Compare lineages and find lowest common ancestor
i = 0
while i < len(lineage) and i < len(other_lineage):
if lineage[i] != other_lineage[i]:
break
i += 1
# Path to the lowest common ancestor
entry_superkingdoms[superkingdom_id] = lineage[:i]
# Get lowest common ancestor for each represented superkingdom
lowest_common_ancestors = []
for lineage in entry_superkingdoms.values():
# Lowest common ancestor
tax_id = lineage[-1]
full_name, _ = taxa[tax_id]
lowest_common_ancestors.append((full_name, tax_id))
# Write taxonomic distribution
tax_dist = doc.createElement("taxonomy_distribution")
for full_name, tax_id in sorted(lowest_common_ancestors):
_elem = doc.createElement("taxon_data")
_elem.setAttribute("name", full_name)
key = f"{entry_acc}-{tax_id}"
_elem.setAttribute("proteins_count", kvdb[key])
tax_dist.appendChild(_elem)
elem.appendChild(tax_dist)
if entry_key_species:
# Write key species
key_spec = doc.createElement("key_species")
for full_name, tax_id in sorted(entry_key_species):
_elem = doc.createElement("taxon_data")
_elem.setAttribute("name", full_name)
key = f"{entry_acc}-{tax_id}"
_elem.setAttribute("proteins_count", kvdb[key])
key_spec.appendChild(_elem)
elem.appendChild(key_spec)
elem.writexml(fh, addindent=" ", newl="\n")
if deleted_entries:
block = doc.createElement("deleted_entries")
for entry_acc in sorted(deleted_entries):
elem = doc.createElement("del_ref")
elem.setAttribute("id", entry_acc)
block.appendChild(elem)
block.writexml(fh, addindent=" ", newl="\n")
fh.write("</interprodb>\n")
logger.info(f"temporary file: {os.path.getsize(taxdb)/1024/1024:,.0f} MB")
os.remove(taxdb)
logger.info("complete")
def _create_match(doc, signature: dict, locations: Sequence[dict]):
match = doc.createElement("match")
match.setAttribute("id", signature["accession"])
match.setAttribute("name", signature["name"])
match.setAttribute("dbname", signature["database"])
match.setAttribute("status", 'T')
"""
The model is stored in locations, so we get the model
from the first location for the match's 'model' attribute
"""
match.setAttribute("model", locations[0]["model"])
match.setAttribute("evd", signature["evidence"])
if signature["interpro"]:
ipr = doc.createElement("ipr")
for attname, value in signature["interpro"]:
if value:
ipr.setAttribute(attname, value)
match.appendChild(ipr)
for loc in locations:
match.appendChild(create_lcn(doc, loc))
return match
def create_lcn(doc, location: dict):
fragments = location["fragments"]
"""
We do not have the original start/end match positions,
so we use the leftmost/rightmost fragment positions.
We also reconstruct the fragment string (START-END-STATUS)
"""
fragments_obj = []
start = fragments[0]["start"]
end = 0
for frag in fragments:
if frag["end"] > end:
end = frag["end"]
status = _DC_STATUSES[frag["dc-status"]]
fragments_obj.append(f"{frag['start']}-{frag['end']}-{status}")
lcn = doc.createElement("lcn")
lcn.setAttribute("start", str(start))
lcn.setAttribute("end", str(end))
lcn.setAttribute("fragments", ','.join(fragments_obj))
lcn.setAttribute("score", str(location["score"]))
return lcn
def _write_match_tmp(signatures: dict, u2variants: dict, p_proteins: str,
p_uniprot2matches: str, start: str, stop: Optional[str],
output: str):
proteins = Store(p_proteins)
u2matches = Store(p_uniprot2matches)
with open(output, "wt", encoding="utf-8") as fh:
doc = getDOMImplementation().createDocument(None, None, None)
for uniprot_acc, protein in proteins.range(start, stop):
elem = doc.createElement("protein")
elem.setAttribute("id", uniprot_acc)
elem.setAttribute("name", protein["identifier"])
elem.setAttribute("length", str(protein["length"]))
elem.setAttribute("crc64", protein["crc64"])
try:
protein_entries = u2matches[uniprot_acc]
except KeyError:
pass
else:
for signature_acc in sorted(protein_entries):
try:
signature = signatures[signature_acc]
except KeyError:
# InterPro entry
continue
elem.appendChild(
_create_match(doc, signature,
protein_entries[signature_acc])
)
finally:
elem.writexml(fh, addindent=" ", newl="\n")
protein_variants = u2variants.get(uniprot_acc, [])
for variant, length, crc64, matches in protein_variants:
elem = doc.createElement("protein")
elem.setAttribute("id", variant)
elem.setAttribute("name", variant)
elem.setAttribute("length", str(length))
elem.setAttribute("crc64", crc64)
for signature_acc in sorted(matches):
try:
signature = signatures[signature_acc]
except KeyError:
# InterPro entry
continue
elem.appendChild(
_create_match(doc, signature,
matches[signature_acc])
)
elem.writexml(fh, addindent=" ", newl="\n")
def export_matches(pro_url: str, stg_url: str, p_proteins: str,
p_uniprot2matches: str, outdir: str, processes: int = 8):
shutil.copy(os.path.join(os.path.dirname(__file__), "match_complete.dtd"),
outdir)
logger.info("loading isoforms")
u2variants = {}
for accession, variant in ippro.get_isoforms(pro_url).items():
protein_acc = variant["protein_acc"]
try:
variants = u2variants[protein_acc]
except KeyError:
variants = u2variants[protein_acc] = []
finally:
variants.append((
accession,
variant["length"],
variant["crc64"],
variant["matches"]
))
# Sorting variants by accession (so XXXX-1 comes before XXXX-2)
for variants in u2variants.values():
variants.sort(key=lambda x: x[0])
logger.info("loading signatures")
con = cx_Oracle.connect(pro_url)
cur = con.cursor()
signatures = ippro.get_signatures(cur)
cur.close()
con.close()
logger.info("spawning processes")
processes = max(1, processes - 1)
ctx = mp.get_context(method="spawn")
workers = []
with Store(p_proteins) as proteins:
proteins_per_file = math.ceil(len(proteins) / processes)
start_acc = None
for i, uniprot_acc in enumerate(proteins):
if not i % proteins_per_file:
if start_acc:
filename = f"match_{len(workers)+1}.xml"
filepath = os.path.join(outdir, filename)
p = ctx.Process(target=_write_match_tmp,
args=(signatures, u2variants, p_proteins,
p_uniprot2matches, start_acc,
uniprot_acc, filepath))
p.start()
workers.append((p, filepath))
start_acc = uniprot_acc
filename = f"match_{len(workers) + 1}.xml"
filepath = os.path.join(outdir, filename)
p = ctx.Process(target=_write_match_tmp,
args=(signatures, u2variants, p_proteins,
p_uniprot2matches, start_acc, None, filepath))
p.start()
workers.append((p, filepath))
logger.info("waiting for processes")
con = MySQLdb.connect(**url2dict(stg_url), charset="utf8mb4")
cur = con.cursor()
cur.execute(
"""
SELECT name, name_alt, type, num_entries, version, release_date
FROM webfront_database
ORDER BY name_long
"""
)
doc = getDOMImplementation().createDocument(None, None, None)
elem = doc.createElement("release")
for name, name_alt, db_type, entry_count, version, date in cur:
if db_type == "entry":
dbinfo = doc.createElement("dbinfo")
dbinfo.setAttribute("dbname", name_alt)
if version:
dbinfo.setAttribute("version", version)
if entry_count:
dbinfo.setAttribute("entry_count", str(entry_count))
if date:
dbinfo.setAttribute("file_date",
date.strftime("%d-%b-%y").upper())
elem.appendChild(dbinfo)
cur.close()
con.close()
output = os.path.join(outdir, "match_complete.xml.gz")
ofh = None
for i, (p, filepath) in enumerate(workers):
p.join()
if i == 0:
# First process to complete
logger.info("concatenating XML files")
# Open file and write header
ofh = gzip.open(output, "wt", encoding="utf-8")
ofh.write('<?xml version="1.0" encoding="UTF-8"?>\n')
ofh.write('<!DOCTYPE interpromatch SYSTEM "match_complete.dtd">\n')
ofh.write('<interpromatch>\n')
elem.writexml(ofh, addindent=" ", newl="\n")
with open(filepath, "rt", encoding="utf-8") as ifh:
while (block := ifh.read(1024)) != '':
ofh.write(block)
os.remove(filepath)
logger.info(f"\t{i + 1} / {len(workers)}")
ofh.write('</interpromatch>\n')
ofh.close()
logger.info("complete")
def _write_feature_tmp(features: dict, p_proteins: str,
p_uniprot2features: str, start: str,
stop: Optional[str], output: str):
proteins = Store(p_proteins)
u2features = Store(p_uniprot2features)
with open(output, "wt", encoding="utf-8") as fh:
doc = getDOMImplementation().createDocument(None, None, None)
# for uniprot_acc, protein in proteins.range(start, stop):
for uniprot_acc, protein_features in u2features.range(start, stop):
protein = proteins[uniprot_acc]
elem = doc.createElement("protein")
elem.setAttribute("id", uniprot_acc)
elem.setAttribute("name", protein["identifier"])
elem.setAttribute("length", str(protein["length"]))
elem.setAttribute("crc64", protein["crc64"])
for feature_acc in sorted(protein_features):
feature = features[feature_acc]
feature_match = protein_features[feature_acc]
match = doc.createElement("match")
match.setAttribute("id", feature_acc)
match.setAttribute("name", feature["name"])
match.setAttribute("dbname", feature["database"])
match.setAttribute("status", 'T')
match.setAttribute("model", feature_acc)
match.setAttribute("evd", feature["evidence"])
for loc in sorted(feature_match["locations"]):
# there is only one fragment per location
pos_start, pos_end, seq_feature = loc
lcn = doc.createElement("lcn")
lcn.setAttribute("start", str(pos_start))
lcn.setAttribute("end", str(pos_end))
if seq_feature:
lcn.setAttribute("sequence-feature", seq_feature)
match.appendChild(lcn)
elem.appendChild(match)
elem.writexml(fh, addindent=" ", newl="\n")
def export_features_matches(url: str, p_proteins: str, p_uniprot2features: str,
outdir: str, processes: int = 8):
shutil.copy(os.path.join(os.path.dirname(__file__), "extra.dtd"),
outdir)
logger.info("loading features")
con = cx_Oracle.connect(url)
cur = con.cursor()
features = ippro.get_features(cur)
cur.close()
con.close()
logger.info("spawning processes")
processes = max(1, processes - 1)
ctx = mp.get_context(method="spawn")
workers = []
with Store(p_uniprot2features) as proteins:
proteins_per_file = math.ceil(len(proteins) / processes)
start_acc = None
for i, uniprot_acc in enumerate(proteins):
if not i % proteins_per_file:
if start_acc:
filename = f"extra_{len(workers) + 1}.xml"
filepath = os.path.join(outdir, filename)
p = ctx.Process(target=_write_feature_tmp,
args=(features, p_proteins,
p_uniprot2features, start_acc,
uniprot_acc, filepath))
p.start()
workers.append((p, filepath))
start_acc = uniprot_acc
filename = f"extra_{len(workers) + 1}.xml"
filepath = os.path.join(outdir, filename)
p = ctx.Process(target=_write_feature_tmp,
args=(features, p_proteins, p_uniprot2features,
start_acc, None, filepath))
p.start()
workers.append((p, filepath))
logger.info("concatenating XML files")
output = os.path.join(outdir, "extra.xml.gz")
with gzip.open(output, "wt", encoding="utf-8") as fh:
fh.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fh.write('<!DOCTYPE interproextra SYSTEM "extra.dtd">\n')
fh.write('<interproextra>\n')
doc = getDOMImplementation().createDocument(None, None, None)
elem = doc.createElement("release")
databases = {(f["database"], f["version"]) for f in features.values()}
for name, version in sorted(databases):
dbinfo = doc.createElement("dbinfo")
dbinfo.setAttribute("dbname", name)
if version:
dbinfo.setAttribute("version", version)
elem.appendChild(dbinfo)
elem.writexml(fh, addindent=" ", newl="\n")
for i, (p, filepath) in enumerate(workers):
p.join()
with open(filepath, "rt", encoding="utf-8") as tfh:
for line in tfh:
fh.write(line)
os.remove(filepath)
logger.info(f"\t{i+1} / {len(workers)}")
fh.write('</interproextra>\n')
logger.info("complete")
def export_structure_matches(pdbe_url: str, p_proteins: str, p_structures: str,
outdir:str):
shutil.copy(os.path.join(os.path.dirname(__file__), "feature.dtd"),
outdir)
logger.info("loading structures")
uniprot2pdbe = {}
for pdb_id, entry in loadobj(p_structures).items():
for uniprot_acc, chains in entry["proteins"].items():
try:
uniprot2pdbe[uniprot_acc][pdb_id] = chains
except KeyError:
uniprot2pdbe[uniprot_acc] = {pdb_id: chains}
logger.info("loading CATH/SCOP domains")
uni2prot2cath = pdbe.get_cath_domains(pdbe_url)
uni2prot2scop = pdbe.get_scop_domains(pdbe_url)
logger.info("writing file")
output = os.path.join(outdir, "feature.xml.gz")
with gzip.open(output, "wt", encoding="utf-8") as fh:
fh.write('<?xml version="1.0" encoding="UTF-8"?>\n')
fh.write('<!DOCTYPE interprofeature SYSTEM "feature.dtd">\n')
fh.write('<interprofeature>\n')
with Store(p_proteins) as proteins:
doc = getDOMImplementation().createDocument(None, None, None)
for uniprot_acc, protein in proteins.items():
pdb_entries = uniprot2pdbe.get(uniprot_acc, {})
cath_entries = uni2prot2cath.get(uniprot_acc, {})
scop_entries = uni2prot2scop.get(uniprot_acc, {})
if pdb_entries or cath_entries or scop_entries:
elem = doc.createElement("protein")
elem.setAttribute("id", uniprot_acc)
elem.setAttribute("name", protein["identifier"])
elem.setAttribute("length", str(protein["length"]))
elem.setAttribute("crc64", protein["crc64"])
for pdb_id in sorted(pdb_entries):
chains = pdb_entries[pdb_id]
for chain_id in sorted(chains):
domain = doc.createElement("domain")
domain.setAttribute("id", f"{pdb_id}{chain_id}")
domain.setAttribute("dbname", "PDB")
for loc in chains[chain_id]:
start = loc["protein_start"]
end = loc["protein_end"]
coord = doc.createElement("coord")
coord.setAttribute("pdb", pdb_id)
coord.setAttribute("chain", chain_id)
coord.setAttribute("start", str(start))
coord.setAttribute("end", str(end))
domain.appendChild(coord)
elem.appendChild(domain)
for domain_id in sorted(cath_entries):
entry = cath_entries[domain_id]
domain = doc.createElement("domain")
domain.setAttribute("id", domain_id)
domain.setAttribute("cfn", entry["superfamily"]["id"])
domain.setAttribute("dbname", "CATH")
for loc in entry["locations"]:
coord = doc.createElement("coord")
coord.setAttribute("pdb", entry["pdb_id"])
coord.setAttribute("chain", entry["chain"])
coord.setAttribute("start", str(loc["start"]))
coord.setAttribute("end", str(loc["end"]))
domain.appendChild(coord)
elem.appendChild(domain)
for domain_id in sorted(scop_entries):
entry = scop_entries[domain_id]
domain = doc.createElement("domain")
domain.setAttribute("id", domain_id)
domain.setAttribute("cfn", entry["superfamily"]["id"])
domain.setAttribute("dbname", "SCOP")
for loc in entry["locations"]:
coord = doc.createElement("coord")
coord.setAttribute("pdb", entry["pdb_id"])
coord.setAttribute("chain", entry["chain"])
coord.setAttribute("start", str(loc["start"]))
coord.setAttribute("end", str(loc["end"]))
domain.appendChild(coord)
elem.appendChild(domain)
elem.writexml(fh, addindent=" ", newl="\n")
fh.write('</interprofeature>\n')
logger.info("complete")
| 0
| 0
| 0
| 0
| 0
| 34,377
| 0
| 113
| 585
|
6857bd448442ddfa8d293e203ac7bc287e7f8c42
| 4,119
|
py
|
Python
|
code/plotter_tests.py
|
twf2360/Newtons_Cradle
|
84cab9b81765146d1453a68ee0f50f2a7e511c6b
|
[
"MIT"
] | null | null | null |
code/plotter_tests.py
|
twf2360/Newtons_Cradle
|
84cab9b81765146d1453a68ee0f50f2a7e511c6b
|
[
"MIT"
] | null | null | null |
code/plotter_tests.py
|
twf2360/Newtons_Cradle
|
84cab9b81765146d1453a68ee0f50f2a7e511c6b
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
from calculator import calculator
from plotter import plotter
from random import random
'''
In order to test the functions of the plotter class, data must first be collected to plot.
To test the simplest aspects - such as that total energy = PE + KE, and conservation of energy - the simulation of a single ball (a simple pendulum) will be tested
'''
def test_list_lengths():
'''
lots of lists are created by the plotter class, and their lengths should be known: in most cases they should equal the number of iterations
'''
''' as we are testing class attributes, not results, we need to create a new class instance'''
theta = math.pi/6 #initial starting angle!
get_results = calculator(0.0001,50000)
get_results.get_balls(number = 1,positions= [[1 * math.sin(theta), -1 * math.cos(theta)]], velocities= [[0,0]], radii=[0.02], masses=[1], anchors= [[0,0]])
get_results.calculate(approximation='rk2', density=0)
plot = plotter('system_states_over_time', 1)
assert len(plot.timelist) == 50000
assert len(plot.total_ke_list) == 50000
assert len(plot.total_pe_list) == 50000
assert len(plot.total_energy_by_time) == 50000
assert len(plot.potential_energy_by_time) == 50000
assert len(plot.kinetic_energy_by_time) == 50000
assert len(plot.list_position_by_time) == 50000
def test_energy_addition():
'''
test to ensure that when adding kinetic and potential energy to get total energy, the addition is done correctly
'''
''' use np.isclose due to errors in floating point maths'''
energies = plotter_init()
ke = energies[0]
pe = energies[1]
total = energies[2]
assert (np.isclose(total[0] , (np.add(ke[0] , pe[0])))).all(), "total energy does not equal potential plus kinetic at the start"
random_time = 50000 * random()
random_time_int = math.floor(random_time)
assert (np.isclose(total[random_time_int], (np.add(ke[random_time_int], pe[random_time_int])))).all(), "total energy does not equal potential plus kinetic at a random point"
def two_ball_init():
'''
some of the results rely on the total energy of the system, and therefore this is to check that these things are calculated correctly with a more complicated system
'''
theta = math.pi/6 #initial starting angle!
get_results = calculator(0.0001,50000)
get_results.get_balls(number = 2,positions= [[1 * math.sin(theta), -1 * math.cos(theta)], [0,-0.4]], velocities= [[0,0], [0,0]], radii=[0.02,0.02], masses=[1,1], anchors= [[0,0], [0.4]])
get_results.calculate(approximation='rk2', density=0)
plot = plotter('system_states_over_time', 2)
return plot
def test_two_ball_energy():
'''
testing that the total energy of the system is calculated correctly - kinetic plus potential.
'''
plot = two_ball_init()
ke = plot.total_kinetic_energy()
pe = plot.total_potential_energy()
total = plot.total_energy()
ke_plus_pe = ke + pe
assert np.isclose(ke_plus_pe[0], total[0]).all(), "total energy not equal to kinetic plus potential at start"
random_time = 50000 * random()
random_time_int = math.floor(random_time)
assert np.isclose(ke_plus_pe[random_time_int], total[random_time_int]).all(), "total energy not equal to kinetic plus potential at random time"
| 41.606061
| 190
| 0.704783
|
import pytest
import math
import numpy as np
from ball import ball
import matplotlib.pyplot as plt
import copy
from itertools import combinations
import sys
import pandas as pd
from calculator import calculator
import json
import dataframes
from plotter import plotter
from random import random
'''
In order to test the functions of the plotter class, data must first be collected to plot.
To test the simplest aspects - such as that total energy = PE + KE, and conservation of energy - the simulation of a single ball (a simple pendulum) will be tested
'''
def plotter_init():
theta = math.pi/6 #initial starting angle!
get_results = calculator(0.0001,50000)
get_results.get_balls(number = 1,positions= [[1 * math.sin(theta), -1 * math.cos(theta)]], velocities= [[0,0]], radii=[0.02], masses=[1], anchors= [[0,0]])
get_results.calculate(approximation='rk2', density=0)
plot = plotter('system_states_over_time', 1)
ke_by_time = plot.total_kinetic_energy()
pe_by_time = plot.total_potential_energy()
total_e_by_time = plot.total_energy()
return [ke_by_time, pe_by_time, total_e_by_time]
def test_list_lengths():
'''
lots of lists are created by the plotter class, and their lengths should be known: in most cases they should equal the number of iterations
'''
''' as we are testing class attributes, not results, we need to create a new class instance'''
theta = math.pi/6 #initial starting angle!
get_results = calculator(0.0001,50000)
get_results.get_balls(number = 1,positions= [[1 * math.sin(theta), -1 * math.cos(theta)]], velocities= [[0,0]], radii=[0.02], masses=[1], anchors= [[0,0]])
get_results.calculate(approximation='rk2', density=0)
plot = plotter('system_states_over_time', 1)
assert len(plot.timelist) == 50000
assert len(plot.total_ke_list) == 50000
assert len(plot.total_pe_list) == 50000
assert len(plot.total_energy_by_time) == 50000
assert len(plot.potential_energy_by_time) == 50000
assert len(plot.kinetic_energy_by_time) == 50000
assert len(plot.list_position_by_time) == 50000
def test_energy_addition():
'''
test to ensure that when adding kinetic and potential energy to get total energy, the addition is done correctly
'''
''' use np.isclose due to errors in floating point maths'''
energies = plotter_init()
ke = energies[0]
pe = energies[1]
total = energies[2]
assert (np.isclose(total[0] , (np.add(ke[0] , pe[0])))).all(), "total energy does not equal potential plus kinetic at the start"
random_time = 50000 * random()
random_time_int = math.floor(random_time)
assert (np.isclose(total[random_time_int], (np.add(ke[random_time_int], pe[random_time_int])))).all(), "total energy does not equal potential plus kinetic at a random point"
def two_ball_init():
'''
some of the results rely on the total energy of the system, and therefore this is to check that these things are calculated correctly with a more complicated system
'''
theta = math.pi/6 #initial starting angle!
get_results = calculator(0.0001,50000)
get_results.get_balls(number = 2,positions= [[1 * math.sin(theta), -1 * math.cos(theta)], [0,-0.4]], velocities= [[0,0], [0,0]], radii=[0.02,0.02], masses=[1,1], anchors= [[0,0], [0.4]])
get_results.calculate(approximation='rk2', density=0)
plot = plotter('system_states_over_time', 2)
return plot
def test_two_ball_energy():
'''
testing that the total energy of the system is calculated correctly - kinetic plus potential.
'''
plot = two_ball_init()
ke = plot.total_kinetic_energy()
pe = plot.total_potential_energy()
total = plot.total_energy()
ke_plus_pe = ke + pe
assert np.isclose(ke_plus_pe[0], total[0]).all(), "total energy not equal to kinetic plus potential at start"
random_time = 50000 * random()
random_time_int = math.floor(random_time)
assert np.isclose(ke_plus_pe[random_time_int], total[random_time_int]).all(), "total energy not equal to kinetic plus potential at random time"
| 0
| 0
| 0
| 0
| 0
| 548
| 0
| -22
| 227
|
c67414731c3470e8d42acae5cbdf0fa8ded1e0a9
| 676
|
py
|
Python
|
example/functions/add.py
|
osblinnikov/pytorch-binary
|
61842542c94766ffa21b0fa3ea86a435f802b95f
|
[
"MIT"
] | 293
|
2018-08-17T14:29:28.000Z
|
2022-02-28T12:35:48.000Z
|
example/functions/add.py
|
osblinnikov/pytorch-binary
|
61842542c94766ffa21b0fa3ea86a435f802b95f
|
[
"MIT"
] | 1
|
2018-08-21T14:48:38.000Z
|
2018-08-21T14:48:38.000Z
|
example/functions/add.py
|
osblinnikov/pytorch-binary
|
61842542c94766ffa21b0fa3ea86a435f802b95f
|
[
"MIT"
] | 70
|
2018-08-21T03:39:38.000Z
|
2022-03-01T07:21:08.000Z
|
# functions/add.py
| 29.391304
| 68
| 0.674556
|
# functions/add.py
import torch
from torch.autograd import Function
from _ext import my_lib
class MyAddFunction(Function):
def forward(self, input1, input2):
output = input1.new()
if not input1.is_cuda:
my_lib.my_lib_add_forward(input1, input2, output)
else:
my_lib.my_lib_add_forward_cuda(input1, input2, output)
return output
def backward(self, grad_output):
grad_input = grad_output.new()
if not grad_output.is_cuda:
my_lib.my_lib_add_backward(grad_output, grad_input)
else:
my_lib.my_lib_add_backward_cuda(grad_output, grad_input)
return grad_input
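# Illustrative usage sketch (assumption; requires the compiled `my_lib` extension and the
# legacy autograd API where Function instances are called directly):
# add = MyAddFunction()
# result = add(torch.ones(5), torch.ones(5))  # element-wise sum computed by the C extension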
| 0
| 0
| 0
| 560
| 0
| 0
| 0
| 7
| 89
|
e8a4d6853804f4590a0288c9c5eca52412b23f32
| 31,243
|
py
|
Python
|
augly/audio/transforms.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | 1
|
2021-09-29T21:27:50.000Z
|
2021-09-29T21:27:50.000Z
|
augly/audio/transforms.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | null | null | null |
augly/audio/transforms.py
|
Ierezell/AugLy
|
a7dca8c36bc05dbd7694373fe9b883d6ff720f56
|
[
"MIT"
] | 1
|
2021-07-02T13:08:55.000Z
|
2021-07-02T13:08:55.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import augly.utils as utils
"""
Base Classes for Transforms
"""
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> audio_array = np.array([...])
>>> pitch_shift_tsfm = PitchShift(n_steps=4.0, p=0.5)
>>> shifted_audio = pitch_shift_tsfm(audio_array, sample_rate)
"""
| 35.223224
| 92
| 0.625964
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import random
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import augly.audio.functional as F
import augly.utils as utils
import numpy as np
from augly.audio.utils import RNGSeed
"""
Base Classes for Transforms
"""
class BaseTransform(object):
def __init__(self, p: float = 1.0):
"""
@param p: the probability of the transform being applied; default value is 1.0
"""
assert 0 <= p <= 1.0, "p must be a value in the range [0, 1]"
self.p = p
def __call__(
self,
audio: np.ndarray,
sample_rate: int = utils.DEFAULT_SAMPLE_RATE,
metadata: Optional[List[Dict[str, Any]]] = None,
force: bool = False,
) -> Tuple[np.ndarray, int]:
"""
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@param force: if set to True, the transform will be applied. otherwise,
application is determined by the probability set
@returns: the augmented audio array and sample rate
"""
assert isinstance(audio, np.ndarray), "Audio passed in must be a np.ndarray"
assert type(force) == bool, "Expected type bool for variable `force`"
if not force and random.random() > self.p:
return audio, sample_rate
return self.apply_transform(audio, sample_rate, metadata)
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
This function is to be implemented in the child classes.
From this function, call the augmentation function with the
parameters specified
"""
raise NotImplementedError()
"""
Non-Random Transforms
These classes below are essentially class-based versions of the augmentation
functions previously defined. These classes were developed such that they can
be used with Composition operators (such as `torchvision`'s) and to support
use cases where a specific transform with specific attributes needs to be
applied multiple times.
Example:
>>> audio_array = np.array([...])
>>> pitch_shift_tsfm = PitchShift(n_steps=4.0, p=0.5)
>>> shifted_audio = pitch_shift_tsfm(audio_array, sample_rate)
"""
class AddBackgroundNoise(BaseTransform):
def __init__(
self,
background_audio: Optional[Union[str, np.ndarray]] = None,
snr_level_db: float = 10.0,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param snr_level_db: signal-to-noise ratio in dB
@param seed: a NumPy random generator (or seed) such that these results
remain reproducible
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.background_audio = background_audio
self.snr_level_db = snr_level_db
self.seed = seed
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Mixes in a background sound into the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.add_background_noise(
audio,
sample_rate,
self.background_audio,
self.snr_level_db,
self.seed,
metadata=metadata,
)
class ApplyLambda(BaseTransform):
def __init__(
self,
aug_function: Callable[..., Tuple[np.ndarray, int]] = lambda x, y: (x, y),
p: float = 1.0,
):
"""
@param aug_function: the augmentation function to be applied onto the audio
(should expect the audio np.ndarray & sample rate int as input, and return
the transformed audio & sample rate)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.aug_function = aug_function
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Apply a user-defined lambda to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.apply_lambda(audio, sample_rate, self.aug_function, metadata=metadata)
class ChangeVolume(BaseTransform):
def __init__(self, volume_db: float = 0.0, p: float = 1.0):
"""
@param volume_db: the decibel amount by which to either increase (positive
value) or decrease (negative value) the volume of the audio
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.volume_db = volume_db
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the volume of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.change_volume(audio, sample_rate, self.volume_db, metadata=metadata)
class Clicks(BaseTransform):
def __init__(self, seconds_between_clicks: float = 0.5, p: float = 1.0):
"""
@param seconds_between_clicks: the amount of time between each click that will
be added to the audio, in seconds
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.seconds_between_clicks = seconds_between_clicks
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds clicks to the audio at a given regular interval
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.clicks(
audio, sample_rate, self.seconds_between_clicks, metadata=metadata
)
class Clip(BaseTransform):
def __init__(
self, offset_factor: float = 0.0, duration_factor: float = 1.0, p: float = 1.0
):
"""
@param offset_factor: start point of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param duration_factor: the length of the crop relative to the audio duration
(this parameter is multiplied by the audio duration)
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor = offset_factor
self.duration_factor = duration_factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Clips the audio using the specified offset and duration factors
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.clip(
audio,
sample_rate,
self.offset_factor,
self.duration_factor,
metadata=metadata,
)
class Harmonic(BaseTransform):
def __init__(
self,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
p: float = 1.0,
):
"""
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the harmonic part of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.harmonic(
audio,
sample_rate,
self.kernel_size,
self.power,
self.margin,
metadata=metadata,
)
class HighPassFilter(BaseTransform):
def __init__(self, cutoff_hz: float = 3000.0, p: float = 1.0):
"""
@param cutoff_hz: frequency (in Hz) where signals with lower frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) below this point
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.cutoff_hz = cutoff_hz
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency higher than the given cutoff to pass
through and attenuates signals with frequencies lower than the cutoff frequency
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.high_pass_filter(audio, sample_rate, self.cutoff_hz, metadata=metadata)
class InsertInBackground(BaseTransform):
def __init__(
self,
offset_factor: float = 0.0,
background_audio: Optional[Union[str, np.ndarray]] = None,
seed: Optional[RNGSeed] = None,
p: float = 1.0,
):
"""
@param offset_factor: start point of the crop relative to the background duration
(this parameter is multiplied by the background duration)
@param background_audio: the path to the background audio or a variable of type
np.ndarray containing the background audio. If set to `None`, the background
audio will be white noise
@param seed: a NumPy random generator (or seed) such that these results
remain reproducible
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.offset_factor = offset_factor
self.background_audio = background_audio
self.seed = seed
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inserts the audio into a background audio without overlap.
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.insert_in_background(
audio,
sample_rate,
self.offset_factor,
self.background_audio,
self.seed,
metadata=metadata,
)
class InvertChannels(BaseTransform):
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Inverts the channels of the audio.
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.invert_channels(audio, sample_rate, metadata=metadata)
class LowPassFilter(BaseTransform):
def __init__(self, cutoff_hz: float = 500.0, p: float = 1.0):
"""
@param cutoff_hz: frequency (in Hz) where signals with higher frequencies will
begin to be reduced by 6dB per octave (doubling in frequency) above this point
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.cutoff_hz = cutoff_hz
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Allows audio signals with a frequency lower than the given cutoff to pass through
and attenuates signals with frequencies higher than the cutoff frequency
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.low_pass_filter(audio, sample_rate, self.cutoff_hz, metadata=metadata)
class Normalize(BaseTransform):
def __init__(
self,
norm: Optional[float] = np.inf,
axis: int = 0,
threshold: Optional[float] = None,
fill: Optional[bool] = None,
p: float = 1.0,
):
"""
@param norm: the type of norm to compute:
- np.inf: maximum absolute value
- -np.inf: minimum absolute value
- 0: number of non-zeros (the support)
- float: corresponding l_p norm
- None: no normalization is performed
@param axis: axis along which to compute the norm
@param threshold: if provided, only the columns (or rows) with norm of at
least `threshold` are normalized
@param fill: if None, then columns (or rows) with norm below `threshold` are
left as is. If False, then columns (rows) with norm below `threshold` are
set to 0. If True, then columns (rows) with norm below `threshold` are
filled uniformly such that the corresponding norm is 1
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.norm, self.axis = norm, axis
self.threshold, self.fill = threshold, fill
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Normalizes the audio array along the chosen axis (norm(audio, axis=axis) == 1)
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.normalize(
audio,
sample_rate,
self.norm,
self.axis,
self.threshold,
self.fill,
metadata=metadata,
)
class PeakingEqualizer(BaseTransform):
def __init__(
self,
center_hz: float = 500.0,
q: float = 1.0,
gain_db: float = -3.0,
p: float = 1.0,
):
"""
@param center_hz: point in the frequency spectrum at which EQ is applied
@param q: ratio of center frequency to bandwidth; bandwidth is inversely
proportional to Q, meaning that as you raise Q, you narrow the bandwidth
@param gain_db: amount of gain (boost) or reduction (cut) that is applied
at a given frequency. Beware of clipping when using positive gain
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.center_hz = center_hz
self.q = q
self.gain_db = gain_db
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Applies a two-pole peaking equalization filter. The signal-level at and around
`center_hz` can be increased or decreased, while all other frequencies are unchanged
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.peaking_equalizer(
audio, sample_rate, self.center_hz, self.q, self.gain_db, metadata=metadata
)
class Percussive(BaseTransform):
def __init__(
self,
kernel_size: int = 31,
power: float = 2.0,
margin: float = 1.0,
p: float = 1.0,
):
"""
@param kernel_size: kernel size for the median filters
@param power: exponent for the Wiener filter when constructing soft mask matrices
@param margin: margin size for the masks
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.kernel_size = kernel_size
self.power = power
self.margin = margin
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Extracts the percussive part of the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.percussive(
audio,
sample_rate,
self.kernel_size,
self.power,
self.margin,
metadata=metadata,
)
class PitchShift(BaseTransform):
def __init__(self, n_steps: float = 1.0, p: float = 1.0):
"""
@param n_steps: each step is equal to one semitone
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.n_steps = n_steps
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Shifts the pitch of the audio by `n_steps`
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.pitch_shift(audio, sample_rate, self.n_steps, metadata=metadata)
class Reverb(BaseTransform):
def __init__(
self,
reverberance: float = 50.0,
hf_damping: float = 50.0,
room_scale: float = 100.0,
stereo_depth: float = 100.0,
pre_delay: float = 0.0,
wet_gain: float = 0.0,
wet_only: bool = False,
p: float = 1.0,
):
"""
@param reverberance: (%) sets the length of the reverberation tail. This
determines how long the reverberation continues for after the original
sound being reverbed comes to an end, and so simulates the "liveliness"
of the room acoustics
@param hf_damping: (%) increasing the damping produces a more "muted" effect.
The reverberation does not build up as much, and the high frequencies decay
faster than the low frequencies
@param room_scale: (%) sets the size of the simulated room. A high value will
simulate the reverberation effect of a large room and a low value will
simulate the effect of a small room
@param stereo_depth: (%) sets the apparent "width" of the reverb effect for
stereo tracks only. Increasing this value applies more variation between
left and right channels, creating a more "spacious" effect. When set at
zero, the effect is applied independently to left and right channels
@param pre_delay: (ms) delays the onset of the reverberation for the set time
after the start of the original input. This also delays the onset of the
reverb tail
@param wet_gain: (db) applies volume adjustment to the reverberation ("wet")
component in the mix
@param wet_only: only the wet signal (added reverberation) will be in the
resulting output, and the original audio will be removed
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.reverberance = reverberance
self.hf_damping = hf_damping
self.room_scale = room_scale
self.stereo_depth = stereo_depth
self.pre_delay = pre_delay
self.wet_gain = wet_gain
self.wet_only = wet_only
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adds reverberation to the audio
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.reverb(
audio,
sample_rate,
self.reverberance,
self.hf_damping,
self.room_scale,
self.stereo_depth,
self.pre_delay,
self.wet_gain,
self.wet_only,
metadata=metadata,
)
class Speed(BaseTransform):
def __init__(self, factor: float = 2.0, p: float = 1.0):
"""
@param factor: the speed factor. If factor > 1 the audio will be sped up by that
factor; if factor < 1 the audio will be slowed down by that factor
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Changes the speed of the audio, affecting pitch as well
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.speed(audio, sample_rate, self.factor, metadata=metadata)
class Tempo(BaseTransform):
def __init__(self, factor: float = 2.0, p: float = 1.0):
"""
@param factor: the tempo factor. If factor > 1 the audio will be sped up by that
factor; if factor < 1 the audio will be slowed down by that factor, without
affecting the pitch
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.factor = factor
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Adjusts the tempo of the audio by a given factor
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.tempo(audio, sample_rate, self.factor, metadata=metadata)
class TimeStretch(BaseTransform):
def __init__(self, rate: float = 1.5, p: float = 1.0):
"""
@param rate: the time stretch factor
@param p: the probability of the transform being applied; default value is 1.0
"""
super().__init__(p)
self.rate = rate
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Time-stretches the audio by a fixed rate
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.time_stretch(audio, sample_rate, self.rate, metadata=metadata)
class ToMono(BaseTransform):
def apply_transform(
self,
audio: np.ndarray,
sample_rate: int,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> Tuple[np.ndarray, int]:
"""
Converts the audio from stereo to mono by averaging samples across channels
@param audio: the audio array to be augmented
@param sample_rate: the audio sample rate of the inputted audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, sample rates, etc. will be
appended to the inputted list. If set to None, no metadata will be appended
@returns: the augmented audio array and sample rate
"""
return F.to_mono(audio, sample_rate, metadata=metadata)
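# Usage sketch (hypothetical): exercises the transforms defined above, assuming the
# BaseTransform base class and the functional module `F` imported by this file behave
# as the docstrings describe. The noise buffer, sample rate and parameters are placeholders.
import numpy as np

example_audio = np.random.uniform(-1.0, 1.0, 44100).astype(np.float32)  # roughly 1 s of noise
example_rate = 44100
for transform in (Speed(factor=1.25), Tempo(factor=0.9), TimeStretch(rate=1.1), ToMono()):
    example_audio, example_rate = transform.apply_transform(example_audio, example_rate, metadata=None)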
| 0
| 0
| 0
| 29,920
| 0
| 0
| 0
| 65
| 572
|
e0ef9bce5e87aa31386e4253a0d246ce6c621dd9
| 2,663
|
py
|
Python
|
python_code/easy/412_Fizz_Buzz_easy/solution.py
|
timshenkao/interview_coding_exercises
|
c531fa5e0c09faef976539275589e957fcb88393
|
[
"Apache-2.0"
] | null | null | null |
python_code/easy/412_Fizz_Buzz_easy/solution.py
|
timshenkao/interview_coding_exercises
|
c531fa5e0c09faef976539275589e957fcb88393
|
[
"Apache-2.0"
] | null | null | null |
python_code/easy/412_Fizz_Buzz_easy/solution.py
|
timshenkao/interview_coding_exercises
|
c531fa5e0c09faef976539275589e957fcb88393
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# 412. Fizz Buzz https://leetcode.com/problems/fizz-buzz/
# Given an integer n, return a string array answer (1-indexed) where:
# answer[i] == "FizzBuzz" if i is divisible by 3 and 5.
# answer[i] == "Fizz" if i is divisible by 3.
# answer[i] == "Buzz" if i is divisible by 5.
# answer[i] == i (as a string) if none of the above conditions are true.
# 1 <= n <= 10^4
| 37.507042
| 117
| 0.579046
|
# Copyright (c) 2021 - present, Timur Shenkao
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from collections import OrderedDict
from typing import List
# 412. Fizz Buzz https://leetcode.com/problems/fizz-buzz/
# Given an integer n, return a string array answer (1-indexed) where:
# answer[i] == "FizzBuzz" if i is divisible by 3 and 5.
# answer[i] == "Fizz" if i is divisible by 3.
# answer[i] == "Buzz" if i is divisible by 5.
# answer[i] == i (as a string) if none of the above conditions are true.
# 1 <= n <= 10^4
class Solution:
def fizz_buzz(self, n: int) -> List[str]:
""" Time complexity: O(n). We iterate from 1 to n.
Space complexity: O(n). We create output list of strings.
"""
fizz = 'Fizz'
buzz = 'Buzz'
fizz_buzz = 'FizzBuzz'
result = list()
for i in range(1, n + 1):
if (i % 3 == 0) and (i % 5 == 0):
result.append(fizz_buzz)
elif i % 3 == 0:
result.append(fizz)
elif i % 5 == 0:
result.append(buzz)
else:
result.append(str(i))
return result
def fizz_buzz_lookup(self, n: int) -> List[str]:
""" Time complexity: O(n). We iterate from 1 to n. We perform fixed amount of computations on each iteration.
Space complexity: O(n). We create output list of strings.
"""
# Lookup for all fizzbuzz mappings
fizz_buzz_dict = OrderedDict({3 : "Fizz", 5 : "Buzz"})
result = list()
i_result = list()
for i in range(1, n + 1):
i_result.clear()
for key in fizz_buzz_dict.keys():
# If the number is divisible by key,
# then add the corresponding string mapping to current i_result
if i % key == 0:
i_result.append(fizz_buzz_dict[key])
if not i_result:
i_result.append(str(i))
result.append(''.join(i_result))
return result
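# Quick sanity check for the two implementations above (illustrative only):
if __name__ == '__main__':
    solution = Solution()
    assert solution.fizz_buzz(15) == solution.fizz_buzz_lookup(15)
    print(solution.fizz_buzz(15)[14])  # prints 'FizzBuzz' for i == 15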
| 0
| 0
| 0
| 1,490
| 0
| 0
| 0
| 16
| 68
|
aa8755d6383871ea335e941c7857fa0ecccd50d3
| 1,458
|
py
|
Python
|
heltour/tournament/migrations/0110_scheduledevent.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/migrations/0110_scheduledevent.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/migrations/0110_scheduledevent.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-08 18:16
from __future__ import unicode_literals
| 42.882353
| 138
| 0.613855
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-08 18:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tournament', '0109_auto_20161108_0128'),
]
operations = [
migrations.CreateModel(
name='ScheduledEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
('type', models.CharField(choices=[('notify_mods_unscheduled', 'Notify mods of unscheduled games')], max_length=255)),
('offset', models.DurationField()),
('relative_to', models.CharField(choices=[('round_start', 'Round start'), ('round_end', 'Round end')], max_length=255)),
('last_run', models.DateTimeField(blank=True, null=True)),
('league', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.League')),
('season', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Season')),
],
options={
'abstract': False,
},
),
]
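# Rough model definition implied by the CreateModel operation above. This is a reader's
# sketch only; the real model lives in heltour's tournament app and may differ.
from django.db import models

class ScheduledEvent(models.Model):
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    type = models.CharField(max_length=255,
                            choices=[('notify_mods_unscheduled', 'Notify mods of unscheduled games')])
    offset = models.DurationField()
    relative_to = models.CharField(max_length=255,
                                   choices=[('round_start', 'Round start'), ('round_end', 'Round end')])
    last_run = models.DateTimeField(blank=True, null=True)
    league = models.ForeignKey('tournament.League', blank=True, null=True, on_delete=models.CASCADE)
    season = models.ForeignKey('tournament.Season', blank=True, null=True, on_delete=models.CASCADE)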
| 0
| 0
| 0
| 1,247
| 0
| 0
| 0
| 30
| 68
|
09cf4e8dc61f85f2ebaa498eb81cabb195f04722
| 3,358
|
py
|
Python
|
Sketches/MH/Layout/Visualisation/Graph/RenderingParticle.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/MH/Layout/Visualisation/Graph/RenderingParticle.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/MH/Layout/Visualisation/Graph/RenderingParticle.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Simple topography viewer server - takes textual commands from a single socket
# and renders the appropriate graph
| 37.730337
| 114
| 0.611674
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Simple topography viewer server - takes textual commands from a single socket
# and renders the appropriate graph
import pygame
from Physics import Particle as BaseParticle
class RenderingParticle(BaseParticle):
"""Version of Physics.Particle with added rendering functions. """
def __init__(self, ID, position, name):
super(RenderingParticle,self).__init__(position=position, ID = ID )
self.radius = 20
self.labelText = name
font = pygame.font.Font(None, 24)
self.label = font.render(self.labelText, False, (0,0,0))
self.left = 0
self.top = 0
self.selected = False
def render(self, surface):
"""Rendering passes. A generator method that renders in multiple passes.
        Use yield to specify the pass at which the next stage of rendering
        should take place.
Example, that renders bonds 'behind' the blobs.
def render(self, surface):
yield 1
self.renderBonds(surface) # render bonds on pass 1
yield 5
self.renderSelf(surface) # render 'blob' on pass 5
If another particle type rendered, for example, on pass 3, then it
would be rendered on top of the bonds, but behind the blobs.
Use this mechanism to order rendering into layers.
"""
x = int(self.pos[0]) - self.left
y = int(self.pos[1]) - self.top
yield 1
for p in self.bondedTo:
pygame.draw.line(surface, (128,128,255), (x,y), (int(p.pos[0] -self.left),int(p.pos[1] - self.top)) )
yield 2
pygame.draw.circle(surface, (255,128,128), (x,y), self.radius)
if self.selected:
pygame.draw.circle(surface, (0,0,0), (x,y), self.radius, 2)
surface.blit(self.label, (x - self.label.get_width()/2, y - self.label.get_height()/2))
def setOffset( self, (left,top) ):
"""Inform of a change to the coords of the top left of the drawing surface,
so that this entity can render, as if the top left had moved
"""
self.left = left
self.top = top
def select( self ):
"""Tell this particle it is selected"""
self.selected = True
def deselect( self ):
"""Tell this particle it is selected"""
self.selected = False
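# Driver sketch for the multi-pass render() generators described above. Hypothetical:
# the real topology viewer supplies its own loop; only the yield protocol is assumed.
def render_all(particles, surface, max_passes=10):
    # Each render() generator yields the pass number at which its next drawing step runs.
    pending = []
    for particle in particles:
        gen = particle.render(surface)
        try:
            pending.append([next(gen), gen])
        except StopIteration:
            pass
    for current_pass in range(1, max_passes + 1):
        for entry in pending:
            if entry[0] == current_pass:
                try:
                    entry[0] = next(entry[1])      # draw this particle's layer for current_pass
                except StopIteration:
                    entry[0] = max_passes + 1      # generator finished; never matches again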
| 0
| 0
| 0
| 2,254
| 0
| 0
| 0
| 15
| 68
|
3ccdf549310d1c10291d371e3807c060ab2fe1c2
| 2,130
|
py
|
Python
|
bindings/python/benchmark.py
|
wangjia3015/marisa-trie
|
da2924831c1e8f90dae7223cfe7a2bc1bd8b5132
|
[
"BSD-2-Clause"
] | 388
|
2016-01-28T15:16:43.000Z
|
2022-03-28T08:18:07.000Z
|
bindings/python/benchmark.py
|
wangjia3015/marisa-trie
|
da2924831c1e8f90dae7223cfe7a2bc1bd8b5132
|
[
"BSD-2-Clause"
] | 38
|
2016-02-12T14:51:12.000Z
|
2022-02-12T09:10:25.000Z
|
bindings/python/benchmark.py
|
wangjia3015/marisa-trie
|
da2924831c1e8f90dae7223cfe7a2bc1bd8b5132
|
[
"BSD-2-Clause"
] | 79
|
2016-03-16T15:47:50.000Z
|
2022-03-15T22:21:08.000Z
|
import datetime
import marisa
import sys
time_begin = datetime.datetime.now()
keys = []
for line in sys.stdin:
keys.append(line.rstrip())
time_end = datetime.datetime.now()
print "input:", time_end - time_begin
time_begin = datetime.datetime.now()
dic = dict()
for i in range(len(keys)):
dic[keys[i]] = i
time_end = datetime.datetime.now()
print "dict_build:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
dic.get(key)
time_end = datetime.datetime.now()
print "dict_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
keyset = marisa.Keyset()
for key in keys:
keyset.push_back(key)
time_end = datetime.datetime.now()
print "keyset_build:", time_end - time_begin
time_begin = datetime.datetime.now()
trie = marisa.Trie()
trie.build(keyset)
time_end = datetime.datetime.now()
print "trie_build:", time_end - time_begin
time_begin = datetime.datetime.now()
agent = marisa.Agent()
for key in keys:
agent.set_query(key)
trie.lookup(agent)
agent.key_id()
time_end = datetime.datetime.now()
print "trie_agent_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
trie.lookup(key)
time_end = datetime.datetime.now()
print "trie_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
agent.set_query(i)
trie.reverse_lookup(agent)
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
trie.reverse_lookup(i)
time_end = datetime.datetime.now()
print "trie_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.common_prefix_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_common_prefix_search:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.predictive_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_predictive_search:", time_end - time_begin
| 25.97561
| 63
| 0.753052
|
import datetime
import marisa
import sys
time_begin = datetime.datetime.now()
keys = []
for line in sys.stdin:
keys.append(line.rstrip())
time_end = datetime.datetime.now()
print "input:", time_end - time_begin
time_begin = datetime.datetime.now()
dic = dict()
for i in range(len(keys)):
dic[keys[i]] = i
time_end = datetime.datetime.now()
print "dict_build:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
dic.get(key)
time_end = datetime.datetime.now()
print "dict_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
keyset = marisa.Keyset()
for key in keys:
keyset.push_back(key)
time_end = datetime.datetime.now()
print "keyset_build:", time_end - time_begin
time_begin = datetime.datetime.now()
trie = marisa.Trie()
trie.build(keyset)
time_end = datetime.datetime.now()
print "trie_build:", time_end - time_begin
time_begin = datetime.datetime.now()
agent = marisa.Agent()
for key in keys:
agent.set_query(key)
trie.lookup(agent)
agent.key_id()
time_end = datetime.datetime.now()
print "trie_agent_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
trie.lookup(key)
time_end = datetime.datetime.now()
print "trie_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
agent.set_query(i)
trie.reverse_lookup(agent)
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for i in range(len(keys)):
trie.reverse_lookup(i)
time_end = datetime.datetime.now()
print "trie_reverse_lookup:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.common_prefix_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_common_prefix_search:", time_end - time_begin
time_begin = datetime.datetime.now()
for key in keys:
agent.set_query(key)
while trie.predictive_search(agent):
agent.key_str()
time_end = datetime.datetime.now()
print "trie_agent_predictive_search:", time_end - time_begin
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7f9c95359486993b762a90f03b356fc2e537a3c5
| 5,868
|
py
|
Python
|
SPACE/random_concept_building.py
|
lkreiskoether/SPACE
|
ba7e697bd10c5881cd6a87f9f877664978436597
|
[
"Apache-2.0"
] | null | null | null |
SPACE/random_concept_building.py
|
lkreiskoether/SPACE
|
ba7e697bd10c5881cd6a87f9f877664978436597
|
[
"Apache-2.0"
] | null | null | null |
SPACE/random_concept_building.py
|
lkreiskoether/SPACE
|
ba7e697bd10c5881cd6a87f9f877664978436597
|
[
"Apache-2.0"
] | 1
|
2021-10-05T09:07:36.000Z
|
2021-10-05T09:07:36.000Z
|
"""
Copyright 2021 Lukas Kreisköther
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
| 54.333333
| 128
| 0.63514
|
"""
Copyright 2021 Lukas Kreisköther
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import PIL.Image
import numpy as np
import os
import random
class RandomConceptBuilder:
"""RandomConceptBuilder objects capsule the functionality for building random concept images necessary for using the
    TCAV framework in industrial use cases. For that, random crops from defined sets of images (e.g. from the good class
    when testing the bad class) with size crop_size are built. The random concept images are stored in folders
with name prefix 'random500_' so that they can be used for the TCAV framework.
"""
def __init__(self, path, folders_for_building, store_fmt, image_shape, crop_size, num_fold=30,
num_imgs_per_fold=100):
"""Initializes a RandomConceptBuilder object.
Args:
path (str): path which leads to the directory in which the folders are laying based upon which the random
concept images should be build (e.g. '/home/lukas/Documents/02_Data/FGUSS_subsets_grey/').
folders_for_building (list of str): list of strings for all folders in the directory from which the algorithm should
choose images to build the random concept images (e.g. ['good'] or ['one', 'two', 'three'])
image_shape (list of int): list with len=2 which defines the shape the produced images should have
(normally equals the input size of the model to investigate).
crop_size (list of int): list with len=3 defining the size of the random crops (e.g. [56, 56, 3]).
num_fold (int): number of folders of random concept images the algorithm should build.
num_imgs_per_fold (int): number of images per folder for the folders of random concept images.
store_fmt (str): store format of produced images.
"""
self.path = path
self.folders_for_building = folders_for_building
self.name_prefix = 'random500_'
self.store_fmt = store_fmt
self.image_shape = image_shape
self.crop_size = crop_size
self.num_fold = num_fold
self.num_imgs_per_fold = num_imgs_per_fold
if len(self.folders_for_building) == 1:
self.X_names = [str(self.folders_for_building[0] + '/' + name) for name in
os.listdir(self.path + self.folders_for_building[0])
if not os.path.isdir(self.path + self.folders_for_building[0] + '/' + name)]
else:
X_temp = []
for folder_name in self.folders_for_building:
X_temp = X_temp + ([str(folder_name + '/' + name) for name in os.listdir(self.path + folder_name)
if not os.path.isdir(self.path + self.folders_for_building[0] + '/' + name)])
self.X_names = X_temp
np.random.shuffle(self.X_names)
self.img_tensor = tf.placeholder(tf.float32, shape=(self.image_shape[0], self.image_shape[1], 3))
self.out = tf.image.random_crop(value=self.img_tensor, size=self.crop_size)
def build_random_concept_image(self, img):
"""Method for building the random concept image from an input image.
Args:
img (numpy.ndarray[float]): image to build a random concept image from.
Returns: PIL.Image: Random concept image as PIL.Image.
"""
img = np.array(img, dtype=np.float32)
with tf.Session():
i = self.out.eval(feed_dict={self.img_tensor: img})
i = np.tile(i, (int(img.shape[0] / i.shape[0]), int(img.shape[1] / i.shape[1]), 1))
img = np.pad(array=i, pad_width=((0, img.shape[0] % i.shape[0]), (0, img.shape[1] % i.shape[1]), (0, 0)),
mode='wrap')
return PIL.Image.fromarray(img.astype(np.uint8))
def build(self):
"""Method to call to start building the concept images. Function looks how many
images are already in the folders and fills the folders respectively.
"""
for i in range(self.num_fold):
sub_fold = self.name_prefix + str(i)
if not os.path.isdir(self.path + sub_fold):
try:
os.mkdir(self.path + sub_fold + '/')
except Exception as e:
print("Creation of the directory %s failed" % sub_fold)
print(e)
else:
print("Successfully created the directory %s " % sub_fold)
num_files = len([name for name in os.listdir(self.path + sub_fold) if
os.path.isfile(os.path.join(self.path + sub_fold, name))])
if not (num_files == self.num_imgs_per_fold):
for j in range(self.num_imgs_per_fold - num_files):
img = random.choice(self.X_names)
img = np.array(PIL.Image.open(tf.gfile.Open(self.path + '/' + img, 'rb')).convert('RGB'),
dtype=np.float32)
# todo: resize (right now, we don't do it since images have to be in right size for TCAV anyway)
img_ran = self.build_random_concept_image(img)
img_ran.save(self.path + sub_fold + '/' + str(num_files + j) + '.' + self.store_fmt)
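# Hypothetical invocation of the builder above; every path and size is a placeholder,
# and a TF1-style session environment is assumed, as in the class itself.
builder = RandomConceptBuilder(
    path='/data/tcav_concepts/',
    folders_for_building=['good'],
    store_fmt='png',
    image_shape=[224, 224],
    crop_size=[56, 56, 3],
    num_fold=2,
    num_imgs_per_fold=10)
builder.build()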
| 2
| 0
| 0
| 5,109
| 0
| 0
| 0
| -26
| 133
|
3178a9110e2900570b8a0543edc4ea7b69019a8e
| 13,724
|
py
|
Python
|
examples/model_compression/distill_lstm/data.py
|
wzzju/PaddleNLP
|
1757a4fc2a3cd5a45f75c6482746777752b414d8
|
[
"Apache-2.0"
] | 1
|
2021-07-13T02:21:15.000Z
|
2021-07-13T02:21:15.000Z
|
examples/model_compression/distill_lstm/data.py
|
wzzju/PaddleNLP
|
1757a4fc2a3cd5a45f75c6482746777752b414d8
|
[
"Apache-2.0"
] | null | null | null |
examples/model_compression/distill_lstm/data.py
|
wzzju/PaddleNLP
|
1757a4fc2a3cd5a45f75c6482746777752b414d8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import jieba
from paddlenlp.data import Stack, Tuple, Pad, Vocab
from paddlenlp.transformers import BertTokenizer
from paddlenlp.datasets import load_dataset
from utils import convert_example_for_lstm, convert_example_for_distill, convert_pair_example
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n").split("\t")[0]
vocab[token] = index
return vocab
def apply_data_augmentation(data,
task_name,
tokenizer,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 6),
whole_word_mask=False,
seed=0):
"""
Data Augmentation contains Masking and n-gram sampling. Tokenization and
Masking are performed at the same time, so that the masked token can be
    directly replaced by `mask_token`, after which sampling is performed.
"""
np.random.seed(seed)
new_data = []
for example in data:
if task_name == 'qqp':
data_list = tokenizer.tokenize(example['sentence1'])
data_list_2 = tokenizer.tokenize(example['sentence2'])
new_data.append({
"sentence1": data_list,
"sentence2": data_list_2,
"labels": example['labels']
})
else:
data_list = tokenizer.tokenize(example['sentence'])
new_data.append({
"sentence": data_list,
"labels": example['labels']
})
for example in data:
for _ in range(n_iter):
if task_name == 'qqp':
words = _data_augmentation(example['sentence1'], data_list)
words_2 = _data_augmentation(example['sentence2'], data_list_2)
new_data.append({
"sentence1": words,
"sentence2": words_2,
"labels": example['labels']
})
else:
words = _data_augmentation(example['sentence'], data_list)
new_data.append({
"sentence": words,
"labels": example['labels']
})
return new_data
def apply_data_augmentation_for_cn(data,
tokenizer,
vocab,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 10),
seed=0):
"""
    Because BERT and jieba have different `tokenize` functions, this returns
    jieba_tokenizer(example['text']), bert_tokenizer(example['text']) and
    example['label'] for each example in data.
    jieba tokenization and Masking are performed at the same time, so that the
    masked token can be directly replaced by `mask_token`, while the other tokens
    are tokenized by BERT's tokenizer; this yields the tokenized example for the
    student model and the teacher model at the same time.
"""
np.random.seed(seed)
new_data = []
for example in data:
text_tokenized = list(jieba.cut(example['text']))
lstm_tokens = text_tokenized
bert_tokens = tokenizer.tokenize(example['text'])
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
for _ in range(n_iter):
# 1. Masking
lstm_tokens, bert_tokens = [], []
for word in text_tokenized:
if np.random.rand() < p_mask:
lstm_tokens.append([vocab.unk_token])
bert_tokens.append([tokenizer.unk_token])
else:
lstm_tokens.append([word])
bert_tokens.append(tokenizer.tokenize(word))
# 2. N-gram sampling
lstm_tokens, bert_tokens = ngram_sampling(lstm_tokens, bert_tokens,
p_ng, ngram_range)
lstm_tokens, bert_tokens = flatten(lstm_tokens), flatten(
bert_tokens)
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
return new_data
def create_data_loader_for_small_model(task_name,
vocab_path,
model_name=None,
batch_size=64,
max_seq_length=128,
shuffle=True):
"""Data loader for bi-lstm, not bert."""
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
if task_name == 'chnsenticorp':
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
else:
vocab = BertTokenizer.from_pretrained(model_name)
pad_val = vocab.pad_token_id
trans_fn = partial(
convert_example_for_lstm,
task_name=task_name,
vocab=vocab,
max_seq_length=max_seq_length,
is_test=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=pad_val), # input_ids
Stack(dtype="int64"), # seq len
Stack(dtype="int64") # label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_distill_loader(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
n_iter=20,
whole_word_mask=False,
seed=0):
"""
Returns batch data for bert and small model.
Bert and small model have different input representations.
"""
tokenizer = BertTokenizer.from_pretrained(model_name)
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
data_aug_fn = partial(
apply_data_augmentation_for_cn,
tokenizer=tokenizer,
vocab=vocab,
n_iter=n_iter,
seed=seed)
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
vocab = tokenizer
pad_val = tokenizer.pad_token_id
data_aug_fn = partial(
apply_data_augmentation,
task_name=task_name,
tokenizer=tokenizer,
n_iter=n_iter,
whole_word_mask=whole_word_mask,
seed=seed)
train_ds = train_ds.map(data_aug_fn, batched=True)
print("Data augmentation has been applied.")
trans_fn = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab)
trans_fn_dev = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab,
is_tokenized=False)
if task_name == 'qqp':
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
else:
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn_dev, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_pair_loader_for_small_model(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
is_test=False):
"""Only support QQP now."""
tokenizer = BertTokenizer.from_pretrained(model_name)
train_ds, dev_ds = load_dataset('glue', task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
trans_func = partial(
convert_pair_example,
task_name=task_name,
vocab=tokenizer,
is_tokenized=False,
max_seq_length=max_seq_length,
is_test=is_test)
train_ds = train_ds.map(trans_func, lazy=True)
dev_ds = dev_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
| 36.403183
| 93
| 0.573302
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
from functools import partial
import numpy as np
import jieba
import paddle
from paddlenlp.data import Stack, Tuple, Pad, Vocab
from paddlenlp.transformers import BertTokenizer
from paddlenlp.datasets import load_dataset
from utils import convert_example_for_lstm, convert_example_for_distill, convert_pair_example
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n").split("\t")[0]
vocab[token] = index
return vocab
def ngram_sampling(words, words_2=None, p_ng=0.25, ngram_range=(2, 6)):
if np.random.rand() < p_ng:
ngram_len = np.random.randint(ngram_range[0], ngram_range[1] + 1)
ngram_len = min(ngram_len, len(words))
start = np.random.randint(0, len(words) - ngram_len + 1)
words = words[start:start + ngram_len]
if words_2:
words_2 = words_2[start:start + ngram_len]
return words if not words_2 else (words, words_2)
def flatten(list_of_list):
final_list = []
for each_list in list_of_list:
final_list += each_list
return final_list
def apply_data_augmentation(data,
task_name,
tokenizer,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 6),
whole_word_mask=False,
seed=0):
"""
Data Augmentation contains Masking and n-gram sampling. Tokenization and
Masking are performed at the same time, so that the masked token can be
    directly replaced by `mask_token`, after which sampling is performed.
"""
def _data_augmentation(data,
tokenized_list,
whole_word_mask=whole_word_mask):
# 1. Masking
words = []
if not whole_word_mask:
words = [
tokenizer.mask_token if np.random.rand() < p_mask else word
for word in tokenized_list
]
else:
for word in data.split():
words += [[tokenizer.mask_token]] if np.random.rand(
) < p_mask else [tokenizer.tokenize(word)]
# 2. N-gram sampling
words = ngram_sampling(words, p_ng=p_ng, ngram_range=ngram_range)
words = flatten(words) if isinstance(words[0], list) else words
return words
np.random.seed(seed)
new_data = []
for example in data:
if task_name == 'qqp':
data_list = tokenizer.tokenize(example['sentence1'])
data_list_2 = tokenizer.tokenize(example['sentence2'])
new_data.append({
"sentence1": data_list,
"sentence2": data_list_2,
"labels": example['labels']
})
else:
data_list = tokenizer.tokenize(example['sentence'])
new_data.append({
"sentence": data_list,
"labels": example['labels']
})
for example in data:
for _ in range(n_iter):
if task_name == 'qqp':
words = _data_augmentation(example['sentence1'], data_list)
words_2 = _data_augmentation(example['sentence2'], data_list_2)
new_data.append({
"sentence1": words,
"sentence2": words_2,
"labels": example['labels']
})
else:
words = _data_augmentation(example['sentence'], data_list)
new_data.append({
"sentence": words,
"labels": example['labels']
})
return new_data
def apply_data_augmentation_for_cn(data,
tokenizer,
vocab,
n_iter=20,
p_mask=0.1,
p_ng=0.25,
ngram_range=(2, 10),
seed=0):
"""
    Because BERT and jieba have different `tokenize` functions, this returns
    jieba_tokenizer(example['text']), bert_tokenizer(example['text']) and
    example['label'] for each example in data.
    jieba tokenization and Masking are performed at the same time, so that the
    masked token can be directly replaced by `mask_token`, while the other tokens
    are tokenized by BERT's tokenizer; this yields the tokenized example for the
    student model and the teacher model at the same time.
"""
np.random.seed(seed)
new_data = []
for example in data:
text_tokenized = list(jieba.cut(example['text']))
lstm_tokens = text_tokenized
bert_tokens = tokenizer.tokenize(example['text'])
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
for _ in range(n_iter):
# 1. Masking
lstm_tokens, bert_tokens = [], []
for word in text_tokenized:
if np.random.rand() < p_mask:
lstm_tokens.append([vocab.unk_token])
bert_tokens.append([tokenizer.unk_token])
else:
lstm_tokens.append([word])
bert_tokens.append(tokenizer.tokenize(word))
# 2. N-gram sampling
lstm_tokens, bert_tokens = ngram_sampling(lstm_tokens, bert_tokens,
p_ng, ngram_range)
lstm_tokens, bert_tokens = flatten(lstm_tokens), flatten(
bert_tokens)
new_data.append({
"lstm_tokens": lstm_tokens,
"bert_tokens": bert_tokens,
"label": example['label']
})
return new_data
def create_data_loader_for_small_model(task_name,
vocab_path,
model_name=None,
batch_size=64,
max_seq_length=128,
shuffle=True):
"""Data loader for bi-lstm, not bert."""
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
if task_name == 'chnsenticorp':
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
else:
vocab = BertTokenizer.from_pretrained(model_name)
pad_val = vocab.pad_token_id
trans_fn = partial(
convert_example_for_lstm,
task_name=task_name,
vocab=vocab,
max_seq_length=max_seq_length,
is_test=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=pad_val), # input_ids
Stack(dtype="int64"), # seq len
Stack(dtype="int64") # label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_distill_loader(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
n_iter=20,
whole_word_mask=False,
seed=0):
"""
Returns batch data for bert and small model.
Bert and small model have different input representations.
"""
tokenizer = BertTokenizer.from_pretrained(model_name)
if task_name == 'chnsenticorp':
train_ds, dev_ds = load_dataset(task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
pad_val = vocab['[PAD]']
data_aug_fn = partial(
apply_data_augmentation_for_cn,
tokenizer=tokenizer,
vocab=vocab,
n_iter=n_iter,
seed=seed)
else:
train_ds, dev_ds = load_dataset(
'glue', task_name, splits=["train", "dev"])
vocab = tokenizer
pad_val = tokenizer.pad_token_id
data_aug_fn = partial(
apply_data_augmentation,
task_name=task_name,
tokenizer=tokenizer,
n_iter=n_iter,
whole_word_mask=whole_word_mask,
seed=seed)
train_ds = train_ds.map(data_aug_fn, batched=True)
print("Data augmentation has been applied.")
trans_fn = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab)
trans_fn_dev = partial(
convert_example_for_distill,
task_name=task_name,
tokenizer=tokenizer,
label_list=train_ds.label_list,
max_seq_length=max_seq_length,
vocab=vocab,
is_tokenized=False)
if task_name == 'qqp':
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
else:
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # bert input
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # bert segment
Pad(axis=0, pad_val=pad_val), # small input_ids
Stack(dtype="int64"), # small seq len
Stack(dtype="int64") # small label
): fn(samples)
train_ds = train_ds.map(trans_fn, lazy=True)
dev_ds = dev_ds.map(trans_fn_dev, lazy=True)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_pair_loader_for_small_model(task_name,
model_name,
vocab_path,
batch_size=64,
max_seq_length=128,
shuffle=True,
is_test=False):
"""Only support QQP now."""
tokenizer = BertTokenizer.from_pretrained(model_name)
train_ds, dev_ds = load_dataset('glue', task_name, splits=["train", "dev"])
vocab = Vocab.load_vocabulary(
vocab_path,
unk_token='[UNK]',
pad_token='[PAD]',
bos_token=None,
eos_token=None, )
trans_func = partial(
convert_pair_example,
task_name=task_name,
vocab=tokenizer,
is_tokenized=False,
max_seq_length=max_seq_length,
is_test=is_test)
train_ds = train_ds.map(trans_func, lazy=True)
dev_ds = dev_ds.map(trans_func, lazy=True)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Pad(axis=0, pad_val=vocab['[PAD]']), # input
Stack(), # length
Stack(dtype="int64" if train_ds.label_list else "float32") # label
): fn(samples)
train_data_loader, dev_data_loader = create_dataloader(
train_ds, dev_ds, batch_size, batchify_fn, shuffle)
return train_data_loader, dev_data_loader
def create_dataloader(train_ds, dev_ds, batch_size, batchify_fn, shuffle=True):
train_batch_sampler = paddle.io.DistributedBatchSampler(
train_ds, batch_size=batch_size, shuffle=shuffle)
dev_batch_sampler = paddle.io.BatchSampler(
dev_ds, batch_size=batch_size, shuffle=False)
train_data_loader = paddle.io.DataLoader(
dataset=train_ds,
batch_sampler=train_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
dev_data_loader = paddle.io.DataLoader(
dataset=dev_ds,
batch_sampler=dev_batch_sampler,
collate_fn=batchify_fn,
num_workers=0,
return_list=True)
return train_data_loader, dev_data_loader
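# Hypothetical call into the loaders above; the task name, teacher model and vocab
# path are placeholders for whatever the distillation run actually uses.
train_loader, dev_loader = create_distill_loader(
    task_name='sst-2',
    model_name='bert-base-uncased',
    vocab_path='./senta_word_dict.txt',
    batch_size=64,
    max_seq_length=128,
    n_iter=20)
for batch in train_loader:
    pass  # feed each batch to the teacher (BERT) and student (BiLSTM) models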
| 0
| 0
| 0
| 0
| 0
| 1,987
| 0
| -20
| 142
|
b3cfc2a4680ba5fb688c8c605dffbd3378abcff5
| 472
|
py
|
Python
|
pametis/__init__.py
|
avnr/pametis
|
1037c7b50e5825770f2296761f3a0ad3cb37eae4
|
[
"MIT"
] | null | null | null |
pametis/__init__.py
|
avnr/pametis
|
1037c7b50e5825770f2296761f3a0ad3cb37eae4
|
[
"MIT"
] | null | null | null |
pametis/__init__.py
|
avnr/pametis
|
1037c7b50e5825770f2296761f3a0ad3cb37eae4
|
[
"MIT"
] | null | null | null |
__all__ = [
'OPT',
'configure',
'reset',
'sitemap',
'PametisException',
'AmbiguousOptions',
'BadParam',
'PametisCacheError',
'BadDomain',
'CantRemove',
'Pametis_cache',
'Sql_cache',
'postgres',
'sqlite',
'Pametis_spider',
'file_spider',
'sitemap_spider',
]
__version__ = "0.4"
__version_info__ = ( 0, 4, 0 )
| 19.666667
| 30
| 0.489407
|
from .pametis import *
__all__ = [
'OPT',
'configure',
'reset',
'sitemap',
'PametisException',
'AmbiguousOptions',
'BadParam',
'PametisCacheError',
'BadDomain',
'CantRemove',
'Pametis_cache',
'Sql_cache',
'postgres',
'sqlite',
'Pametis_spider',
'file_spider',
'sitemap_spider',
]
__version__ = "0.4"
__version_info__ = ( 0, 4, 0 )
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 22
|
727b56502133746fee15b7edcec9513b698ea9ac
| 513
|
py
|
Python
|
proxy/parser.py
|
GavinHan/sina_weibo_crawler
|
5fcbd4007fb8d2fad1aa3ad68b73aec6b7669b49
|
[
"BSD-2-Clause"
] | 1
|
2016-03-15T16:21:28.000Z
|
2016-03-15T16:21:28.000Z
|
proxy/parser.py
|
GavinHan/sina_weibo_crawler
|
5fcbd4007fb8d2fad1aa3ad68b73aec6b7669b49
|
[
"BSD-2-Clause"
] | null | null | null |
proxy/parser.py
|
GavinHan/sina_weibo_crawler
|
5fcbd4007fb8d2fad1aa3ad68b73aec6b7669b49
|
[
"BSD-2-Clause"
] | null | null | null |
#coding: utf-8
from pyquery import PyQuery as pq
page = '''
'''
doc = pq(page)
div = doc('div').find('.proxylistitem')
div.each(perser)
#print d('p') #<p>test 1</p><p>test 2</p>
#print d('p').html() #test 1
#print d('p').eq(1).html() #test 2
| 19.730769
| 65
| 0.608187
|
#coding: utf-8
import re
from pyquery import PyQuery as pq
from lxml import etree
page = '''
'''
def perser(i):
node = pq(this)
#import pdb; pdb.set_trace()
ip = node.find('.tbBottomLine:first').html().strip()
port = node.find('.tbBottomLine:first').next().html().strip()
    print('%s:%s' % (ip, port))
doc = pq(page)
div = doc('div').find('.proxylistitem')
div.each(perser)
#print d('p') # returns <p>test 1</p><p>test 2</p>
#print d('p').html() # returns test 1
#print d('p').eq(1).html() # returns test 2
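# Equivalent iteration that avoids pyquery's injected `this` (a sketch over the same
# placeholder page as above):
for item in doc('div').find('.proxylistitem').items():
    cells = item.find('.tbBottomLine')
    ip = cells.eq(0).text().strip()
    port = cells.eq(1).text().strip()
    print('%s:%s' % (ip, port))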
| 18
| 0
| 0
| 0
| 0
| 203
| 0
| -11
| 67
|
a1b30ecc1b479a04796b2d974aafc93c7541b6f8
| 2,964
|
py
|
Python
|
picoCTF-web/api/common.py
|
minhnq1618/picoCTF
|
f634f0e55be6b1a8552a33e4f94e7487142e8bce
|
[
"MIT"
] | 280
|
2016-03-23T05:16:07.000Z
|
2022-03-25T10:45:33.000Z
|
picoCTF-web/api/common.py
|
minhnq1618/picoCTF
|
f634f0e55be6b1a8552a33e4f94e7487142e8bce
|
[
"MIT"
] | 384
|
2016-03-22T05:14:47.000Z
|
2021-09-13T23:46:14.000Z
|
picoCTF-web/api/common.py
|
minhnq1618/picoCTF
|
f634f0e55be6b1a8552a33e4f94e7487142e8bce
|
[
"MIT"
] | 142
|
2016-03-15T16:27:21.000Z
|
2022-02-23T23:41:28.000Z
|
"""Classes and functions used by multiple modules in the system."""
import uuid
from hashlib import md5
import bcrypt
from voluptuous import Invalid, MultipleInvalid
def token():
"""
Generate a random but insecure token.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hash a string.
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our PicoException.
Args:
        callback_tuples: each callback_tuple should contain
            (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Try to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
PicoException with 400 status code and error msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise PicoException(msg, 400)
return value
return v
def validate(schema, data):
"""
Wrap the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
PicoException with 400 status code and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise PicoException(error.msg, 400)
def hash_password(password):
"""
Hash plaintext password.
Args:
password: plaintext password
Returns:
Secure hash of password.
"""
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))
| 23.903226
| 75
| 0.612686
|
"""Classes and functions used by multiple modules in the system."""
import uuid
from hashlib import md5
import bcrypt
from voluptuous import Invalid, MultipleInvalid
def token():
"""
Generate a random but insecure token.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hash a string.
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
class PicoException(Exception):
"""
General class for exceptions in the picoCTF API.
Allows specification of a message and response code to display to the
client, as well as an optional field for arbitrary data.
The 'data' field will not be displayed to clients but will be stored
in the database, making it ideal for storing stack traces, etc.
"""
def __init__(self, message, status_code=500, data=None):
"""Initialize a new PicoException."""
Exception.__init__(self)
self.message = message
self.status_code = status_code
self.data = data
def to_dict(self):
"""Convert a PicoException to a dict for serialization."""
rv = dict()
rv["message"] = self.message
return rv
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our PicoException.
Args:
        callback_tuples: each callback_tuple should contain
            (msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Try to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
PicoException with 400 status code and error msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise PicoException(msg, 400)
return value
return v
def validate(schema, data):
"""
Wrap the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
PicoException with 400 status code and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise PicoException(error.msg, 400)
def hash_password(password):
"""
Hash plaintext password.
Args:
password: plaintext password
Returns:
Secure hash of password.
"""
return bcrypt.hashpw(password.encode("utf-8"), bcrypt.gensalt(8))
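# Usage sketch for check() and validate() with a voluptuous Schema; the field name
# and length rule below are illustrative, not part of the picoCTF API.
from voluptuous import Required, Schema

user_schema = Schema({
    Required('username'): check(
        ('Usernames must be 3-20 characters.',
         [lambda name: 3 <= len(name) <= 20])),
})

validate(user_schema, {'username': 'alice'})   # passes silently
# validate(user_schema, {'username': 'ab'})    # would raise PicoException(..., 400)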
| 0
| 0
| 0
| 759
| 0
| 0
| 0
| 0
| 23
|
83823ad614f73f1c0d4cb2b4bbf506ba88b266f8
| 573
|
py
|
Python
|
src/active_learner.py
|
shenw33/ML_DLD
|
e83b5237a6f8dce6f9b347258f04b59345c59678
|
[
"BSD-3-Clause"
] | null | null | null |
src/active_learner.py
|
shenw33/ML_DLD
|
e83b5237a6f8dce6f9b347258f04b59345c59678
|
[
"BSD-3-Clause"
] | null | null | null |
src/active_learner.py
|
shenw33/ML_DLD
|
e83b5237a6f8dce6f9b347258f04b59345c59678
|
[
"BSD-3-Clause"
] | null | null | null |
from keras import layers, optimizers
if __name__ == "__main__":
learning_cycle = 0
for _ in range(learning_cycle):
mymodel = train()
multi_step_inference()
new_exp()
query_new_data()
| 20.464286
| 45
| 0.710297
|
import numpy as np
import matplotlib.pyplot as plt
import os
from keras import layers, optimizers
from keras.models import Model, Sequential
from keras.layers import Dense, LSTM, Dropout
from keras import optimizers, regularizers
from tensorflow import keras
from tensorflow.keras import layers
from train_model import *
def new_exp():
get_model()
load_data()
if __name__ == "__main__":
learning_cycle = 0
for _ in range(learning_cycle):
mymodel = train()
multi_step_inference()
new_exp()
query_new_data()
| 0
| 0
| 0
| 0
| 0
| 25
| 0
| 86
| 222
|
6bbbf7ab3429580d63cba356479214e356e08185
| 16
|
py
|
Python
|
shapelet_features/shapelets/__init__.py
|
ratschlab/circEWS
|
b2b1f00dac4f5d46856a2c7abe2ca4f12d4c612d
|
[
"MIT"
] | 34
|
2020-03-17T16:42:00.000Z
|
2022-03-29T15:53:24.000Z
|
shapelet_features/utils/__init__.py
|
ranxiao/circEWS
|
1e52880c268f8f763bbc16763131634ffc217153
|
[
"MIT"
] | 3
|
2020-07-30T22:37:10.000Z
|
2021-08-10T00:02:30.000Z
|
shapelet_features/utils/__init__.py
|
ranxiao/circEWS
|
1e52880c268f8f763bbc16763131634ffc217153
|
[
"MIT"
] | 14
|
2020-04-22T01:13:54.000Z
|
2021-11-27T20:23:41.000Z
|
# Do not REMOVE
| 8
| 15
| 0.6875
|
# Do not REMOVE
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f706c175ce9374eac08af908306915c436098b21
| 239
|
py
|
Python
|
gym_simpleflappy/__init__.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | null | null | null |
gym_simpleflappy/__init__.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | null | null | null |
gym_simpleflappy/__init__.py
|
jmathison/gym-simpleflappy
|
54acd54346f0ba4a611120a9ebba69acf0bae8b5
|
[
"MIT"
] | 1
|
2019-09-19T05:26:02.000Z
|
2019-09-19T05:26:02.000Z
|
from gym.envs.registration import register
register(
id='SimpleFlappy-v0',
entry_point='gym_simpleflappy.envs:FlappyEnv',
)
register(
id='SimpleFlappyDistance-v0',
entry_point='gym_simpleflappy.envs:FlappyEnvDistance',
)
| 19.916667
| 58
| 0.757322
|
from gym.envs.registration import register
register(
id='SimpleFlappy-v0',
entry_point='gym_simpleflappy.envs:FlappyEnv',
)
register(
id='SimpleFlappyDistance-v0',
entry_point='gym_simpleflappy.envs:FlappyEnvDistance',
)
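# Usage sketch once the package above is installed; importing it registers both
# environments with gym, after which the ids can be used with gym.make.
import gym
import gym_simpleflappy  # noqa: F401  (registration happens at import time)

env = gym.make('SimpleFlappy-v0')
observation = env.reset()
observation, reward, done, info = env.step(env.action_space.sample())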
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
96c6bc44c1be86be9f511ff65006a4d582768b84
| 10,154
|
py
|
Python
|
gym_bandits/scoreboard.py
|
ThomasLecat/gym-bandits-environments
|
adafed5952e00f1601e8a5294078cf7a2e83c836
|
[
"MIT"
] | 11
|
2018-06-10T18:20:26.000Z
|
2021-09-02T03:25:29.000Z
|
gym_bandits/scoreboard.py
|
ThomasLecat/gym-bandits-environments
|
adafed5952e00f1601e8a5294078cf7a2e83c836
|
[
"MIT"
] | null | null | null |
gym_bandits/scoreboard.py
|
ThomasLecat/gym-bandits-environments
|
adafed5952e00f1601e8a5294078cf7a2e83c836
|
[
"MIT"
] | 4
|
2019-05-07T17:41:26.000Z
|
2020-10-08T21:02:40.000Z
|
from gym.scoreboard.registration import add_task, add_group
add_group(
id='bandits',
name='Bandits',
description='Various N-Armed Bandit environments'
)
add_task(
id='BanditTwoArmedDeterministicFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Simplest bandit where one action always pays, and the other never does.",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [1, 0]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTwoArmedHighHighFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are likely",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.8, 0.9]
r_dist = [1, 1]
""",
background="Bandit B Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedLowLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are unlikley",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.1, 0.2]
r_dist = [1, 1]
""",
background="Bandit A Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedHighLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a large difference between which bandit pays out of two choices",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [0.8, 0.2]
r_dist = [1, 1]
""",
background=""
)
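# Sketch of the payout rule that the p_dist / r_dist descriptions in this file refer to.
# Illustrative only: the actual environments live in gym_bandits, and `pull` below is a
# hypothetical helper, not part of this scoreboard module.
import numpy as np

def pull(arm, p_dist, r_dist):
    # reward r_dist[arm] is paid with probability p_dist[arm], otherwise the reward is 0
    return r_dist[arm] if np.random.rand() < p_dist[arm] else 0

pull(0, p_dist=[0.8, 0.2], r_dist=[1, 1])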
add_task(
id='BanditTenArmedGaussian-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit mentioned with reward based on a Gaussian distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = [1] (* 10)
r_dist = [numpy.random.normal(0, 1), 1] (* 10)
Every bandit always pays out
Each action has a reward mean (selected from a normal distribution with mean 0 and std 1), and the actual
reward returns is selected with a std of 1 around the selected mean
""",
background="Described on page 30 of Sutton and Barto's [Reinforcement Learning: An Introduction](https://www.dropbox.com/s/b3psxv2r0ccmf80/book2015oct.pdf?dl=0)"
)
add_task(
id='BanditTenArmedRandomRandom-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to both payouts and rewards",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.random.uniform(size=10)
Bandits have uniform probability of paying out and payout a reward of uniform probability
""",
background=""
)
add_task(
id='BanditTenArmedRandomFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to how often the action will provide a reward",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.full(bandits, 1)
Bandits have a uniform probability of rewarding and always reward 1
""",
background=""
)
add_task(
id='BanditTenArmedUniformDistributedReward-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with that always pays out with a reward selected from a uniform distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = numpy.full(bandits, 1)
r_dist = numpy.random.uniform(size=10)
Bandits always pay out. Reward is selected from a uniform distribution
""",
background="Based on comparisons from http://sudeepraja.github.io/Bandits/"
)
add_task(
id='BanditTwoArmedIndependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Simple two independent armed bandit giving a reward of one with probabilities p_1 and p_2",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = np.random.uniform(size=2)
r_dist = [1, 1]
""",
background="For the first experience, called 'Bandit with independent arms' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0,1] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = np.random.uniform()
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentEasy-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.1,0.9] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = [0.1,0.9][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentMedium-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.25,0.75] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = [0.25,0.75][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentHard-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.4,0.6] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = [0.4,0.6][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditEleveArmedWithIndex-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="11 armed bandit with deterministic payouts. \
Nine 'non-target' arms return a reward of 1.1, \
one 'target' returns a reward of 5, \
the 11th arm has reward = 0.1 * index of the target arm (ranging from 0.1 to 1.0)",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
index = np.random.randint(0,10)
p_dist = np.full(11,1)
r_dist = np.full(11,1.1)
r_dist[index] = 5
r_dist[-1] = 0.1*index
BanditEnv.__init__(self, p_dist = p_dist, r_dist = r_dist)
""",
background="For the experience called 'Bandits with dependent arms (II)' of https://arxiv.org/abs/1611.05763"
| 38.755725
| 168
| 0.701792
|
from gym.scoreboard.registration import add_task, add_group
add_group(
id='bandits',
name='Bandits',
description='Various N-Armed Bandit environments'
)
add_task(
id='BanditTwoArmedDeterministicFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Simplest bandit where one action always pays, and the other never does.",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = [1, 0]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTwoArmedHighHighFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are likely",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = [0.8, 0.9]
r_dist = [1, 1]
""",
background="Bandit B Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedLowLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a small difference between which bandit pays where both are unlikley",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = [0.1, 0.2]
r_dist = [1, 1]
""",
background="Bandit A Figure 2.3 from Reinforcement Learning: An Introduction (Sutton & Barto) [link](https://webdocs.cs.ualberta.ca/~sutton/book/ebook/node18.html)"
)
add_task(
id='BanditTwoArmedHighLowFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="Stochastic version with a large difference between which bandit pays out of two choices",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = [0.8, 0.2]
r_dist = [1, 1]
""",
background=""
)
add_task(
id='BanditTenArmedGaussian-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit mentioned with reward based on a Gaussian distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = [1] (* 10)
r_dist = [numpy.random.normal(0, 1), 1] (* 10)
Every bandit always pays out
Each action has a reward mean (selected from a normal distribution with mean 0 and std 1), and the actual
reward returned is selected with a std of 1 around the selected mean
""",
background="Described on page 30 of Sutton and Barto's [Reinforcement Learning: An Introduction](https://www.dropbox.com/s/b3psxv2r0ccmf80/book2015oct.pdf?dl=0)"
)
add_task(
id='BanditTenArmedRandomRandom-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to both payouts and rewards",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.random.uniform(size=10)
Bandits have a uniform probability of paying out and pay out a reward drawn from a uniform distribution
""",
background=""
)
add_task(
id='BanditTenArmedRandomFixed-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with random probabilities assigned to how often the action will provide a reward",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = numpy.random.uniform(size=10)
r_dist = numpy.full(bandits, 1)
Bandits have a uniform probability of rewarding and always reward 1
""",
background=""
)
add_task(
id='BanditTenArmedUniformDistributedReward-v0',
group='bandits',
experimental=True,
contributor='jkcooper2',
summary="10 armed bandit with that always pays out with a reward selected from a uniform distribution",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = numpy.full(bandits, 1)
r_dist = numpy.random.uniform(size=10)
Bandits always pay out. Reward is selected from a uniform distribution
""",
background="Based on comparisons from http://sudeepraja.github.io/Bandits/"
)
add_task(
id='BanditTwoArmedIndependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Simple two independent armed bandit giving a reward of one with probabilities p_1 and p_2",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p_dist = np.random.uniform(size=2)
r_dist = [1, 1]
""",
background="For the first experience, called 'Bandit with independent arms' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentUniform-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0,1] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = np.random.uniform()
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentEasy-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.1,0.9] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = [0.1,0.9][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentMedium-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.25,0.75] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = [0.25,0.75][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditTwoArmedDependentHard-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="Two armed bandit giving a reward of one with probabilities p_1 ~ U[0.4,0.6] and p_2 = 1 - p_1",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
p = [0.4,0.6][np.random.randint(0,2)]
p_dist = [p, 1-p]
r_dist = [1, 1]
""",
background="For the experience called 'Bandits with dependent arms (I)' of https://arxiv.org/abs/1611.05763"
add_task(
id='BanditEleveArmedWithIndex-v0',
group='bandits',
experimental=True,
contributor='Thomas_Lecat',
summary="11 armed bandit with deterministic payouts. \
Nine 'non-target' arms return a reward of 1.1, \
one 'target' returns a reward of 5, \
the 11th arm has reward = 0.1 * index of the target arm (ranging from 0.1 to 1.0)",
description="""
Each bandit takes in a probability distribution, which is the likelihood of the action paying out,
and a reward distribution, which is the value or distribution of what the agent will be rewarded
if the bandit does payout.
index = np.random.randint(0,10)
p_dist = np.full(11,1)
r_dist = np.full(11,1.1)
r_dist[index] = 5
r_dist[-1] = 0.1*index
BanditEnv.__init__(self, p_dist = p_dist, r_dist = r_dist)
""",
background="For the experience called 'Bandits with dependent arms (II)' of https://arxiv.org/abs/1611.05763"
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
dbe5e2a057d822c9b7627e69b232caa21bca193c
| 5,368
|
py
|
Python
|
books/forms.py
|
burhan/hellowebbooks-website
|
96ca56f6d32716e5ce694664c760aa6a7bfce419
|
[
"MIT"
] | null | null | null |
books/forms.py
|
burhan/hellowebbooks-website
|
96ca56f6d32716e5ce694664c760aa6a7bfce419
|
[
"MIT"
] | null | null | null |
books/forms.py
|
burhan/hellowebbooks-website
|
96ca56f6d32716e5ce694664c760aa6a7bfce419
|
[
"MIT"
] | null | null | null |
# TODO: Might be good to update this later to update the username too so we aren't doing two database saves
PRODUCTS = [
('ebook', 'eBook Only'),
('paperback', 'Paperback'),
('video', 'Video'),
]
| 47.087719
| 126
| 0.698398
|
from django import forms
from django.contrib.auth import forms as auth_forms
from django.contrib.auth.models import User
from django.core.exceptions import NON_FIELD_ERRORS  # needed by CardForm.addError below
from books.widgets import NoNameTextInput
# TODO: Might be good to update this later to update the username too so we aren't doing two database saves
class EditEmailForm(forms.ModelForm):
class Meta:
model = User
fields = ('email',)
def __init__(self, *args, **kwargs):
super(EditEmailForm, self).__init__(*args, **kwargs)
self.fields['email'].label = "Update your email address"
class AddEmailForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
super(AddEmailForm, self).__init__(*args, **kwargs)
self.fields['email'].widget.attrs['class'] = 'form-control'
self.fields['email'].widget.attrs['placeholder'] = '[email protected]'
self.fields['password'].widget.attrs['class'] = 'form-control'
def clean_email(self):
email = self.data['email']
if "@" not in email:
raise forms.ValidationError("Please enter a valid email address.")
return email
class CardForm(forms.Form):
last_4_digits = forms.CharField(required=True, min_length=4, max_length=4, widget=forms.HiddenInput())
stripe_token = forms.CharField(required=True, widget=forms.HiddenInput())
coupon = forms.CharField(required=False, widget=forms.HiddenInput())
def addError(self, message):
self._errors[NON_FIELD_ERRORS] = self.error_class([message])
class StripePaymentForm(CardForm):
card_number = forms.CharField(required=False, max_length=20, widget=NoNameTextInput())
card_cvc = forms.CharField(required=False, max_length=4, widget=NoNameTextInput())
card_expiry_month = forms.CharField(required=False, max_length=2, widget=NoNameTextInput())
card_expiry_year = forms.CharField(required=False, max_length=4, widget=NoNameTextInput())
#card_address_zip = forms.CharField(required=False, max_length=10, widget=NoNameTextInput(attrs={'style':'width:100px'}))
coupon_code = forms.CharField(required=False, max_length=20, widget=NoNameTextInput())
def __init__(self, *args, **kwargs):
super(StripePaymentForm, self).__init__(*args, **kwargs)
self.fields['card_number'].label = "Credit card number:"
self.fields['card_number'].widget.attrs['autocompletetype'] = 'cc-number'
self.fields['card_number'].widget.attrs['class'] = 'form-control card-number'
self.fields['card_cvc'].label = "Credit card CVC:"
self.fields['card_cvc'].widget.attrs['autocomplete'] = 'off'
self.fields['card_cvc'].widget.attrs['autocompletetype'] = 'cc-csc'
self.fields['card_cvc'].widget.attrs['pattern'] = '\d*'
self.fields['card_cvc'].widget.attrs['class'] = 'form-control card-cvc'
self.fields['card_cvc'].widget.attrs['style'] = 'display:inline-block;width:80px'
self.fields['card_expiry_month'].widget.attrs['placeholder'] = 'MM'
self.fields['card_expiry_month'].widget.attrs['pattern'] = '\d*'
self.fields['card_expiry_month'].widget.attrs['class'] = 'form-control card-expiry-month'
self.fields['card_expiry_month'].widget.attrs['style'] = 'display:inline-block;width:63px'
self.fields['card_expiry_year'].widget.attrs['placeholder'] = 'YYYY'
self.fields['card_expiry_year'].widget.attrs['pattern'] = '\d*'
self.fields['card_expiry_year'].widget.attrs['class'] = 'form-control card-expiry-year'
self.fields['card_expiry_year'].widget.attrs['style'] = 'display:inline-block;width:76px'
self.fields['coupon_code'].label = "Coupon code (optional):"
self.fields['coupon_code'].widget.attrs['class'] = 'form-control coupon-code'
def clean_card_number(self):
card_number = self.cleaned_data['card_number'].replace("-","").replace(" ","")
return card_number
class MyAuthenticationForm(auth_forms.AuthenticationForm):
def __init__(self, request=None, *args, **kwargs):
super(MyAuthenticationForm, self).__init__(*args, **kwargs)
self.fields['username'].label = "Email"
def clean_username(self):
# use the email address to get the username for the account
email = self.cleaned_data.get('username')
username = email.replace("@", "").replace(".", "")
return username
PRODUCTS = [
('ebook', 'eBook Only'),
('paperback', 'Paperback'),
('video', 'Video'),
]
class AdminAddCustomerForm(forms.Form):
email = forms.EmailField()
hello_web_app = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
hello_web_design = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
class AdminAddCustomerBulkForm(forms.Form):
emails = forms.CharField(widget=forms.Textarea)
hello_web_app = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
hello_web_design = forms.MultipleChoiceField(choices=PRODUCTS, widget=forms.CheckboxSelectMultiple(), required=False)
def __init__(self, *args, **kwargs):
super(AdminAddCustomerBulkForm, self).__init__(*args, **kwargs)
self.fields['emails'].label = "Comma delimited emails"
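One convention ties several of these forms together: the stored Django username is simply the email address with '@' and '.' stripped, which is why MyAuthenticationForm.clean_username can log a user in from the email field alone. A tiny, dependency-free restatement of that mapping (the sample address is made up):

def email_to_username(email):
    # Mirrors MyAuthenticationForm.clean_username above.
    return email.replace("@", "").replace(".", "")

assert email_to_username("[email protected]") == "janeexamplecom"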
| 0
| 0
| 0
| 4,827
| 0
| 0
| 0
| 75
| 250
|
9cfd1b79359a086dbd7fc0769ab8bcefa649fbcf
| 5,944
|
py
|
Python
|
test_discretization/test_reduction_diff_by_class.py
|
wsgan001/AnomalyDetection
|
397673dc6ce978361a3fc6f2fd34879f69bc962a
|
[
"MIT"
] | null | null | null |
test_discretization/test_reduction_diff_by_class.py
|
wsgan001/AnomalyDetection
|
397673dc6ce978361a3fc6f2fd34879f69bc962a
|
[
"MIT"
] | null | null | null |
test_discretization/test_reduction_diff_by_class.py
|
wsgan001/AnomalyDetection
|
397673dc6ce978361a3fc6f2fd34879f69bc962a
|
[
"MIT"
] | 1
|
2020-03-16T21:50:52.000Z
|
2020-03-16T21:50:52.000Z
|
# -*- coding: utf-8 -*-
"""
It generates plots that show the similarity of anomalies in each dataset.
"""
import matplotlib
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
if __name__ == '__main__':
import time
start = time.time()
df_training_20, df_training_full, gmms_training_20, gmms_training_full = preprocessing.get_preprocessed_training_data()
df_test_plus, df_test_21, gmms_test_plus, gmms_test_21 = preprocessing.get_preprocessed_test_data()
generate_plots_for_df(df_training_20, gmms_training_20, "training20")
generate_plots_for_df(df_training_full, gmms_training_full, "trainingfull")
generate_plots_for_df(df_test_plus, gmms_test_plus, "testplus")
generate_plots_for_df(df_test_21, gmms_test_21, "test21")
| 33.965714
| 126
| 0.621803
|
# -*- coding: utf-8 -*-
"""
It generates plots that show the similarity of anomalies in each dataset.
"""
import copy
import math
import numpy as np
import matplotlib
import matplotlib.mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
def get_score(gmm, value):
minval = 1e+20
minidx = -1
# one of distribution
# or density of distributions
for mi, _ in enumerate(gmm.means_):
det = abs(mi - value)
m1 = gmm.means_[mi]
c1 = gmm.covars_[mi]
if minval > det :
minval = matplotlib.mlab.normpdf(value,m1,np.sqrt(c1))[0][0]
minval = minval*len(gmm.means_)
sums = 0
for mi, _ in enumerate(gmm.means_) :
m1 = gmm.means_[mi]
c1 = gmm.covars_[mi]
w1 = gmm.weights_[mi]
ys = matplotlib.mlab.normpdf(value,m1,np.sqrt(c1))[0]*w1
sums = sums + ys[0]
# if sums > minval :
# print "=== sums ==="
# else :
# print "=== minval ==="
# print minval
# print sums
score = max(sums, minval)
if score == 0:
score = 1e-20
# print "score : " + str(score)
score = math.log(score)
return score
def generate_plots(df_abnormal, df_normal, headers, gmms, title, path="", protcls_name=""):
proj = []
gmm_normals = gmms[0]
gmm_abnormals = gmms[1]
fig, ax = plt.subplots()
plt.subplot(2, 1, 1)
plt.title("normal scores")
plt.subplot(2, 1, 2)
plt.title("abnormal scores")
for di, d in df_normal.iterrows() :
# print str(di) + "/" + str(len(df_normal))
normal_score = 0
abnormal_score = 0
normal_scores = []
abnormal_scores = []
for hi, header in enumerate(headers) :
if header in ["protocol_type", "attack", "difficulty"] :
continue
val = d[header]
gmm_normal = gmm_normals[hi]
gmm_abnormal = gmm_abnormals[hi]
score = get_score(gmm_normal,val)
normal_scores.append(score)
score = get_score(gmm_abnormal,val)
abnormal_scores.append(score)
xs = range(len(headers))
plt.subplot(2, 1, 1)
plt.plot(xs,normal_scores,color='y', lw=3)
plt.subplot(2, 1, 2)
plt.plot(xs,abnormal_scores,color='y', lw=3)
for di, d in df_abnormal.iterrows() :
print str(di) + "/" + str(len(df_abnormal))
normal_score = 0
abnormal_score = 0
normal_scores = []
abnormal_scores = []
for hi, header in enumerate(headers) :
if header in ["protocol_type", "attack", "difficulty"] :
continue
val = d[header]
gmm_normal = gmm_normals[hi]
gmm_abnormal = gmm_abnormals[hi]
score = get_score(gmm_normal,val)
normal_scores.append(score)
score = get_score(gmm_abnormal,val)
abnormal_scores.append(score)
xs = range(len(headers))
plt.subplot(2, 1, 1)
plt.plot(xs,normal_scores,color='b', lw=1)
plt.subplot(2, 1, 2)
plt.plot(xs,abnormal_scores,color='b', lw=1)
# save and close
filename = "./plots/" + path + "/" + title + "_" + protcls_name + "_" + path + ".png"
print filename
fig.savefig(filename)
plt.close()
def generate_plots_for_df(df, gmms, path="") :
headers, _ = preprocessing.get_header_data()
headers.remove('protocol_type')
headers.remove('attack')
headers.remove('difficulty')
# plot for classes
protocol_types = model.protocol_types #["udp","tcp","icmp"]
for protocol_index, protocol_type in enumerate(protocol_types):
gmm_normals = gmms[0][protocol_index]
gmm_abnormals = gmms[1][protocol_index]
# normal data
df_normal = copy.deepcopy(df)
df_normal = df_normal[(df_normal["attack"] == 11)] # only select for 1 class
df_normal = df_normal[(df_normal["protocol_type"] == protocol_index)]
df_normal.drop('attack',1,inplace=True) # remove useless
df_normal.drop('difficulty',1,inplace=True) # remove useless
df_normal.drop('protocol_type',1,inplace=True)
df_normal.reset_index(drop=True)
df_normal = df_normal[0:10]
# abnormal data
for i, attack_type in enumerate(model.attack_types) :
if i == 11 :
continue
df_abnormal = copy.deepcopy(df)
df_abnormal = df_abnormal[(df_abnormal["attack"] == i)] # only select for 1 class
df_abnormal = df_abnormal[(df_abnormal["protocol_type"] == protocol_index)]
if 1 > len(df_abnormal) :
continue
df_abnormal.drop('attack',1,inplace=True) # remove useless
df_abnormal.drop('difficulty',1,inplace=True) # remove useless
df_abnormal.drop('protocol_type',1,inplace=True)
df_abnormal.reset_index(drop=True)
df_abnormal = df_abnormal[0:10]
gmm_normals_protcl = gmms[0][protocol_index]
gmm_abnormals_protcl = gmms[1][protocol_index]
gmms_protcl = [gmm_normals_protcl, gmm_abnormals_protcl]
generate_plots(df_abnormal, df_normal, headers, gmms_protcl, attack_type, path=path, protcls_name = protocol_type)
if __name__ == '__main__':
import time
start = time.time()
df_training_20, df_training_full, gmms_training_20, gmms_training_full = preprocessing.get_preprocessed_training_data()
df_test_plus, df_test_21, gmms_test_plus, gmms_test_21 = preprocessing.get_preprocessed_test_data()
generate_plots_for_df(df_training_20, gmms_training_20, "training20")
generate_plots_for_df(df_training_full, gmms_training_full, "trainingfull")
generate_plots_for_df(df_test_plus, gmms_test_plus, "testplus")
generate_plots_for_df(df_test_21, gmms_test_21, "test21")
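get_score above boils down to: evaluate the feature value under the fitted Gaussian mixture, compare the weighted mixture density against the density of the nearest single component scaled by the number of components, and return the log of the larger of the two. The sketch below reproduces that idea with current scikit-learn/scipy APIs (the original relies on the long-removed sklearn GMM class and matplotlib.mlab.normpdf); treat it as an illustration, not a drop-in replacement.

import numpy as np
from scipy.stats import norm
from sklearn.mixture import GaussianMixture

values = np.concatenate([np.random.normal(0, 1, 500), np.random.normal(5, 1, 500)])
gmm = GaussianMixture(n_components=2).fit(values.reshape(-1, 1))

x = 4.8
mixture_density = np.exp(gmm.score_samples([[x]]))[0]            # weighted sum over components
component_density = max(norm.pdf(x, m[0], np.sqrt(c[0][0]))      # best single component
                        for m, c in zip(gmm.means_, gmm.covariances_))
print(np.log(max(mixture_density, component_density * gmm.n_components)))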
| 0
| 0
| 0
| 0
| 0
| 4,959
| 0
| -2
| 202
|
84d6e4d646bae0fc1e7b329a0d2484ed91b465ac
| 6,596
|
py
|
Python
|
app/main.py
|
Ackaman/starter-snake-python
|
450d24c72b9f3af6bffaef2369913bd4d827acf2
|
[
"MIT"
] | null | null | null |
app/main.py
|
Ackaman/starter-snake-python
|
450d24c72b9f3af6bffaef2369913bd4d827acf2
|
[
"MIT"
] | null | null | null |
app/main.py
|
Ackaman/starter-snake-python
|
450d24c72b9f3af6bffaef2369913bd4d827acf2
|
[
"MIT"
] | null | null | null |
import os
import bottle
# Moving towards a tail is safe as long as that snake does not have food within reach.
# If it is the only possible move, that move should be made anyway
# int x,y or tuple (NEXT STEP)
##Only looks for dead end
##def snake_head_area(snake_heads, my_head):
## avoid_heads = []
## snake_heads1 = snake_heads
## snake_heads1.remove(my_head)
##
## for heads in snake_heads1:
## avoid_heads.append((heads[0]+1, heads[1]))
## avoid_heads.append((heads[0] - 1, heads[1]))
## avoid_heads.append((heads[0], heads[1] + 1))
## avoid_heads.append((heads[0], heads[1] - 1))
##
## return avoid_heads
# def safetyLevel(x,y, stuffToAvoid):
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
| 27.831224
| 111
| 0.626289
|
import json
import os
import random
import bottle
from api import ping_response, start_response, move_response, end_response
# Moving towards a tail is safe as long as that snake does not have food within reach.
# If it is the only possible move, that move should be made anyway
@bottle.route('/')
def index():
return '''
Battlesnake documentation can be found at
<a href="https://docs.battlesnake.io">https://docs.battlesnake.io</a>.
'''
@bottle.route('/static/<path:path>')
def static(path):
"""
Given a path, return the static file located relative
to the static folder.
This can be used to return the snake head URL in an API response.
"""
return bottle.static_file(path, root='static/')
@bottle.post('/ping')
def ping():
"""
A keep-alive endpoint used to prevent cloud application platforms,
such as Heroku, from sleeping the application instance.
"""
return ping_response()
@bottle.post('/start')
def start():
data = bottle.request.json
"""
TODO: If you intend to have a stateful snake AI,
initialize your snake state here using the
request's data if necessary.
"""
print(json.dumps(data))
color = "#00FF00"
return start_response(color)
@bottle.post('/move')
def move():
data = bottle.request.json
foodposition = []
for food in data['food']['data']:
foodposition.append((food['x'], food['y']))
my_head = (data['you']['body']['data'][0]['x'], data['you']['body']['data'][0]['y'])
my_length = len((data['you']['body']['data']))
snakePositions = []
myPositions = []
for pos in data['you']['body']['data']:
myPositions.append((pos['x'], pos['y']))
snake_heads = []
for snakes in data['snakes']['data']: ## all snakes
x = snakes['body']['data'][0]['x']
y = snakes['body']['data'][0]['y']
snake_heads.append((x, y))
for pos in snakes['body']['data']: ## all of this snake's positions
snakePositions.append((pos['x'], pos['y']))
snake_heads.remove(my_head)
snake_head_area = []
for snake_head in snake_heads:
snake_head_area.append((snake_head[0]-1, snake_head[1]))
snake_head_area.append((snake_head[0]+1, snake_head[1]))
snake_head_area.append((snake_head[0], snake_head[1]+1))
snake_head_area.append((snake_head[0], snake_head[1]-1))
walls = []
width = data['height']
for i in range(width + 1):
walls.append((0 - 1, i))
walls.append((i, 0 - 1))
walls.append((width, i))
walls.append((i, width))
stuffToAvoid = []
for position in myPositions:
stuffToAvoid.append(position)
for position in walls:
stuffToAvoid.append(position)
for position in snakePositions:
stuffToAvoid.append(position)
xhead = my_head[0]
yhead = my_head[1]
possiblemoves = []
if (xhead + 1, yhead) not in stuffToAvoid and safe_path(xhead + 1, yhead, stuffToAvoid):
possiblemoves.append('right')
if (xhead, yhead + 1) not in stuffToAvoid and safe_path(xhead, yhead + 1, stuffToAvoid):
possiblemoves.append('down')
if (xhead - 1, yhead) not in stuffToAvoid and safe_path(xhead - 1, yhead, stuffToAvoid):
possiblemoves.append('left')
if (xhead, yhead - 1) not in stuffToAvoid and safe_path(xhead, yhead - 1, stuffToAvoid):
possiblemoves.append('up')
##Find closest food
currentDist = 1000000
for i in foodposition:
xfood = i[0]
yfood = i[1]
dist = ((abs(xhead - xfood)) + (abs(yhead - yfood)))
if (dist < currentDist):
closestFoodPos = (xfood, yfood)
currentDist = dist
xdistancetofood = abs(xhead - closestFoodPos[0])
ydistancetofood = abs(yhead - closestFoodPos[1])
# foodtotheright = ((xhead - closestFoodPos[0]) < 0)
# foodtothetop = ((yhead - closestFoodPos[1]) > 0)
prioritymoves = []
if (xdistancetofood >= ydistancetofood) and ((xhead - closestFoodPos[0]) < 0) and 'right' in possiblemoves:
prioritymoves.append('right')
if (xdistancetofood >= ydistancetofood) and ((xhead - closestFoodPos[0]) > 0) and 'left' in possiblemoves:
prioritymoves.append('left')
if (ydistancetofood >= xdistancetofood) and ((yhead - closestFoodPos[1]) > 0) and 'up' in possiblemoves:
prioritymoves.append('up')
if (ydistancetofood >= xdistancetofood) and ((yhead - closestFoodPos[1]) < 0) and 'down' in possiblemoves:
prioritymoves.append('down')
if (xhead + 1, yhead) in snake_head_area and 'right' in prioritymoves:
prioritymoves.remove('right')
# prioritymoves.append('right')
if (xhead - 1, yhead) in snake_head_area and 'left' in prioritymoves:
prioritymoves.remove('left')
# prioritymoves.append('left')
if (xhead, yhead + 1) in snake_head_area and 'down' in prioritymoves:
prioritymoves.remove('down')
# prioritymoves.append('down')
if (xhead, yhead - 1) in snake_head_area and 'up' in prioritymoves:
prioritymoves.remove('up')
# prioritymoves.append('up')
prioritymoves.append(random.choice(possiblemoves))
direction = prioritymoves[0]
return move_response(direction)
# int x,y or tuple (NEXT STEP)
##Only looks for dead end
def safe_path(x, y, stuffToAvoid):
right = (x + 1, y)
left = (x - 1, y)
down = (x, y + 1)
up = (x, y - 1)
if right in stuffToAvoid and left in stuffToAvoid and down in stuffToAvoid and up in stuffToAvoid:
safe = False
else:
safe = True
return safe
##def snake_head_area(snake_heads, my_head):
## avoid_heads = []
## snake_heads1 = snake_heads
## snake_heads1.remove(my_head)
##
## for heads in snake_heads1:
## avoid_heads.append((heads[0]+1, heads[1]))
## avoid_heads.append((heads[0] - 1, heads[1]))
## avoid_heads.append((heads[0], heads[1] + 1))
## avoid_heads.append((heads[0], heads[1] - 1))
##
## return avoid_heads
# def safetyLevel(x,y, stuffToAvoid):
@bottle.post('/end')
def end():
data = bottle.request.json
"""
TODO: If your snake AI was stateful,
clean up any stateful objects here.
"""
print(json.dumps(data))
return end_response()
# Expose WSGI app (so gunicorn can find it)
application = bottle.default_app()
if __name__ == '__main__':
bottle.run(
application,
host=os.getenv('IP', '0.0.0.0'),
port=os.getenv('PORT', '8080'),
debug=os.getenv('DEBUG', True)
)
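safe_path above only guards against immediate dead ends: a candidate square is rejected when all four of its neighbours are already occupied. A self-contained restatement with a made-up board position, just to show the behaviour:

def safe_path(x, y, stuff_to_avoid):
    # Same rule as safe_path in the file above: unsafe only if every neighbour is blocked.
    neighbours = [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]
    return any(n not in stuff_to_avoid for n in neighbours)

blocked = [(-1, 0), (0, -1), (1, 0), (0, 1)]   # hypothetical walls plus snake body around (0, 0)
print(safe_path(0, 0, blocked))  # False: (0, 0) is boxed in
print(safe_path(2, 2, blocked))  # True: open squares remain next to (2, 2)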
| 0
| 5,085
| 0
| 0
| 0
| 272
| 0
| 35
| 227
|
83b39d103baf95f5e28b9da6371e0b29b75f4428
| 17,696
|
py
|
Python
|
pygsti/report/report.py
|
drewrisinger/pyGSTi
|
dd4ad669931c7f75e026456470cf33ac5b682d0d
|
[
"Apache-2.0"
] | 1
|
2021-12-19T15:11:09.000Z
|
2021-12-19T15:11:09.000Z
|
pygsti/report/report.py
|
drewrisinger/pyGSTi
|
dd4ad669931c7f75e026456470cf33ac5b682d0d
|
[
"Apache-2.0"
] | null | null | null |
pygsti/report/report.py
|
drewrisinger/pyGSTi
|
dd4ad669931c7f75e026456470cf33ac5b682d0d
|
[
"Apache-2.0"
] | null | null | null |
""" Internal model of a report during generation """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
from .. import _version, tools as _tools
# TODO this whole thing needs to be rewritten with different reports as derived classes
| 42.640964
| 112
| 0.61415
|
""" Internal model of a report during generation """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import time as _time
import warnings as _warnings
from pathlib import Path as _Path
import shutil as _shutil
from collections import defaultdict as _defaultdict, OrderedDict as _OrderedDict
import pickle as _pickle
from . import autotitle as _autotitle
from . import merge_helpers as _merge
from .. import _version, tools as _tools
from ..objects import VerbosityPrinter as _VerbosityPrinter, ExplicitOpModel as _ExplicitOpModel
from . import workspace as _ws
from .notebook import Notebook as _Notebook
# TODO this whole thing needs to be rewritten with different reports as derived classes
class Report:
""" The internal model of a report.
This class should never be instantiated directly. Instead, users
should use the appropriate factory method in
`pygsti.report.factory`.
"""
def __init__(self, templates, results, sections, flags,
global_qtys, report_params, build_defaults=None,
pdf_available=True, workspace=None):
self._templates = templates
self._results = results
self._sections = sections
self._flags = flags
self._global_qtys = global_qtys
self._report_params = report_params
self._workspace = workspace or _ws.Workspace()
self._build_defaults = build_defaults or {}
self._pdf_available = pdf_available
def _build(self, build_options=None):
""" Render all sections to a map of report elements for templating """
full_params = {
'results': self._results,
**self._report_params
}
full_params.update(self._build_defaults)
full_params.update(build_options or {})
qtys = self._global_qtys.copy()
for section in self._sections:
qtys.update(section.render(self._workspace, **full_params))
return qtys
def write_html(self, path, auto_open=False, link_to=None,
connected=False, build_options=None, brevity=0,
precision=None, resizable=True, autosize='initial',
single_file=False, verbosity=0):
""" Write this report to the disk as a collection of HTML documents.
Parameters
----------
path : str or path-like object
The filesystem path of a directory to write the report
to. If the specified directory does not exist, it will be
created automatically
auto_open : bool, optional
Whether the output file should be automatically opened in a web browser.
link_to : list, optional
If not None, a list of one or more items from the set
{"tex", "pdf", "pkl"} indicating whether or not to
create and include links to Latex, PDF, and Python pickle
files, respectively.
connected : bool, optional
Whether output HTML should assume an active internet connection. If
True, then the resulting HTML file size will be reduced because it
will link to web resources (e.g. CDN libraries) instead of embedding
them.
build_options : dict
Dict of options for building plots. Expected values are
defined during construction of this report object.
brevity : int, optional
Amount of detail to include in the report. Larger values mean smaller
"more briefr" reports, which reduce generation time, load time, and
disk space consumption. In particular:
- 1: Plots showing per-sequences quantities disappear at brevity=1
- 2: Reference sections disappear at brevity=2
- 3: Germ-level estimate tables disappear at brevity=3
- 4: Everything but summary figures disappears at brevity=4
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
resizable : bool, optional
Whether plots and tables are made with resize handles and can be
resized within the report.
autosize : {'none', 'initial', 'continual'}
Whether tables and plots should be resized, either initially --
i.e. just upon first rendering (`"initial"`) -- or whenever
the browser window is resized (`"continual"`).
single_file : bool, optional
If true, the report will be written to a single HTML
document, with external dependencies baked-in. This mode
is not recommended for large reports, because this file
can grow large enough that major web browsers may struggle
to render it.
verbosity : int, optional
Amount of detail to print to stdout.
"""
build_options = build_options or {}
toggles = _defaultdict(lambda: False)
toggles.update(
{k: True for k in self._flags}
)
for k in range(brevity, 4):
toggles['BrevityLT' + str(k + 1)] = True
# Render sections
qtys = self._build(build_options)
# TODO this really should be a parameter of this method
embed_figures = self._report_params.get('embed_figures', True)
if single_file:
assert(embed_figures), \
"Single-file mode requires `embed_figures` to be True"
_merge.merge_jinja_template(
qtys, path, templateDir=self._templates['html'],
auto_open=auto_open, precision=precision,
link_to=link_to, connected=connected, toggles=toggles,
renderMath=True, resizable=resizable,
autosize=autosize, verbosity=verbosity
)
else:
_merge.merge_jinja_template_dir(
qtys, path, templateDir=self._templates['html'],
auto_open=auto_open, precision=precision,
link_to=link_to, connected=connected, toggles=toggles,
renderMath=True, resizable=resizable,
autosize=autosize, embed_figures=embed_figures,
verbosity=verbosity
)
def write_notebook(self, path, auto_open=False, connected=False, verbosity=0):
""" Write this report to the disk as an IPython notebook
A notebook report allows the user to interact more flexibly with the data
underlying the figures, and to easily generate customized variants on the
figures. As such, this type of report will be most useful for experts
who want to tinker with the standard analysis presented in the static
HTML or LaTeX format reports.
Parameters
----------
path : str or path-like object
The filesystem path to write the report to. By convention,
this should use the `.ipynb` file extension.
auto_open : bool, optional
If True, automatically open the report in a web browser after it
has been generated.
connected : bool, optional
Whether output notebook should assume an active internet connection. If
True, then the resulting file size will be reduced because it will link
to web resources (e.g. CDN libraries) instead of embedding them.
verbosity : int, optional
How much detail to send to stdout.
"""
# TODO this only applies to standard reports; rewrite generally
title = self._global_qtys['title']
confidenceLevel = self._report_params['confidence_level']
path = _Path(path)
printer = _VerbosityPrinter.build_printer(verbosity)
templatePath = _Path(__file__).parent / 'templates' / self._templates['notebook']
outputDir = path.parent
#Copy offline directory into position
if not connected:
_merge.rsync_offline_dir(outputDir)
#Save results to file
# basename = _os.path.splitext(_os.path.basename(filename))[0]
basename = path.stem
results_file_base = basename + '_results.pkl'
results_file = outputDir / results_file_base
with open(str(results_file), 'wb') as f:
_pickle.dump(self._results, f)
nb = _Notebook()
nb.add_markdown('# {title}\n(Created on {date})'.format(
title=title, date=_time.strftime("%B %d, %Y")))
nb.add_code("""\
import pickle
import pygsti""")
dsKeys = list(self._results.keys())
results = self._results[dsKeys[0]]
#Note: `results` is always a single Results obj from here down
nb.add_code("""\
#Load results dictionary
with open('{infile}', 'rb') as infile:
results_dict = pickle.load(infile)
print("Available dataset keys: ", ', '.join(results_dict.keys()))\
""".format(infile=results_file_base))
nb.add_code("""\
#Set which dataset should be used below
results = results_dict['{dsKey}']
print("Available estimates: ", ', '.join(results.estimates.keys()))\
""".format(dsKey=dsKeys[0]))
estLabels = list(results.estimates.keys())
estimate = results.estimates[estLabels[0]]
nb.add_code("""\
#Set which estimate is to be used below
estimate = results.estimates['{estLabel}']
print("Available gauge opts: ", ', '.join(estimate.goparameters.keys()))\
""".format(estLabel=estLabels[0]))
goLabels = list(estimate.goparameters.keys())
nb.add_code("""\
gopt = '{goLabel}'
ds = results.dataset
gssFinal = results.circuit_structs['final']
Ls = results.circuit_structs['final'].Ls
gssPerIter = results.circuit_structs['iteration'] #ALL_L
prepStrs = results.circuit_lists['prep fiducials']
effectStrs = results.circuit_lists['meas fiducials']
germs = results.circuit_lists['germs']
strs = (prepStrs, effectStrs)
params = estimate.parameters
objective = estimate.parameters['objective']
if objective == "logl":
mpc = estimate.parameters['minProbClip']
else:
mpc = estimate.parameters['minProbClipForWeighting']
clifford_compilation = estimate.parameters.get('clifford_compilation',None)
effective_ds, scale_subMxs = estimate.get_effective_dataset(True)
scaledSubMxsDict = {{'scaling': scale_subMxs, 'scaling.colormap': "revseq"}}
models = estimate.models
mdl = models[gopt] #FINAL
mdl_final = models['final iteration estimate'] #ITER
target_model = models['target']
mdlPerIter = models['iteration estimates']
mdl_eigenspace_projected = pygsti.tools.project_to_target_eigenspace(mdl, target_model)
goparams = estimate.goparameters[gopt]
confidenceLevel = {CL}
if confidenceLevel is None:
cri = None
else:
crfactory = estimate.get_confidence_region_factory(gopt)
region_type = "normal" if confidenceLevel >= 0 else "non-markovian"
cri = crfactory.view(abs(confidenceLevel), region_type)\
""".format(goLabel=goLabels[0], CL=confidenceLevel))
nb.add_code("""\
from pygsti.report import Workspace
ws = Workspace()
ws.init_notebook_mode(connected={conn}, autodisplay=True)\
""".format(conn=str(connected)))
nb.add_notebook_text_files([
templatePath / 'summary.txt',
templatePath / 'goodness.txt',
templatePath / 'gauge_invariant.txt',
templatePath / 'gauge_variant.txt'])
#Insert multi-dataset specific analysis
if len(dsKeys) > 1:
nb.add_markdown(('# Dataset comparisons\n'
'This report contains information for more than one data set.'
'This page shows comparisons between different data sets.'))
nb.add_code("""\
dslbl1 = '{dsLbl1}'
dslbl2 = '{dsLbl2}'
dscmp_gss = results_dict[dslbl1].circuit_structs['final']
ds1 = results_dict[dslbl1].dataset
ds2 = results_dict[dslbl2].dataset
dscmp = pygsti.obj.DataComparator([ds1, ds2], DS_names=[dslbl1, dslbl2])
""".format(dsLbl1=dsKeys[0], dsLbl2=dsKeys[1]))
nb.add_notebook_text_files([
templatePath / 'data_comparison.txt'])
#Add reference material
nb.add_notebook_text_files([
templatePath / 'input.txt',
templatePath / 'meta.txt'])
printer.log("Report Notebook created as %s" % path)
if auto_open:
port = "auto" if auto_open is True else int(auto_open)
nb.launch(str(path), port=port)
else:
nb.save_to(str(path))
def write_pdf(self, path, latex_cmd='pdflatex', latex_flags=None,
build_options=None,
brevity=0, precision=None, auto_open=False,
comm=None, verbosity=0):
""" Write this report to the disk as a PDF document.
Parameters
----------
path : str or path-like object
The filesystem path to write the report to. By convention,
this should use the `.pdf` file extension.
latex_cmd : str, optional
Shell command to run to compile a PDF document from the
generated LaTeX source.
latex_flags : [str], optional
List of flags to pass when calling `latex_cmd`.
build_options : dict
Dict of options for building plots. Expected values are
defined during construction of this report object.
brevity : int, optional
Amount of detail to include in the report. Larger values mean smaller
"more briefr" reports, which reduce generation time, load time, and
disk space consumption. In particular:
- 1: Plots showing per-sequence quantities disappear at brevity=1
- 2: Reference sections disappear at brevity=2
- 3: Germ-level estimate tables disappear at brevity=3
- 4: Everything but summary figures disappears at brevity=4
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
auto_open : bool, optional
Whether the output file should be automatically opened in a web browser.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
verbosity : int, optional
Amount of detail to print to stdout.
"""
if not self._pdf_available:
raise ValueError(("PDF output unavailable. (Usually this is because this report"
" has multiple gauge optimizations and/or datasets.)"))
toggles = _defaultdict(lambda: False)
toggles.update(
{k: True for k in self._flags}
)
for k in range(brevity, 4):
toggles['BrevityLT' + str(k + 1)] = True
printer = _VerbosityPrinter.build_printer(verbosity, comm=comm)
path = _Path(path)
latex_flags = latex_flags or ["-interaction=nonstopmode", "-halt-on-error", "-shell-escape"]
# Render sections
qtys = self._build(build_options)
# TODO: filter while generating plots to remove need for sanitization
qtys = {k: v for k, v in qtys.items()
if not(isinstance(v, _ws.Switchboard) or isinstance(v, _ws.SwitchboardView))}
printer.log("Generating LaTeX source...")
_merge.merge_latex_template(
qtys, self._templates['pdf'], str(path.with_suffix('.tex')),
toggles, precision, printer
)
printer.log("Compiling with `{} {}`".format(latex_cmd, ' '.join(latex_flags)))
_merge.compile_latex_report(str(path.parent / path.stem), [latex_cmd] + latex_flags, printer, auto_open)
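Putting the docstrings together, the intended flow is: build a Report through one of the factory functions in pygsti.report.factory, then call write_html, write_notebook, or write_pdf on it. The sketch below assumes the construct_standard_report factory and an existing Results object named results; treat it as a usage outline rather than code from this module.

from pygsti.report import construct_standard_report  # assumed factory, per the class docstring

# `results` is assumed to be a pygsti Results object produced by an earlier GST run.
report = construct_standard_report(results, title="GST overview", confidence_level=95)
report.write_html("report_dir", brevity=1,
                  precision={'normal': 6, 'polar': 3, 'sci': 0}, auto_open=False)
report.write_pdf("report.pdf", brevity=2)  # only when a single estimate/dataset makes PDF available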
| 0
| 0
| 0
| 16,342
| 0
| 0
| 0
| 221
| 266
|
056e5bfd74cdb3c57ea5d1772797214b876ae034
| 6,878
|
py
|
Python
|
function/tests/testTask/test_update.py
|
kohski/serverless_todo
|
60e90caf86f5d921150193beac4acbd90752c814
|
[
"MIT"
] | null | null | null |
function/tests/testTask/test_update.py
|
kohski/serverless_todo
|
60e90caf86f5d921150193beac4acbd90752c814
|
[
"MIT"
] | 32
|
2021-02-25T01:18:20.000Z
|
2021-03-03T23:42:27.000Z
|
function/tests/testTask/test_update.py
|
kohski/serverless_todo
|
60e90caf86f5d921150193beac4acbd90752c814
|
[
"MIT"
] | null | null | null |
import boto3
import os
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])
# ------------------------------------------
# valid pattern
# ------------------------------------------
# ------------------------------------------
# not found pattern
# ------------------------------------------
# ------------------------------------------
# invalid pattern
# ------------------------------------------
INVALID_PAYLOAD_LIST = [
{
"title": ""
},
{
"title": None
},
{
"title": "a" * 101
},
{
"content": "a" * 2001
},
{
"priority": "invalid_priority_value"
},
{
"is_done": "invalid_is_done_value"
},
]
| 32.443396
| 109
| 0.486915
|
import pytest
from datetime import datetime
from update import lambda_handler
import boto3
import os
import json
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['TABLE_NAME'])
# ------------------------------------------
# valid pattern
# ------------------------------------------
@pytest.mark.parametrize("word,is_done,priority", [
(word, is_done, priority)
for word in [None, "", "修正後内容"]
for is_done in ['true', 'false', True, False]
for priority in ['high', 'medium', 'low']
])
def test_existing_task_and_requested_by_task_owner(word, is_done, priority, context, ulid_mock):
event = {
"resource": "/task/",
"path": "/task",
"httpMethod": 'POST',
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": None,
"pathParameters": {
"task_id": "ABCDEFGHIJKLMNOPQRSTUVW000"
},
"stageVariables": None,
"requestContext": {
"authorizer": {
"claims": {
"sub": "68f2ed3a-3726-439d-81cb-171dab716733",
"aud": "19gqr90c608tn8gp7j9nvop7h7",
"event_id": "55536ceb-c042-4c18-8a25-8bd4e4c2b28d",
"token_use": "id",
"auth_time": str(int(datetime.now().timestamp())),
"iss": "https://cognito-idp.ap-northeast-1.amazonaws.com/ap-northeast-*",
"cognito:username": "existing_user_id",
"exp": "Sun Feb 28 01:38:19 UTC 2021",
"iat": "Sun Feb 28 00:38:20 UTC 2021",
"email": "[email protected]"
}
},
"resourcePath": "/task",
"httpMethod": "GET",
"path": "/prod/task/123456",
"requestTimeEpoch": int(datetime.now().timestamp()),
"identity": {}
},
"body": json.dumps({
"title": "修正後タイトル",
"priority": priority,
"is_done": is_done,
"content": word
}),
"isBase64Encoded": False
}
response = lambda_handler(event, context)
del response['body']
assert response == {
'statusCode': 201,
'isBase64Encoded': False
}
item = table.get_item(
Key={
'id': 'Task:ABCDEFGHIJKLMNOPQRSTUVW000',
'meta': 'latest'
}
)
assert item['Item']['title'] == '修正後タイトル'
# ------------------------------------------
# not found pattern
# ------------------------------------------
@pytest.fixture()
def not_found_event():
return {
"resource": "/task/",
"path": "/task",
"httpMethod": 'POST',
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": None,
"pathParameters": {
'task_id': 'NOTEXISTINGTASK'
},
"stageVariables": None,
"requestContext": {
"authorizer": {
"claims": {
"sub": "68f2ed3a-3726-439d-81cb-171dab716733",
"aud": "19gqr90c608tn8gp7j9nvop7h7",
"event_id": "55536ceb-c042-4c18-8a25-8bd4e4c2b28d",
"token_use": "id",
"auth_time": str(int(datetime.now().timestamp())),
"iss": "https://cognito-idp.ap-northeast-1.amazonaws.com/ap-northeast-*",
"cognito:username": "existing_user_id",
"exp": "Sun Feb 28 01:38:19 UTC 2021",
"iat": "Sun Feb 28 00:38:20 UTC 2021",
"email": "[email protected]"
}
},
"resourcePath": "/task",
"httpMethod": "GET",
"path": "/prod/task/123456",
"requestTimeEpoch": int(datetime.now().timestamp()),
"identity": {}
},
"body": json.dumps({
"title": "タイトル",
"priority": "medium",
"is_done": 'true',
"content": "内容"
}),
"isBase64Encoded": False
}
def test_raise_not_found_case(not_found_event, context):
response = lambda_handler(not_found_event, context)
assert response == {
'statusCode': 404,
'body': 'task is not found',
'isBase64Encoded': False
}
# ------------------------------------------
# invalid pattern
# ------------------------------------------
INVALID_PAYLOAD_LIST = [
{
"title": ""
},
{
"title": None
},
{
"title": "a" * 101
},
{
"content": "a" * 2001
},
{
"priority": "invalid_priority_value"
},
{
"is_done": "invalid_is_done_value"
},
]
@pytest.fixture(params=INVALID_PAYLOAD_LIST)
def invalid_event(request):
return {
"resource": "/task/",
"path": "/task",
"httpMethod": 'POST',
"headers": {},
"multiValueHeaders": {},
"queryStringParameters": None,
"multiValueQueryStringParameters": None,
"pathParameters": {},
"stageVariables": None,
"requestContext": {
"authorizer": {
"claims": {
"sub": "68f2ed3a-3726-439d-81cb-171dab716733",
"aud": "19gqr90c608tn8gp7j9nvop7h7",
"event_id": "55536ceb-c042-4c18-8a25-8bd4e4c2b28d",
"token_use": "id",
"auth_time": str(int(datetime.now().timestamp())),
"iss": "https://cognito-idp.ap-northeast-1.amazonaws.com/ap-northeast-*",
"cognito:username": "existing_user_id",
"exp": "Sun Feb 28 01:38:19 UTC 2021",
"iat": "Sun Feb 28 00:38:20 UTC 2021",
"email": "[email protected]"
}
},
"resourcePath": "/task",
"httpMethod": "GET",
"path": "/prod/task/123456",
"requestTimeEpoch": int(datetime.now().timestamp()),
"identity": {}
},
"body": json.dumps({
"title": request.param.get('title'),
"priority": "medium" if request.param.get('priority') is None else request.param.get('priority'),
"is_done": 'true' if request.param.get('is_done') is None else request.param.get('is_done'),
"content": "内容" if request.param.get('content') is None else request.param.get('content')
}),
"isBase64Encoded": False
}
def test_raise_invalid_case(invalid_event, context, ulid_mock):
response = lambda_handler(invalid_event, context)
assert response == {
'statusCode': 400,
'body': 'invalid parameter',
'isBase64Encoded': False
}
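Read together, the invalid cases pin down the expected schema: title required, non-empty and at most 100 characters; content at most 2000 characters; priority one of high/medium/low; is_done a boolean or its string form. The validator below is only a hypothetical restatement of those constraints for reference; the real validation lives in update.py and is not shown here.

def is_valid_payload(payload):
    # Hypothetical check mirroring INVALID_PAYLOAD_LIST above, not the actual update.py logic.
    title = payload.get('title')
    if not title or len(title) > 100:
        return False
    if len(payload.get('content') or '') > 2000:
        return False
    if payload.get('priority') not in ('high', 'medium', 'low'):
        return False
    if payload.get('is_done') not in (True, False, 'true', 'false'):
        return False
    return True

assert not is_valid_payload({"title": "a" * 101, "priority": "medium", "is_done": "true", "content": ""})
assert is_valid_payload({"title": "修正後タイトル", "priority": "low", "is_done": False, "content": "内容"})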
| 81
| 5,425
| 0
| 0
| 0
| 443
| 0
| 2
| 203
|
339c00d28f3cce8e0930a2efcf6717be89f5a16d
| 642
|
py
|
Python
|
micron/tests/audiodevice.test.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 6
|
2015-11-26T15:03:38.000Z
|
2020-10-05T14:08:54.000Z
|
micron/tests/audiodevice.test.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 7
|
2015-12-09T06:44:34.000Z
|
2021-12-14T15:51:28.000Z
|
micron/tests/audiodevice.test.py
|
zhengyang-c/photonLauncher
|
76215f47ccd1178f1826834533f5702c4b8f2c35
|
[
"Apache-2.0"
] | 3
|
2016-07-25T10:43:21.000Z
|
2021-12-07T14:12:47.000Z
|
#!/usr/bin/env python3
import pyaudio
import sys
sys.path.insert(0, "../")
from pwmaudio import noALSAerror
with noALSAerror():
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
print(p.get_host_api_count())
print(info)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:
# print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i))
| 33.789474
| 116
| 0.661994
|
#!/usr/bin/env python3
import pyaudio
import sys
sys.path.insert(0, "../")
from pwmaudio import noALSAerror
with noALSAerror():
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
print(p.get_host_api_count())
print(info)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
if (p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels')) > 0:
# print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))
print("Output Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
38f4cfc7cbdf2d4521cd2cce0f4533e5bc58ff43
| 2,384
|
py
|
Python
|
app/src/main/assets/code/controller/controller.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/assets/code/controller/controller.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
app/src/main/assets/code/controller/controller.py
|
tongjinlv/py_and
|
a069336c47dd233648fbbadee7275ef188696a44
|
[
"Apache-2.0"
] | null | null | null |
import imp
imp.load_source('controllerleds', '/data/data/com.matatalab.matatacode/run/controller/leds.py')
imp.load_source('controllermessage', '/data/data/com.matatalab.matatacode/run/controller/message.py')
imp.load_source('controllesensor', '/data/data/com.matatalab.matatacode/run/controller/sensor.py')
imp.load_source('controllemotion_sensor', '/data/data/com.matatalab.matatacode/run/controller/motion_sensor.py')
imp.load_source('controllebutton', '/data/data/com.matatalab.matatacode/run/controller/button.py')
imp.load_source('controllecolor_sensor', '/data/data/com.matatalab.matatacode/run/controller/color_sensor.py')
imp.load_source('controlleinfrared_sensor', '/data/data/com.matatalab.matatacode/run/controller/infrared_sensor.py')
imp.load_source('controllesound_sensor', '/data/data/com.matatalab.matatacode/run/controller/sound_sensor.py')
imp.load_source('controlletimer', '/data/data/com.matatalab.matatacode/run/controller/timer.py')
| 41.824561
| 116
| 0.75797
|
import sys
import math
import random
import imp
from java import jclass
from controller.leds import leds
from controller.message import message
from controller.sensor import sensor
from controller.motion_sensor import motion_sensor
from controller.button import button
from controller.color_sensor import color_sensor
from controller.infrared_sensor import infrared_sensor
from controller.sound_sensor import sound_sensor
from controller.timer import timer
imp.load_source('controllerleds', '/data/data/com.matatalab.matatacode/run/controller/leds.py')
imp.load_source('controllermessage', '/data/data/com.matatalab.matatacode/run/controller/message.py')
imp.load_source('controllesensor', '/data/data/com.matatalab.matatacode/run/controller/sensor.py')
imp.load_source('controllemotion_sensor', '/data/data/com.matatalab.matatacode/run/controller/motion_sensor.py')
imp.load_source('controllebutton', '/data/data/com.matatalab.matatacode/run/controller/button.py')
imp.load_source('controllecolor_sensor', '/data/data/com.matatalab.matatacode/run/controller/color_sensor.py')
imp.load_source('controlleinfrared_sensor', '/data/data/com.matatalab.matatacode/run/controller/infrared_sensor.py')
imp.load_source('controllesound_sensor', '/data/data/com.matatalab.matatacode/run/controller/sound_sensor.py')
imp.load_source('controlletimer', '/data/data/com.matatalab.matatacode/run/controller/timer.py')
class controller:
call=None
leds=None
message=None
sensor=None
motion_sensor=None
button=None
color_sensor=None
infrared_sensor=None
sound_sensor=None
timer=None
def __init__(self):
Python2Java = jclass("com.matatalab.matatacode.model.Python2Java")
self.call = Python2Java("python")
self.leds=leds(self.call)
self.message=message(self.call)
self.sensor=sensor(self.call)
self.motion_sensor=motion_sensor(self.call)
self.button=button(self.call)
self.color_sensor=color_sensor(self.call)
self.infrared_sensor=infrared_sensor(self.call)
self.sound_sensor=sound_sensor(self.call)
self.timer=timer(self.call)
#data = [0x7e,0x02,0x02,0x00,0x00]
#print("Controller: set to new protocol")
#self.call.blewrite(data)
#self.call.blewait()
def test(self):
data = [0x39,0x04]
self.call.blewrite(data)
self.call.blewait()
| 27
| 0
| 0
| 949
| 0
| 0
| 0
| 160
| 309
|
9126ebac0a1ed3389e3b8adbc570ccd9cc668771
| 4,684
|
py
|
Python
|
crowdstrike/src/crowdstrike/actor/importer.py
|
galonsososa/connectors
|
6272128a2ca69ffca13cec63ff0f7bc55ee902a5
|
[
"Apache-2.0"
] | null | null | null |
crowdstrike/src/crowdstrike/actor/importer.py
|
galonsososa/connectors
|
6272128a2ca69ffca13cec63ff0f7bc55ee902a5
|
[
"Apache-2.0"
] | 2
|
2021-02-16T20:48:43.000Z
|
2021-03-03T06:20:13.000Z
|
crowdstrike/src/crowdstrike/actor/importer.py
|
galonsososa/connectors
|
6272128a2ca69ffca13cec63ff0f7bc55ee902a5
|
[
"Apache-2.0"
] | 2
|
2021-02-16T20:45:11.000Z
|
2021-03-03T05:47:53.000Z
|
# -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike actor importer module."""
| 33.219858
| 105
| 0.638343
|
# -*- coding: utf-8 -*-
"""OpenCTI CrowdStrike actor importer module."""
from typing import Any, Generator, List, Mapping, Optional
from crowdstrike_client.api.intel.actors import Actors
from crowdstrike_client.api.models import Response
from crowdstrike_client.api.models.actor import Actor
from pycti.connector.opencti_connector_helper import OpenCTIConnectorHelper # type: ignore # noqa: E501
from stix2 import Bundle, Identity, MarkingDefinition # type: ignore
from crowdstrike.actor.builder import ActorBundleBuilder
from crowdstrike.importer import BaseImporter
from crowdstrike.utils import datetime_to_timestamp, paginate, timestamp_to_datetime
class ActorImporter(BaseImporter):
"""CrowdStrike actor importer."""
_LATEST_ACTOR_TIMESTAMP = "latest_actor_timestamp"
def __init__(
self,
helper: OpenCTIConnectorHelper,
actors_api: Actors,
update_existing_data: bool,
author: Identity,
default_latest_timestamp: int,
tlp_marking: MarkingDefinition,
) -> None:
"""Initialize CrowdStrike actor importer."""
super().__init__(helper, author, tlp_marking, update_existing_data)
self.actors_api = actors_api
self.default_latest_timestamp = default_latest_timestamp
def run(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
"""Run importer."""
self._info("Running actor importer with state: {0}...", state)
fetch_timestamp = state.get(
self._LATEST_ACTOR_TIMESTAMP, self.default_latest_timestamp
)
latest_fetched_actor_timestamp = None
for actors_batch in self._fetch_actors(fetch_timestamp):
if not actors_batch:
break
if latest_fetched_actor_timestamp is None:
first_in_batch = actors_batch[0]
created_date = first_in_batch.created_date
if created_date is None:
self._error(
"Missing created date for actor {0} ({1})",
first_in_batch.name,
first_in_batch.id,
)
break
latest_fetched_actor_timestamp = datetime_to_timestamp(created_date)
self._process_actors(actors_batch)
state_timestamp = latest_fetched_actor_timestamp or fetch_timestamp
self._info(
"Actor importer completed, latest fetch {0}.",
timestamp_to_datetime(state_timestamp),
)
return {self._LATEST_ACTOR_TIMESTAMP: state_timestamp}
def _fetch_actors(self, start_timestamp: int) -> Generator[List[Actor], None, None]:
limit = 50
sort = "created_date|desc"
fql_filter = f"created_date:>{start_timestamp}"
fields = ["__full__"]
paginated_query = paginate(self._query_actor_entities)
return paginated_query(
limit=limit, sort=sort, fql_filter=fql_filter, fields=fields
)
def _query_actor_entities(
self,
limit: int = 50,
offset: int = 0,
sort: Optional[str] = None,
fql_filter: Optional[str] = None,
fields: Optional[List[str]] = None,
) -> Response[Actor]:
self._info(
"Query actors limit: {0}, offset: {1}, sort: {2}, filter: {3}, fields: {4}",
limit,
offset,
sort,
fql_filter,
fields,
)
return self.actors_api.query_entities(
limit=limit, offset=offset, sort=sort, fql_filter=fql_filter, fields=fields
)
def _process_actors(self, actors: List[Actor]) -> None:
actor_count = len(actors)
self._info("Processing {0} actors...", actor_count)
for actor in actors:
self._process_actor(actor)
self._info("Processing actors completed (imported: {0})", actor_count)
def _process_actor(self, actor: Actor) -> None:
self._info("Processing actor {0} ({1})...", actor.name, actor.id)
actor_bundle = self._create_actor_bundle(actor)
# with open(f"actor_bundle_{actor.id}.json", "w") as f:
# f.write(actor_bundle.serialize(pretty=True))
self._send_bundle(actor_bundle)
def _create_actor_bundle(self, actor: Actor) -> Optional[Bundle]:
author = self.author
source_name = self._source_name()
object_marking_refs = [self.tlp_marking]
confidence_level = self._confidence_level()
bundle_builder = ActorBundleBuilder(
actor, author, source_name, object_marking_refs, confidence_level
)
return bundle_builder.build()
| 0
| 0
| 0
| 3,999
| 0
| 0
| 0
| 339
| 272
|
f6bfb6ffbc2d0285ca49b8bc43649c6454ef1f28
| 3,024
|
py
|
Python
|
Sample.py
|
zerobounce-llc/zero-bounce-python-sdk-setup
|
46dcdce9ece529e23d65fa92fb81f8ac19ce5c2e
|
[
"MIT"
] | null | null | null |
Sample.py
|
zerobounce-llc/zero-bounce-python-sdk-setup
|
46dcdce9ece529e23d65fa92fb81f8ac19ce5c2e
|
[
"MIT"
] | null | null | null |
Sample.py
|
zerobounce-llc/zero-bounce-python-sdk-setup
|
46dcdce9ece529e23d65fa92fb81f8ac19ce5c2e
|
[
"MIT"
] | null | null | null |
test()
| 29.076923
| 85
| 0.652116
|
from datetime import date
from zerobouncesdk import zerobouncesdk, ZBApiException, \
ZBMissingApiKeyException
def test_validate():
try:
response = zerobouncesdk.validate(email="<EMAIL_TO_TEST>")
print("validate success response: " + str(response))
except ZBApiException as e:
print("validate error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_credits error message: " + str(e))
def test_get_credits():
try:
response = zerobouncesdk.get_credits()
print("get_credits success response: " + str(response))
except ZBApiException as e:
print("get_credits error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_credits error message: " + str(e))
def test_send_file():
try:
response = zerobouncesdk.send_file(
file_path='./email_file.csv',
email_address_column=1,
return_url=None,
first_name_column=2,
last_name_column=3,
has_header_row=True)
print("sendfile success response: " + str(response))
except ZBApiException as e:
print("sendfile error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_credits error message: " + str(e))
def test_file_status():
try:
response = zerobouncesdk.file_status("<YOUR_FILE_ID>")
print("file_status success response: " + str(response))
except ZBApiException as e:
print("file_status error message: " + str(e))
except ZBMissingApiKeyException as e:
print("file_status error message: " + str(e))
def test_delete_file():
try:
response = zerobouncesdk.delete_file("<YOUR_FILE_ID>")
print("delete_file success response: " + str(response))
except ZBApiException as e:
print("delete_file error message: " + str(e))
except ZBMissingApiKeyException as e:
print("delete_file error message: " + str(e))
def test_get_api_usage():
try:
start_date = date(2019, 7, 5)
end_date = date(2019, 7, 15)
response = zerobouncesdk.get_api_usage(start_date, end_date)
print("get_api_usage success response: " + str(response))
except ZBApiException as e:
print("get_api_usage error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_api_usage error message: " + str(e))
def test_get_file():
try:
response = zerobouncesdk.get_file("<YOUR_FILE_ID>", "./downloads/emails.csv")
print("get_file success response: " + str(response))
except ZBApiException as e:
print("get_file error message: " + str(e))
except ZBMissingApiKeyException as e:
print("get_file error message: " + str(e))
def test():
zerobouncesdk.initialize("<YOUR_API_KEY>")
# test_validate()
# test_send_file()
# test_get_credits()
# test_file_status()
# test_delete_file()
# test_get_api_usage()
test_get_file()
test()
| 0
| 0
| 0
| 0
| 0
| 2,709
| 0
| 70
| 228
|
7bc2b1b8575ca1fb963e3e27a6dc57290ad35330
| 2,305
|
py
|
Python
|
Software/Sensors/IAQ_SCD30.py
|
xJohnnyBravo/zephyrus-iaq
|
31d39ae21080de55d39bc0dde6e49f5749d39477
|
[
"MIT"
] | 2
|
2019-10-01T23:08:25.000Z
|
2019-11-05T23:37:38.000Z
|
Software/Sensors/IAQ_SCD30.py
|
aaronjense/raspberrypi-indoor-air-quality-pcb
|
7e1fc68b31dea88229866c8cbc6b221a4a679134
|
[
"MIT"
] | 1
|
2019-11-14T02:28:30.000Z
|
2019-11-14T02:28:30.000Z
|
Software/Sensors/IAQ_SCD30.py
|
aaronjense/raspberrypi-indoor-air-quality-pcb
|
7e1fc68b31dea88229866c8cbc6b221a4a679134
|
[
"MIT"
] | 6
|
2019-10-01T22:44:44.000Z
|
2019-11-14T20:19:46.000Z
|
#!/usr/bin/python
#################################################################################
# MIT License
#
# Copyright (c) 2019 Aaron Jense, Amy Heidner, Dennis Heidner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
| 39.741379
| 82
| 0.6282
|
#!/usr/bin/python
#################################################################################
# MIT License
#
# Copyright (c) 2019 Aaron Jense, Amy Heidner, Dennis Heidner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
from third_party.Adafruit_I2C import *
from IAQ_Exceptions import *
import struct
class IAQ_SCD30():
sid = None
i2c = None
I2C_ADDRESS = 0x61
def __init__(self, sensor_id=None):
self.sid = sensor_id
try:
self.i2c = Adafruit_I2C(self.I2C_ADDRESS, debug=False)
except IOError:
raise SensorSetupError('Could not setup SCD30 I2C.')
def getData(self):
try:
self.i2c.write8(0x0,0)
self.i2c.write8(0x46,2)
rawdata = self.i2c.readList(0x03,18)
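# rawdata interleaves data and CRC bytes (Sensirion I2C format), so the four
# data bytes of the CO2 float sit at offsets 0, 1, 3 and 4; offsets 2 and 5 are CRCs.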
struct_co2 = struct.pack('BBBB', rawdata[0], rawdata[1],
rawdata[3], rawdata[4])
float_co2 = struct.unpack('>f', struct_co2)
data = "%.4f"%float_co2
except IOError:
raise SensorReadError('Unable to read SCD30.')
except TypeError:
raise SensorReadError('Unable to read SCD30.')
return data
| 0
| 0
| 0
| 847
| 0
| 0
| 0
| 16
| 96
|
64b668a6aa6c762d7927caa38bc992ca22f6db7c
| 3,424
|
py
|
Python
|
dc_gym/iroko_reward.py
|
matthieu637/iroko
|
3905caa46328a7c011762f8a96a15fbde9826899
|
[
"Apache-2.0"
] | 56
|
2018-12-01T00:11:27.000Z
|
2022-03-08T04:10:10.000Z
|
dc_gym/iroko_reward.py
|
matthieu637/iroko
|
3905caa46328a7c011762f8a96a15fbde9826899
|
[
"Apache-2.0"
] | 33
|
2018-12-13T20:18:07.000Z
|
2022-03-23T16:03:26.000Z
|
dc_gym/iroko_reward.py
|
matthieu637/iroko
|
3905caa46328a7c011762f8a96a15fbde9826899
|
[
"Apache-2.0"
] | 17
|
2019-02-19T05:31:23.000Z
|
2022-03-14T15:20:00.000Z
|
import numpy as np
import logging
log = logging.getLogger(__name__)
def fairness_reward(actions, queues=None):
"""Compute Jain"s fairness index for a list of values.
See http://en.wikipedia.org/wiki/Fairness_measure for fairness equations.
@param values: list of values
@return fairness: JFI
"""
if len(actions) == 0:
return 1.0
num = sum(actions) ** 2
denom = len(actions) * sum([i ** 2 for i in actions])
return num / float(denom)
def gini_reward(actions, queues=None):
"""Calculate the Gini coefficient of a numpy array."""
# based on bottom eq:
# http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
# from:
# http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
# All values are treated equally, arrays must be 1d:
# Values must be sorted:
actions = np.sort(actions)
# Number of array elements:
n = actions.shape[0]
# Index per array element:
index = np.arange(1, n + 1)
# Gini coefficient:
return ((np.sum((2 * index - n - 1) * actions)) / (n * np.sum(actions)))
# small script to visualize the reward output
if __name__ == "__main__":
import matplotlib.pyplot as plt
queues = [i * 0.1 for i in range(0, 11)]
actions = [i * .001 for i in range(0, 1000)]
for queue in queues:
rewards = []
queue_input = np.array([queue])
for action in actions:
action_input = np.array([action])
rewards.append((joint_queue_reward(action_input, queue_input)))
plt.plot(actions, rewards, label="Queue Size %f" % queue)
plt.xlabel("Action Input")
plt.ylabel("Reward")
plt.legend()
plt.show()
| 29.264957
| 80
| 0.651577
|
import numpy as np
import math
import logging
log = logging.getLogger(__name__)
def fairness_reward(actions, queues=None):
"""Compute Jain"s fairness index for a list of values.
See http://en.wikipedia.org/wiki/Fairness_measure for fairness equations.
@param values: list of values
@return fairness: JFI
"""
if len(actions) == 0:
return 1.0
num = sum(actions) ** 2
denom = len(actions) * sum([i ** 2 for i in actions])
return num / float(denom)
def gini_reward(actions, queues=None):
"""Calculate the Gini coefficient of a numpy array."""
# based on bottom eq:
# http://www.statsdirect.com/help/generatedimages/equations/equation154.svg
# from:
# http://www.statsdirect.com/help/default.htm#nonparametric_methods/gini.htm
# All values are treated equally, arrays must be 1d:
# Values must be sorted:
actions = np.sort(actions)
# Number of array elements:
n = actions.shape[0]
# Index per array element:
index = np.arange(1, n + 1)
# Gini coefficient:
return ((np.sum((2 * index - n - 1) * actions)) / (n * np.sum(actions)))
def action_reward(actions, queues=None):
return np.mean(actions)
def fair_queue_reward(actions, queues):
queue = np.max(queues)
action = np.mean(actions)
fairness = fairness_reward(actions[actions < 1.0])
reward = action - queue * action + (fairness * (1 - queue))
return reward
def joint_queue_reward(actions, queues):
queue = np.max(queues)
action = np.mean(actions)
reward = action - 2 * (queue * action)
return reward
def step_reward(actions, queues):
queue = np.max(queues)
action = np.mean(actions)
if queue > 0.30:
return -action - queue
else:
return action * (1 + (1 - gini_reward(actions))) - queue
def std_dev_reward(actions, queues=None):
return -np.std(actions)
def queue_reward(actions, queues):
queue_reward = -np.sum(queues)**2
return queue_reward
def selu_reward(reward):
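# Scaled Exponential Linear Unit (SELU) applied to the reward;
# alpha and scale are the standard SELU constants.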
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
return scale * (max(0, reward) + min(0, alpha * (math.exp(reward) - 1)))
class RewardFunction:
__slots__ = ["stats_dict", "reward_funs"]
def __init__(self, reward_models, stats_dict):
self.stats_dict = stats_dict
self.reward_funs = self._set_reward(reward_models)
def _set_reward(self, reward_models):
reward_funs = []
for model in reward_models:
reward_funs.append(globals()["%s_reward" % model])
return reward_funs
def get_reward(self, stats, actions):
queues = stats[self.stats_dict["backlog"]]
reward = 0.0
for reward_fun in self.reward_funs:
reward += reward_fun(actions, queues)
return reward
# small script to visualize the reward output
if __name__ == "__main__":
import matplotlib.pyplot as plt
queues = [i * 0.1 for i in range(0, 11)]
actions = [i * .001 for i in range(0, 1000)]
for queue in queues:
rewards = []
queue_input = np.array([queue])
for action in actions:
action_input = np.array([action])
rewards.append((joint_queue_reward(action_input, queue_input)))
plt.plot(actions, rewards, label="Queue Size %f" % queue)
plt.xlabel("Action Input")
plt.ylabel("Reward")
plt.legend()
plt.show()
| 0
| 0
| 0
| 619
| 0
| 887
| 0
| -10
| 206
|
e25ff3df493ac431d6d60b22582cb70b4670f2a3
| 727
|
py
|
Python
|
cellar/fs.py
|
JonathanHuot/cellarpy
|
74fe9f144b63b891d6cda45f10f63d310c0d0f58
|
[
"MIT"
] | null | null | null |
cellar/fs.py
|
JonathanHuot/cellarpy
|
74fe9f144b63b891d6cda45f10f63d310c0d0f58
|
[
"MIT"
] | 4
|
2018-03-03T22:08:22.000Z
|
2021-09-07T23:44:54.000Z
|
cellar/fs.py
|
JonathanHuot/cellarpy
|
74fe9f144b63b891d6cda45f10f63d310c0d0f58
|
[
"MIT"
] | 1
|
2017-06-08T13:01:02.000Z
|
2017-06-08T13:01:02.000Z
|
# -*- coding: utf-8 -*-
| 25.964286
| 73
| 0.639615
|
# -*- coding: utf-8 -*-
import os
def iswritable(directory):
if not os.path.exists(directory):
try:
os.makedirs(directory)
except:
return False
return os.access(directory, os.W_OK | os.X_OK | os.R_OK)
def static_file_path(root, filename):
root = os.path.abspath(root) + os.sep
return os.path.abspath(os.path.join(root, filename.strip('/\\')))
def static_file_exists(root, filename):
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
if not filename.startswith(root):
return False
if not os.path.exists(filename) or not os.path.isfile(filename):
return False
return True
| 0
| 0
| 0
| 0
| 0
| 621
| 0
| -12
| 91
|
3c950daaa32f79dd0904fd8cf520966d77491761
| 371
|
py
|
Python
|
ex043.py
|
GuilhermeAntony14/Estudando-Python
|
b020f6d2625e7fcc42d30658bcbd881b093434dd
|
[
"MIT"
] | null | null | null |
ex043.py
|
GuilhermeAntony14/Estudando-Python
|
b020f6d2625e7fcc42d30658bcbd881b093434dd
|
[
"MIT"
] | null | null | null |
ex043.py
|
GuilhermeAntony14/Estudando-Python
|
b020f6d2625e7fcc42d30658bcbd881b093434dd
|
[
"MIT"
] | null | null | null |
print('vamos calcular seu IMC')
a = float(input('Sua altura: '))
p = float(input('Seu peso: '))
n = (p/(a**2))
print(f'Seu IMC e: {n:.1f}')
if n < 18.5:
print('Abaixo do peso.')
elif n <= 25 and n > 18.5:
print('Peso ideal.')
elif n < 30 and n > 25:
print('Sobrepeso.')
elif n <= 40 and 30 < n:
print('obsidade.')
else:
print('obsidade mórbida.')
| 23.1875
| 32
| 0.566038
|
print('vamos calcular seu IMC')
a = float(input('Sua altura: '))
p = float(input('Seu peso: '))
n = (p/(a**2))
print(f'Seu IMC e: {n:.1f}')
if n < 18.5:
print('Abaixo do peso.')
elif n <= 25 and n > 18.5:
print('Peso ideal.')
elif n < 30 and n > 25:
print('Sobrepeso.')
elif n <= 40 and 30 < n:
print('obsidade.')
else:
print('obsidade mórbida.')
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e5bd5f40b426cef7283c560a2796fb22b549035d
| 667
|
py
|
Python
|
tests/test_data_cleanser.py
|
luisccalves/supplychainpy
|
63a10b77ffdcc5bca71e815c70667c819d8f9af0
|
[
"BSD-3-Clause"
] | 231
|
2016-05-30T02:34:45.000Z
|
2022-03-28T17:00:29.000Z
|
tests/test_data_cleanser.py
|
luisccalves/supplychainpy
|
63a10b77ffdcc5bca71e815c70667c819d8f9af0
|
[
"BSD-3-Clause"
] | 77
|
2016-03-23T16:28:34.000Z
|
2021-09-30T22:08:03.000Z
|
tests/test_data_cleanser.py
|
luisccalves/supplychainpy
|
63a10b77ffdcc5bca71e815c70667c819d8f9af0
|
[
"BSD-3-Clause"
] | 103
|
2016-08-10T19:53:09.000Z
|
2022-03-16T16:34:38.000Z
|
#logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
| 39.235294
| 104
| 0.728636
|
from unittest import TestCase
import logging
from supplychainpy._helpers import _data_cleansing
from supplychainpy.sample_data.config import ABS_FILE_PATH
#logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class TestCleanser(TestCase):
def test_incorrect_row_length(self):
""" Tests for incorrect specification of number of columns after initial SKU identification. """
with open(ABS_FILE_PATH['COMPLETE_CSV_XSM']) as f:
for i in range(0, 11):
with self.assertRaises(expected_exception=Exception):
_data_cleansing.clean_orders_data_row_csv(f, length=i)
| 0
| 0
| 0
| 393
| 0
| 0
| 0
| 67
| 113
|
e738317c6f5cf90c7eb6eb5ab706d4e66f9a907d
| 438
|
py
|
Python
|
sikuli-ide/sample-scripts/mute.sikuli/mute.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 1,292
|
2015-01-09T17:48:46.000Z
|
2022-03-30T20:08:15.000Z
|
sikuli-ide/sample-scripts/mute.sikuli/mute.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 31
|
2015-01-20T15:01:24.000Z
|
2022-03-03T11:02:06.000Z
|
sikuli-ide/sample-scripts/mute.sikuli/mute.py
|
mgrundy/sikuli
|
4adaab7880d2f3e14702ca7287ae9c9e4f4de9ab
|
[
"MIT"
] | 267
|
2015-02-08T19:51:25.000Z
|
2022-03-19T22:16:01.000Z
|
switchApp("System Preferences.app")
click("1273526123226.png")
click("1273526171905.png")
thumbs = findAll("1273527194228.png")
for t in list(thumbs)[:2]: # only take the first two
dragLeft(t) # off
#dragRight(t) # on
#dragToMute(t)
| 23.052632
| 58
| 0.691781
|
def dragLeft(t):
dragDrop(t, t.getCenter().left(200))
def dragRight(t):
dragDrop(t, t.getCenter().right(200))
def dragToMute(t):
dragDrop(t, t.nearby().left().find("1273527108356.png"))
switchApp("System Preferences.app")
click("1273526123226.png")
click("1273526171905.png")
thumbs = findAll("1273527194228.png")
for t in list(thumbs)[:2]: # only take the first two
dragLeft(t) # off
#dragRight(t) # on
#dragToMute(t)
| 0
| 0
| 0
| 0
| 0
| 127
| 0
| 0
| 68
|
5a5a83ad47518f4946babfacb96a920c30542e02
| 4,869
|
py
|
Python
|
pytest_func_cov/plugin.py
|
radug0314/pytest_func_cov
|
cc689ba68d083e69d399baad83189861a7adb199
|
[
"MIT"
] | 4
|
2020-04-03T19:36:51.000Z
|
2021-04-11T23:41:59.000Z
|
pytest_func_cov/plugin.py
|
RaduG/pytest_func_cov
|
cc689ba68d083e69d399baad83189861a7adb199
|
[
"MIT"
] | 5
|
2020-02-23T20:37:04.000Z
|
2021-07-07T07:53:39.000Z
|
pytest_func_cov/plugin.py
|
radug0314/pytest_func_cov
|
cc689ba68d083e69d399baad83189861a7adb199
|
[
"MIT"
] | 1
|
2021-04-05T15:36:54.000Z
|
2021-04-05T15:36:54.000Z
|
def pytest_addoption(parser):
"""
Pytest hook - register command line arguments. We want to register the
--func_cov argument to explicitly pass the location of the package to
discover and the ignore_func_names ini setting.
Args:
parser:
"""
group = parser.getgroup("func_cov")
group.addoption(
"--func_cov",
dest="func_cov_source",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
group.addoption(
"--func_cov_report",
dest="func_cov_report",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
parser.addini("ignore_func_names", "function names to ignore", "linelist", [])
| 30.622642
| 82
| 0.586979
|
import os
import sys
from .tracking import FunctionIndexer, get_full_function_name
def pytest_addoption(parser):
"""
Pytest hook - register command line arguments. We want to register the
--func_cov argument to explicitly pass the location of the package to
discover and the ignore_func_names ini setting.
Args:
parser:
"""
group = parser.getgroup("func_cov")
group.addoption(
"--func_cov",
dest="func_cov_source",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
group.addoption(
"--func_cov_report",
dest="func_cov_report",
action="append",
default=[],
metavar="SOURCE",
nargs="?",
const=True,
)
parser.addini("ignore_func_names", "function names to ignore", "linelist", [])
def pytest_load_initial_conftests(early_config, parser, args):
if early_config.known_args_namespace.func_cov_source:
plugin = FuncCovPlugin(early_config)
early_config.pluginmanager.register(plugin, "_func_cov")
class FuncCovPlugin:
def __init__(self, args):
self.args = args
self.indexer = FunctionIndexer(args.getini("ignore_func_names"))
def pytest_sessionstart(self, session):
"""
Pytest hook - called when the pytest session is created. At this point,
we need to run a full module discovery and register all functions
prior to initiating the collection. If the PYTEST_FUNC_COV environment
variable is set, use that as the root discovery path, relative to the
session fspath.
Args:
session: Pytest session
"""
# Add current folder to sys.path if it is not already in
cwd = os.getcwd()
if cwd not in sys.path:
sys.path.append(cwd)
pytest_cov_paths = self.args.known_args_namespace.func_cov_source
if len(pytest_cov_paths) == 0:
pytest_cov_paths = [session.fspath]
else:
pytest_cov_paths = [
os.path.join(session.fspath, path.rstrip("/\\"))
for path in pytest_cov_paths
]
for package_path in pytest_cov_paths:
self.indexer.index_package(package_path)
def pytest_collect_file(self, path):
"""
Pytest hook - called before the collection of a file. At this point
we need to register the current test file as a valid function call
origin.
Args:
path (str): Path to test file
"""
self.indexer.register_source_module(str(path))
def pytest_terminal_summary(self, terminalreporter):
"""
Pytest hook - called when the test summary is outputted. Here we
output basic statistics of the number of functions registered and called,
as well as a function call test coverage (in percentage).
Args:
terminalreporter:
"""
output_options = self.args.known_args_namespace.func_cov_report
include_missing = "term-missing" in output_options
tr = terminalreporter
cwd = os.getcwd()
found = self.indexer.monitor.registered_functions
called = self.indexer.monitor.called_functions
missed = self.indexer.monitor.missed_functions
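# Report module paths relative to the current working directory.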
module_paths = [sys.modules[m].__file__[len(cwd) + 1 :] for m, _ in found]
max_name_len = max([len(mp) for mp in module_paths] + [5])
fmt_name = "%%- %ds " % max_name_len
header = (fmt_name % "Name") + " Funcs Miss" + "%*s" % (10, "Cover")
if include_missing:
header += "%*s" % (10, "Missing")
fmt_coverage = fmt_name + "%6d %6d" + "%%%ds%%%%" % (9,)
if include_missing:
fmt_coverage += " %s"
msg = "pytest_func_cov"
tr.write("-" * 20 + msg + "-" * 20 + "\n")
tr.write(header + "\n")
tr.write("-" * len(header) + "\n")
total_funcs = 0
total_miss = 0
for i, mp in enumerate(module_paths):
funcs = len(found[i][1])
miss = len(missed[i][1])
cover = int(((funcs - miss) / funcs) * 100)
total_funcs += funcs
total_miss += miss
args = (mp, funcs, miss, cover)
if include_missing:
args += (", ".join([f.__qualname__ for f in missed[i][1]]),)
tr.write(fmt_coverage % args)
tr.write("\n")
tr.write("-" * len(header) + "\n")
if total_funcs != 0:
total_cover = int(((total_funcs - total_miss) / total_funcs) * 100)
else:
total_cover = 0
args = ("TOTAL", total_funcs, total_miss, total_cover)
if include_missing:
args += ("",)
tr.write(fmt_coverage % args + "\n")
| 0
| 0
| 0
| 3,739
| 0
| 209
| 0
| 17
| 113
|
df932a1d318345b8235882775e0cd92939917f5c
| 1,968
|
py
|
Python
|
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | 10
|
2021-02-09T19:25:46.000Z
|
2022-03-29T13:49:23.000Z
|
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | null | null | null |
lib/tests/test_runner.py
|
xuzhiying9510/ncflow
|
3f6f4a5b2c13ac8f6375b097b35f6c55b18d212e
|
[
"Artistic-1.0-cl8"
] | 5
|
2020-12-23T15:24:40.000Z
|
2022-01-06T09:42:38.000Z
|
#! /usr/bin/env python
from .toy_problem_test import ToyProblemTest
from .reconciliation_problem_test import ReconciliationProblemTest
from .reconciliation_problem_2_test import ReconciliationProblem2Test
from .recon3_test import Recon3Test
from .optgapc1_test import OptGapC1Test
from .optgapc2_test import OptGapC2Test
from .optgapc3_test import OptGapC3Test
from .optgap4_test import OptGap4Test
from .single_edge_b import SingleEdgeBTest
from .feasibility_test import FeasibilityTest
from .flow_path_construction_test import FlowPathConstructionTest
from .we_need_to_fix_this_test import WeNeedToFixThisTest
import argparse
ALL_TESTS = [ToyProblemTest(), ReconciliationProblemTest(),
ReconciliationProblem2Test(), Recon3Test(), OptGapC1Test(),
OptGapC2Test(), OptGapC3Test(), FeasibilityTest(),
OptGap4Test(), FlowPathConstructionTest(), WeNeedToFixThisTest(),
SingleEdgeBTest()]
TEST_NAME_DICT = {test.name: test for test in ALL_TESTS}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tests', nargs='+', required=False)
args = parser.parse_args()
if args.tests is not None:
tests_to_run = [TEST_NAME_DICT[name] for name in args.tests]
else:
tests_to_run = ALL_TESTS
print('RUNNING THE FOLLOWING TESTS: {}'.format(
[test.name for test in tests_to_run]))
run_tests(tests_to_run)
| 33.931034
| 102
| 0.723069
|
#! /usr/bin/env python
from .toy_problem_test import ToyProblemTest
from .reconciliation_problem_test import ReconciliationProblemTest
from .reconciliation_problem_2_test import ReconciliationProblem2Test
from .recon3_test import Recon3Test
from .optgapc1_test import OptGapC1Test
from .optgapc2_test import OptGapC2Test
from .optgapc3_test import OptGapC3Test
from .optgap4_test import OptGap4Test
from .single_edge_b import SingleEdgeBTest
from .feasibility_test import FeasibilityTest
from .flow_path_construction_test import FlowPathConstructionTest
from .we_need_to_fix_this_test import WeNeedToFixThisTest
from .abstract_test import bcolors
import argparse
ALL_TESTS = [ToyProblemTest(), ReconciliationProblemTest(),
ReconciliationProblem2Test(), Recon3Test(), OptGapC1Test(),
OptGapC2Test(), OptGapC3Test(), FeasibilityTest(),
OptGap4Test(), FlowPathConstructionTest(), WeNeedToFixThisTest(),
SingleEdgeBTest()]
TEST_NAME_DICT = {test.name: test for test in ALL_TESTS}
def run_tests(tests_to_run):
tests_that_failed = []
for test in tests_to_run:
print('\n\n---{} TEST---\n\n'.format(test.name.upper()))
test.run()
if test.has_error:
tests_that_failed.append(test)
for test in tests_that_failed:
print()
print(bcolors.ERROR + '\n\n---{} TEST failed---\n\n'.format(test.name.upper()) + bcolors.ENDC)
if len(tests_that_failed) == 0:
print(bcolors.OKGREEN + 'All tests passed!' + bcolors.ENDC)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--tests', nargs='+', required=False)
args = parser.parse_args()
if args.tests is not None:
tests_to_run = [TEST_NAME_DICT[name] for name in args.tests]
else:
tests_to_run = ALL_TESTS
print('RUNNING THE FOLLOWING TESTS: {}'.format(
[test.name for test in tests_to_run]))
run_tests(tests_to_run)
| 0
| 0
| 0
| 0
| 0
| 476
| 0
| 13
| 45
|
5eef5e446e1922c169ce5770f96bdb08b8933d69
| 17,847
|
py
|
Python
|
openmdao/core/tests/test_getset_vars.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | 451
|
2015-07-20T11:52:35.000Z
|
2022-03-28T08:04:56.000Z
|
openmdao/core/tests/test_getset_vars.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | 1,096
|
2015-07-21T03:08:26.000Z
|
2022-03-31T11:59:17.000Z
|
openmdao/core/tests/test_getset_vars.py
|
friedenhe/OpenMDAO
|
db1d7e22a8bf9f66afa82ec3544b7244d5545f6d
|
[
"Apache-2.0"
] | 301
|
2015-07-16T20:02:11.000Z
|
2022-03-28T08:04:39.000Z
|
"""Test getting/setting variables and subjacs with promoted/relative/absolute names."""
import unittest
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
if __name__ == '__main__':
unittest.main()
| 39.837054
| 292
| 0.518462
|
"""Test getting/setting variables and subjacs with promoted/relative/absolute names."""
import unittest
import numpy as np
from openmdao.api import Problem, Group, ExecComp, IndepVarComp, DirectSolver, ParallelGroup
from openmdao.utils.mpi import MPI
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
class TestGetSetVariables(unittest.TestCase):
def test_no_promotion(self):
"""
Illustrative examples showing how to access variables and subjacs.
"""
c = ExecComp('y=2*x')
g = Group()
g.add_subsystem('c', c)
model = Group()
model.add_subsystem('g', g)
p = Problem(model)
p.setup()
# -------------------------------------------------------------------
# inputs
p['g.c.x'] = 5.0
self.assertEqual(p['g.c.x'], 5.0)
# outputs
p['g.c.y'] = 5.0
self.assertEqual(p['g.c.y'], 5.0)
# Conclude setup but don't run model.
p.final_setup()
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
inputs['c.x'] = 5.0
self.assertEqual(inputs['c.x'], 5.0)
# outputs
outputs['c.y'] = 5.0
self.assertEqual(outputs['c.y'], 5.0)
# Removed part of test where we set values into the jacobian willy-nilly.
# You can only set declared values now.
def test_with_promotion(self):
"""
Illustrative examples showing how to access variables and subjacs.
"""
c1 = IndepVarComp('x')
c2 = ExecComp('y=2*x')
c3 = ExecComp('z=3*x')
g = Group()
g.add_subsystem('c1', c1, promotes=['*'])
g.add_subsystem('c2', c2, promotes=['*'])
g.add_subsystem('c3', c3, promotes=['*'])
model = Group()
model.add_subsystem('g', g, promotes=['*'])
p = Problem(model)
p.setup()
# -------------------------------------------------------------------
# inputs
p['g.c2.x'] = 5.0
self.assertEqual(p['g.c2.x'], 5.0)
# outputs
p['g.c2.y'] = 5.0
self.assertEqual(p['g.c2.y'], 5.0)
p['y'] = 5.0
self.assertEqual(p['y'], 5.0)
# Conclude setup but don't run model.
p.final_setup()
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
inputs['c2.x'] = 5.0
self.assertEqual(inputs['c2.x'], 5.0)
# outputs
outputs['c2.y'] = 5.0
self.assertEqual(outputs['c2.y'], 5.0)
outputs['y'] = 5.0
self.assertEqual(outputs['y'], 5.0)
# Removed part of test where we set values into the jacobian willy-nilly. You can only set
# declared values now.
def test_no_promotion_errors(self):
"""
Tests for error-handling for invalid variable names and keys.
"""
g = Group(assembled_jac_type='dense')
g.linear_solver = DirectSolver(assemble_jac=True)
g.add_subsystem('c', ExecComp('y=2*x'))
p = Problem()
model = p.model
model.add_subsystem('g', g)
p.setup()
# -------------------------------------------------------------------
msg = '\'<model> <class Group>: Variable "{}" not found.\''
# inputs
with self.assertRaises(KeyError) as ctx:
p['x'] = 5.0
self.assertEqual(str(ctx.exception), msg.format('x'))
p._initial_condition_cache = {}
with self.assertRaises(KeyError) as ctx:
p['x']
self.assertEqual(str(ctx.exception), msg.format('x'))
# outputs
with self.assertRaises(KeyError) as ctx:
p['y'] = 5.0
self.assertEqual(str(ctx.exception), msg.format('y'))
p._initial_condition_cache = {}
with self.assertRaises(KeyError) as ctx:
p['y']
self.assertEqual(str(ctx.exception), msg.format('y'))
p.final_setup()
msg = "'g' <class Group>: Variable name '{}' not found."
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
for vname in ['x', 'g.c.x']:
with self.assertRaises(KeyError) as cm:
inputs[vname] = 5.0
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
with self.assertRaises(KeyError) as cm:
inputs[vname]
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
# outputs
for vname in ['y', 'g.c.y']:
with self.assertRaises(KeyError) as cm:
outputs[vname] = 5.0
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
with self.assertRaises(KeyError) as cm:
outputs[vname]
self.assertEqual(cm.exception.args[0], f"'g' <class Group>: Variable name '{vname}' not found.")
msg = r'Variable name pair \("{}", "{}"\) not found.'
jac = g.linear_solver._assembled_jac
# d(output)/d(input)
with self.assertRaisesRegex(KeyError, msg.format('y', 'x')):
jac['y', 'x'] = 5.0
with self.assertRaisesRegex(KeyError, msg.format('y', 'x')):
jac['y', 'x']
# allow absolute keys now
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.x')):
# jac['g.c.y', 'g.c.x'] = 5.0
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.x')):
# deriv = jac['g.c.y', 'g.c.x']
# d(output)/d(output)
with self.assertRaisesRegex(KeyError, msg.format('y', 'y')):
jac['y', 'y'] = 5.0
with self.assertRaisesRegex(KeyError, msg.format('y', 'y')):
jac['y', 'y']
# allow absolute keys now
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.y')):
# jac['g.c.y', 'g.c.y'] = 5.0
# with self.assertRaisesRegex(KeyError, msg.format('g.c.y', 'g.c.y')):
# deriv = jac['g.c.y', 'g.c.y']
def test_with_promotion_errors(self):
"""
Tests for error-handling for invalid variable names and keys.
"""
c1 = IndepVarComp('x')
c2 = ExecComp('y=2*x')
c3 = ExecComp('z=3*x')
g = Group(assembled_jac_type='dense')
g.add_subsystem('c1', c1, promotes=['*'])
g.add_subsystem('c2', c2, promotes=['*'])
g.add_subsystem('c3', c3, promotes=['*'])
g.linear_solver = DirectSolver(assemble_jac=True)
model = Group()
model.add_subsystem('g', g, promotes=['*'])
p = Problem(model)
p.setup()
# Conclude setup but don't run model.
p.final_setup()
# -------------------------------------------------------------------
msg1 = "'g' <class Group>: Variable name '{}' not found."
msg2 = "The promoted name x is invalid because it refers to multiple inputs: " \
"[g.c2.x ,g.c3.x]. Access the value from the connected output variable x instead."
inputs, outputs, residuals = g.get_nonlinear_vectors()
# inputs
with self.assertRaises(Exception) as context:
inputs['x'] = 5.0
self.assertEqual(str(context.exception), msg2)
with self.assertRaises(Exception) as context:
self.assertEqual(inputs['x'], 5.0)
self.assertEqual(str(context.exception), msg2)
with self.assertRaises(KeyError) as cm:
inputs['g.c2.x'] = 5.0
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.x'))
with self.assertRaises(KeyError) as cm:
inputs['g.c2.x']
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.x'))
# outputs
with self.assertRaises(KeyError) as cm:
outputs['g.c2.y'] = 5.0
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.y'))
with self.assertRaises(KeyError) as cm:
outputs['g.c2.y']
self.assertEqual(cm.exception.args[0], msg1.format('g.c2.y'))
msg1 = r'Variable name pair \("{}", "{}"\) not found.'
jac = g.linear_solver._assembled_jac
# d(outputs)/d(inputs)
with self.assertRaises(Exception) as context:
jac['y', 'x'] = 5.0
self.assertEqual(str(context.exception), msg2)
with self.assertRaises(Exception) as context:
self.assertEqual(jac['y', 'x'], 5.0)
self.assertEqual(str(context.exception), msg2)
def test_serial_multi_src_inds(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))
p.model.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
p.model.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))
p.model.connect('indep.x', 'C1.x', src_indices=list(range(7)))
p.model.connect('indep.x', 'C2.x', src_indices=list(range(7, 10)))
p.setup()
p['C1.x'] = (np.arange(7) + 1.) * 2.
p['C2.x'] = (np.arange(7,10) + 1.) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.x'], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['C2.x'], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.y'], (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p['C2.y'], (np.arange(7,10) + 1.) * 9.)
def test_serial_multi_src_inds_promoted(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)), promotes=['x'])
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7)},
y={'val': np.zeros(7)}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3)},
y={'val': np.zeros(3)}))
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
p.setup()
p['C1.x'] = (np.arange(7) + 1.) * 2.
p['C2.x'] = (np.arange(7,10) + 1.) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.x'], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['C2.x'], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['C1.y'], (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p['C2.y'], (np.arange(7,10) + 1.) * 9.)
def test_serial_multi_src_inds_units_promoted(self):
p = Problem()
indep = p.model.add_subsystem('indep', IndepVarComp(), promotes=['x'])
indep.add_output('x', units='inch', val=np.ones(10))
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7),
'units': 'ft'},
y={'val': np.zeros(7), 'units': 'ft'}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3),
'units': 'inch'},
y={'val': np.zeros(3), 'units': 'inch'}))
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
p.setup()
p['C1.x'] = np.ones(7) * 2.
p['C2.x'] = np.ones(3) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], np.ones(7) * 24.)
np.testing.assert_allclose(p['indep.x'][7:10], np.ones(3) * 3.)
np.testing.assert_allclose(p['C1.x'], np.ones(7) * 2.)
np.testing.assert_allclose(p['C1.y'], np.ones(7) * 4.)
np.testing.assert_allclose(p['C2.x'], np.ones(3) * 3.)
np.testing.assert_allclose(p['C2.y'], np.ones(3) * 9.)
def test_serial_multi_src_inds_units_promoted_no_src(self):
p = Problem()
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7),
'units': 'ft'},
y={'val': np.zeros(7), 'units': 'ft'}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3),
'units': 'inch'},
y={'val': np.zeros(3), 'units': 'inch'}))
p.model.add_subsystem('C3', ExecComp('y=x*4.',
x={'val': np.zeros(10), 'units': 'mm'},
y={'val': np.zeros(10), 'units': 'mm'}),
promotes=['x'])
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
with self.assertRaises(RuntimeError) as cm:
p.setup()
self.assertEqual(str(cm.exception), "<model> <class Group>: The following inputs, ['C1.x', 'C2.x', 'C3.x'], promoted to 'x', are connected but their metadata entries ['units'] differ. Call <group>.set_input_defaults('x', units=?), where <group> is the model to remove the ambiguity.")
def test_serial_multi_src_inds_units_setval_promoted(self):
p = Problem()
indep = p.model.add_subsystem('indep', IndepVarComp(), promotes=['x'])
indep.add_output('x', units='inch', val=np.ones(10))
p.model.add_subsystem('C1', ExecComp('y=x*2.',
x={'val': np.zeros(7),
'units': 'ft'},
y={'val': np.zeros(7), 'units': 'ft'}))
p.model.add_subsystem('C2', ExecComp('y=x*3.',
x={'val': np.zeros(3),
'units': 'inch'},
y={'val': np.zeros(3), 'units': 'inch'}))
p.model.promotes('C1', inputs=['x'], src_indices=list(range(7)))
p.model.promotes('C2', inputs=['x'], src_indices=list(range(7, 10)))
p.setup()
p.set_val('C1.x', np.ones(7) * 24., units='inch')
p.set_val('C2.x', np.ones(3) * 3., units='inch')
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], np.ones(7) * 24.)
np.testing.assert_allclose(p['indep.x'][7:10], np.ones(3) * 3.)
np.testing.assert_allclose(p['C1.x'], np.ones(7) * 2.)
np.testing.assert_allclose(p['C1.y'], np.ones(7) * 4.)
np.testing.assert_allclose(p['C2.x'], np.ones(3) * 3.)
np.testing.assert_allclose(p['C2.y'], np.ones(3) * 9.)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class ParTestCase(unittest.TestCase):
N_PROCS = 2
def test_par_multi_src_inds(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))
par = p.model.add_subsystem('par', ParallelGroup())
par.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
par.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))
p.model.connect('indep.x', 'par.C1.x', src_indices=list(range(7)))
p.model.connect('indep.x', 'par.C2.x', src_indices=list(range(7, 10)))
p.setup()
p['indep.x'] = np.concatenate([(np.arange(7) + 1.) * 2., (np.arange(7, 10) + 1.) * 3.])
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p.get_val('par.C1.x', get_remote=True), (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p.get_val('par.C2.x', get_remote=True), (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p.get_val('par.C1.y', get_remote=True), (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p.get_val('par.C2.y', get_remote=True), (np.arange(7,10) + 1.) * 9.)
@unittest.expectedFailure
def test_par_multi_src_inds_fail(self):
p = Problem()
p.model.add_subsystem('indep', IndepVarComp('x', val=np.ones(10)))
par = p.model.add_subsystem('par', ParallelGroup())
par.add_subsystem('C1', ExecComp('y=x*2.', x=np.zeros(7), y=np.zeros(7)))
par.add_subsystem('C2', ExecComp('y=x*3.', x=np.zeros(3), y=np.zeros(3)))
p.model.connect('indep.x', 'par.C1.x', src_indices=list(range(7)))
p.model.connect('indep.x', 'par.C2.x', src_indices=list(range(7, 10)))
p.setup()
p['par.C1.x'] = (np.arange(7) + 1.) * 2.
p['par.C2.x'] = (np.arange(7,10) + 1.) * 3.
p.run_model()
np.testing.assert_allclose(p['indep.x'][:7], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['indep.x'][7:10], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['par.C1.x'], (np.arange(7) + 1.) * 2.)
np.testing.assert_allclose(p['par.C2.x'], (np.arange(7,10) + 1.) * 3.)
np.testing.assert_allclose(p['par.C1.y'], (np.arange(7) + 1.) * 4.)
np.testing.assert_allclose(p['par.C2.y'], (np.arange(7,10) + 1.) * 9.)
if __name__ == '__main__':
unittest.main()
| 0
| 2,501
| 0
| 14,890
| 0
| 0
| 0
| 81
| 112
|
c4ac532576ad2e3296ef052f13dff92d03c958af
| 6,299
|
py
|
Python
|
heltour/tournament/tests/test_views.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/tests/test_views.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
heltour/tournament/tests/test_views.py
|
lenguyenthanh/heltour
|
13018b1905539de0b273370a76f6aa1d1ebbb01a
|
[
"MIT"
] | null | null | null |
# For now we just have sanity checks for the templates used
# This could be enhanced by verifying the context data
| 41.993333
| 123
| 0.699476
|
from django.test import TestCase
from heltour.tournament.models import *
from django.core.urlresolvers import reverse
# For now we just have sanity checks for the templates used
# This could be enhanced by verifying the context data
def createCommonLeagueData():
team_count = 4
round_count = 3
board_count = 2
league = League.objects.create(name='Team League', tag='team', competitor_type='team')
season = Season.objects.create(league=league, name='Team Season', tag='team', rounds=round_count, boards=board_count)
league2 = League.objects.create(name='Lone League', tag='lone')
season2 = Season.objects.create(league=league2, name='Lone Season', tag='lone', rounds=round_count, boards=board_count)
player_num = 1
for n in range(1, team_count + 1):
team = Team.objects.create(season=season, number=n, name='Team %s' % n)
TeamScore.objects.create(team=team)
for b in range(1, board_count + 1):
player = Player.objects.create(lichess_username='Player%d' % player_num)
player_num += 1
TeamMember.objects.create(team=team, player=player, board_number=b)
class HomeTestCase(TestCase):
def setUp(self):
pass
def test_template(self):
response = self.client.get(reverse('home'))
self.assertTemplateUsed(response, 'tournament/home.html')
class LeagueHomeTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:league_home', args=['team']))
self.assertTemplateUsed(response, 'tournament/team_league_home.html')
response = self.client.get(reverse('by_league:league_home', args=['lone']))
self.assertTemplateUsed(response, 'tournament/lone_league_home.html')
class SeasonLandingTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:season_landing', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_season_landing.html')
response = self.client.get(reverse('by_league:by_season:season_landing', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_season_landing.html')
for s in Season.objects.all():
s.is_completed = True
s.save()
response = self.client.get(reverse('by_league:by_season:season_landing', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_completed_season_landing.html')
response = self.client.get(reverse('by_league:by_season:season_landing', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_completed_season_landing.html')
class RostersTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:rosters', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_rosters.html')
response = self.client.get(reverse('by_league:by_season:rosters', args=['lone', 'lone']))
self.assertEqual(404, response.status_code)
class StandingsTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:standings', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_standings.html')
response = self.client.get(reverse('by_league:by_season:standings', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_standings.html')
class CrosstableTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:crosstable', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_crosstable.html')
response = self.client.get(reverse('by_league:by_season:crosstable', args=['lone', 'lone']))
self.assertEqual(404, response.status_code)
class WallchartTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:wallchart', args=['team', 'team']))
self.assertEqual(404, response.status_code)
response = self.client.get(reverse('by_league:by_season:wallchart', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_wallchart.html')
class PairingsTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:pairings', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_pairings.html')
response = self.client.get(reverse('by_league:by_season:pairings', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_pairings.html')
class StatsTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:stats', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/team_stats.html')
response = self.client.get(reverse('by_league:by_season:stats', args=['lone', 'lone']))
self.assertTemplateUsed(response, 'tournament/lone_stats.html')
class RegisterTestCase(TestCase):
def setUp(self):
createCommonLeagueData()
def test_template(self):
response = self.client.get(reverse('by_league:by_season:register', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/registration_closed.html')
season = Season.objects.all()[0]
season.registration_open = True
season.save()
response = self.client.get(reverse('by_league:by_season:register', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/register.html')
response = self.client.get(reverse('by_league:by_season:registration_success', args=['team', 'team']))
self.assertTemplateUsed(response, 'tournament/registration_success.html')
| 0
| 0
| 0
| 4,919
| 0
| 893
| 0
| 52
| 319
|
f99a17b230e8119aba628bdff0bc3af92b2d5225
| 765
|
py
|
Python
|
tests/test_mass_fetch.py
|
dominik7680/py-skydb
|
ddf2c6c993cc75398bc3dfcf41954b6793bf3349
|
[
"MIT"
] | 14
|
2020-11-23T17:05:27.000Z
|
2022-03-22T01:52:09.000Z
|
tests/test_mass_fetch.py
|
dominik7680/py-skydb
|
ddf2c6c993cc75398bc3dfcf41954b6793bf3349
|
[
"MIT"
] | 2
|
2020-11-25T12:00:24.000Z
|
2020-12-09T18:10:50.000Z
|
tests/test_mass_fetch.py
|
dominik7680/py-skydb
|
ddf2c6c993cc75398bc3dfcf41954b6793bf3349
|
[
"MIT"
] | 5
|
2020-12-09T15:57:11.000Z
|
2022-01-30T13:17:14.000Z
|
from skydb import SkydbTable
from random import choice
from string import ascii_letters
table_name = ''.join([choice(ascii_letters) for i in range(20)])
print("Creating table")
table = SkydbTable(table_name, columns=['c1','c2','c3'], seed="some_random", verbose=1)
print("Added table successfully")
| 24.677419
| 87
| 0.678431
|
from skydb import SkydbTable
from random import choice
from string import ascii_letters
table_name = ''.join([choice(ascii_letters) for i in range(20)])
import time
print("Creating table")
table = SkydbTable(table_name, columns=['c1','c2','c3'], seed="some_random", verbose=1)
print("Added table successfully")
def test_mass_fetch():
global table
rows = []
for i in range(20):
row = {}
for c in ['c1', 'c2','c3']:
row['c1'] = ''.join([choice(ascii_letters) for i in range(5)])
row['c2'] = ''.join([choice(ascii_letters) for i in range(5)])
row['c3'] = ''.join([choice(ascii_letters) for i in range(5)])
rows.append(row)
print("Adding rows")
table.add_rows(rows)
print("Successfully added rows")
out = table.fetch_rows(list(range(10)))
| 0
| 0
| 0
| 0
| 0
| 427
| 0
| -10
| 45
|
f47c4459ec95c272bab38550541eef155938f3cc
| 1,733
|
py
|
Python
|
api/tests/integration/tests/layout/template_layout.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 204
|
2015-11-06T21:34:34.000Z
|
2022-03-30T16:17:01.000Z
|
api/tests/integration/tests/layout/template_layout.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 509
|
2015-11-05T13:54:43.000Z
|
2022-03-30T22:15:30.000Z
|
api/tests/integration/tests/layout/template_layout.py
|
tsingdao-Tp/Indigo
|
b2d73faebb6a450e9b3d34fed553fad4f9d0012f
|
[
"Apache-2.0"
] | 89
|
2015-11-17T08:22:54.000Z
|
2022-03-17T04:26:28.000Z
|
import os
import sys
import errno
sys.path.append('../../common')
from env_indigo import *  # provides Indigo, joinPathPy, getRefFilepath, moleculeLayoutDiff, IndigoException used below
if not os.path.exists(joinPathPy("out", __file__)):
try:
os.makedirs(joinPathPy("out", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
indigo.setOption("treat-x-as-pseudoatom", "1")
indigo.setOption("smart-layout", "1")
ref_path = getRefFilepath("template_layout.sdf")
ref = indigo.iterateSDFile(ref_path)
print("**** Test template layout *****")
saver = indigo.writeFile(joinPathPy("out/template_layout.sdf", __file__))
for idx, item in enumerate(indigo.iterateSDFile(joinPathPy("molecules/template_layout.sdf", __file__))):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, ref.at(idx).rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
print("Exception for #%s: %s" % (idx, getIntemplate_layout.sdfdigoExceptionText(e)))
print("**** Test rings templates layout *****")
ref_path = getRefFilepath("rings_templates.sdf")
ref = indigo.iterateSDFile(ref_path)
saver = indigo.writeFile(joinPathPy("out/rings_templates.sdf", __file__))
for idx, item in enumerate(ref):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, item.rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
print("Exception for #%s: %s" % (idx, getIntemplate_layout.sdfdigoExceptionText(e)))
| 31.509091
| 104
| 0.669359
|
import os
import sys
import errno
import math
from math import *
sys.path.append('../../common')
from env_indigo import *
if not os.path.exists(joinPathPy("out", __file__)):
try:
os.makedirs(joinPathPy("out", __file__))
except OSError as e:
if e.errno != errno.EEXIST:
raise
indigo = Indigo()
indigo.setOption("molfile-saving-skip-date", "1")
indigo.setOption("treat-x-as-pseudoatom", "1")
indigo.setOption("smart-layout", "1")
ref_path = getRefFilepath("template_layout.sdf")
ref = indigo.iterateSDFile(ref_path)
print("**** Test template layout *****")
saver = indigo.writeFile(joinPathPy("out/template_layout.sdf", __file__))
for idx, item in enumerate(indigo.iterateSDFile(joinPathPy("molecules/template_layout.sdf", __file__))):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, ref.at(idx).rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
print("Exception for #%s: %s" % (idx, getIntemplate_layout.sdfdigoExceptionText(e)))
print("**** Test rings templates layout *****")
ref_path = getRefFilepath("rings_templates.sdf")
ref = indigo.iterateSDFile(ref_path)
saver = indigo.writeFile(joinPathPy("out/rings_templates.sdf", __file__))
for idx, item in enumerate(ref):
try:
mol = item.clone()
mol.layout()
res = moleculeLayoutDiff(indigo, mol, item.rawData(), ref_is_file = False)
print(" Item #{}: Result: {}".format(idx, res))
saver.sdfAppend(mol)
except IndigoException as e:
print("Exception for #%s: %s" % (idx, getIntemplate_layout.sdfdigoExceptionText(e)))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -10
| 66
|
b76165b2825027823939347d4036fc0458906b68
| 605
|
py
|
Python
|
src/tblink_rpc_utils/input_reader_yaml.py
|
tblink-rpc/tblink-rpc-utils
|
48a731cb8c6201e1975ba18f43737228eb1f7dee
|
[
"Apache-2.0"
] | null | null | null |
src/tblink_rpc_utils/input_reader_yaml.py
|
tblink-rpc/tblink-rpc-utils
|
48a731cb8c6201e1975ba18f43737228eb1f7dee
|
[
"Apache-2.0"
] | null | null | null |
src/tblink_rpc_utils/input_reader_yaml.py
|
tblink-rpc/tblink-rpc-utils
|
48a731cb8c6201e1975ba18f43737228eb1f7dee
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Oct 20, 2021
@author: mballance
'''
| 25.208333
| 58
| 0.66281
|
'''
Created on Oct 20, 2021
@author: mballance
'''
from tblink_rpc_utils.idl_spec import IDLSpec
from tblink_rpc_utils.input_reader import InputReader
from tblink_rpc_utils.input_spec import InputSpec
from tblink_rpc_utils.yaml_idl_parser import YamlIDLParser
class InputReaderYaml(InputReader):
def __init__(self):
super().__init__()
def read(self, in_spec:InputSpec)->IDLSpec:
yaml_p = YamlIDLParser()
for file in in_spec.files:
with open(file, "r") as fp:
yaml_p.parse(fp)
return yaml_p.spec
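# Sketch of intended use (illustrative only; the exact InputSpec construction may differ):
#   reader = InputReaderYaml()
#   spec = reader.read(in_spec)   # in_spec.files lists the YAML IDL files to parse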
| 0
| 0
| 0
| 321
| 0
| 0
| 0
| 121
| 111
|
9734061ff8c9ce101186289f0971e8af178cbcda
| 518
|
py
|
Python
|
escola/templatetags/markdown.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 1
|
2019-03-15T18:04:24.000Z
|
2019-03-15T18:04:24.000Z
|
escola/templatetags/markdown.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 22
|
2019-03-17T21:53:50.000Z
|
2021-03-31T19:12:19.000Z
|
escola/templatetags/markdown.py
|
vini84200/medusa2
|
37cf33d05be8b0195b10845061ca893ba5e814dd
|
[
"MIT"
] | 1
|
2018-11-25T03:05:23.000Z
|
2018-11-25T03:05:23.000Z
|
from django import template
register = template.Library()
| 28.777778
| 78
| 0.702703
|
import misaka as m
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from MedusaII.settings import MARKDOWNX_MARKDOWN_EXTENSIONS
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def md(value):
rendered_text = mark_safe(m.html(value,
extensions=MARKDOWNX_MARKDOWN_EXTENSIONS,
render_flags=('skip-html',)))
return rendered_text
| 0
| 253
| 0
| 0
| 0
| 0
| 0
| 93
| 112
|
297983f1224fa368c806bb3709b78cd221f4c7f1
| 1,720
|
py
|
Python
|
targetrsqueak-embedded.py
|
shiplift/RSqueakOnABoat
|
ac449758ddb7aef1721e65a13171547761dd6e39
|
[
"BSD-3-Clause"
] | 44
|
2015-02-08T09:38:46.000Z
|
2017-11-15T01:19:40.000Z
|
targetrsqueak-embedded.py
|
shiplift/RSqueakOnABoat
|
ac449758ddb7aef1721e65a13171547761dd6e39
|
[
"BSD-3-Clause"
] | 112
|
2015-02-08T09:34:40.000Z
|
2017-04-10T19:06:30.000Z
|
targetrsqueak-embedded.py
|
shiplift/RSqueakOnABoat
|
ac449758ddb7aef1721e65a13171547761dd6e39
|
[
"BSD-3-Clause"
] | 7
|
2015-04-08T11:49:10.000Z
|
2017-01-19T06:36:27.000Z
|
#! /usr/bin/env python
import sys
# This loads an image file in advance and includes it in the
# translation-output. At run-time, the defined selector is sent
# to the defined SmallInteger. This way we get an RPython
# "image" frozen into the executable, mmap'ed by the OS from
# there and loaded lazily when needed :-)
# Besides testing etc., this can be used to create standalone
# binaries executing a smalltalk program.
sys.setrecursionlimit(100000)
imagefile = "images/mini.image"
selector = "loopTest"
receiver = 0
interp, s_frame = setup()
# _____ Define and setup target ___
if __name__ == "__main__":
entry_point(sys.argv)
| 30.714286
| 85
| 0.73314
|
#! /usr/bin/env python
import sys
from rpython.jit.codewriter.policy import JitPolicy
from rsqueakvm import model, objspace, interpreter, squeakimage
# This loads an image file in advance and includes it in the
# translation-output. At run-time, the defined selector is sent
# to the defined SmallInteger. This way we get an RPython
# "image" frozen into the executable, mmap'ed by the OS from
# there and loaded lazily when needed :-)
# Besides testing etc., this can be used to create standalone
# binaries executing a smalltalk program.
sys.setrecursionlimit(100000)
imagefile = "images/mini.image"
selector = "loopTest"
receiver = 0
def setup():
space = objspace.ObjSpace()
stream = squeakimage.Stream(filename=imagefile)
image = squeakimage.ImageReader(space, stream).create_image()
interp = interpreter.Interpreter(space, image)
w_selector = interp.perform(space.wrap_string(selector), "asSymbol")
w_object = model.W_SmallInteger(receiver)
s_class = w_object.class_shadow(space)
w_method = s_class.lookup(w_selector)
s_frame = w_method.create_frame(space, w_object)
return interp, s_frame
interp, s_frame = setup()
def entry_point(argv):
if len(argv) > 1:
print "This RSqueak VM has an embedded image and ignores all cli-parameters."
try:
interp.loop(s_frame.w_self())
except interpreter.ReturnFromTopLevel, e:
w_result = e.object
print interp.space.unwrap_string(w_result)
return 0
# _____ Define and setup target ___
def target(driver, *args):
driver.exe_name = "rsqueak-embedded"
return entry_point, None
def jitpolicy(driver):
return JitPolicy()
if __name__ == "__main__":
entry_point(sys.argv)
| 0
| 0
| 0
| 0
| 0
| 869
| 0
| 72
| 136
|
d5764f7267401bb5f87916e35baf7cbfc9aaaca4
| 96
|
bzl
|
Python
|
tools/bzl/classpath.bzl
|
jinrongc1986/events-log
|
37371e72e9604cc637d0a96ebc91e5a53d420e2c
|
[
"Apache-2.0"
] | null | null | null |
tools/bzl/classpath.bzl
|
jinrongc1986/events-log
|
37371e72e9604cc637d0a96ebc91e5a53d420e2c
|
[
"Apache-2.0"
] | null | null | null |
tools/bzl/classpath.bzl
|
jinrongc1986/events-log
|
37371e72e9604cc637d0a96ebc91e5a53d420e2c
|
[
"Apache-2.0"
] | null | null | null |
load(
"@com_googlesource_gerrit_bazlets//tools:classpath.bzl",
"classpath_collector",
)
| 19.2
| 60
| 0.739583
|
load(
"@com_googlesource_gerrit_bazlets//tools:classpath.bzl",
"classpath_collector",
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9daee1667658b8ded3cb5aa585437a23a6547b46
| 9,652
|
py
|
Python
|
wrap/pyllbc/script/common/Stream.py
|
caochunxi/llbc
|
2ff4af937f1635be67a7e24602d0a3e87c708ba7
|
[
"MIT"
] | 83
|
2015-11-10T09:52:56.000Z
|
2022-01-12T11:53:01.000Z
|
wrap/pyllbc/script/common/Stream.py
|
lailongwei/llbc
|
ec7e69bfa1f0afece8bb19dfa9a0a4578508a077
|
[
"MIT"
] | 30
|
2017-09-30T07:43:20.000Z
|
2022-01-23T13:18:48.000Z
|
wrap/pyllbc/script/common/Stream.py
|
caochunxi/llbc
|
2ff4af937f1635be67a7e24602d0a3e87c708ba7
|
[
"MIT"
] | 34
|
2015-11-14T12:37:44.000Z
|
2021-12-16T02:38:36.000Z
|
# -*- coding: utf-8 -*-
import llbc
llbc.Stream = pyllbcStream
| 30.544304
| 129
| 0.623809
|
# -*- coding: utf-8 -*-
import inspect
import llbc
class pyllbcStream(object):
"""
    Stream class encapsulation, used to pack/unpack data sequences.
"""
def __init__(self, size=0, init_obj=None, endian=llbc.Endian.MachineEndian):
self.__c_obj = llbc.inl.NewPyStream(self, size, endian)
self.packobj(init_obj)
def __del__(self):
llbc.inl.DelPyStream(self.__c_obj)
@property
def endian(self):
"""
Get stream endian setting(see llbc.Endian module).
"""
return llbc.inl.GetPyStreamEndian(self.__c_obj)
@endian.setter
def endian(self, e):
"""
Set stream endian(see llbc.Endian module).
"""
llbc.inl.SetPyStreamEndian(self.__c_obj, e)
@property
def pos(self):
"""
Get stream current reading/writing position.
"""
return llbc.inl.GetPyStreamPos(self.__c_obj)
@pos.setter
def pos(self, p):
"""
Set stream current reading/writing position.
"""
llbc.inl.SetPyStreamPos(self.__c_obj, p)
@property
def size(self):
"""
        Get stream size (unsafe method; the size is adjusted automatically by the stream).
"""
return llbc.inl.GetPyStreamSize(self.__c_obj)
@size.setter
def size(self, s):
"""
        Set stream size (unsafe method; the size is adjusted automatically by the stream).
"""
llbc.inl.SetPyStreamSize(self.__c_obj, s)
@property
def raw(self):
"""
        Get stream memory view as buffer.
"""
return llbc.inl.PyStreamGetRaw(self.__c_obj)
@raw.setter
def raw(self, r):
"""
Set stream raw memory from str/buffer/bytearray.
"""
llbc.inl.PyStreamSetRaw(self.__c_obj, r)
@property
def cobj(self):
"""
Get raw pyllbc stream object(calling by c/c++ layer).
"""
return self.__c_obj
def __str__(self):
"""
Get human readable stream data's string representation.
"""
import binascii
return binascii.hexlify(self.raw)
@staticmethod
def getcachedsize():
return llbc.inl.PyStreamGetCachedSize()
@staticmethod
def getcachelimit():
return llbc.inl.PyStreamGetCacheLimit()
@staticmethod
def setcachelimit(lmt):
llbc.inl.PyStreamSetCacheLimit(lmt)
@staticmethod
def discardexpr(expr):
llbc.inl.PyStreamDiscardExpr(expr)
@staticmethod
def discardallexprs():
llbc.inl.PyStreamDiscardAllExprs()
def unpack(self, fmt):
"""
        Unpack data according to the given format. The result is a tuple even if it contains exactly one item.
format strings:
c: char value(like b).
b: byte value(like c).
B: boolean value.
s: short value.
i: integer value.
q: signed long long value.
f: float value.
        d: double value (only supported by the Format method).
S: string value.
S#: string value, use another pack/unpack algorithm, 4 bytes length + string content(not include NULL character).
S$: string value, will read stream to end as string content, write like 'S', but not append string end character '\0'.
U: unicode value.
A: byte array value.
F: buffer value.
N: None value.
        C: class type; the class's decode() method is called automatically when reading. You must tell the stream the class name,
            using the C<ClassName> syntax.
        (): tuple type; if only one element type is given, every element of the tuple has that type; otherwise
            the tuple size must equal the number of element types given.
[]: list type, the same as tuple type: ().
{key:value}: dictionary type.
The format examples:
iiS
(i)
(U)
[i]
{i:S}
{i:(C<int>)}
([SC<int>NA(i)]{int:S}B
"""
return self.__unpack(fmt)
def unpackone(self, fmt):
return self.__unpack(fmt)[0]
def unpackcls(self, cls):
return llbc.inl.PyStreamRead(self.__c_obj, cls)
def unpacknone(self):
return llbc.inl.PyStreamRead_None(self.__c_obj)
def unpackbyte(self):
return llbc.inl.PyStreamRead_Byte(self.__c_obj)
def unpackbool(self):
return llbc.inl.PyStreamRead_Bool(self.__c_obj)
def unpackint16(self):
return llbc.inl.PyStreamRead_Int16(self.__c_obj)
def unpackint32(self):
return llbc.inl.PyStreamRead_Int32(self.__c_obj)
def unpackint64(self):
return llbc.inl.PyStreamRead_Int64(self.__c_obj)
def unpackfloat(self):
return llbc.inl.PyStreamRead_Float(self.__c_obj)
def unpackdouble(self):
return llbc.inl.PyStreamRead_Double(self.__c_obj)
def unpackstr(self):
return llbc.inl.PyStreamRead_Str(self.__c_obj)
def unpackstr2(self):
return llbc.inl.PyStreamRead_Str2(self.__c_obj)
def unpackstr3(self):
return llbc.inl.PyStreamRead_Str3(self.__c_obj)
def unpackunicode(self):
return llbc.inl.PyStreamRead_Unicode(self.__c_obj)
def unpackbytearray(self):
return llbc.inl.PyStreamRead_ByteArray(self.__c_obj)
def unpackbuffer(self):
return llbc.inl.PyStreamRead_Buffer(self.__c_obj)
def unpackstream(self, begin=0, end=-1):
return llbc.inl.PyStreamRead_Stream(self.__c_obj, begin, end)
def pack(self, fmt, *values):
"""
Pack values according to the given format, the arguments must match the values required by the format exactly.
format strings:
c: char value(like b).
b: byte value(like c).
B: boolean value.
s: short value.
i: integer value.
q: signed long long value.
f: float value.
        d: double value (only supported by the Format method).
S: string value.
S#: string value, use another pack/unpack algorithm, 4 bytes length + string content(not include NULL character).
S$: string value, will read stream to end as string content, write like 'S', but not append string end character '\0'.
U: unicode value.
A: byte array value.
F: buffer value.
N: None value.
        C: class type; the class's encode() method is called automatically when writing. You must tell the stream the class name,
            using the C<ClassName> syntax.
        (): tuple type; if only one element type is given, every element of the tuple has that type; otherwise
            the tuple size must equal the number of element types given.
[]: list type, the same as tuple type: ().
{key:value}: dictionary type.
"""
caller_env = None
if fmt.find('C') >= 0 and not llbc.inl.PyStreamIsExprCompiled(fmt):
caller_env = inspect.stack()[1][0].f_globals
return llbc.inl.PyStreamFmtWrite(self.__c_obj, fmt, values, caller_env)
def packobj(self, obj):
return llbc.inl.PyStreamWrite(self.__c_obj, obj)
def packnone(self):
return llbc.inl.PyStreamWrite_None(self.__c_obj, None)
def packbyte(self, obj):
return llbc.inl.PyStreamWrite_Byte(self.__c_obj, obj)
def packbool(self, obj):
return llbc.inl.PyStreamWrite_Bool(self.__c_obj, obj)
def packint16(self, obj):
return llbc.inl.PyStreamWrite_Int16(self.__c_obj, obj)
def packint32(self, obj):
return llbc.inl.PyStreamWrite_Int32(self.__c_obj, obj)
def packint64(self, obj):
return llbc.inl.PyStreamWrite_Int64(self.__c_obj, obj)
def packfloat(self, obj):
return llbc.inl.PyStreamWrite_Float(self.__c_obj, obj)
def packdouble(self, obj):
return llbc.inl.PyStreamWrite_Double(self.__c_obj, obj)
def packstr(self, obj):
return llbc.inl.PyStreamWrite_Str(self.__c_obj, obj)
def packstr2(self, obj):
return llbc.inl.PyStreamWrite_Str2(self.__c_obj, obj)
def packstr3(self, obj):
return llbc.inl.PyStreamWrite_Str3(self.__c_obj, obj)
def packunicode(self, obj):
return llbc.inl.PyStreamWrite_Unicode(self.__c_obj, obj)
def packbytearray(self, obj):
return llbc.inl.PyStreamWrite_ByteArray(self.__c_obj, obj)
def packbuffer(self, obj):
return llbc.inl.PyStreamWrite_Buffer(self.__c_obj, obj)
def packtuple(self, obj):
return llbc.inl.PyStreamWrite_Tuple(self.__c_obj, obj)
def packlist(self, obj):
return llbc.inl.PyStreamWrite_List(self.__c_obj, obj)
def packsequence(self, obj):
return llbc.inl.PyStreamWrite_Sequence(self.__c_obj, obj)
def packdict(self, obj):
return llbc.inl.PyStreamWrite_Dict(self.__c_obj, obj)
def packstream(self, s, begin=0, to=-1):
if not isinstance(s, pyllbcStream):
raise TypeError('pack argument "s" must be stream type')
return llbc.inl.PyStreamWrite_Stream(self.__c_obj, s.cobj, begin, to)
def encode(self, s):
if not isinstance(s, pyllbcStream):
raise TypeError('encode argument not Stream type')
return llbc.inl.PyStreamEncodeSelf(self.__c_obj, s.cobj)
def __unpack(self, fmt, stack_idx=1):
caller_env = None
if fmt.find('C') >= 0 and not llbc.inl.PyStreamIsExprCompiled(fmt):
caller_env = inspect.stack()[stack_idx + 1][0].f_globals
return llbc.inl.PyStreamFmtRead(self.__c_obj, fmt, caller_env)
llbc.Stream = pyllbcStream
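# Illustrative round trip (sketch only; assumes the llbc native extension is built and importable):
#   s = llbc.Stream()
#   s.pack('iS', 42, 'hello')     # write an int followed by a string
#   s.pos = 0                     # rewind before reading back
#   num, text = s.unpack('iS')    # -> (42, 'hello')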
| 0
| 1,607
| 0
| 7,940
| 0
| 0
| 0
| -7
| 46
|
793dabc069adbb525fbf397b7888dc9fdb942b2b
| 2,036
|
py
|
Python
|
src/BloomFilter/BloomFilter.py
|
shapovalovdev/AlgorythmsAndDataStructures
|
34d5f38c089e0ba902813607f08847fbdc7361ab
|
[
"Apache-2.0"
] | null | null | null |
src/BloomFilter/BloomFilter.py
|
shapovalovdev/AlgorythmsAndDataStructures
|
34d5f38c089e0ba902813607f08847fbdc7361ab
|
[
"Apache-2.0"
] | null | null | null |
src/BloomFilter/BloomFilter.py
|
shapovalovdev/AlgorythmsAndDataStructures
|
34d5f38c089e0ba902813607f08847fbdc7361ab
|
[
"Apache-2.0"
] | null | null | null |
#import hashlib
#from random import randint
# if __name__ == '__main__':
# dataset=["0123456789", "1234567890", "sdfsdfsdf", "sdf2143124", "hophey", "abirvaolg", "8901234567", "2356sdfqix,ed", "9012345678"]
# dataset2=["012345678932", "12345623e47890", "sdfdsfq1sdfsdf", "sdf2gs2143124", "qwerhophey", "atgxcvbirvaolg", "8sdgaw901234567", "321452356sdfqix,ed", "5124e39012345678"]
# BLOOM_TEST=BloomFilter(32)
# for data in dataset:
# BLOOM_TEST.add(data)
# for data in dataset2:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# for data in dataset:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# print( BLOOM_TEST.bloom_array)
| 32.83871
| 178
| 0.589391
|
#import hashlib
#from random import randint
class BloomFilter:
def __init__(self, f_len):
self.filter_len = f_len
        # create a bit array of length f_len ...
self.bloom_array=self.filter_len * [0]
def hash1(self, str1):
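        # Polynomial rolling hash: fold each character into the result and reduce mod filter_len.
        # Worked example: with filter_len=32, hash1("ab") = ((1*17 + 97)*17 + 98) % 32 = 2036 % 32 = 20.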
result=1
rand_int=17
for c in str1:
result = result*rand_int + ord(c)
return result % self.filter_len
#
# def hash2(self, str1):
#
# result=0
# b_str1=str.encode(str1)
# h=hashlib.sha1(b_str1).hexdigest()
# for c in str1:
# result += ord(c)
# return result % self.filter_len
def hash2(self, str1):
result=1
rand_int=223
for c in str1:
result = result*rand_int + ord(c)
return result % self.filter_len
def add(self, str1):
self.bloom_array[self.hash1(str1)] = 1
self.bloom_array[self.hash2(str1)] = 1
def is_value(self, str1):
        # check whether the string str1 is present in the filter
if not self.bloom_array[self.hash1(str1)] or not self.bloom_array[self.hash2(str1)]:
return False
else:
return True
# if __name__ == '__main__':
# dataset=["0123456789", "1234567890", "sdfsdfsdf", "sdf2143124", "hophey", "abirvaolg", "8901234567", "2356sdfqix,ed", "9012345678"]
# dataset2=["012345678932", "12345623e47890", "sdfdsfq1sdfsdf", "sdf2gs2143124", "qwerhophey", "atgxcvbirvaolg", "8sdgaw901234567", "321452356sdfqix,ed", "5124e39012345678"]
# BLOOM_TEST=BloomFilter(32)
# for data in dataset:
# BLOOM_TEST.add(data)
# for data in dataset2:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# for data in dataset:
# if BLOOM_TEST.is_value(data):
# print(f'It seems {data} is here')
# else:
# print(f'No {data} by the name of bloom filter ')
# print( BLOOM_TEST.bloom_array)
| 114
| 0
| 0
| 1,049
| 0
| 0
| 0
| 0
| 22
|
6f4a2eae77a08e70580164954af371a9d61703ba
| 5,170
|
py
|
Python
|
ivolution/FacemovieThread.py
|
jlengrand/Ivolution
|
2753d7120b11fb94a5ce84bfe4a134b5a437a5c7
|
[
"BSD-3-Clause"
] | 4
|
2015-01-22T06:32:15.000Z
|
2020-01-30T05:53:48.000Z
|
ivolution/FacemovieThread.py
|
jlengrand/Ivolution
|
2753d7120b11fb94a5ce84bfe4a134b5a437a5c7
|
[
"BSD-3-Clause"
] | null | null | null |
ivolution/FacemovieThread.py
|
jlengrand/Ivolution
|
2753d7120b11fb94a5ce84bfe4a134b5a437a5c7
|
[
"BSD-3-Clause"
] | 7
|
2015-04-23T12:34:19.000Z
|
2021-08-01T05:58:56.000Z
|
"""
.. module:: Facemovie
:platform: Unix, Windows
:synopsis: Main class of the application. Contains the core image processing functions.Plays the role of a controller for the application, as it supports the communication layer.
.. moduleauthor:: Julien Lengrand-Lambert <[email protected]>
"""
| 40.077519
| 181
| 0.609284
|
"""
.. module:: Facemovie
:platform: Unix, Windows
:synopsis: Main class of the application. Contains the core image processing functions.Plays the role of a controller for the application, as it supports the communication layer.
.. moduleauthor:: Julien Lengrand-Lambert <[email protected]>
"""
import threading
import logging
import Facemovie_lib
from util.Notifier import Observer
from util.Notifier import Observable
class FacemovieThread(threading.Thread, Observable, Observer):
'''
Creates a Thread version of Facemovie using the facemovie_lib.
This class can then be run anywhere, from a GUI, script, ...
'''
def __init__(self, face_params):
"""
Initializes all parameters of the application. Input and output folders
are defined, together with the classifier profile.
:param face_params: A faceparams object that contains all needed information to run the Facemovie.
:type face_params: FaceParams
"""
threading.Thread.__init__(self)
Observable.__init__(self)
Observer.__init__(self, "Application")
self.stop_process = False
self.face_params = face_params
self.facemovie = Facemovie_lib.FaceMovie(self.face_params)
self.facemovie.subscribe(self) # Subscribing to facemovie reports
self.subscribe(self.facemovie) # Used to send request to stop
self.my_logger = logging.getLogger('IvolutionFile.Thread')
#self.console_logger = logging.getLogger('ConsoleLog')
def update(self, message):
"""
        Triggered by IvolutionWindow.
Uses the Observer pattern to inform the user about the progress of the GUI.
"""
if len(message) == 1: # system commands
if message[0] == "STOP":
#self.console_logger.debug("Facemovie is going to stop")
self.my_logger.debug("Facemovie is going to stop")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
else:
#self.console_logger.debug("Unrecognized system command")
self.my_logger.debug("Unrecognized system command")
##self.console_logger.debug(message)
self.my_logger.debug(message)
elif len(message) == 2: # notifications
##self.console_logger.debug(message)
self.my_logger.debug(message)
if message[0] == "FILEADD":
self.notify(["Interface", [message[0], message[1], 0]])
else:
# notify gui about small updates
self.notify(["Interface", ["STATUS", message[0], message[1]]])
# checking for fatal error
if message[0] == "Error":
#self.console_logger.debug("Fatal Error detected")
self.my_logger.debug("Fatal Error detected")
self.stop_process = True
self.notify(["Lib", ["STOP"]])
elif len(message) == 3: # notifications
if message[0] == "FILEDONE":
self.notify(["Interface", message])
else:
#self.console_logger.debug("Unrecognized command")
self.my_logger.debug("Unrecognized command")
#self.console_logger.debug(message)
self.my_logger.debug(message)
def run(self):
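        # Processing pipeline and the progress fractions reported to the GUI:
        #   list pictures (0.0) -> detect faces (0.2) -> compute output dimensions (0.6)
        #   -> render and save the movie (0.8 -> 1.0)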
if not self.stop_process:
self.my_logger.debug("Listing pictures")
self.notify(["Interface", ["PROGRESS", "Listing pictures", 0.0]])
num_guys = self.facemovie.list_guys()
# FIXME: Later to be done in Lib
if num_guys < 0:
self.notify(["Interface", ["STATUS", "Source folder not found", 0.0]])
self.stop_process = True
elif num_guys == 0:
self.notify(["Interface", ["STATUS", "No image found in source folder", 0.0]])
self.stop_process = True
if not self.stop_process:
self.my_logger.debug("Detecting Faces")
self.notify(["Interface", ["PROGRESS", "Detecting Faces", 0.2]])
self.facemovie.prepare_faces() # I want to search for the faces, and characteristics of the images
if not self.stop_process:
self.my_logger.debug("Calculating video requirements")
self.notify(["Interface", ["PROGRESS", "Calculating video requirements", 0.6]])
self.facemovie.find_final_dimensions() # finds output size for desired mode.
if not self.stop_process:
self.my_logger.debug("Generating movie")
self.notify(["Interface", ["PROGRESS", "Generating movie", 0.8]])
self.facemovie.save_movie()
self.my_logger.debug("Movie saved")
self.notify(["Interface", ["PROGRESS", "Movie saved, Finished!", 1.0]])
# updating status to avoid remanent messages
self.notify(["Interface", ["STATUS", " ", 1.0]])
if not self.stop_process:
self.my_logger.debug("Thread terminated")
if self.stop_process:
self.notify(["Interface", ["PROGRESS", "Process cancelled!", 1.0]])
| 0
| 0
| 0
| 4,712
| 0
| 0
| 0
| 15
| 136
|
0c88166c936c8776b4331a148fd68ba27d214ba1
| 1,395
|
py
|
Python
|
Q024_implement_queue_class_in_python.py
|
latika18/learning
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
[
"Unlicense"
] | null | null | null |
Q024_implement_queue_class_in_python.py
|
latika18/learning
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
[
"Unlicense"
] | null | null | null |
Q024_implement_queue_class_in_python.py
|
latika18/learning
|
a57c9aacc0157bf7c318f46c1e7c4971d1d55aea
|
[
"Unlicense"
] | null | null | null |
#Question 24
#Implement a queue class in Python: It should support 3 APIs:
#queue.top(): prints current element at front of queue
#queue.pop(): takes out an element from front of queue
#queue.add(): adds a new element at end of queue
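# The Queue class itself is omitted from this snippet; a minimal list-backed sketch
# implementing the three APIs described above (illustrative only) would be:
class Queue:
    def __init__(self):
        self.items = []
    def top(self):
        # return the element currently at the front of the queue (None if empty)
        return self.items[0] if self.items else None
    def pop(self):
        # remove the element at the front of the queue
        if self.items:
            self.items.pop(0)
    def add(self, item):
        # append a new element at the end of the queue
        self.items.append(item)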
queue_1 = Queue()
queue_1.add(12)
queue_1.add(11)
queue_1.add(55)
queue_1.add(66)
queue_1.add(56)
queue_1.add(43)
queue_1.add(33)
queue_1.add(88)
queue_1.add(56)
queue_1.add(34)
print queue_1
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
| 21.136364
| 61
| 0.653047
|
#Question 24
#Implement a queue class in Python: It should support 3 APIs:
#queue.top(): prints current element at front of queue
#queue.pop(): takes out an element from front of queue
#queue.add(): adds a new element at end of queue
class Queue:
def __init__(self):
"""initialise a Queue class"""
self.items = []
def top(self):
"""returns the current element at front of queue"""
if self.items:
return self.items[0]
else:
raise Exception("Empty Queue")
def pop(self):
"""takes out an element from front of queue"""
if self.items:
self.items.pop(0)
else :
raise Exception("Empty Queue")
def add(self , item):
"""adds a new element at the end of queue"""
self.items.append(item)
queue_1 = Queue()
queue_1.add(12)
queue_1.add(11)
queue_1.add(55)
queue_1.add(66)
queue_1.add(56)
queue_1.add(43)
queue_1.add(33)
queue_1.add(88)
queue_1.add(56)
queue_1.add(34)
print queue_1
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
print queue_1.top()
queue_1.pop()
| 0
| 0
| 0
| 571
| 0
| 0
| 0
| 0
| 22
|
7a93655ebac4268d033069e0c8e6ff264d96d3fb
| 45
|
py
|
Python
|
src/widget/user1/__init__.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
src/widget/user1/__init__.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
src/widget/user1/__init__.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Just for test
"""
| 11.25
| 23
| 0.466667
|
# -*- coding: utf-8 -*-
"""
Just for test
"""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2366a81d98fc425ba641105e960fcc11a70e1e25
| 993
|
py
|
Python
|
plot/Plot_thermal_conductiviy_V1.py
|
eastsheng/Thermal-conductivity
|
8a26be22c58b3b3b6723c57c65f4bba93556f9e8
|
[
"MIT"
] | 7
|
2020-06-10T05:38:17.000Z
|
2022-03-11T10:33:57.000Z
|
plot/Plot_thermal_conductiviy_V1.py
|
eastsheng/Thermal-conductivity
|
8a26be22c58b3b3b6723c57c65f4bba93556f9e8
|
[
"MIT"
] | null | null | null |
plot/Plot_thermal_conductiviy_V1.py
|
eastsheng/Thermal-conductivity
|
8a26be22c58b3b3b6723c57c65f4bba93556f9e8
|
[
"MIT"
] | 2
|
2020-04-24T09:36:25.000Z
|
2022-03-11T10:33:58.000Z
|
if __name__ == '__main__':
# tcfile = './Thermal_conductivity_Se.txt'
tcfile = './Thermal_conductivity_S.txt'
plt_tc(tcfile)
| 24.825
| 96
| 0.678751
|
import numpy as np
import matplotlib.pyplot as plt
def plt_tc(tcfile):
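    # Data layout inferred from the code below: column 0 holds the mass, divided by
    # 32.06 (atomic mass of sulfur, matching Thermal_conductivity_S.txt) to give the
    # mass ratio R; columns 1-3 hold three independent thermal-conductivity runs,
    # plotted as mean +/- standard deviation in W/m-K.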
tc = np.loadtxt(tcfile)
# print(tc)
x = tc[:,0]/32.06
y = tc[:,1:4]
y_mean = np.mean(y,axis=1)
y_std = np.std(y,axis=1)
# print(y_mean,y_std)
plt.rc('font',family='Times New Roman',size=26)
fig, ax = plt.subplots(figsize=(8,6))
fig.subplots_adjust(bottom=0.2,left=0.2)
s1 = ax.errorbar(x,y_mean,yerr=y_std,capsize=10,capthick=4,
fmt='bo:',mfc='w',mec='b',markersize=16,mew=2)
ax.legend(handles=[s1],labels=['$\mathregular{MoS_2}$/$\mathregular{MoS^{m}}_\mathregular{2}$']
,loc='best', fontsize=26)
ax.set_xlabel('Mass ratio (R)',fontsize=26,fontweight='bold')
ax.set_ylabel('Thermal conductivity (W/m-K)',fontsize=26,fontweight='bold')
ax.set_xticks([0,1,2,3,4,5,6])
ax.set_yticks([0,5,10,15,20,25,30])
plt.savefig(tcfile+'_.tiff',dpi=300)
plt.show()
return
if __name__ == '__main__':
# tcfile = './Thermal_conductivity_Se.txt'
tcfile = './Thermal_conductivity_S.txt'
plt_tc(tcfile)
| 0
| 0
| 0
| 0
| 0
| 785
| 0
| 7
| 69
|
68a08329945e4e078a86db5c9188a879ac68c385
| 4,743
|
py
|
Python
|
scripts/scatter_plots.py
|
tupleblog/bkkdreams-datathon
|
54214356d42cecdc758803d958375bd7ee7dc169
|
[
"MIT"
] | 1
|
2020-09-13T16:52:03.000Z
|
2020-09-13T16:52:03.000Z
|
scripts/scatter_plots.py
|
tupleblog/bkkdreams-datathon
|
54214356d42cecdc758803d958375bd7ee7dc169
|
[
"MIT"
] | null | null | null |
scripts/scatter_plots.py
|
tupleblog/bkkdreams-datathon
|
54214356d42cecdc758803d958375bd7ee7dc169
|
[
"MIT"
] | null | null | null |
"""
Scatter plots of district budgets versus district population and area
"""
import pandas as pd
from bokeh.io import output_notebook
output_notebook()
from bokeh.io import show, output_file
from bokeh.models import (ColumnDataSource, HoverTool, Span)
from bokeh.plotting import figure, save, output_file
def plot_vs_population(districts_budget_df):
"""
From district budget to scatter plots vs total population
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df["num_total"] = df["num_male"] + df["num_female"]
df = df.groupby(["dname", "num_total"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["num_total"] / 10000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.num_total.mean() / 10000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = " ()"
p.yaxis.axis_label = f"{q} ()"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("", "@desc"),
(f"{q}", "@y "),
("", "@x "),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget.html", mode="inline")
save(p)
def plot_vs_area(districts_budget_df):
"""
From district budget to scatter plots vs area size
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df = df.groupby(["dname", "AREA"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["AREA"] / 1000000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.AREA.mean() / 1000000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = " (..)"
p.yaxis.axis_label = f"{q} ()"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("", "@desc"),
(f"{q}", "@y "),
("", "@x .."),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget-area.html", mode="inline")
save(p)
if __name__ == "__main__":
districts_budget_df = pd.read_csv("data/districts_budget.csv")[
["dname", "", "", "AREA", "num_male", "num_female"]
]
districts_budget_df["num_total"] = (
districts_budget_df.num_male + districts_budget_df.num_female
)
districts_budget_df.rename(
columns={"": "budget_type", "": "budget"}, inplace=True
)
q_map = {
"//": "gen",
"": "treasury",
"/": "clean",
"//": "civil",
"/": "pedes",
"": "env",
"/": "enh",
"/": "health",
"": "edu",
}
plot_vs_population(districts_budget_df)
plot_vs_area(districts_budget_df)
| 31.62
| 85
| 0.560826
|
"""
Scatter plots of district budgets versus district population and area
"""
import pandas as pd
import numpy as np
from numpy.random import random
from math import pi
from bokeh.io import output_notebook
output_notebook()
from bokeh.io import show, output_file
from bokeh.palettes import RdYlGn6
from bokeh.models import (
BasicTicker,
ColorBar,
LinearColorMapper,
PrintfTickFormatter,
ColumnDataSource,
HoverTool,
Span,
)
from bokeh.plotting import figure, save, show, output_file
from bokeh.palettes import BuGn, Blues8, Oranges256
def plot_vs_population(districts_budget_df):
"""
From district budget to scatter plots vs total population
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df["num_total"] = df["num_male"] + df["num_female"]
df = df.groupby(["dname", "num_total"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["num_total"] / 10000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.num_total.mean() / 10000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = "จำนวนผู้อยู่อาศัย (หมื่นคน)"
p.yaxis.axis_label = f"งบประมาณ{q} (ล้านบาท)"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("เขต", "@desc"),
(f"งบ{q}", "@y ล้านบาท"),
("จำนวนผู้อาศัย", "@x หมื่นคน"),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget.html", mode="inline")
save(p)
def plot_vs_area(districts_budget_df):
"""
From district budget to scatter plots vs area size
"""
for q in districts_budget_df.budget_type.unique():
df = districts_budget_df.query("budget_type == '{}'".format(q))
df = df.groupby(["dname", "AREA"])["budget"].sum().reset_index()
source = ColumnDataSource(
data=dict(
x=df["AREA"] / 1000000, y=df["budget"] / 1000000, desc=df["dname"]
)
)
p = figure(title="", tools="hover,box_zoom,reset")
vline = Span(
location=df.AREA.mean() / 1000000,
dimension="height",
line_color="gold",
line_width=1.5,
)
hline = Span(
location=df["budget"].mean() / 1000000,
dimension="width",
line_color="gold",
line_width=1.5,
)
p.circle(
"x", "y", source=source, fill_alpha=0.2, size=10,
)
p.xaxis.axis_label = "ขนาดพื้นที่ (ตร.กม.)"
p.yaxis.axis_label = f"งบประมาณ{q} (ล้านบาท)"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
hover = HoverTool(
tooltips=[
("เขต", "@desc"),
(f"งบ{q}", "@y ล้านบาท"),
("ขนาดพื้นที่", "@x ตร.กม."),
]
)
p.add_tools(hover)
p.renderers.extend([vline, hline])
output_file(f"plots/scatter-{q_map[q]}-budget-area.html", mode="inline")
save(p)
if __name__ == "__main__":
districts_budget_df = pd.read_csv("data/districts_budget.csv")[
["dname", "ประเภทแผนงาน", "งบแผนงาน", "AREA", "num_male", "num_female"]
]
districts_budget_df["num_total"] = (
districts_budget_df.num_male + districts_budget_df.num_female
)
districts_budget_df.rename(
columns={"ประเภทแผนงาน": "budget_type", "งบแผนงาน": "budget"}, inplace=True
)
q_map = {
"ทั่วไป/บริหาร/อื่นๆ": "gen",
"การคลัง": "treasury",
"เทศกิจ/รักษาความสะอาด": "clean",
"โยธา/ก่อสร้าง/จราจร": "civil",
"น้ำท่วม/ทางเท้า": "pedes",
"สิ่งแวดล้อม": "env",
"พัฒนาชุมชน/อาชีพ": "enh",
"อนามัย/สาธารณะสุข": "health",
"การศึกษา": "edu",
}
plot_vs_population(districts_budget_df)
plot_vs_area(districts_budget_df)
| 879
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 110
|
c175aa18424a015e81f4404dc8122dd28b20d6bf
| 302
|
py
|
Python
|
QISKIT/TDA/Qconfig_20_qubit.py
|
rsarkar-github/Quantum-Computing-Projects
|
966c0465f98dca0091f09826e12eb57277faf3c0
|
[
"MIT"
] | null | null | null |
QISKIT/TDA/Qconfig_20_qubit.py
|
rsarkar-github/Quantum-Computing-Projects
|
966c0465f98dca0091f09826e12eb57277faf3c0
|
[
"MIT"
] | null | null | null |
QISKIT/TDA/Qconfig_20_qubit.py
|
rsarkar-github/Quantum-Computing-Projects
|
966c0465f98dca0091f09826e12eb57277faf3c0
|
[
"MIT"
] | null | null | null |
APItoken = 'ffb1bf6df27099919ca9ab63da88b1929016a7f7468d477f65241f61e1f457ab4' \
'f53c50ead0371ce632b283b5dc803fae33b34b3601053d2bde24f4ebc921b1b'
config = {
'url': 'https://q-console-api.mybluemix.net/api',
'hub': 'ibmq',
'group': 'qc-ware',
'project': 'default'
}
| 30.2
| 80
| 0.688742
|
APItoken = 'ffb1bf6df27099919ca9ab63da88b1929016a7f7468d477f65241f61e1f457ab4' \
'f53c50ead0371ce632b283b5dc803fae33b34b3601053d2bde24f4ebc921b1b'
config = {
'url': 'https://q-console-api.mybluemix.net/api',
'hub': 'ibmq',
'group': 'qc-ware',
'project': 'default'
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
de5c1f120fe34ee35b979c4f5b009fc460b748c4
| 1,906
|
py
|
Python
|
os_faults/ansible/modules/iptables.py
|
Marie-Donnie/os-faults
|
2f9eb760d240b9a03b7df5682b5b24cf35daacd0
|
[
"Apache-2.0"
] | null | null | null |
os_faults/ansible/modules/iptables.py
|
Marie-Donnie/os-faults
|
2f9eb760d240b9a03b7df5682b5b24cf35daacd0
|
[
"Apache-2.0"
] | null | null | null |
os_faults/ansible/modules/iptables.py
|
Marie-Donnie/os-faults
|
2f9eb760d240b9a03b7df5682b5b24cf35daacd0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
if __name__ == '__main__':
main()
| 38.897959
| 75
| 0.629066
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import * # noqa
def main():
module = AnsibleModule(
argument_spec=dict(
service=dict(required=True, type='str'),
action=dict(required=True, choices=['block', 'unblock']),
port=dict(required=True, type='int'),
protocol=dict(required=True, choices=['tcp', 'udp']),
))
service = module.params['service']
action = module.params['action']
port = module.params['port']
protocol = module.params['protocol']
comment = '{}_temporary_DROP'.format(service)
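    # Example (hypothetical values): for service="nova-api", protocol="tcp", port=8774,
    # the block action runs roughly:
    #   iptables -I INPUT 1 -p tcp --dport 8774 -j DROP -m comment --comment "nova-api_temporary_DROP"
    # and unblock later finds and deletes that rule by matching the same comment.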
if action == 'block':
cmd = ('bash -c "iptables -I INPUT 1 -p {protocol} --dport {port} '
'-j DROP -m comment --comment "{comment}""'.format(
comment=comment, port=port, protocol=protocol))
else:
cmd = ('bash -c "rule=`iptables -L INPUT -n --line-numbers | '
'grep "{comment}" | cut -d \' \' -f1`; for arg in $rule;'
' do iptables -D INPUT -p {protocol} --dport {port} '
'-j DROP -m comment --comment "{comment}"; done"'.format(
comment=comment, port=port, protocol=protocol))
rc, stdout, stderr = module.run_command(cmd, check_rc=True)
module.exit_json(cmd=cmd, rc=rc, stderr=stderr, stdout=stdout)
if __name__ == '__main__':
main()
| 0
| 0
| 0
| 0
| 0
| 1,228
| 0
| 19
| 54
|
91aa923768fc23db2b0a5d788c50eb978a3701bc
| 15,766
|
py
|
Python
|
python/pyspark/sql/tests/test_session.py
|
wangyeweikuer/spark
|
731aa2cdf8a78835621fbf3de2d3492b27711d1a
|
[
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2022-03-25T06:40:43.000Z
|
2022-03-25T06:40:43.000Z
|
python/pyspark/sql/tests/test_session.py
|
nyingping/spark
|
ca7200b0008dc6101a252020e6c34ef7b72d81d6
|
[
"Apache-2.0"
] | 6
|
2018-06-14T11:15:27.000Z
|
2019-01-27T12:11:23.000Z
|
python/pyspark/sql/tests/test_session.py
|
nyingping/spark
|
ca7200b0008dc6101a252020e6c34ef7b72d81d6
|
[
"Apache-2.0"
] | 1
|
2022-03-09T08:50:07.000Z
|
2022-03-09T08:50:07.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
if __name__ == "__main__":
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 40.425641
| 99
| 0.639921
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import unittest
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, SQLContext, Row
from pyspark.sql.functions import col
from pyspark.testing.sqlutils import ReusedSQLTestCase
from pyspark.testing.utils import PySparkTestCase
class SparkSessionTests(ReusedSQLTestCase):
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
class SparkSessionTests1(ReusedSQLTestCase):
    # We can't include this test in SQLTests because we will stop the class's SparkContext and cause
    # other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext("local[4]", self.sc.appName)
spark = SparkSession.builder.getOrCreate()
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
sc.stop()
class SparkSessionTests2(PySparkTestCase):
# This test is separate because it's closely related with session's start and stop.
# See SPARK-23228.
def test_set_jvm_default_session(self):
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
finally:
spark.stop()
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isEmpty())
def test_jvm_default_session_already_set(self):
# Here, we assume there is the default session already set in JVM.
jsession = self.sc._jvm.SparkSession(self.sc._jsc.sc())
self.sc._jvm.SparkSession.setDefaultSession(jsession)
spark = SparkSession.builder.getOrCreate()
try:
self.assertTrue(spark._jvm.SparkSession.getDefaultSession().isDefined())
# The session should be the same with the exiting one.
self.assertTrue(jsession.equals(spark._jvm.SparkSession.getDefaultSession().get()))
finally:
spark.stop()
class SparkSessionTests3(unittest.TestCase):
def test_active_session(self):
spark = SparkSession.builder.master("local").getOrCreate()
try:
activeSession = SparkSession.getActiveSession()
df = activeSession.createDataFrame([(1, "Alice")], ["age", "name"])
self.assertEqual(df.collect(), [Row(age=1, name="Alice")])
finally:
spark.stop()
def test_get_active_session_when_no_active_session(self):
active = SparkSession.getActiveSession()
self.assertEqual(active, None)
spark = SparkSession.builder.master("local").getOrCreate()
active = SparkSession.getActiveSession()
self.assertEqual(active, spark)
spark.stop()
active = SparkSession.getActiveSession()
self.assertEqual(active, None)
def test_spark_session(self):
spark = SparkSession.builder.master("local").config("some-config", "v2").getOrCreate()
try:
self.assertEqual(spark.conf.get("some-config"), "v2")
self.assertEqual(spark.sparkContext._conf.get("some-config"), "v2")
self.assertEqual(spark.version, spark.sparkContext.version)
spark.sql("CREATE DATABASE test_db")
spark.catalog.setCurrentDatabase("test_db")
self.assertEqual(spark.catalog.currentDatabase(), "test_db")
spark.sql("CREATE TABLE table1 (name STRING, age INT) USING parquet")
self.assertEqual(spark.table("table1").columns, ["name", "age"])
self.assertEqual(spark.range(3).count(), 3)
# SPARK-37516: Only plain column references work as variable in SQL.
self.assertEqual(
spark.sql("select {c} from range(1)", c=col("id")).first(), spark.range(1).first()
)
with self.assertRaisesRegex(ValueError, "Column"):
spark.sql("select {c} from range(10)", c=col("id") + 1)
finally:
spark.sql("DROP DATABASE test_db CASCADE")
spark.stop()
def test_global_default_session(self):
spark = SparkSession.builder.master("local").getOrCreate()
try:
self.assertEqual(SparkSession.builder.getOrCreate(), spark)
finally:
spark.stop()
def test_default_and_active_session(self):
spark = SparkSession.builder.master("local").getOrCreate()
activeSession = spark._jvm.SparkSession.getActiveSession()
defaultSession = spark._jvm.SparkSession.getDefaultSession()
try:
self.assertEqual(activeSession, defaultSession)
finally:
spark.stop()
def test_config_option_propagated_to_existing_session(self):
session1 = SparkSession.builder.master("local").config("spark-config1", "a").getOrCreate()
self.assertEqual(session1.conf.get("spark-config1"), "a")
session2 = SparkSession.builder.config("spark-config1", "b").getOrCreate()
try:
self.assertEqual(session1, session2)
self.assertEqual(session1.conf.get("spark-config1"), "b")
finally:
session1.stop()
def test_new_session(self):
session = SparkSession.builder.master("local").getOrCreate()
newSession = session.newSession()
try:
self.assertNotEqual(session, newSession)
finally:
session.stop()
newSession.stop()
def test_create_new_session_if_old_session_stopped(self):
session = SparkSession.builder.master("local").getOrCreate()
session.stop()
newSession = SparkSession.builder.master("local").getOrCreate()
try:
self.assertNotEqual(session, newSession)
finally:
newSession.stop()
def test_active_session_with_None_and_not_None_context(self):
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
sc = None
session = None
try:
sc = SparkContext._active_spark_context
self.assertEqual(sc, None)
activeSession = SparkSession.getActiveSession()
self.assertEqual(activeSession, None)
sparkConf = SparkConf()
sc = SparkContext.getOrCreate(sparkConf)
activeSession = sc._jvm.SparkSession.getActiveSession()
self.assertFalse(activeSession.isDefined())
session = SparkSession(sc)
activeSession = sc._jvm.SparkSession.getActiveSession()
self.assertTrue(activeSession.isDefined())
activeSession2 = SparkSession.getActiveSession()
self.assertNotEqual(activeSession2, None)
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
class SparkSessionTests4(ReusedSQLTestCase):
def test_get_active_session_after_create_dataframe(self):
session2 = None
try:
activeSession1 = SparkSession.getActiveSession()
session1 = self.spark
self.assertEqual(session1, activeSession1)
session2 = self.spark.newSession()
activeSession2 = SparkSession.getActiveSession()
self.assertEqual(session1, activeSession2)
self.assertNotEqual(session2, activeSession2)
session2.createDataFrame([(1, "Alice")], ["age", "name"])
activeSession3 = SparkSession.getActiveSession()
self.assertEqual(session2, activeSession3)
session1.createDataFrame([(1, "Alice")], ["age", "name"])
activeSession4 = SparkSession.getActiveSession()
self.assertEqual(session1, activeSession4)
finally:
if session2 is not None:
session2.stop()
class SparkSessionTests5(unittest.TestCase):
def setUp(self):
# These tests require restarting the Spark context so we set up a new one for each test
# rather than at the class level.
self.sc = SparkContext("local[4]", self.__class__.__name__, conf=SparkConf())
self.spark = SparkSession(self.sc)
def tearDown(self):
self.sc.stop()
self.spark.stop()
def test_sqlcontext_with_stopped_sparksession(self):
# SPARK-30856: test that SQLContext.getOrCreate() returns a usable instance after
# the SparkSession is restarted.
sql_context = SQLContext.getOrCreate(self.spark.sparkContext)
self.spark.stop()
spark = SparkSession.builder.master("local[4]").appName(self.sc.appName).getOrCreate()
new_sql_context = SQLContext.getOrCreate(spark.sparkContext)
self.assertIsNot(new_sql_context, sql_context)
self.assertIs(SQLContext.getOrCreate(spark.sparkContext).sparkSession, spark)
try:
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
finally:
spark.stop()
self.assertIsNone(SQLContext._instantiatedContext)
def test_sqlcontext_with_stopped_sparkcontext(self):
# SPARK-30856: test initialization via SparkSession when only the SparkContext is stopped
self.sc.stop()
spark = SparkSession.builder.master("local[4]").appName(self.sc.appName).getOrCreate()
self.sc = spark.sparkContext
self.assertIs(SQLContext.getOrCreate(self.sc).sparkSession, spark)
def test_get_sqlcontext_with_stopped_sparkcontext(self):
# SPARK-30856: test initialization via SQLContext.getOrCreate() when only the SparkContext
# is stopped
self.sc.stop()
self.sc = SparkContext("local[4]", self.sc.appName)
self.assertIs(SQLContext.getOrCreate(self.sc)._sc, self.sc)
class SparkSessionBuilderTests(unittest.TestCase):
def test_create_spark_context_first_then_spark_session(self):
sc = None
session = None
try:
conf = SparkConf().set("key1", "value1")
sc = SparkContext("local[4]", "SessionBuilderTests", conf=conf)
session = SparkSession.builder.config("key2", "value2").getOrCreate()
self.assertEqual(session.conf.get("key1"), "value1")
self.assertEqual(session.conf.get("key2"), "value2")
self.assertEqual(session.sparkContext, sc)
self.assertFalse(sc.getConf().contains("key2"))
self.assertEqual(sc.getConf().get("key1"), "value1")
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
def test_another_spark_session(self):
session1 = None
session2 = None
try:
session1 = SparkSession.builder.config("key1", "value1").getOrCreate()
session2 = SparkSession.builder.config(
"spark.sql.codegen.comments", "true"
).getOrCreate()
self.assertEqual(session1.conf.get("key1"), "value1")
self.assertEqual(session2.conf.get("key1"), "value1")
self.assertEqual(session1.conf.get("spark.sql.codegen.comments"), "false")
self.assertEqual(session2.conf.get("spark.sql.codegen.comments"), "false")
self.assertEqual(session1.sparkContext, session2.sparkContext)
self.assertEqual(session1.sparkContext.getConf().get("key1"), "value1")
self.assertFalse(session1.sparkContext.getConf().contains("key2"))
finally:
if session1 is not None:
session1.stop()
if session2 is not None:
session2.stop()
def test_create_spark_context_with_initial_session_options(self):
sc = None
session = None
try:
conf = SparkConf().set("key1", "value1")
sc = SparkContext("local[4]", "SessionBuilderTests", conf=conf)
session = (
SparkSession.builder.config("spark.sql.codegen.comments", "true")
.enableHiveSupport()
.getOrCreate()
)
self.assertEqual(session._jsparkSession.sharedState().conf().get("key1"), "value1")
self.assertEqual(
session._jsparkSession.sharedState().conf().get("spark.sql.codegen.comments"),
"true",
)
self.assertEqual(
session._jsparkSession.sharedState().conf().get("spark.sql.catalogImplementation"),
"hive",
)
self.assertEqual(session.sparkContext, sc)
finally:
if session is not None:
session.stop()
if sc is not None:
sc.stop()
class SparkExtensionsTest(unittest.TestCase):
# These tests are separate because it uses 'spark.sql.extensions' which is
# static and immutable. This can't be set or unset, for example, via `spark.conf`.
@classmethod
def setUpClass(cls):
import glob
from pyspark.find_spark_home import _find_spark_home
SPARK_HOME = _find_spark_home()
filename_pattern = (
"sql/core/target/scala-*/test-classes/org/apache/spark/sql/"
"SparkSessionExtensionSuite.class"
)
if not glob.glob(os.path.join(SPARK_HOME, filename_pattern)):
raise unittest.SkipTest(
"'org.apache.spark.sql.SparkSessionExtensionSuite' is not "
"available. Will skip the related tests."
)
# Note that 'spark.sql.extensions' is a static immutable configuration.
cls.spark = (
SparkSession.builder.master("local[4]")
.appName(cls.__name__)
.config("spark.sql.extensions", "org.apache.spark.sql.MyExtensions")
.getOrCreate()
)
@classmethod
def tearDownClass(cls):
cls.spark.stop()
def test_use_custom_class_for_extensions(self):
self.assertTrue(
self.spark._jsparkSession.sessionState()
.planner()
.strategies()
.contains(
self.spark._jvm.org.apache.spark.sql.MySparkStrategy(self.spark._jsparkSession)
),
"MySparkStrategy not found in active planner strategies",
)
self.assertTrue(
self.spark._jsparkSession.sessionState()
.analyzer()
.extendedResolutionRules()
.contains(self.spark._jvm.org.apache.spark.sql.MyRule(self.spark._jsparkSession)),
"MyRule not found in extended resolution rules",
)
if __name__ == "__main__":
from pyspark.sql.tests.test_session import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 0
| 904
| 0
| 13,273
| 0
| 0
| 0
| 142
| 358
|
f10f0867880f642b6d0b6d7c51bd8255be411723
| 14,187
|
py
|
Python
|
model/run_experiments.py
|
irenetrampoline/clustering-interval-censored
|
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
|
[
"MIT"
] | 1
|
2022-02-03T08:47:45.000Z
|
2022-02-03T08:47:45.000Z
|
model/run_experiments.py
|
irenetrampoline/clustering-interval-censored
|
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
|
[
"MIT"
] | null | null | null |
model/run_experiments.py
|
irenetrampoline/clustering-interval-censored
|
f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('../data')
sys.path.append('../plot')
if __name__=='__main__':
main()
| 48.585616
| 229
| 0.584408
|
import argparse
import numpy as np
import os
import sys
sys.path.append('../data')
sys.path.append('../plot')
import torch
from load import sigmoid, quadratic, chf, parkinsons, load_data_format
from data_utils import parse_data, change_missing
from plot_utils import plot_subtypes, plot_latent
from models import Sublign
def get_hyperparameters(data_format_num):
# if data_format_num < 3:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.001, 0., 10, 20, 50, 'l1', 0.01
if data_format_num == 3:
# failing on hpsearch
anneal, C, b_vae, dh, ds, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.001
# if data_format_num == 5 or data_format_num == 3:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.001, 0.01, 20, 20, 100, 'l2', 0.01
if data_format_num == 1:
# best by hpsearch: (True, 0.001, 0.0, 200, 5, 200, 'l2', 0.001)
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.001, 0.0, 5, 200, 200, 'l2', 0.001
if data_format_num == 3:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.0, 0.0, 100, 20, 200, 'l2', 0.01
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 1.0, 0.01, 100, 5, 200, 'l2', 0.01 # cheat
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.001 # cheat 2
if data_format_num == 4:
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.01 # cheat
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 1.0, 0.01, 200, 20, 200, 'l2', 0.1
if data_format_num == 5:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 200, 20, 200, 'l2', 0.01
if data_format_num == 6:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.01, 0.0, 200, 20, 200, 'l2', 0.01
if data_format_num == 7:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.01, 0.0, 100, 20, 200, 'l2', 0.001
if data_format_num == 8:
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = True, 0.01, 0.01, 100, 20, 200, 'l2', 0.01
anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 1., 0., 10, 20, 50, 'l1', 1e-2
# best from prev : False, 0.001, 0.0, 10, 20, 50, 'l1', 0.1
# anneal, b_vae, C, ds, dh, drnn, reg_type, lr = False, 0.001, 0.0, 10, 20, 50, 'l1', 0.1
return anneal, b_vae, C, ds, dh, drnn, reg_type, lr
def get_hyperparameters_ppmi():
b_vae, C, ds, dh, drnn, reg_type, lr = 0.01, 0., 10, 10, 20, 'l1', 0.1
return b_vae, C, ds, dh, drnn, reg_type, lr
def get_hyperparameters_chf(version=0):
# original, results in paper are from this version
if version == 0:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10, 20, 50,'l1', 0.0, 0.001, 0.01, 1000, False
elif version == 1:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.0, 0.001, 0.01, 1000, True
elif version == 2:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.1, 0.001, 0.01, 1000, True
elif version == 3:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.0, 0.001, 0.1, 1000, True
elif version == 4:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = 10,200,200,'l1', 0.0, 0.01, 0.1, 1000, True
return ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', action='store', type=int, default=800, help="Number of epochs")
parser.add_argument('--trials', action='store', type=int, default=1, help="Number of trials")
parser.add_argument('--model_name', action='store', type=str, default='SubLign', help="Model name for Latex table making")
parser.add_argument('--lr', action='store', type=float, default=None, help="Learning rate manual override")
parser.add_argument('--b_vae', action='store', type=float, default=None, help="b-VAE val override")
parser.add_argument('--C', action='store', type=float, default=None, help="C override")
# datasets
parser.add_argument('--data_num', action='store', type=int, help="Data Format Number")
parser.add_argument('--chf', action='store_true', help="Use CHF dataset")
parser.add_argument('--ppmi', action='store_true', help="Use PPMI dataset")
# delta setup
parser.add_argument('--max_delta', action='store', type=float, default=5., help="Maximum possible delta")
parser.add_argument('--no_time', action='store_true', help="Learn time at all")
# debugging
parser.add_argument('--verbose', action='store_true', help="Plot everything")
parser.add_argument('--cuda', action='store_true', help="Use GPU")
parser.add_argument('--missing', action='store', type=float, default=0., help="What percent of data to make missing")
parser.add_argument('--plot_debug', action='store_true', help="Make animated gif about alignment / clusterings over epochs")
parser.add_argument('--epoch_debug', action='store_true', help="Save pickle about epoch differences over training")
parser.add_argument('--aggressive', action='store', type=int, help="Learn time at all")
parser.add_argument('--version', action='store', type=int, help="Choose hyp settings", default=0)
# other experiments
args = parser.parse_args()
trial_results = np.zeros((args.trials, 4))
data_format_num = args.data_num
if args.cuda:
device = 'cuda'
else:
device = 'cpu'
print('device', device)
print('data %d' % data_format_num)
for trial_num in range(args.trials):
# datasets
if data_format_num is not None:
max_visits = 4
num_output_dims = 3 if data_format_num < 3 else 1
use_sigmoid = data_format_num < 3
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
if args.lr is not None:
print('Running with lr=%.3f' % args.lr)
lr = args.lr
if args.C is not None:
print('Running with C=%.3f' % args.C)
C = args.C
if args.b_vae is not None:
print('Running with b_vae=%.3f' % args.b_vae)
b_vae = args.b_vae
data = load_data_format(data_format_num, trial_num, cache=True)
shuffle = False
elif args.chf:
ds, dh, drnn, reg_type, C, b_vae, lr, epochs, learn_time = get_hyperparameters_chf(version=args.version)
data = chf()
max_visits = 38
shuffle = True
num_output_dims = data.shape[1] - 4
elif args.ppmi:
b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters_ppmi()
if args.lr is not None:
print('Running with lr=%.3f' % args.lr)
lr = args.lr
if args.C is not None:
print('Running with C=%.3f' % args.C)
C = args.C
if args.b_vae is not None:
print('Running with b_vae=%.3f' % args.b_vae)
b_vae = args.b_vae
data = parkinsons()
max_visits = 17
shuffle = True
num_output_dims = data.shape[1] - 4
train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=max_visits, test_per=0.2, valid_per=0.2, shuffle=shuffle, device=device)
if args.missing > 0.:
train_data_loader, train_data_dict = change_missing(train_data_dict, args.missing)
data_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits, device=device)
"""
best params found through hypertuning (cross_validation/hpsearch.py)
# sigmoid: C (0.01), dim_h (20), ds (10 mid), dim_rnn (50 mid), reg_type (l1), lr (0.1)
# quad: C (0.1), dim_h (50), ds (10), dim_rnn (100), reg_type (l1), lr (0.1)
ppmi: (0.0, 10, 10, 50, 'l1', 0.1)
"""
# dim_stochastic, dim_hidden, dim_rnn, C, dim_biomarkers=3, reg_type = 'l2',
if data_format_num is not None:
model = Sublign(d_s, d_h, d_rnn, b_vae=b_vae, dim_biomarkers=num_output_dims, sigmoid=use_sigmoid, reg_type=reg_type, auto_delta=False, max_delta=args.max_delta, learn_time=(not args.no_time), device=device)
if device == 'cuda':
device_torch = torch.device('cuda')
model.to(device_torch)
model.fit(train_data_loader, test_data_loader, args.epochs, lr, verbose=args.verbose, fname='runs/data%d_trial%d.pt' % (data_format_num, trial_num), eval_freq=25, anneal=anneal)
elif args.chf:
args.verbose = False
model = Sublign(ds, dh, drnn, dim_biomarkers=num_output_dims, sigmoid=True, reg_type=reg_type, C=C, auto_delta=False, max_delta=args.max_delta, learn_time=(not args.no_time and learn_time), device=device, b_vae=b_vae)
if device == 'cuda':
device_torch = torch.device('cuda')
model.to(device_torch)
model.fit(data_loader, data_loader, args.epochs, lr, verbose=args.verbose,fname='runs/chf_v%d_%d.pt' % (args.version, args.epochs),eval_freq=25)
X = torch.tensor(collect_dict['Y_collect']).to(model.device)
Y = torch.tensor(collect_dict['obs_t_collect']).to(model.device)
M = torch.tensor(collect_dict['mask_collect']).to(model.device)
(nelbo, nll, kl), norm_reg = model.forward(Y, None, X, M, None)
nelbo, nll, kl, norm_reg = nelbo.item(), nll.item(), kl.item(), norm_reg.item()
subtypes = model.get_subtypes_datadict(collect_dict, K=3)
labels = model.get_labels(collect_dict)
deltas = model.get_deltas(collect_dict)
if args.cuda:
deltas = deltas.cpu().detach().numpy()
else:
deltas = deltas.detach().numpy()
import pickle
results = {
'labels':labels,
'deltas': deltas,
'subtypes': subtypes,
'nelbo': nelbo,
'nll': nll,
'kl': kl,
'norm_reg': norm_reg
}
pickle.dump(results, open('../clinical_runs/chf_v%d_%d.pk' % (args.version, args.epochs), 'wb'))
return
elif args.ppmi:
model = Sublign(d_s, d_h, d_rnn, b_vae=b_vae, C=C, dim_biomarkers=num_output_dims, sigmoid=True, reg_type=reg_type, auto_delta=True, max_delta=args.max_delta, learn_time=(not args.no_time))
model.fit(train_data_loader, test_data_loader, args.epochs, lr=lr, verbose=args.verbose, fname='runs/ppmi.pt', eval_freq=25)
results = model.score(train_data_dict, test_data_dict, K=2)
test_ari = results['ari']
print('PPMI Test ARI: %.3f' % test_ari)
# results = model.score(train_data_dict, test_data_dict, K=2)
# test_ari = results['ari']
# print('PPMI Test ARI: %.3f' % test_ari)
subtypes = model.get_subtypes_datadict(collect_dict)
labels = model.get_labels(collect_dict)
deltas = model.get_deltas(collect_dict)
import pickle
if args.cuda:
subtypes = subtypes.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
deltas = deltas.cpu().detach().numpy()
else:
subtypes = subtypes.cpu().detach().numpy()
labels = labels.cpu().detach().numpy()
deltas = deltas.cpu().detach().numpy()
pickle.dump((labels, deltas, subtypes), open('../clinical_runs/ppmi_icml.pk', 'wb'))
return
# subtypes = model.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=2)
train_results = model.score(train_data_dict, train_data_dict)
test_results = model.score(train_data_dict, test_data_dict)
train_mse = train_results['mse']
train_ari = train_results['ari']
train_swaps = train_results['swaps']
train_pear = train_results['pear']
mse = test_results['mse']
ari = test_results['ari']
swaps = test_results['swaps']
pear = test_results['pear']
# nelbo, nll, kl = model.get_loss(Y, S, X, M, anneal=1.)
# nelbo, nll, kl = nelbo.mean().detach().numpy(), nll.mean().detach().numpy(), kl.mean().detach().numpy()
# if args.verbose:
# plot_subtypes(subtypes, args.sigmoid, train_data_dict)
# plot_latent(model, test_data_dict)
trial_results[trial_num] = [mse, ari, swaps, pear]
if args.no_time:
args.model_name = 'SubNoLign'
if args.trials == 1:
print('Train: %.3f, %.3f, %.3f, %.3f' % (train_mse, train_ari, train_swaps, train_pear))
print('Test : %.3f, %.3f, %.3f, %.3f' % (mse, ari, swaps, pear))
# print('NELBO: %.3f, NLL: %.3f, KL: %.3f' % (nelbo, nll, kl))
else:
line_str = list()
for i,j in zip(trial_results.mean(axis=0), trial_results.std(axis=0)):
line_str.append('%.3f $\\pm$ %.3f' % (i,j))
print(' & '.join([args.model_name] + line_str) + '\\\\')
if args.data_num:
trials_fname = '%s_data%d_trials%d.txt' % (args.model_name, args.data_num, args.trials)
else:
trials_fname = '%s_ppmi_trials%d.txt' % (args.model_name, args.trials)
if not os.path.exists(trials_fname):
f = open(trials_fname, 'w')
else:
f = open(trials_fname, 'a')
f.write(' & '.join([args.model_name] + line_str) + '\\\\' + '\n')
f.close()
if __name__=='__main__':
main()
| 0
| 0
| 0
| 0
| 0
| 13,728
| 0
| 80
| 278
|
a53c8a2a5ad5aded053ad7c5fd27b412fc60a466
| 2,573
|
py
|
Python
|
parsers/pdbparser.py
|
rigdenlab/conkit-web
|
bf50d28a73f43b9eb0e0c397ec1d0fd32547fdf1
|
[
"BSD-3-Clause"
] | 1
|
2020-04-16T16:52:53.000Z
|
2020-04-16T16:52:53.000Z
|
parsers/pdbparser.py
|
rigdenlab/conplot
|
9b3129d9e1b7ed93da63c6fd31f9b50e63f2d4d9
|
[
"BSD-3-Clause"
] | 47
|
2020-05-11T13:59:11.000Z
|
2022-01-21T09:37:18.000Z
|
parsers/pdbparser.py
|
rigdenlab/conkit-web
|
bf50d28a73f43b9eb0e0c397ec1d0fd32547fdf1
|
[
"BSD-3-Clause"
] | 5
|
2020-04-24T11:19:21.000Z
|
2020-05-06T08:01:36.000Z
|
import itertools
VALID_AMINOACIDS = {"A", "R", "N", "D", "C", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "M", "F", "P", "O", "S", "U",
"T", "W", "Y", "V", "B", "Z", "X", "X", "J"}
def get_chain_contacts(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
contacts = []
residue_range = list(range(1, len(chain) + 1))
assert len(residue_range) == len(chain)
iterator = itertools.product(list(zip(residue_range, chain)), list(zip(residue_range, chain)))
for (resseq1_alt, residue1), (resseq2_alt, residue2) in iterator:
seq_distance = int(residue1.id[1]) - int(residue2.id[1])
if seq_distance <= 4:
continue
for atom1, atom2 in itertools.product(residue1, residue2):
xyz_distance = atom1 - atom2
if xyz_distance > 20:
d_bin = 9
elif xyz_distance <= 4:
d_bin = 0
else:
d_bin = int(round((xyz_distance - 4) / 2, 0))
if xyz_distance < 8:
contact = (int(residue1.id[1]), int(residue2.id[1]), round(1.0 - (xyz_distance / 100), 6), d_bin, 1)
else:
contact = (int(residue1.id[1]), int(residue2.id[1]), 0, d_bin, 1)
contacts.append(contact)
return contacts
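# The distance-to-bin mapping used above is easier to read in isolation; the helper
# below is only an illustrative restatement of that rule (the name _distance_bin is
# not part of the parser itself).
def _distance_bin(xyz_distance):
    # >20 A -> bin 9, <=4 A -> bin 0, otherwise 2-A-wide bins starting at 4 A
    if xyz_distance > 20:
        return 9
    elif xyz_distance <= 4:
        return 0
    return int(round((xyz_distance - 4) / 2, 0))
# e.g. _distance_bin(3.5) == 0, _distance_bin(6.9) == 1, _distance_bin(12.0) == 4, _distance_bin(25.0) == 9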
def remove_atoms(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
for residue in chain.copy():
if residue.id[0].strip() and residue.resname not in VALID_AMINOACIDS:
chain.detach_child(residue.id)
continue
for atom in residue.copy():
# if atom.is_disordered():
# chain[residue.id].detach_child(atom.id)
if residue.resname == "GLY" and atom.id == "CA":
continue
elif atom.id != "CB":
chain[residue.id].detach_child(atom.id)
| 38.402985
| 119
| 0.585309
|
from Bio.PDB import PDBParser as BioPDBParser
import io
import itertools
from operator import itemgetter
from utils.exceptions import InvalidFormat
VALID_AMINOACIDS = {"A", "R", "N", "D", "C", "C", "Q", "E", "G", "H", "I", "L", "K", "M", "M", "F", "P", "O", "S", "U",
"T", "W", "Y", "V", "B", "Z", "X", "X", "J"}
def get_chain_contacts(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
contacts = []
residue_range = list(range(1, len(chain) + 1))
assert len(residue_range) == len(chain)
iterator = itertools.product(list(zip(residue_range, chain)), list(zip(residue_range, chain)))
for (resseq1_alt, residue1), (resseq2_alt, residue2) in iterator:
seq_distance = int(residue1.id[1]) - int(residue2.id[1])
if seq_distance <= 4:
continue
for atom1, atom2 in itertools.product(residue1, residue2):
xyz_distance = atom1 - atom2
if xyz_distance > 20:
d_bin = 9
elif xyz_distance <= 4:
d_bin = 0
else:
d_bin = int(round((xyz_distance - 4) / 2, 0))
if xyz_distance < 8:
contact = (int(residue1.id[1]), int(residue2.id[1]), round(1.0 - (xyz_distance / 100), 6), d_bin, 1)
else:
contact = (int(residue1.id[1]), int(residue2.id[1]), 0, d_bin, 1)
contacts.append(contact)
return contacts
def remove_atoms(chain):
"""Credits to Felix Simkovic; code taken from GitHub rigdenlab/conkit/conkit/io/pdb.py"""
for residue in chain.copy():
if residue.id[0].strip() and residue.resname not in VALID_AMINOACIDS:
chain.detach_child(residue.id)
continue
for atom in residue.copy():
# if atom.is_disordered():
# chain[residue.id].detach_child(atom.id)
if residue.resname == "GLY" and atom.id == "CA":
continue
elif atom.id != "CB":
chain[residue.id].detach_child(atom.id)
def PDBParser(input, input_format=None):
try:
parser = BioPDBParser().get_structure('pdb', io.StringIO(input))
chain = list(parser.get_chains())[0]
remove_atoms(chain)
contacts = get_chain_contacts(chain)
except:
raise InvalidFormat('Unable to parse contacts')
if not contacts:
raise InvalidFormat('Unable to parse contacts')
output = ["PDB"]
output += sorted(contacts, key=itemgetter(2), reverse=True)
return output
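# Minimal usage sketch for the PDBParser function above; the file path is a
# placeholder and Biopython must be installed, since the function parses raw PDB text.
with open("example.pdb") as handle:   # illustrative path, not part of the module
    contacts = PDBParser(handle.read())
print(contacts[0])    # the "PDB" header entry
print(contacts[1:4])  # highest-confidence contacts come first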
| 0
| 0
| 0
| 0
| 0
| 469
| 0
| 43
| 111
|
415c056f05afba92871ed1b11cf1af7a2b45bdd6
| 1,753
|
py
|
Python
|
salmon/search/discogs.py
|
Junkbite/smoked-salmon
|
c7ee36dc9bba00707d8529af34b69e8c529d3615
|
[
"Apache-2.0"
] | 42
|
2020-03-02T11:42:17.000Z
|
2022-03-02T13:51:05.000Z
|
salmon/search/discogs.py
|
Junkbite/smoked-salmon
|
c7ee36dc9bba00707d8529af34b69e8c529d3615
|
[
"Apache-2.0"
] | 20
|
2020-03-02T11:46:43.000Z
|
2022-01-26T23:33:37.000Z
|
salmon/search/discogs.py
|
Junkbite/smoked-salmon
|
c7ee36dc9bba00707d8529af34b69e8c529d3615
|
[
"Apache-2.0"
] | 16
|
2020-03-01T11:29:55.000Z
|
2022-01-24T18:10:35.000Z
|
import re
SOURCES = {
"Vinyl": "Vinyl",
"File": "WEB",
"CD": "CD",
}
def sanitize_artist_name(name):
"""
    Remove parenthetical number disambiguation bullshit from artist names,
as well as the asterisk stuff.
"""
name = re.sub(r" \(\d+\)$", "", name)
return re.sub(r"\*+$", "", name)
def parse_source(formats):
"""
Take the list of format strings provided by Discogs and iterate over them
to find a possible source for the release.
"""
for format_s, source in SOURCES.items():
if any(format_s in f for f in formats):
return source
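# A short illustration of the two helpers above; the inputs are invented.
print(sanitize_artist_name("Nirvana (2)"))      # -> "Nirvana"
print(sanitize_artist_name("Dream*"))           # -> "Dream"
print(parse_source(["CD", "Album"]))            # -> "CD"
print(parse_source(["LP", "Vinyl", "45 RPM"]))  # -> "Vinyl"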
| 30.224138
| 79
| 0.553908
|
import re
from salmon.search.base import IdentData, SearchMixin
from salmon.sources import DiscogsBase
SOURCES = {
"Vinyl": "Vinyl",
"File": "WEB",
"CD": "CD",
}
class Searcher(DiscogsBase, SearchMixin):
async def search_releases(self, searchstr, limit):
releases = {}
resp = await self.get_json(
"/database/search",
params={"q": searchstr, "type": "release", "perpage": 50},
)
for rls in resp["results"]:
artists, title = rls["title"].split(" - ", 1)
year = rls["year"] if "year" in rls else None
source = parse_source(rls["format"])
ed_title = ", ".join(set(rls["format"]))
edition = f"{year} {source}"
if rls["label"] and rls["label"][0] != "Not On Label":
edition += f" {rls['label'][0]} {rls['catno']}"
else:
edition += " Not On Label"
releases[rls["id"]] = (
IdentData(artists, title, year, None, source),
self.format_result(artists, title, edition, ed_title=ed_title),
)
if len(releases) == limit:
break
return "Discogs", releases
def sanitize_artist_name(name):
"""
    Remove parenthetical number disambiguation bullshit from artist names,
as well as the asterisk stuff.
"""
name = re.sub(r" \(\d+\)$", "", name)
return re.sub(r"\*+$", "", name)
def parse_source(formats):
"""
Take the list of format strings provided by Discogs and iterate over them
to find a possible source for the release.
"""
for format_s, source in SOURCES.items():
if any(format_s in f for f in formats):
return source
| 0
| 0
| 978
| 20
| 0
| 0
| 0
| 49
| 94
|
8e0d50b482773beb3ff48ba3c18ff76723f48d7c
| 590
|
py
|
Python
|
scripts/relay.py
|
MDooley47/jam-house
|
f67c98e2dc3edd32fa26f7f95df03a27b5e0b3ff
|
[
"Apache-2.0"
] | null | null | null |
scripts/relay.py
|
MDooley47/jam-house
|
f67c98e2dc3edd32fa26f7f95df03a27b5e0b3ff
|
[
"Apache-2.0"
] | null | null | null |
scripts/relay.py
|
MDooley47/jam-house
|
f67c98e2dc3edd32fa26f7f95df03a27b5e0b3ff
|
[
"Apache-2.0"
] | null | null | null |
import RPi.GPIO as GPIO
| 16.857143
| 37
| 0.625424
|
import RPi.GPIO as GPIO
class Relay:
def __init__(self, pin):
self.pin = int(pin)
GPIO.setup(self.pin, GPIO.OUT)
def close(self):
GPIO.output(self.pin, GPIO.LOW)
return
def open(self):
GPIO.output(self.pin, GPIO.HIGH)
return
def status(self):
return bool(GPIO.input(self.pin))
def isClose(self):
return not self.status()
def isOpen(self):
return self.status()
def toggle(self):
if self.isOpen():
self.close()
elif self.isClose():
self.open()
return
def cleanup(self):
GPIO.cleanup(self.pin)
return
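# Hedged usage sketch for the Relay class above. It assumes BCM pin numbering and
# pin 17, neither of which the class itself mandates.
GPIO.setmode(GPIO.BCM)
relay = Relay(17)
relay.open()             # drive the pin HIGH
print(relay.isOpen())    # expected True
relay.toggle()           # back to closed (LOW)
relay.cleanup()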
| 0
| 0
| 0
| 543
| 0
| 0
| 0
| 0
| 23
|
d4a0efc7601cb3d5e6ec66f5f5af3b78a9158768
| 1,309
|
py
|
Python
|
threaded_messages/listeners.py
|
MattBlack85/django-threaded-messages
|
da86dea6dd854f9ab37201d3953f9d028faa85e9
|
[
"MIT"
] | null | null | null |
threaded_messages/listeners.py
|
MattBlack85/django-threaded-messages
|
da86dea6dd854f9ab37201d3953f9d028faa85e9
|
[
"MIT"
] | null | null | null |
threaded_messages/listeners.py
|
MattBlack85/django-threaded-messages
|
da86dea6dd854f9ab37201d3953f9d028faa85e9
|
[
"MIT"
] | 1
|
2021-01-06T14:41:13.000Z
|
2021-01-06T14:41:13.000Z
|
import logging
from . import settings as sendgrid_settings
logger = logging.getLogger('threaded_messages')
if sendgrid_settings.THREADED_MESSAGES_USE_SENDGRID:
from sendgrid_parse_api.signals import email_received
else:
email_received = None
| 30.44186
| 98
| 0.663102
|
import logging
from django.utils.html import strip_tags
from . import settings as sendgrid_settings
from .signals import message_composed
logger = logging.getLogger('threaded_messages')
if sendgrid_settings.THREADED_MESSAGES_USE_SENDGRID:
from sendgrid_parse_api.signals import email_received
else:
email_received = None
def signal_received_email(sender, sma, app_id, html, text, from_field, **kwargs):
from .utils import reply_to_thread, strip_mail
logger.debug("Sendgrid signal receive: %s, %s, %s, %s, %s, %s" % (sender, sma, app_id,
html, repr(text), from_field))
if app_id == sendgrid_settings.THREADED_MESSAGES_ID:
body = ''
if text:
body = text
if not body:
body = html
if body:
body = strip_tags(body)
body = strip_mail(body)
thread = sma.content_object
reply_to_thread(thread, sma.user, body)
def start_listening():
if email_received:
logger.debug("Sendgrid start listening")
email_received.connect(signal_received_email, dispatch_uid="thm_reply")
from .utils import invalidate_count_cache
message_composed.connect(invalidate_count_cache, dispatch_uid="thm_composed")
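# start_listening() must run once at startup for the handlers to be registered.
# One common place to call it is a Django AppConfig.ready(); the app config below
# is a sketch of that wiring, not something this module prescribes.
from django.apps import AppConfig
class ThreadedMessagesConfig(AppConfig):
    name = "threaded_messages"
    def ready(self):
        from threaded_messages.listeners import start_listening
        start_listening()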
| 0
| 0
| 0
| 0
| 0
| 928
| 0
| 35
| 91
|
4e5e61e419f37f8dd598086847f0c15a320b4ff7
| 15,814
|
py
|
Python
|
fzutils/free_api_utils.py
|
superonesfazai/fzutils
|
a8aafaacd94af0001af2ab139f0aa8cbcb8b5eda
|
[
"MIT"
] | 11
|
2018-08-04T08:14:27.000Z
|
2021-09-03T09:00:33.000Z
|
fzutils/free_api_utils.py
|
superonesfazai/fzutils
|
a8aafaacd94af0001af2ab139f0aa8cbcb8b5eda
|
[
"MIT"
] | null | null | null |
fzutils/free_api_utils.py
|
superonesfazai/fzutils
|
a8aafaacd94af0001af2ab139f0aa8cbcb8b5eda
|
[
"MIT"
] | 8
|
2018-08-04T08:16:17.000Z
|
2019-05-05T09:17:35.000Z
|
# coding:utf-8
'''
@author = super_fazai
@File : free_api_utils.py
@connect : [email protected]
'''
"""
Wrappers around some free web APIs
"""
import re
# from fzutils.ip_pools import tri_ip_pool
# from fzutils.spider.fz_requests import Requests
# from fzutils.common_utils import json_2_dict
# from fzutils.internet_utils import (
# get_base_headers,)
from .ip_pools import tri_ip_pool
from .spider.fz_requests import Requests
from .common_utils import json_2_dict
from .internet_utils import (get_base_headers)
__all__ = [
    'get_jd_one_goods_price_info',  # get the price of a single JD.com product
    'get_express_info',  # get express delivery tracking info
    'get_phone_num_info',  # get info about a phone number
    'get_baidu_baike_info',  # get the Baidu Baike entry for a keyword
    # map
    'get_bd_map_shop_info_list_by_keyword_and_area_name',  # search shops by keyword and area (Baidu Map keyword-search API) [at most the first 400 results in testing]
    'get_gd_map_shop_info_list_by_keyword_and_area_name',  # search shops by keyword and area (AMap/Gaode keyword-search API)
    'get_gd_input_prompt_info',  # get input tips for a keyword and city name (AMap API)
    'get_gd_reverse_geocode_info',  # geocode an address string (AMap API)
    'get_gd_map_shop_info_list_by_lng_and_lat_and_keyword',  # search nearby shops by longitude/latitude (primary) and keyword (secondary) (AMap around-search API)
    'get_gd_map_shop_info_list_by_gd_id',  # get the shop info list for a given gd_id (usually the first entry) [found inaccurate in testing: often returns unrelated shops]
]
def get_jd_one_goods_price_info(goods_id) -> list:
'''
    Get the price info of a single JD.com product
    :param goods_id: product id
:return:
'''
base_url = 'http://p.3.cn/prices/mgets'
params = (
('skuIds', 'J_' + goods_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params)
return json_2_dict(body, default_res=[])
def get_express_info(express_type, express_id) -> dict:
'''
    Get express delivery tracking info
    express_type: note: pass the corresponding value from the mapping below
    {
        'Shentong (STO)': 'shentong',
        'EMS': 'ems',
        'Shunfeng (SF Express)': 'shunfeng',
        'Yuantong (YTO)': 'yuantong',
        'Zhongtong (ZTO)': 'zhongtong',
        'Yunda': 'yunda',
        'Tiantian': 'tiantian',
        'Huitong': 'huitongkuaidi',
        'Quanfeng': 'quanfengkuaidi',
        'Debang (Deppon)': 'debangwuliu',
        'Zhaijisong (ZJS)': 'zhaijisong',
        ...
    }
    :param express_type: carrier name (pass the mapped value above)
    :param express_id: tracking number
:return:
'''
base_url = 'http://www.kuaidi100.com/query'
params = (
('type', express_type),
('postid', express_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params,)
return json_2_dict(body)
def get_phone_num_info(phone_num) -> dict:
'''
    Get info about a phone number (carrier/region lookup)
    :param phone_num: the phone number
:return:
'''
url = 'https://tcc.taobao.com/cc/json/mobile_tel_segment.htm'
params = (
('tel', str(phone_num)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
try:
res = re.compile('__GetZoneResult_ = (.*)').findall(body)[0]
return json_2_dict(res)
except IndexError:
return {}
def get_baidu_baike_info(keyword, bk_length=1000) -> dict:
'''
    Get the Baidu Baike (encyclopedia) card info for a keyword
    :param keyword:
:return:
'''
url = 'http://baike.baidu.com/api/openapi/BaikeLemmaCardApi'
params = (
('scope', '103'),
('format', 'json'),
('appid', '379020'),
('bk_key', str(keyword)),
('bk_length', str(bk_length)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
return json_2_dict(body)
def get_bd_map_shop_info_list_by_keyword_and_area_name(ak:str,
keyword:str,
area_name:str,
page_num:int,
page_size:int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search shops by keyword and area (Baidu Map keyword-search API) [at most the first 400 results in testing]
    :param ak: the ak (API key) applied for on Baidu Map
    :param keyword: eg: 'shoes'
    :param area_name: eg: 'Hangzhou'; the area to search, usually a province, city or district
    :param page_num: starts at 1, max 20
    :param page_size: fixed
:param ip_pool_type:
:param num_retries:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('query', str(keyword)),
('region', str(area_name)),
('output', 'json'),
('ak', str(ak)),
('page_num', str(page_num)),
('page_size', str(page_size)),
)
url = 'http://api.map.baidu.com/place/v2/search'
body = Requests.get_url_body(
url=url,
headers=headers,
params=params,
use_proxy=use_proxy,
ip_pool_type=ip_pool_type,
num_retries=num_retries,
timeout=timeout,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('results', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_keyword_and_area_name(gd_key:str,
keyword:str,
area_name:str,
page_num: int,
page_size: int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
children=0,
extensions='all',
poi_type='',
logger=None,) -> list:
"""
    Search shops by keyword and area (AMap/Gaode keyword-search API)
    :param gd_key: the AMap API key applied for
    :param keyword: keyword, eg: 'shoes'
    :param area_name: eg: 'Hangzhou'; the area to search, a city name
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param use_proxy:
    :param ip_pool_type:
    :param num_retries:
    :param timeout:
    :param children: whether to show child POI data hierarchically, 0 or 1
    :param extensions: controls how much detail is returned
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('types', str(poi_type)),
('city', str(area_name)),
('citylimit', 'true'),
('children', str(children)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'http://restapi.amap.com/v3/place/text'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
def get_gd_input_prompt_info(gd_key:str,
keyword,
city_name:str,
poi_type='',
lng:float=0.,
lat:float=0.,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Get input tips for a keyword and city name (AMap API)
    :param gd_key: the AMap API key applied for
    :param keyword: eg: 'food'
    :param city_name: eg: 'Hangzhou'
:param poi_type: eg: '050301'
:param lng:
:param lat:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
# eg: '116.481488,39.990464'
location = ','.join([str(lng), str(lat)]) if lng != 0. or lat != 0. else ''
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('type', poi_type),
('location', location),
('city', str(city_name)),
('datatype', 'all'),
)
url= 'https://restapi.amap.com/v3/assistant/inputtips'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('tips', [])
# pprint(data)
return data
def get_gd_reverse_geocode_info(gd_key:str,
address:str,
city_name:str,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Geocode an address string (AMap API)
    :param gd_key:
    :param address: eg: 'Fangheng International Center, Tower A'
    :param city_name: eg: 'Beijing'
:param ip_pool_type:
:param num_retries:
:param timeout:
:param use_proxy:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('address', str(address)),
('city', str(city_name)),
)
url= 'https://restapi.amap.com/v3/geocode/geo'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('geocodes', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_lng_and_lat_and_keyword(gd_key:str,
lng:float,
lat:float,
keyword:str='',
radius:int=1000,
page_num:int=1,
page_size:int=20,
poi_type='',
extensions='all',
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search nearby shops by longitude/latitude (primary condition) and keyword (secondary condition) (AMap around-search API)
    :param gd_key: the AMap API key applied for
    :param lng: longitude
    :param lat: latitude
    :param keyword: keyword, eg: 'shoes'; empty by default!
    :param radius: search radius (if the known coordinates already pinpoint a shop, set radius=100 to make the returned results more precise!!)
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
    :param extensions: controls how much detail is returned
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('location', ','.join([str(lng), str(lat)])),
('keywords', str(keyword)),
('types', str(poi_type)),
('radius', str(radius)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'https://restapi.amap.com/v3/place/around'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_gd_id(gd_key:str,
gd_id:str,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Get the shop info list for a given gd_id (usually the first entry) [found inaccurate in testing: lookups by id often return unrelated shops]
    :param gd_key: the AMap API key applied for
:param gd_id: eg: 'B0FFIR6P0B'
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('id', gd_id),
('output', ''),
('key', gd_key),
)
url = 'https://restapi.amap.com/v3/place/detail'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
| 33.014614
| 139
| 0.496775
|
# coding:utf-8
'''
@author = super_fazai
@File : free_api_utils.py
@connect : [email protected]
'''
"""
Wrappers around some free web APIs
"""
from pprint import pprint
import re
# from fzutils.ip_pools import tri_ip_pool
# from fzutils.spider.fz_requests import Requests
# from fzutils.common_utils import json_2_dict
# from fzutils.internet_utils import (
# get_base_headers,)
from .ip_pools import tri_ip_pool
from .spider.fz_requests import Requests
from .common_utils import json_2_dict
from .internet_utils import (
get_base_headers,)
__all__ = [
    'get_jd_one_goods_price_info',  # get the price of a single JD.com product
    'get_express_info',  # get express delivery tracking info
    'get_phone_num_info',  # get info about a phone number
    'get_baidu_baike_info',  # get the Baidu Baike entry for a keyword
    # map
    'get_bd_map_shop_info_list_by_keyword_and_area_name',  # search shops by keyword and area (Baidu Map keyword-search API) [at most the first 400 results in testing]
    'get_gd_map_shop_info_list_by_keyword_and_area_name',  # search shops by keyword and area (AMap/Gaode keyword-search API)
    'get_gd_input_prompt_info',  # get input tips for a keyword and city name (AMap API)
    'get_gd_reverse_geocode_info',  # geocode an address string (AMap API)
    'get_gd_map_shop_info_list_by_lng_and_lat_and_keyword',  # search nearby shops by longitude/latitude (primary) and keyword (secondary) (AMap around-search API)
    'get_gd_map_shop_info_list_by_gd_id',  # get the shop info list for a given gd_id (usually the first entry) [found inaccurate in testing: often returns unrelated shops]
]
def get_jd_one_goods_price_info(goods_id) -> list:
'''
    Get the price info of a single JD.com product
    :param goods_id: product id
:return:
'''
base_url = 'http://p.3.cn/prices/mgets'
params = (
('skuIds', 'J_' + goods_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params)
return json_2_dict(body, default_res=[])
def get_express_info(express_type, express_id) -> dict:
'''
    Get express delivery tracking info
    express_type: note: pass the corresponding value from the mapping below
    {
        'Shentong (STO)': 'shentong',
        'EMS': 'ems',
        'Shunfeng (SF Express)': 'shunfeng',
        'Yuantong (YTO)': 'yuantong',
        'Zhongtong (ZTO)': 'zhongtong',
        'Yunda': 'yunda',
        'Tiantian': 'tiantian',
        'Huitong': 'huitongkuaidi',
        'Quanfeng': 'quanfengkuaidi',
        'Debang (Deppon)': 'debangwuliu',
        'Zhaijisong (ZJS)': 'zhaijisong',
        ...
    }
    :param express_type: carrier name (pass the mapped value above)
    :param express_id: tracking number
:return:
'''
base_url = 'http://www.kuaidi100.com/query'
params = (
('type', express_type),
('postid', express_id),
)
body = Requests.get_url_body(
url=base_url,
use_proxy=False,
params=params,)
return json_2_dict(body)
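# Usage sketch for get_express_info above; the tracking number is made up and the
# call needs network access to kuaidi100.com.
info = get_express_info('shunfeng', 'SF1234567890123')
print(info)  # the parsed JSON response from kuaidi100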
def get_phone_num_info(phone_num) -> dict:
'''
    Get info about a phone number (carrier/region lookup)
    :param phone_num: the phone number
:return:
'''
url = 'https://tcc.taobao.com/cc/json/mobile_tel_segment.htm'
params = (
('tel', str(phone_num)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
try:
res = re.compile('__GetZoneResult_ = (.*)').findall(body)[0]
return json_2_dict(res)
except IndexError:
return {}
def get_baidu_baike_info(keyword, bk_length=1000) -> dict:
'''
    Get the Baidu Baike (encyclopedia) card info for a keyword
:param keyword:
:return:
'''
url = 'http://baike.baidu.com/api/openapi/BaikeLemmaCardApi'
params = (
('scope', '103'),
('format', 'json'),
('appid', '379020'),
('bk_key', str(keyword)),
('bk_length', str(bk_length)),
)
body = Requests.get_url_body(
url=url,
params=params,
use_proxy=False)
return json_2_dict(body)
def get_bd_map_shop_info_list_by_keyword_and_area_name(ak:str,
keyword:str,
area_name:str,
page_num:int,
page_size:int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search shops by keyword and area (Baidu Map keyword-search API) [at most the first 400 results in testing]
    :param ak: the ak (API key) applied for on Baidu Map
    :param keyword: eg: 'shoes'
    :param area_name: eg: 'Hangzhou'; the area to search, usually a province, city or district
    :param page_num: starts at 1, max 20
    :param page_size: fixed
:param ip_pool_type:
:param num_retries:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('query', str(keyword)),
('region', str(area_name)),
('output', 'json'),
('ak', str(ak)),
('page_num', str(page_num)),
('page_size', str(page_size)),
)
url = 'http://api.map.baidu.com/place/v2/search'
body = Requests.get_url_body(
url=url,
headers=headers,
params=params,
use_proxy=use_proxy,
ip_pool_type=ip_pool_type,
num_retries=num_retries,
timeout=timeout,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('results', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_keyword_and_area_name(gd_key:str,
keyword:str,
area_name:str,
page_num: int,
page_size: int=20,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
children=0,
extensions='all',
poi_type='',
logger=None,) -> list:
"""
    Search shops by keyword and area (AMap/Gaode keyword-search API)
    :param gd_key: the AMap API key applied for
    :param keyword: keyword, eg: 'shoes'
    :param area_name: eg: 'Hangzhou'; the area to search, a city name
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param use_proxy:
    :param ip_pool_type:
    :param num_retries:
    :param timeout:
    :param children: whether to show child POI data hierarchically, 0 or 1
    :param extensions: controls how much detail is returned
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('types', str(poi_type)),
('city', str(area_name)),
('citylimit', 'true'),
('children', str(children)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'http://restapi.amap.com/v3/place/text'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
def get_gd_input_prompt_info(gd_key:str,
keyword,
city_name:str,
poi_type='',
lng:float=0.,
lat:float=0.,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Get input tips for a keyword and city name (AMap API)
    :param gd_key: the AMap API key applied for
    :param keyword: eg: 'food'
    :param city_name: eg: 'Hangzhou'
:param poi_type: eg: '050301'
:param lng:
:param lat:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
    # eg: '116.481488,39.990464'  (longitude,latitude)
location = ','.join([str(lng), str(lat)]) if lng != 0. or lat != 0. else ''
params = (
('key', str(gd_key)),
('keywords', str(keyword)),
('type', poi_type),
('location', location),
('city', str(city_name)),
('datatype', 'all'),
)
url= 'https://restapi.amap.com/v3/assistant/inputtips'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('tips', [])
# pprint(data)
return data
def get_gd_reverse_geocode_info(gd_key:str,
address:str,
city_name:str,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
use_proxy=True,
logger=None,) -> list:
"""
    Geocode an address string (AMap API)
    :param gd_key:
    :param address: eg: 'Fangheng International Center, Tower A'
    :param city_name: eg: 'Beijing'
:param ip_pool_type:
:param num_retries:
:param timeout:
:param use_proxy:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('address', str(address)),
('city', str(city_name)),
)
url= 'https://restapi.amap.com/v3/geocode/geo'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
logger=logger,).get('geocodes', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_lng_and_lat_and_keyword(gd_key:str,
lng:float,
lat:float,
keyword:str='',
radius:int=1000,
page_num:int=1,
page_size:int=20,
poi_type='',
extensions='all',
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Search nearby shops by longitude/latitude (primary condition) and keyword (secondary condition) (AMap around-search API)
    :param gd_key: the AMap API key applied for
    :param lng: longitude
    :param lat: latitude
    :param keyword: keyword, eg: 'shoes'; empty by default!
    :param radius: search radius (if the known coordinates already pinpoint a shop, set radius=100 to make the returned results more precise!!)
    :param page_num: max page number is 100
    :param page_size: default '20'
    :param poi_type: POI type to query, eg: '061205'; may be left empty!
    :param extensions: controls how much detail is returned
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('key', str(gd_key)),
('location', ','.join([str(lng), str(lat)])),
('keywords', str(keyword)),
('types', str(poi_type)),
('radius', str(radius)),
('offset', str(page_size)),
('page', str(page_num)),
('extensions', str(extensions)),
)
url = 'https://restapi.amap.com/v3/place/around'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
def get_gd_map_shop_info_list_by_gd_id(gd_key:str,
gd_id:str,
use_proxy=True,
ip_pool_type=tri_ip_pool,
num_retries=6,
timeout=20,
logger=None,) -> list:
"""
    Get the shop info list for a given gd_id (usually the first entry) [found inaccurate in testing: lookups by id often return unrelated shops]
    :param gd_key: the AMap API key applied for
:param gd_id: eg: 'B0FFIR6P0B'
:param use_proxy:
:param ip_pool_type:
:param num_retries:
:param timeout:
:param logger:
:return:
"""
headers = get_base_headers()
headers.update({
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
})
params = (
('id', gd_id),
('output', ''),
('key', gd_key),
)
url = 'https://restapi.amap.com/v3/place/detail'
body = Requests.get_url_body(
use_proxy=use_proxy,
url=url,
headers=headers,
params=params,
ip_pool_type=ip_pool_type,
timeout=timeout,
num_retries=num_retries,)
# print(body)
data = json_2_dict(
json_str=body,
default_res={},
logger=logger,).get('pois', [])
# pprint(data)
return data
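# Usage sketch for one of the simpler helpers above; '13800138000' is a placeholder
# number and the call goes out to Taobao's public endpoint.
info = get_phone_num_info('13800138000')
print(info)  # dict parsed from the __GetZoneResult_ payload, {} if parsing fails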
| 1,806
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 23
|
b24c57ae9801978655a2dd8c90c7f52a6c81983c
| 377
|
py
|
Python
|
tests/test_axpy_weather.py
|
AxxAxx/axpy_weather
|
2714397968b55b63b784ce08a2df0ade08aa2008
|
[
"MIT"
] | null | null | null |
tests/test_axpy_weather.py
|
AxxAxx/axpy_weather
|
2714397968b55b63b784ce08a2df0ade08aa2008
|
[
"MIT"
] | null | null | null |
tests/test_axpy_weather.py
|
AxxAxx/axpy_weather
|
2714397968b55b63b784ce08a2df0ade08aa2008
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_axpy_weather
----------------------------------
Tests for `axpy_weather` module.
"""
| 13
| 42
| 0.599469
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_axpy_weather
----------------------------------
Tests for `axpy_weather` module.
"""
import sys
import unittest
from axpy_weather import axpy_weather
class TestAxpy_weather(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
| 0
| 0
| 0
| 142
| 0
| 0
| 0
| -1
| 91
|
dad37465db8abb220a6642fa8e0c3fe096021b1a
| 446
|
py
|
Python
|
setup.py
|
GeorgianaElena/jupyterhub-configurator
|
f356175732d487c520415b84368b3368397d8b60
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
GeorgianaElena/jupyterhub-configurator
|
f356175732d487c520415b84368b3368397d8b60
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
GeorgianaElena/jupyterhub-configurator
|
f356175732d487c520415b84368b3368397d8b60
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="jupyterhub-configurator",
version="1.0",
packages=find_packages(),
license="3-BSD",
author="yuvipanda",
author_email="[email protected]",
install_requires=["tornado", "aiohttp", "jupyterhub", "deepmerge", "pluggy"],
include_package_data=True,
entry_points={
"jupyterhub_configurator": ["z2jh = jupyterhub_configurator.schemas.z2jh"]
},
)
| 27.875
| 82
| 0.686099
|
from setuptools import setup, find_packages
setup(
name="jupyterhub-configurator",
version="1.0",
packages=find_packages(),
license="3-BSD",
author="yuvipanda",
author_email="[email protected]",
install_requires=["tornado", "aiohttp", "jupyterhub", "deepmerge", "pluggy"],
include_package_data=True,
entry_points={
"jupyterhub_configurator": ["z2jh = jupyterhub_configurator.schemas.z2jh"]
},
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
85bc4000ac9a7feae9aa1e58301dae7af7b354a8
| 2,767
|
py
|
Python
|
src/austin_heller_repo/component_manager.py
|
AustinHellerRepo/ComponentManager
|
bd347d87cb0c19acf07419ba8e8c30d4fa6f6027
|
[
"MIT"
] | null | null | null |
src/austin_heller_repo/component_manager.py
|
AustinHellerRepo/ComponentManager
|
bd347d87cb0c19acf07419ba8e8c30d4fa6f6027
|
[
"MIT"
] | null | null | null |
src/austin_heller_repo/component_manager.py
|
AustinHellerRepo/ComponentManager
|
bd347d87cb0c19acf07419ba8e8c30d4fa6f6027
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
try:
import urequests as requests
except ImportError:
try:
import ujson as json
except ImportError:
| 28.234694
| 152
| 0.729671
|
from __future__ import annotations
from typing import List, Tuple, Dict
try:
import urequests as requests
except ImportError:
import requests
try:
import ujson as json
except ImportError:
import json
class MethodTypeEnum():
Get = 0
Post = 1
class ApiInterface():
def __init__(self, *, api_base_url: str):
self.__api_base_url = api_base_url
def _get_json_result_from_url(self, *, method_type, url: str, arguments_json_object: dict) -> dict:
print("Trying to " + str(method_type) + " to \"" + url + "\"...")
if method_type == MethodTypeEnum.Get:
_response = requests.get(url, json=arguments_json_object)
elif method_type == MethodTypeEnum.Post:
_response = requests.post(url, json=arguments_json_object)
else:
raise NotImplementedError()
if _response.status_code != 200:
raise Exception("Unexpected status code: " + str(_response.status_code) + ": " + str(_response.reason) + ". Error: \"" + str(_response.text) + "\".")
else:
_json_response = _response.json()
if "is_successful" not in _json_response:
raise Exception("Unexpected missing key \"is_successful\": " + str(_json_response))
elif "response" not in _json_response:
raise Exception("Unexpected missing key \"response\": " + str(_json_response))
elif "error" not in _json_response:
raise Exception("Unexpected missing key \"error\": " + str(_json_response))
else:
_is_successful = _json_response["is_successful"]
_response_value = _json_response["response"]
_error = _json_response["error"]
if not _is_successful:
raise Exception("Error from messaging system: \"" + str(_error) + "\".")
else:
return _response_value
def _get_formatted_url(self, *, url_part: str) -> str:
return self.__api_base_url + url_part
class ComponentManagerApiInterface(ApiInterface):
def __init__(self, *, component_manager_api_base_url: str):
super().__init__(
api_base_url=component_manager_api_base_url
)
def get_health(self) -> Dict:
return self._get_json_result_from_url(
method_type=MethodTypeEnum.Get,
url=self._get_formatted_url(
url_part="/v1/test/health"
),
arguments_json_object={}
)
def get_docker_api_specification(self) -> Dict:
return self._get_json_result_from_url(
method_type=MethodTypeEnum.Post,
url=self._get_formatted_url(
url_part="/v1/api/get_docker_api_specification"
),
arguments_json_object={}
)
def get_component_specification_by_component_uuid(self, *, component_uuid: str) -> Dict:
return self._get_json_result_from_url(
method_type=MethodTypeEnum.Post,
url=self._get_formatted_url(
url_part="/v1/api/get_component_specification_by_component_uuid"
),
arguments_json_object={
"component_uuid": component_uuid
}
)
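# Hedged usage sketch for ComponentManagerApiInterface above; the base URL is a
# placeholder for wherever the component manager API is actually deployed.
api = ComponentManagerApiInterface(
    component_manager_api_base_url="http://localhost:8080"
)
print(api.get_health())                    # GET  /v1/test/health
print(api.get_docker_api_specification())  # POST /v1/api/get_docker_api_specification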
| 0
| 0
| 0
| 2,489
| 0
| 0
| 0
| -1
| 137
|
067659e95365ddba0dd9591d5eb66a3a527b4438
| 100
|
py
|
Python
|
python/hackerrank/conditional code/which one is greater/task.py
|
3keepmovingforward3/ENGR1102
|
b4f38a70560fc695d70706279047b1dec9f5c7f4
|
[
"MIT"
] | null | null | null |
python/hackerrank/conditional code/which one is greater/task.py
|
3keepmovingforward3/ENGR1102
|
b4f38a70560fc695d70706279047b1dec9f5c7f4
|
[
"MIT"
] | null | null | null |
python/hackerrank/conditional code/which one is greater/task.py
|
3keepmovingforward3/ENGR1102
|
b4f38a70560fc695d70706279047b1dec9f5c7f4
|
[
"MIT"
] | null | null | null |
# Start your code below (tip: Make sure to indent your code)
| 20
| 64
| 0.71
|
def greater_if_else(num1, num2):
# Start your code below (tip: Make sure to indent your code)
| 0
| 0
| 0
| 0
| 0
| 11
| 0
| 0
| 23
|
ca8820199ef0c7948e24a842fd58013d23375baa
| 2,451
|
py
|
Python
|
pmst/tests/test_component.py
|
talonchandler/pmst
|
c7d4d00a9a377726f8996cb416970037af92c40a
|
[
"MIT"
] | null | null | null |
pmst/tests/test_component.py
|
talonchandler/pmst
|
c7d4d00a9a377726f8996cb416970037af92c40a
|
[
"MIT"
] | null | null | null |
pmst/tests/test_component.py
|
talonchandler/pmst
|
c7d4d00a9a377726f8996cb416970037af92c40a
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("../../")
# Plane source converges
| 35.014286
| 95
| 0.571195
|
import sys
sys.path.append("../../")
from unittest import TestCase
from pmst.geometry import Point, Ray
from pmst.component import Lens
from pmst.microscope import Microscope
import pmst.source
import numpy as np
class TestPixel(TestCase):
def setUp(self):
origin = np.array((0, 0, 0))
normal = np.array((0, 0))
dimensions = np.array((.1, .2))
class TestLensIsoSourceAtFocal(TestCase):
def setUp(self):
self.r0 = Ray(Point(0, 0, 0), Point(0, 0, 1))
self.r1 = Ray(Point(0, 0, 0), Point(0, 0.1, 1))
self.r1 = Ray(Point(0, 0, 0), Point(0, 0.5, 1)) # numerical error here
self.r2 = Ray(Point(0, 0, 0), Point(0.1, 0.1, 1))
self.ray_list = [self.r0, self.r1, self.r2]
self.s = pmst.source.RayListSource(self.ray_list)
self.s.generate_rays()
self.m = Microscope(source=self.s)
self.l = Lens(Point(0, 0, 1), n=1.5, normal=Point(0, 0, 2), f=1,
radius=1.0)
self.m.add_component(self.l)
self.m.simulate()
def test_Lens(self):
print('Focal', self.s.ray_list)
self.assertTrue(self.s.n_rays == 3)
self.assertTrue(self.s.ray_list.get_ray(0) == self.r0)
# self.assertTrue(self.s.ray_list.get_ray(1) == Ray(Point(0, .5, 1), Point(0, .5, 2)))
# self.assertTrue(self.s.ray_list.get_ray(2) == Ray(Point(.1, .1, 1), Point(.1, .1, 2)))
# Plane source converges
class TestLensPlaneSourceAtFocal(TestCase):
def setUp(self):
self.r0 = Ray(Point(0, 0, 0), Point(0, 0, 1))
self.r1 = Ray(Point(0, .5, 0), Point(0, .5, 1))
self.r2 = Ray(Point(.1, .1, 0), Point(.1, .1, 1))
self.ray_list = [self.r0, self.r1, self.r2]
self.s = pmst.source.RayListSource(self.ray_list)
self.s.generate_rays()
self.m = Microscope(source=self.s)
self.l = Lens(Point(0, 0, 1), n=1.5, normal=Point(0, 0, 2), f=1,
radius=1.0)
self.m.add_component(self.l)
self.m.simulate()
def test_Lens(self):
print('Plane', self.s.ray_list)
self.assertTrue(self.s.n_rays == 3)
self.assertTrue(self.s.ray_list.get_ray(0) == Ray(Point(0, 0, 1), Point(0, 0, 2)))
self.assertTrue(self.s.ray_list.get_ray(1) == Ray(Point(0, .5, 1), Point(0, 0, 2)))
self.assertTrue(self.s.ray_list.get_ray(2) == Ray(Point(.1, .1, 1), Point(0, 0, 2)))
| 0
| 0
| 0
| 2,131
| 0
| 0
| 0
| 44
| 202
|
19c0a3b65b67a59f869878709be52202016b86ff
| 3,021
|
py
|
Python
|
src/bmeg/utils.py
|
bmeg/bmeg-etl
|
3efa28a7775d6defd77457838e92817a2fbc9e99
|
[
"MIT"
] | 1
|
2022-03-08T22:06:35.000Z
|
2022-03-08T22:06:35.000Z
|
src/bmeg/utils.py
|
bmeg/bmeg-etl
|
3efa28a7775d6defd77457838e92817a2fbc9e99
|
[
"MIT"
] | 191
|
2018-07-09T20:49:34.000Z
|
2021-02-09T18:44:28.000Z
|
src/bmeg/utils.py
|
bmeg/bmeg-etl
|
3efa28a7775d6defd77457838e92817a2fbc9e99
|
[
"MIT"
] | null | null | null |
import inspect
def enforce_types(callable):
"""
From:
https://stackoverflow.com/questions/50563546/validating-detailed-types-in-python-dataclasses
"""
spec = inspect.getfullargspec(callable)
if inspect.isclass(callable):
callable.__init__ = decorate(callable.__init__)
return callable
return decorate(callable)
| 27.216216
| 96
| 0.586892
|
import os
import inspect
import typing
import threading
from contextlib import suppress
from functools import wraps
def ensure_directory(*args):
path = os.path.join(*args)
if os.path.isfile(path):
raise Exception(
"Emitter output directory %s is a regular file", path)
if not os.path.exists(path):
os.makedirs(path)
def enforce_types(callable):
"""
From:
https://stackoverflow.com/questions/50563546/validating-detailed-types-in-python-dataclasses
"""
spec = inspect.getfullargspec(callable)
def check_types(*args, **kwargs):
parameters = dict(zip(spec.args, args))
parameters.update(kwargs)
# allow thread to control if check skipped
try:
if threading.local().skip_check_types:
return
except AttributeError:
pass
for name, value in parameters.items():
# Assume un-annotated parameters can be any type
with suppress(KeyError):
type_hint = spec.annotations[name]
if isinstance(type_hint, typing._SpecialForm):
# No check for typing.Any, typing.Union, typing.ClassVar
# (without parameters)
continue
try:
actual_type = type_hint.__origin__
except AttributeError:
actual_type = type_hint
if isinstance(actual_type, typing._SpecialForm):
# case of typing.Union[…] or typing.ClassVar[…]
actual_type = type_hint.__args__
if not isinstance(value, actual_type):
raise TypeError(
"Unexpected type for '{}' (expected {} but found {})".
format(name, type_hint, type(value))
)
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
check_types(*args, **kwargs)
return func(*args, **kwargs)
return wrapper
if inspect.isclass(callable):
callable.__init__ = decorate(callable.__init__)
return callable
return decorate(callable)
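# Minimal sketch of the decorator above applied to a plain annotated function; the
# function itself is invented for illustration.
@enforce_types
def _scale(value: int, factor: float):
    return value * factor
_scale(2, 1.5)     # passes the runtime check
# _scale("2", 1.5) would raise TypeError: Unexpected type for 'value' ...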
def set_gid(obj, gid):
object.__setattr__(obj, "gid", gid)
def get_tcga_individual_barcode(id):
parts = id.split("-")
return "-".join(parts[0:3])
def get_tcga_sample_barcode(id):
parts = id.split("-")
return "-".join(parts[0:4])
def get_tcga_portion_barcode(id):
parts = id.split("-")
parts[5] = parts[5][:-1]
return "-".join(parts[0:5])
def get_tcga_analyte_barcode(id):
parts = id.split("-")
return "-".join(parts[0:5])
def get_tcga_aliquot_barcode(id):
parts = id.split("-")
return "-".join(parts[0:7])
def tcga_barcode_is_tumor(id):
parts = id.split("-")
sample_number = parts[4][:-1]
return sample_number < 10
def tcga_barcode_is_normal(id):
parts = id.split("-")
sample_number = parts[4][:-1]
return sample_number >= 10
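# Worked example of the barcode helpers above, using a synthetically constructed
# (but correctly shaped) TCGA aliquot barcode.
_bc = "TCGA-02-0001-01C-01D-0182-01"
print(get_tcga_individual_barcode(_bc))  # TCGA-02-0001
print(get_tcga_sample_barcode(_bc))      # TCGA-02-0001-01C
print(get_tcga_analyte_barcode(_bc))     # TCGA-02-0001-01C-01D
print(get_tcga_aliquot_barcode(_bc))     # TCGA-02-0001-01C-01D-0182-01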
| 6
| 111
| 0
| 0
| 0
| 2,178
| 0
| -9
| 371
|
aadac3c14b4d6bf52fd38741ccf5cbd8ff170fdc
| 5,352
|
py
|
Python
|
check_mariadb_slaves.py
|
flickerfly/check_mariadb_slaves
|
b9917c5a097a9806d19caee83c5644afab924366
|
[
"MIT"
] | null | null | null |
check_mariadb_slaves.py
|
flickerfly/check_mariadb_slaves
|
b9917c5a097a9806d19caee83c5644afab924366
|
[
"MIT"
] | null | null | null |
check_mariadb_slaves.py
|
flickerfly/check_mariadb_slaves
|
b9917c5a097a9806d19caee83c5644afab924366
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""MariaDB slave status checker"""
import sys
import argparse
def main(args=None):
"""starter method"""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='MariaDB slave status checker')
parser.add_argument('--hostname', default='localhost', type=str,
help="MariaDB hostname")
parser.add_argument('--username', type=str, help="MariaDB username")
parser.add_argument('--password', type=str, help="MariaDB password")
parser.add_argument('--connection', required=True, type=str,
help="MariaDB slave connection name")
parser.add_argument('--mode', type=str, required=True,
choices=SlaveStatusCheck.MODES,
help="slave state to check")
parser.add_argument('-w', '--warning', type=int, default=None,
help="warning limit")
parser.add_argument('-c', '--critical', type=int, default=None,
help="critical limit")
parser.add_argument('--verbose', action='store_true', default=False,
help="enable verbose mode")
args = parser.parse_args(args)
ssc = SlaveStatusCheck(args.hostname, args.username, args.password,
args.connection, args.mode, args.verbose,
args.warning, args.critical)
ssc.get_slave_status()
ssc.run_check()
if __name__ == '__main__':
main() # pragma: no cover
| 33.873418
| 80
| 0.596599
|
#!/usr/bin/env python
"""MariaDB slave status checker"""
import sys
import argparse
import MySQLdb
class NagiosPlugin(object):
"""Nagios Plugin base class"""
def __init__(self, warning, critical, *args, **kwargs):
self.warning = warning
self.critical = critical
def run_check(self):
raise NotImplementedError
def ok_state(self, msg):
print "OK - {0}".format(msg)
sys.exit(0)
def warning_state(self, msg):
print "WARNING - {0}".format(msg)
sys.exit(1)
def critical_state(self, msg):
print "CRITICAL - {0}".format(msg)
sys.exit(2)
def unknown_state(self, msg):
print "UNNKNOWN - {0}".format(msg)
sys.exit(3)
class SlaveStatusCheck(NagiosPlugin):
"""Class to help us run slave status queries against MariaDB"""
REPLICATION_LAG_MODE = 'replication_lag'
SLAVESQL_MODE = 'slave_sql'
SLAVEIO_MODE = 'slave_io'
MODES = (REPLICATION_LAG_MODE,
SLAVESQL_MODE,
SLAVEIO_MODE)
def __init__(self, hostname, username, password, connection_name,
mode, verbose=False, warning=None, critical=None):
super(SlaveStatusCheck, self).__init__(warning, critical)
self.hostname = hostname
self.username = username
self.password = password
self.connection_name = connection_name
self.verbose = verbose
self.mode = mode
self._slave_status = {}
def run_check(self):
"""Execute the check against the given mode"""
check_fn = getattr(self, self.mode)
check_fn()
def replication_lag(self):
"""Check replication lag thresholds"""
lag = self._slave_status.get('Seconds_Behind_Master')
if lag is None:
self.unknown_state("No replication lag reported")
if not self.warning or not self.critical:
self.unknown_state("Warning and critical thresholds undefined")
lag = int(lag)
warning = int(self.warning)
critical = int(self.critical)
lag_performance_msg = "log={0}s;{1};{2};0".format(lag,warning,critical)
lag_display_msg = "Slave is {0} seconds behinds master".format(lag)
lag_msg = "{0} | {1}".format(lag_display_msg,lag_performance_msg)
if lag >= warning and lag < critical:
self.warning_state(lag_msg)
elif lag >= critical:
self.critical_state(lag_msg)
self.ok_state(lag_msg)
def slave_sql(self):
"""Check that Slave_SQL_Running = Yes"""
if self._slave_status.get('Slave_SQL_Running') == "No":
msg = "Slave sql is not running. Last error: {0}".format(
self._slave_status.get('Last_SQL_Error'))
self.critical_state(msg)
self.ok_state("Slave sql is running")
def slave_io(self):
"""Check that Slave_IO_Running = Yes"""
if self._slave_status.get('Slave_IO_Running') == "No":
msg = "Slave io is not running. Last error: {0}".format(
self._slave_status.get('Last_IO_Error'))
self.critical_state(msg)
self.ok_state("Slave io is running")
def get_slave_status(self):
"""Run the query!"""
try:
sql = 'SHOW SLAVE "{0}" STATUS'.format(self.connection_name)
conn = None
conn = MySQLdb.Connection(
self.hostname,
self.username,
self.password)
curs = conn.cursor(MySQLdb.cursors.DictCursor)
curs.execute(sql)
conn.commit()
self._slave_status = curs.fetchall()[0]
if self.verbose:
print self._slave_status
except MySQLdb.Error, exc:
msg = "{0}: {1}".format(exc.args[0], exc.args[1])
self.unknown_state(msg)
finally:
if conn:
conn.close()
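# Editor's note (hostname, credentials and thresholds below are hypothetical): a typical
# Nagios command definition would invoke this plugin as, for example,
#   check_mariadb_slaves.py --hostname db1.example.com --username nagios --password secret \
#       --connection replica1 --mode replication_lag -w 60 -c 300
# Exit codes follow the Nagios convention used above: 0=OK, 1=WARNING, 2=CRITICAL, 3=UNKNOWN.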
def main(args=None):
"""starter method"""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='MariaDB slave status checker')
parser.add_argument('--hostname', default='localhost', type=str,
help="MariaDB hostname")
parser.add_argument('--username', type=str, help="MariaDB username")
parser.add_argument('--password', type=str, help="MariaDB password")
parser.add_argument('--connection', required=True, type=str,
help="MariaDB slave connection name")
parser.add_argument('--mode', type=str, required=True,
choices=SlaveStatusCheck.MODES,
help="slave state to check")
parser.add_argument('-w', '--warning', type=int, default=None,
help="warning limit")
parser.add_argument('-c', '--critical', type=int, default=None,
help="critical limit")
parser.add_argument('--verbose', action='store_true', default=False,
help="enable verbose mode")
args = parser.parse_args(args)
ssc = SlaveStatusCheck(args.hostname, args.username, args.password,
args.connection, args.mode, args.verbose,
args.warning, args.critical)
ssc.get_slave_status()
ssc.run_check()
if __name__ == '__main__':
main() # pragma: no cover
| 0
| 0
| 0
| 3,776
| 0
| 0
| 0
| -7
| 68
|
10099580e70302fbeaf41e9a358a3baf413b5d47
| 6,025
|
py
|
Python
|
src/prg_state_ctrl.py
|
ccarr66/handwriting_gui
|
c5d8a8925ee37d874ee794a241e50974a7b9d921
|
[
"MIT"
] | null | null | null |
src/prg_state_ctrl.py
|
ccarr66/handwriting_gui
|
c5d8a8925ee37d874ee794a241e50974a7b9d921
|
[
"MIT"
] | null | null | null |
src/prg_state_ctrl.py
|
ccarr66/handwriting_gui
|
c5d8a8925ee37d874ee794a241e50974a7b9d921
|
[
"MIT"
] | 1
|
2020-11-17T21:31:55.000Z
|
2020-11-17T21:31:55.000Z
|
try:
import PIL.Image
except ModuleNotFoundError:
print('Required libraries not found, please install PIL')
if __name__ == "__main__":
raise Exception('Cannot be called as main script')
debug = True
#******************************************** Program state independent logic
filepathSlash = '\\' if isWindowsOS() else '/'
#******************************************** Object that contains program state
| 33.848315
| 137
| 0.601494
|
import sys, os, platform
import ocr_image_analyzer as OCR
try:
import PIL.Image
import PIL.ImageTk
except ModuleNotFoundError:
print('Required libraries not found, please install PIL')
if __name__ == "__main__":
raise Exception('Cannot be called as main script')
debug = True
#******************************************** Program state independent logic
def isWindowsOS():
    # platform.system is a function; it must be called to get the OS name
    return platform.system() == "Windows"
filepathSlash = '\\' if isWindowsOS() else '/'
def scaleImage(imgObj, width, height):
imgWidth, imgHeight = imgObj.size
smallestOutDim = min(width, height)
largestInDim = max(imgObj.size)
imageScale = smallestOutDim/largestInDim
newWidth = (int)(imageScale * imgWidth)
newHeight = (int)(imageScale * imgHeight)
imgObj = imgObj.resize((newWidth,newHeight), PIL.Image.ANTIALIAS)
offsetX = (int)(abs(width - newWidth)/2)
offsetY = (int)(abs(height - newHeight)/2)
background = PIL.Image.new('RGBA', (width, height), (255, 0, 0, 0))
foreground = imgObj.convert('RGBA')
background.paste(foreground, (offsetX, offsetY), foreground)
return background
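# Editor's sketch: scaleImage letterboxes an image onto a width x height RGBA canvas,
# preserving aspect ratio and centering it (the 800x600 input below is synthetic).
_scaled_demo = scaleImage(PIL.Image.new('RGB', (800, 600), 'white'), 400, 550)
assert _scaled_demo.size == (400, 550)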
#******************************************** Object that contains program state
class PrgStateCtrl:
_execPath = os.path.dirname(os.path.realpath(__file__))
_resSubFolderPath = "\\res" if isWindowsOS() else "/res"
_imgSubFolderPath = "\\images" if isWindowsOS() else "/images"
_modelName = ""
_modelPath = _execPath + _resSubFolderPath + filepathSlash
_imgName = ""
_imgPath = _execPath + _resSubFolderPath + _imgSubFolderPath
_outputFileName = "results"
_outputFilePath = _execPath
_cachedImage = PIL.Image.new('RGBA', (400, 550), (0,0,0,0))
_modelSet = False
_imageLoaded = False
_outputIsValid = False
_currentOutputImageIdx = 0
_currentOutputImages = []
_currentOutputImageLabels = []
_currentOutputText = ""
def __init__(self):
if(debug):
self._imgName = "perry3.png"
self._modelName = "handwriting_v1.model"
self.SetModel(self._modelName)
self.LoadImage(self._imgName)
def GetImagePath(self):
return self._execPath + self._resSubFolderPath + self._imgSubFolderPath + filepathSlash
def GetImageFullPath(self):
return self._execPath + self._resSubFolderPath + self._imgSubFolderPath + filepathSlash + self._imgName
def GetModelPath(self):
return self._modelPath
def GetModelFullPath(self):
return self._modelPath + self._modelName
def GetCachedImage(self):
return (self._cachedImage, "Cached Image")
def GetOutputPath(self):
return self._outputFilePath
def GetOutputName(self):
return self._outputFileName
def isValidImg(self, name):
try:
PIL.Image.open(self.GetImagePath() + name)
return True
except:
return False
def SetModel(self, modelName):
if OCR.modelIsValid(self.GetModelPath() + modelName):
self._modelName = modelName
self._modelSet = True
return True
else:
return False
def LoadImage(self, imageName):
if self.isValidImg(imageName):
self._imgName = imageName
self._cachedImage = scaleImage(PIL.Image.open(self.GetImagePath() + self._imgName), 400, 550)
self._imageLoaded = True
            self._outputIsValid = False
self._currentOutputImages.clear()
self._currentOutputImageLabels.clear()
self._currentOutputImageIdx = 0
return True
else:
return False
def PerformOCR(self):
self._currentOutputImages.clear()
self._currentOutputImageLabels.clear()
if self._modelSet and self._imageLoaded:
try:
self._currentOutputImageIdx = 0
self._currentOutputImages.append(self._cachedImage)
self._currentOutputImageLabels.append("Original")
text, images, imageLabels = OCR.analyzeImage(self.GetImageFullPath())
self._currentOutputText = ""
for c in text:
self._currentOutputText += c
for img in images:
img_pil = PIL.Image.fromarray(img)
scaledImg = scaleImage(img_pil,400,550)
self._currentOutputImages.append(scaledImg)
for label in imageLabels:
self._currentOutputImageLabels.append(label)
self._outputIsValid = True
except:
self._outputIsValid = False
def GetOutputText(self):
if self._outputIsValid:
return self._currentOutputText
else:
return ""
def GetNextOutputImage(self):
if self._outputIsValid:
if self._currentOutputImageIdx == len(self._currentOutputImages) - 1:
self._currentOutputImageIdx = 0
else:
self._currentOutputImageIdx += 1
return (self._currentOutputImages[self._currentOutputImageIdx], self._currentOutputImageLabels[self._currentOutputImageIdx])
else:
return (self._cachedImage, "Cached Image")
def GetPrevOutputImage(self):
if self._outputIsValid:
if self._currentOutputImageIdx == 0:
self._currentOutputImageIdx = len(self._currentOutputImages) - 1
else:
self._currentOutputImageIdx -= 1
return (self._currentOutputImages[self._currentOutputImageIdx], self._currentOutputImageLabels[self._currentOutputImageIdx])
else:
return (self._cachedImage, "Cached Image")
| 0
| 0
| 0
| 4,729
| 0
| 686
| 0
| 11
| 148
|
54692a530049e8154ae7b77f87421496fbec60bd
| 1,013
|
py
|
Python
|
02_assignment/toolbox/Toolbox_Python02450/Scripts/ex10_2_1.py
|
LukaAvbreht/ML_projects
|
8b36acdeb017ce8a57959c609b96111968852d5f
|
[
"MIT"
] | null | null | null |
02_assignment/toolbox/Toolbox_Python02450/Scripts/ex10_2_1.py
|
LukaAvbreht/ML_projects
|
8b36acdeb017ce8a57959c609b96111968852d5f
|
[
"MIT"
] | null | null | null |
02_assignment/toolbox/Toolbox_Python02450/Scripts/ex10_2_1.py
|
LukaAvbreht/ML_projects
|
8b36acdeb017ce8a57959c609b96111968852d5f
|
[
"MIT"
] | null | null | null |
# exercise 10.2.1
from matplotlib.pyplot import figure, show
from scipy.io import loadmat
from toolbox_02450 import clusterplot
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
# Load Matlab data file and extract variables of interest
mat_data = loadmat('../Data/synth1.mat')
X = mat_data['X']
y = mat_data['y'].squeeze()
attributeNames = [name[0] for name in mat_data['attributeNames'].squeeze()]
classNames = [name[0][0] for name in mat_data['classNames']]
N, M = X.shape
C = len(classNames)
# Perform hierarchical/agglomerative clustering on data matrix
Method = 'single'
Metric = 'euclidean'
Z = linkage(X, method=Method, metric=Metric)
# Compute and display clusters by thresholding the dendrogram
Maxclust = 4
cls = fcluster(Z, criterion='maxclust', t=Maxclust)
figure(1)
clusterplot(X, cls.reshape(cls.shape[0],1), y=y)
# Display dendrogram
max_display_levels=6
figure(2,figsize=(10,4))
dendrogram(Z, truncate_mode='level', p=max_display_levels)
show()
print('Ran Exercise 10.2.1')
| 28.138889
| 75
| 0.755183
|
# exercise 10.2.1
from matplotlib.pyplot import figure, show
from scipy.io import loadmat
from toolbox_02450 import clusterplot
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
# Load Matlab data file and extract variables of interest
mat_data = loadmat('../Data/synth1.mat')
X = mat_data['X']
y = mat_data['y'].squeeze()
attributeNames = [name[0] for name in mat_data['attributeNames'].squeeze()]
classNames = [name[0][0] for name in mat_data['classNames']]
N, M = X.shape
C = len(classNames)
# Perform hierarchical/agglomerative clustering on data matrix
Method = 'single'
Metric = 'euclidean'
Z = linkage(X, method=Method, metric=Metric)
# Compute and display clusters by thresholding the dendrogram
Maxclust = 4
cls = fcluster(Z, criterion='maxclust', t=Maxclust)
figure(1)
clusterplot(X, cls.reshape(cls.shape[0],1), y=y)
# Display dendrogram
max_display_levels=6
figure(2,figsize=(10,4))
dendrogram(Z, truncate_mode='level', p=max_display_levels)
show()
print('Ran Exercise 10.2.1')
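# Editor's note (variant, not part of the exercise): the dendrogram can also be cut at a
# fixed cophenetic distance instead of a fixed cluster count, e.g.
#   cls_by_distance = fcluster(Z, criterion='distance', t=0.5)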
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
67bd1f4cc17ab520aed02257323fdafae66bc88e
| 2,002
|
py
|
Python
|
modules/network/PrefabGateOne.py
|
Jumpscale/rsal9
|
e7ff7638ca53dafe872ce3030a379e8b65cb4831
|
[
"Apache-2.0"
] | 1
|
2017-06-07T08:11:57.000Z
|
2017-06-07T08:11:57.000Z
|
modules/network/PrefabGateOne.py
|
Jumpscale/rsal9
|
e7ff7638ca53dafe872ce3030a379e8b65cb4831
|
[
"Apache-2.0"
] | 106
|
2017-05-10T18:16:31.000Z
|
2019-09-18T15:09:07.000Z
|
modules/network/PrefabGateOne.py
|
Jumpscale/rsal9
|
e7ff7638ca53dafe872ce3030a379e8b65cb4831
|
[
"Apache-2.0"
] | 5
|
2018-01-26T16:11:52.000Z
|
2018-08-22T15:12:52.000Z
|
from js9 import j
app = j.tools.prefab._getBaseAppClass()
| 26
| 131
| 0.584915
|
from js9 import j
app = j.tools.prefab._getBaseAppClass()
class PrefabGateOne(app):
NAME = "gateone"
def build(self, reset=False):
"""
Build Gateone
:param reset: reset build if already built before
:return:
"""
if self.doneCheck("build", reset):
return
self.prefab.tools.git.pullRepo("https://github.com/liftoff/GateOne", branch="master")
self.doneSet('build')
def install(self, reset=False):
"""
Installs gateone
@param reset: boolean: forces the install operation.
"""
if reset is False and self.isInstalled():
return
cmd = """
cd /opt/code/github/liftoff/GateOne
apt-get install build-essential python3-dev python3-setuptools python3-pip -y
pip3 install tornado==4.5.3
python3 setup.py install
cp /usr/local/bin/gateone $BINDIR/gateone
ln -s /usr/bin/python3 /usr/bin/python
"""
self.prefab.core.run(cmd)
self.prefab.system.ssh.keygen(name="id_rsa")
self.doneSet('install')
def start(self, name="main", address="localhost", port=10443):
"""
Starts gateone.
@param name: str: instance name.
@param address: str: bind address.
@param port: int: port number.
"""
cmd = "eval `ssh-agent -s` ssh-add /root/.ssh/id_rsa && gateone --address={} --port={} --disable_ssl".format(address, port)
pm = self.prefab.system.processmanager.get()
pm.ensure(name='gateone_{}'.format(name), cmd=cmd)
def stop(self, name='main'):
"""
Stops gateone
"""
pm = self.prefab.system.processmanager.get()
pm.stop(name='gateone_{}'.format(name))
def restart(self, name="main"):
"""
Restart GateOne instance by name.
"""
self.stop(name)
self.start(name)
def reset(self):
"""
helper method to clean what this module generates.
"""
pass
| 0
| 0
| 0
| 1,919
| 0
| 0
| 0
| 0
| 23
|
d6055403ada75fdb58112230e49db04f73faeaa8
| 7,213
|
py
|
Python
|
scripts/slave/recipe_modules/skia/resources/trigger_wait_ct_task.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/skia/resources/trigger_wait_ct_task.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipe_modules/skia/resources/trigger_wait_ct_task.py
|
bopopescu/build
|
4e95fd33456e552bfaf7d94f7d04b19273d1c534
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T11:05:06.000Z
|
2020-07-23T11:05:06.000Z
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python utility that triggers and waits for tasks to complete on CTFE."""
import base64
import hashlib
import json
import math
import optparse
import requests
import sys
import time
CTFE_HOST = "https://ct.skia.org"
CTFE_QUEUE = CTFE_HOST + '/queue/'
CHROMIUM_PERF_TASK_POST_URI = CTFE_HOST + "/_/webhook_add_chromium_perf_task"
GET_CHROMIUM_PERF_RUN_STATUS_URI = CTFE_HOST + "/get_chromium_perf_run_status"
CHROMIUM_PERF_RUNS_HISTORY = CTFE_HOST + "/chromium_perf_runs/"
GCE_WEBHOOK_SALT_METADATA_URI = (
"http://metadata/computeMetadata/v1/project/attributes/"
"webhook_request_salt")
CTFE_CONNECTION_RETRIES = 5
CONNECTION_WAIT_BASE = 5
POLLING_FREQUENCY_SECS = 30 # 30 seconds.
TRYBOT_DEADLINE_SECS = 24 * 60 * 60 # 24 hours.
def retry():
"""A retry decorator with exponential backoff."""
return decorator
def _CreateTaskJSON(options):
"""Creates a JSON representation of the requested task."""
task_params = {}
task_params["username"] = options.requester
task_params["benchmark"] = options.benchmark
task_params["platform"] = "Linux"
task_params["page_sets"] = "10k"
task_params["repeat_runs"] = "3"
task_params["run_in_parallel"] = str(options.parallel)
task_params["benchmark_args"] = "--output-format=csv-pivot-table"
task_params["browser_args_nopatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
task_params["browser_args_withpatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
trybot_params = {}
trybot_params["issue"] = options.issue
trybot_params["patchset"] = options.patchset
trybot_params["task"] = task_params
return json.dumps(trybot_params)
def _GetWebhookSaltFromMetadata():
"""Gets webhook_request_salt from GCE's metadata server."""
headers = {"Metadata-Flavor": "Google"}
resp = requests.get(GCE_WEBHOOK_SALT_METADATA_URI, headers=headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (GCE_WEBHOOK_SALT_METADATA_URI,
resp.status_code))
return base64.standard_b64decode(resp.text)
def _TriggerTask(options):
"""Triggers the requested task on CTFE and returns the new task's ID."""
task = _CreateTaskJSON(options)
m = hashlib.sha512()
m.update(task)
m.update('notverysecret' if options.local else _GetWebhookSaltFromMetadata())
encoded = base64.standard_b64encode(m.digest())
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-Webhook-Auth-Hash": encoded}
resp = _AddTaskToCTFE(task, headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (CHROMIUM_PERF_TASK_POST_URI,
resp.status_code))
try:
ret = json.loads(resp.text)
except ValueError, e:
raise CtTrybotException(
'Did not get a JSON response from %s: %s' % (
CHROMIUM_PERF_TASK_POST_URI, e))
return ret["taskID"]
if '__main__' == __name__:
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--issue',
help='The Rietveld CL number to get the patch from.')
option_parser.add_option(
'', '--patchset',
help='The Rietveld CL patchset to use.')
option_parser.add_option(
'', '--requester',
help='Email address of the user who requested this run.')
option_parser.add_option(
'', '--benchmark',
help='The CT benchmark to run on the patch.')
option_parser.add_option(
'', '--parallel', default=False, action='store_true',
help='Whether to run this benchmark in parallel.')
option_parser.add_option(
'', '--local', default=False, action='store_true',
help='Uses a dummy metadata salt if this flag is true else it tries to '
'get the salt from GCE metadata.')
options, unused_args = option_parser.parse_args()
if (not options.issue or not options.patchset or not options.requester
or not options.benchmark):
option_parser.error('Must specify issue, patchset, requester and benchmark')
sys.exit(TriggerAndWait(options))
| 33.087156
| 80
| 0.674338
|
#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Python utility that triggers and waits for tasks to complete on CTFE."""
import base64
import hashlib
import json
import math
import optparse
import requests
import sys
import time
CTFE_HOST = "https://ct.skia.org"
CTFE_QUEUE = CTFE_HOST + '/queue/'
CHROMIUM_PERF_TASK_POST_URI = CTFE_HOST + "/_/webhook_add_chromium_perf_task"
GET_CHROMIUM_PERF_RUN_STATUS_URI = CTFE_HOST + "/get_chromium_perf_run_status"
CHROMIUM_PERF_RUNS_HISTORY = CTFE_HOST + "/chromium_perf_runs/"
GCE_WEBHOOK_SALT_METADATA_URI = (
"http://metadata/computeMetadata/v1/project/attributes/"
"webhook_request_salt")
CTFE_CONNECTION_RETRIES = 5
CONNECTION_WAIT_BASE = 5
POLLING_FREQUENCY_SECS = 30 # 30 seconds.
TRYBOT_DEADLINE_SECS = 24 * 60 * 60 # 24 hours.
class CtTrybotException(Exception):
pass
def retry():
"""A retry decorator with exponential backoff."""
def decorator(func):
def wrapper(*args, **kwargs):
tries = CTFE_CONNECTION_RETRIES
delay = CONNECTION_WAIT_BASE
while tries > 0:
try:
ret = func(*args, **kwargs)
return ret
except:
print >> sys.stderr, 'Failed to connect to CTFE.'
tries -= 1
if tries == 0:
raise
print 'Retry in %d seconds.' % delay
time.sleep(delay)
delay *= 2
return wrapper
return decorator
@retry()
def _AddTaskToCTFE(task, headers):
return requests.post(CHROMIUM_PERF_TASK_POST_URI, task, headers=headers)
@retry()
def _GetTaskStatusFromCTFE(get_url):
return requests.get(get_url)
def _CreateTaskJSON(options):
"""Creates a JSON representation of the requested task."""
task_params = {}
task_params["username"] = options.requester
task_params["benchmark"] = options.benchmark
task_params["platform"] = "Linux"
task_params["page_sets"] = "10k"
task_params["repeat_runs"] = "3"
task_params["run_in_parallel"] = str(options.parallel)
task_params["benchmark_args"] = "--output-format=csv-pivot-table"
task_params["browser_args_nopatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
task_params["browser_args_withpatch"] = (
"--disable-setuid-sandbox --enable-threaded-compositing "
"--enable-impl-side-painting")
trybot_params = {}
trybot_params["issue"] = options.issue
trybot_params["patchset"] = options.patchset
trybot_params["task"] = task_params
return json.dumps(trybot_params)
def _GetWebhookSaltFromMetadata():
"""Gets webhook_request_salt from GCE's metadata server."""
headers = {"Metadata-Flavor": "Google"}
resp = requests.get(GCE_WEBHOOK_SALT_METADATA_URI, headers=headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (GCE_WEBHOOK_SALT_METADATA_URI,
resp.status_code))
return base64.standard_b64decode(resp.text)
def _TriggerTask(options):
"""Triggers the requested task on CTFE and returns the new task's ID."""
task = _CreateTaskJSON(options)
m = hashlib.sha512()
m.update(task)
m.update('notverysecret' if options.local else _GetWebhookSaltFromMetadata())
encoded = base64.standard_b64encode(m.digest())
headers = {
"Content-type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-Webhook-Auth-Hash": encoded}
resp = _AddTaskToCTFE(task, headers)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (CHROMIUM_PERF_TASK_POST_URI,
resp.status_code))
try:
ret = json.loads(resp.text)
except ValueError, e:
raise CtTrybotException(
'Did not get a JSON response from %s: %s' % (
CHROMIUM_PERF_TASK_POST_URI, e))
return ret["taskID"]
def TriggerAndWait(options):
task_id = _TriggerTask(options)
print
  print 'Task %s has been successfully scheduled on CTFE (%s).' % (
task_id, CHROMIUM_PERF_RUNS_HISTORY)
print 'You will get an email once the task has been picked up by the server.'
print
print
# Now poll CTFE till the task completes or till deadline is hit.
time_started_polling = time.time()
while True:
if (time.time() - time_started_polling) > TRYBOT_DEADLINE_SECS:
raise CtTrybotException(
'Task did not complete in the deadline of %s seconds.' % (
TRYBOT_DEADLINE_SECS))
# Get the status of the task the trybot added.
get_url = '%s?task_id=%s' % (GET_CHROMIUM_PERF_RUN_STATUS_URI, task_id)
resp = _GetTaskStatusFromCTFE(get_url)
if resp.status_code != 200:
raise CtTrybotException(
'Return code from %s was %s' % (GET_CHROMIUM_PERF_RUN_STATUS_URI,
resp.status_code))
try:
ret = json.loads(resp.text)
except ValueError, e:
raise CtTrybotException(
'Did not get a JSON response from %s: %s' % (get_url, e))
# Assert that the status is for the task we asked for.
assert int(ret["taskID"]) == int(task_id)
status = ret["status"]
if status == "Completed":
results = ret["resultsLink"]
print
print 'Your run was successfully completed.'
if results:
print 'The output of your run is available here: %s' % results
print
print '@@@STEP_LINK@%s@%s@@@' % ('CT Perf Results', results)
print
return 0
elif status == "Completed with failures":
print
raise CtTrybotException(
'Your run was completed with failures. Please check your email for '
'links to logs of the run.')
print ('The current status of the task %s is "%s". You can view the size '
'of the queue here: %s' % (task_id, status, CTFE_QUEUE))
print 'Checking again after %s seconds' % POLLING_FREQUENCY_SECS
print
time.sleep(POLLING_FREQUENCY_SECS)
if '__main__' == __name__:
option_parser = optparse.OptionParser()
option_parser.add_option(
'', '--issue',
help='The Rietveld CL number to get the patch from.')
option_parser.add_option(
'', '--patchset',
help='The Rietveld CL patchset to use.')
option_parser.add_option(
'', '--requester',
help='Email address of the user who requested this run.')
option_parser.add_option(
'', '--benchmark',
help='The CT benchmark to run on the patch.')
option_parser.add_option(
'', '--parallel', default=False, action='store_true',
help='Whether to run this benchmark in parallel.')
option_parser.add_option(
'', '--local', default=False, action='store_true',
help='Uses a dummy metadata salt if this flag is true else it tries to '
'get the salt from GCE metadata.')
options, unused_args = option_parser.parse_args()
if (not options.issue or not options.patchset or not options.requester
or not options.benchmark):
option_parser.error('Must specify issue, patchset, requester and benchmark')
sys.exit(TriggerAndWait(options))
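# Editor's note (issue, patchset and benchmark values are illustrative): a local dry run
# of this trybot script would look like
#   python trigger_wait_ct_task.py --issue 1499543002 --patchset 1 \
#       --requester user@example.org --benchmark repaint --local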
| 0
| 152
| 0
| 21
| 0
| 2,509
| 0
| 0
| 116
|