Dataset columns (dtype and observed value range; ⌀ = nullable):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 to 40 |
| size | int64 | 6 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (⌀) | 1 to 368k |
| max_stars_repo_stars_event_min_datetime | string (⌀) | length 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string (⌀) | length 24 to 24 |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (⌀) | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string (⌀) | length 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string (⌀) | length 24 to 24 |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (⌀) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (⌀) | length 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string (⌀) | length 24 to 24 |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.53 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 6 to 1.04M |
| filtered:remove_non_ascii | int64 | 0 to 538k |
| filtered:remove_decorators | int64 | 0 to 917k |
| filtered:remove_async | int64 | 0 to 722k |
| filtered:remove_classes | int64 | -45 to 1M |
| filtered:remove_generators | int64 | 0 to 814k |
| filtered:remove_function_no_docstring | int64 | -102 to 850k |
| filtered:remove_class_no_docstring | int64 | -3 to 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 to 52.4k |
| filtered:remove_delete_markers | int64 | 0 to 59.6k |
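As a rough orientation for the schema above, the sketch below shows one way to walk over rows with these columns and compare the filtered `content` field against `original_content`. The file name `filtered_python.jsonl` and the one-JSON-record-per-line layout are assumptions made for illustration; only the column names come from the table above, and the meaning of the `filtered:*` counts is an interpretation.

```python
import json

# Hypothetical local export of the dataset: one JSON object per line.
DATA_PATH = "filtered_python.jsonl"

# Columns written by the filtering passes (see the schema table above);
# treating them as per-row counts is an assumption.
FILTER_COLUMNS = [
    "filtered:remove_non_ascii",
    "filtered:remove_decorators",
    "filtered:remove_async",
    "filtered:remove_classes",
    "filtered:remove_generators",
    "filtered:remove_function_no_docstring",
    "filtered:remove_class_no_docstring",
    "filtered:remove_unused_imports",
    "filtered:remove_delete_markers",
]

with open(DATA_PATH, encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        # 'original_content' appears to hold the unfiltered source and
        # 'content' the filtered version; compare their lengths.
        delta = len(row["original_content"]) - len(row["content"])
        reported = sum(row[col] for col in FILTER_COLUMNS)
        print(f'{row["max_stars_repo_path"]}: size={row["size"]} '
              f'length_delta={delta} filter_total={reported}')
```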
| hexsha: 351525ff3510e81241132c03602b819a2a740942 | size: 70 | ext: py | lang: Python |
| max_stars: path=core/src/static_classes/__init__.py, repo=azurlane-doujin/AzurLanePaintingExtract-v1.0, head=ef4f25e70b3ca1b9df4304132cc7612c8f5efebb, licenses=["MIT"], count=144, events=2019-06-13T06:43:43.000Z to 2022-03-29T15:07:57.000Z |
| max_issues: path=core/src/static_classes/__init__.py, repo=Shabi1213/AzurLanePaintingExtract-v1.0, head=ef4f25e70b3ca1b9df4304132cc7612c8f5efebb, licenses=["MIT"], count=2, events=2020-08-02T15:08:58.000Z to 2021-11-29T02:34:18.000Z |
| max_forks: path=core/src/static_classes/__init__.py, repo=Goodjooy/ArknightsPaintingExtract, head=e1e6ef339c6f76cab45a26df66497126c11a21a8, licenses=["MIT"], count=19, events=2020-03-01T10:06:52.000Z to 2022-02-06T13:49:26.000Z |
| content: |
__all__ = ["file_read", 'image_deal', 'search_order', 'static_data']
| avg_line_length: 35 | max_line_length: 69 | alphanum_fraction: 0.7 |
| original_content: |
__all__ = ["file_read", 'image_deal', 'search_order', 'static_data']
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 0 | filtered:remove_delete_markers: 0 |
| hexsha: 13e87111dffd55a11464ba7c203a6cc1cb2cb9ac | size: 412 | ext: py | lang: Python |
| max_stars: path=Demo/wdt/example_wdt_file.py, repo=quecpython/EC100Y-SDK, head=712c7eb7b54a3971009d94f6d6b21a6011d56f68, licenses=["MIT"], count=4, events=2021-01-28T01:30:59.000Z to 2021-06-15T07:13:41.000Z |
| max_issues: path=Demo/wdt/example_wdt_file.py, repo=QuePython/EC100Y-SDK, head=712c7eb7b54a3971009d94f6d6b21a6011d56f68, licenses=["MIT"], count=null, events=null |
| max_forks: path=Demo/wdt/example_wdt_file.py, repo=QuePython/EC100Y-SDK, head=712c7eb7b54a3971009d94f6d6b21a6011d56f68, licenses=["MIT"], count=3, events=2021-04-07T09:55:59.000Z to 2022-01-08T15:15:23.000Z |
| content: |
'''
@Author: Pawn
@Date: 2020-08-12
@LastEditTime: 2020-08-12 17:06:08
@Description: example for module timer
@FilePath: example_wdt.py
'''
from machine import WDT
from machine import Timer
timer1 = Timer(Timer.Timer1)
if __name__ == '__main__':
wdt = WDT(20) #
timer1.start(period=15000, mode=timer1.PERIODIC, callback=feed) #
# wdt.stop()
| avg_line_length: 17.913043 | max_line_length: 78 | alphanum_fraction: 0.682039 |
| original_content: |
'''
@Author: Pawn
@Date: 2020-08-12
@LastEditTime: 2020-08-12 17:06:08
@Description: example for module timer
@FilePath: example_wdt.py
'''
from machine import WDT
from machine import Timer
timer1 = Timer(Timer.Timer1)
def feed(t):
wdt.feed()
if __name__ == '__main__':
wdt = WDT(20) # 启动看门狗,间隔时长
timer1.start(period=15000, mode=timer1.PERIODIC, callback=feed) # 使用定时器喂狗
# wdt.stop()
| filtered:remove_non_ascii: 51 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 6 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 0 | filtered:remove_delete_markers: 23 |
| hexsha: bcb5024cd6f5e64a630af32466bb1b12cbac2b4a | size: 2,752 | ext: py | lang: Python |
| max_stars: path=users/tests/test_urls.py, repo=jewells07/mumbleapi, head=beee0b50eefb3b1ff3e21073400c778323eece98, licenses=["Apache-2.0"], count=1, events=2021-05-18T11:37:44.000Z to 2021-05-18T11:37:44.000Z |
| max_issues: path=users/tests/test_urls.py, repo=TomNewton1/mumbleapi, head=108d5a841b97d38285bede523f243624e05bc231, licenses=["Apache-2.0"], count=null, events=null |
| max_forks: path=users/tests/test_urls.py, repo=TomNewton1/mumbleapi, head=108d5a841b97d38285bede523f243624e05bc231, licenses=["Apache-2.0"], count=null, events=null |
| content: |
# Create your tests here.
| avg_line_length: 37.69863 | max_line_length: 87 | alphanum_fraction: 0.703125 |
| original_content: |
from django.conf.urls import url
from django.urls import reverse , resolve
from rest_framework import status
from rest_framework.test import APITestCase
from users.views import (
followUser , users , UserProfileUpdate ,
ProfilePictureUpdate , usersRecommended ,
user , userMumbles, userArticles, passwordChange,
sendActivationEmail, sendActivationEmail , activate)
# Create your tests here.
class AccountTests(APITestCase):
def setUp(self):
pass
def test_users_url(self):
url = 'users-api:users'
reversed_url = reverse(url)
response = self.client.get('/api/users/')
self.assertEqual(resolve(reversed_url).func,users)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_users_follow_url(self):
url = 'users-api:follow-user'
reversed_url = reverse(url,args=['praveen'])
self.assertEqual(resolve(reversed_url).func,followUser)
def test_user_profile_update_url(self):
url = 'users-api:profile_update'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func.view_class,UserProfileUpdate)
def test_profile_update_photo_url(self):
url = 'users-api:profile_update_photo'
reversed_url = reverse(url)
resolved = resolve(reversed_url).func
self.assertEqual(resolved.view_class,ProfilePictureUpdate)
def test_users_recommended_url(self):
url = 'users-api:users-recommended'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,usersRecommended)
def test_user_url(self):
url = 'users-api:user'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,user)
def test_user_mumbles(self):
url = 'users-api:user-mumbles'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,userMumbles)
def test_user_articles_url(self):
url = 'users-api:user-articles'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,userArticles)
def test_user_password_url(self):
url = 'users-api:password-change'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,passwordChange)
def test_send_activation_email_url(self):
url = 'users-api:send-activation-email'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,sendActivationEmail)
def test_active_user_account_url(self):
url = 'users-api:verify'
reversed_url = reverse(url,args=['903u924u934u598348943','*&6g83chruhrweriuj'])
self.assertEqual(resolve(reversed_url).func,activate)
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 2,322 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 272 | filtered:remove_delete_markers: 133 |
| hexsha: 73e1afd1d4cf91f0ff98fd1d78bfc8ce897e5c54 | size: 4,921 | ext: py | lang: Python |
| max_stars: path=src/Testing/ZopeTestCase/utils.py, repo=tseaver/Zope-RFA, head=08634f39b0f8b56403a2a9daaa6ee4479ef0c625, licenses=["ZPL-2.1"], count=2, events=2015-12-21T10:34:56.000Z to 2017-09-24T11:07:58.000Z |
| max_issues: path=src/Testing/ZopeTestCase/utils.py, repo=MatthewWilkes/Zope, head=740f934fc9409ae0062e8f0cd6dcfd8b2df00376, licenses=["ZPL-2.1"], count=null, events=null |
| max_forks: path=src/Testing/ZopeTestCase/utils.py, repo=MatthewWilkes/Zope, head=740f934fc9409ae0062e8f0cd6dcfd8b2df00376, licenses=["ZPL-2.1"], count=null, events=null |
| content: |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility functions
These functions are designed to be imported and run at
module level to add functionality to the test environment.
"""
import os
import sys
import time
import random
import transaction
def importObjectFromFile(container, filename, quiet=0):
'''Imports an object from a (.zexp) file into the given container.'''
from ZopeLite import _print, _patched
quiet = quiet or not _patched
start = time.time()
if not quiet: _print("Importing %s ... " % os.path.basename(filename))
container._importObjectFromFile(filename, verify=0)
transaction.commit()
if not quiet: _print('done (%.3fs)\n' % (time.time() - start))
_Z2HOST = None
_Z2PORT = None
def startZServer(number_of_threads=1, log=None):
'''Starts an HTTP ZServer thread.'''
global _Z2HOST, _Z2PORT
if _Z2HOST is None:
_Z2HOST = '127.0.0.1'
_Z2PORT = random.choice(range(55000, 55500))
from threadutils import setNumberOfThreads
setNumberOfThreads(number_of_threads)
from threadutils import QuietThread, zserverRunner
t = QuietThread(target=zserverRunner, args=(_Z2HOST, _Z2PORT, log))
t.setDaemon(1)
t.start()
time.sleep(0.1) # Sandor Palfy
return _Z2HOST, _Z2PORT
def makerequest(app, stdout=sys.stdout):
'''Wraps the app into a fresh REQUEST.'''
from Testing.makerequest import makerequest as _makerequest
environ = {}
environ['SERVER_NAME'] = _Z2HOST or 'nohost'
environ['SERVER_PORT'] = '%d' % (_Z2PORT or 80)
environ['REQUEST_METHOD'] = 'GET'
return _makerequest(app, stdout=stdout, environ=environ)
def appcall(func, *args, **kw):
'''Calls a function passing 'app' as first argument.'''
from base import app, close
app = app()
args = (app,) + args
try:
return func(*args, **kw)
finally:
transaction.abort()
close(app)
def makelist(arg):
'''Turns arg into a list. Where arg may be
list, tuple, or string.
'''
if type(arg) == type([]):
return arg
if type(arg) == type(()):
return list(arg)
if type(arg) == type(''):
return filter(None, [arg])
raise ValueError('Argument must be list, tuple, or string')
__all__ = [
'setupCoreSessions',
'setupSiteErrorLog',
'startZServer',
'importObjectFromFile',
'appcall',
'makerequest',
'makelist',
]
| avg_line_length: 31.544872 | max_line_length: 83 | alphanum_fraction: 0.636456 |
| original_content: |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Utility functions
These functions are designed to be imported and run at
module level to add functionality to the test environment.
"""
import os
import sys
import time
import random
import transaction
import layer
@layer.appcall
def setupCoreSessions(app):
'''Sets up the session_data_manager e.a.'''
from Acquisition import aq_base
commit = 0
if not hasattr(app, 'temp_folder'):
from Products.TemporaryFolder.TemporaryFolder import MountedTemporaryFolder
tf = MountedTemporaryFolder('temp_folder', 'Temporary Folder')
app._setObject('temp_folder', tf)
commit = 1
if not hasattr(aq_base(app.temp_folder), 'session_data'):
from Products.Transience.Transience import TransientObjectContainer
toc = TransientObjectContainer('session_data',
'Session Data Container',
timeout_mins=3,
limit=100)
app.temp_folder._setObject('session_data', toc)
commit = 1
if not hasattr(app, 'browser_id_manager'):
from Products.Sessions.BrowserIdManager import BrowserIdManager
bid = BrowserIdManager('browser_id_manager',
'Browser Id Manager')
app._setObject('browser_id_manager', bid)
commit = 1
if not hasattr(app, 'session_data_manager'):
from Products.Sessions.SessionDataManager import SessionDataManager
sdm = SessionDataManager('session_data_manager',
title='Session Data Manager',
path='/temp_folder/session_data',
requestName='SESSION')
app._setObject('session_data_manager', sdm)
commit = 1
if commit:
transaction.commit()
@layer.appcall
def setupSiteErrorLog(app):
'''Sets up the error_log object required by ZPublisher.'''
if not hasattr(app, 'error_log'):
try:
from Products.SiteErrorLog.SiteErrorLog import SiteErrorLog
except ImportError:
pass
else:
app._setObject('error_log', SiteErrorLog())
transaction.commit()
def importObjectFromFile(container, filename, quiet=0):
'''Imports an object from a (.zexp) file into the given container.'''
from ZopeLite import _print, _patched
quiet = quiet or not _patched
start = time.time()
if not quiet: _print("Importing %s ... " % os.path.basename(filename))
container._importObjectFromFile(filename, verify=0)
transaction.commit()
if not quiet: _print('done (%.3fs)\n' % (time.time() - start))
_Z2HOST = None
_Z2PORT = None
def startZServer(number_of_threads=1, log=None):
'''Starts an HTTP ZServer thread.'''
global _Z2HOST, _Z2PORT
if _Z2HOST is None:
_Z2HOST = '127.0.0.1'
_Z2PORT = random.choice(range(55000, 55500))
from threadutils import setNumberOfThreads
setNumberOfThreads(number_of_threads)
from threadutils import QuietThread, zserverRunner
t = QuietThread(target=zserverRunner, args=(_Z2HOST, _Z2PORT, log))
t.setDaemon(1)
t.start()
time.sleep(0.1) # Sandor Palfy
return _Z2HOST, _Z2PORT
def makerequest(app, stdout=sys.stdout):
'''Wraps the app into a fresh REQUEST.'''
from Testing.makerequest import makerequest as _makerequest
environ = {}
environ['SERVER_NAME'] = _Z2HOST or 'nohost'
environ['SERVER_PORT'] = '%d' % (_Z2PORT or 80)
environ['REQUEST_METHOD'] = 'GET'
return _makerequest(app, stdout=stdout, environ=environ)
def appcall(func, *args, **kw):
'''Calls a function passing 'app' as first argument.'''
from base import app, close
app = app()
args = (app,) + args
try:
return func(*args, **kw)
finally:
transaction.abort()
close(app)
def makelist(arg):
'''Turns arg into a list. Where arg may be
list, tuple, or string.
'''
if type(arg) == type([]):
return arg
if type(arg) == type(()):
return list(arg)
if type(arg) == type(''):
return filter(None, [arg])
raise ValueError('Argument must be list, tuple, or string')
__all__ = [
'setupCoreSessions',
'setupSiteErrorLog',
'startZServer',
'importObjectFromFile',
'appcall',
'makerequest',
'makelist',
]
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 1,844 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: -9 | filtered:remove_delete_markers: 68 |
| hexsha: c4ac1344ac12b2b41b5b5813289b0939cfb026e8 | size: 977 | ext: py | lang: Python |
| max_stars: path=experiments/mcompress/set_options.py, repo=paralab/EigenMM, head=5c94233524ae2758ebf47c3b3fdb6570a6cc4e59, licenses=["MIT"], count=null, events=null |
| max_issues: path=experiments/mcompress/set_options.py, repo=paralab/EigenMM, head=5c94233524ae2758ebf47c3b3fdb6570a6cc4e59, licenses=["MIT"], count=null, events=null |
| max_forks: path=experiments/mcompress/set_options.py, repo=paralab/EigenMM, head=5c94233524ae2758ebf47c3b3fdb6570a6cc4e59, licenses=["MIT"], count=null, events=null |
| content: |
emm_fmt = """<?xml version="1.0" encoding="utf-8" ?>
<EIGEN_MM>
<OPTIONS
_splitmaxiters="10"
_nodesperevaluator="1"
_subproblemsperevaluator="1"
_totalsubproblems="1"
_nevaluators="1"
_taskspernode="%d"
_nevals="-1"
_nk="10"
_nb="4"
_p="0"
_nv="10"
_raditers="20"
_splittol="0.9"
_radtol="1e-8"
_L="1.1"
_R="-1"
_terse="0"
_details="0"
_debug="1"
_save_correctness="0"
_save_operators="0"
_save_eigenvalues="0"
_save_eigenbasis="1"
_correctness_filename=""
_operators_filename=""
_eigenvalues_filename=""
_eigenbasis_filename="%s" />
</EIGEN_MM>"""
import sys
if __name__ == "__main__":
taskspernode = int(sys.argv[1])
optionsdir = sys.argv[2]
outputdir = sys.argv[3]
expname = sys.argv[4]
emmpath = optionsdir + "/" + expname + "_options.xml"
f = open(emmpath, 'w')
f_str = emm_fmt % (taskspernode, outputdir + "/" + expname)
f.write(f_str)
f.close()
| avg_line_length: 20.354167 | max_line_length: 63 | alphanum_fraction: 0.616172 |
| original_content: |
emm_fmt = """<?xml version="1.0" encoding="utf-8" ?>
<EIGEN_MM>
<OPTIONS
_splitmaxiters="10"
_nodesperevaluator="1"
_subproblemsperevaluator="1"
_totalsubproblems="1"
_nevaluators="1"
_taskspernode="%d"
_nevals="-1"
_nk="10"
_nb="4"
_p="0"
_nv="10"
_raditers="20"
_splittol="0.9"
_radtol="1e-8"
_L="1.1"
_R="-1"
_terse="0"
_details="0"
_debug="1"
_save_correctness="0"
_save_operators="0"
_save_eigenvalues="0"
_save_eigenbasis="1"
_correctness_filename=""
_operators_filename=""
_eigenvalues_filename=""
_eigenbasis_filename="%s" />
</EIGEN_MM>"""
import sys
if __name__ == "__main__":
taskspernode = int(sys.argv[1])
optionsdir = sys.argv[2]
outputdir = sys.argv[3]
expname = sys.argv[4]
emmpath = optionsdir + "/" + expname + "_options.xml"
f = open(emmpath, 'w')
f_str = emm_fmt % (taskspernode, outputdir + "/" + expname)
f.write(f_str)
f.close()
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 0 | filtered:remove_delete_markers: 0 |
| hexsha: cb18427c6dda988b4a46b9e6269b431bec7b5ea3 | size: 5,758 | ext: py | lang: Python |
| max_stars: path=qtpyvcp/widgets/display_widgets/atc_widget/atc.py, repo=awigen/qtpyvcp, head=5a23c4bca78accb159a76ac03652c74d5a07d14f, licenses=["BSD-3-Clause-LBNL", "MIT"], count=null, events=null |
| max_issues: path=qtpyvcp/widgets/display_widgets/atc_widget/atc.py, repo=awigen/qtpyvcp, head=5a23c4bca78accb159a76ac03652c74d5a07d14f, licenses=["BSD-3-Clause-LBNL", "MIT"], count=null, events=null |
| max_forks: path=qtpyvcp/widgets/display_widgets/atc_widget/atc.py, repo=awigen/qtpyvcp, head=5a23c4bca78accb159a76ac03652c74d5a07d14f, licenses=["BSD-3-Clause-LBNL", "MIT"], count=null, events=null |
| content: |
import os
# Workarround for nvidia propietary drivers
import ctypes
import ctypes.util
ctypes.CDLL(ctypes.util.find_library("GL"), mode=ctypes.RTLD_GLOBAL)
# end of Workarround
from qtpyvcp.plugins import getPlugin
from qtpyvcp.utilities import logger
LOG = logger.getLogger(__name__)
STATUS = getPlugin('status')
TOOLTABLE = getPlugin('tooltable')
IN_DESIGNER = os.getenv('DESIGNER', False)
WIDGET_PATH = os.path.dirname(os.path.abspath(__file__))
| avg_line_length: 30.146597 | max_line_length: 83 | alphanum_fraction: 0.633032 |
| original_content: |
import os
# Workarround for nvidia propietary drivers
import ctypes
import ctypes.util
ctypes.CDLL(ctypes.util.find_library("GL"), mode=ctypes.RTLD_GLOBAL)
# end of Workarround
from qtpy.QtCore import Signal, Slot, QUrl, QTimer
from qtpy.QtQuickWidgets import QQuickWidget
from qtpyvcp.plugins import getPlugin
from qtpyvcp.utilities import logger
from qtpyvcp.utilities.hal_qlib import QComponent
LOG = logger.getLogger(__name__)
STATUS = getPlugin('status')
TOOLTABLE = getPlugin('tooltable')
IN_DESIGNER = os.getenv('DESIGNER', False)
WIDGET_PATH = os.path.dirname(os.path.abspath(__file__))
class DynATC(QQuickWidget):
moveToPocketSig = Signal(int, int, arguments=['previous_pocket', 'pocket_num'])
# toolInSpindleSig = Signal(int, arguments=['tool_num'])
rotateFwdSig = Signal(int, arguments=['steps'])
rotateRevSig = Signal(int, arguments=['steps'])
showToolSig = Signal(int, int, arguments=['pocket', 'tool_num'])
hideToolSig = Signal(int, arguments=['tool_num'])
homeMsgSig = Signal(str, arguments=["message"])
homingMsgSig = Signal(str, arguments=["message"])
def __init__(self, parent=None):
super(DynATC, self).__init__(parent)
if IN_DESIGNER:
return
self.atc_position = 0
self.pocket = 1
self.home = 0
self.homing = 0
self.pocket_slots = 12
self.component = QComponent("atc-widget")
# define pocket pins to store tools
for i in range(self.pocket_slots):
pin_name = "pocket-{}".format(i+1)
self.component.newPin(pin_name, "s32", "in")
self.component[pin_name].valueChanged.connect(self.pocket_changed)
self.component.newPin('home', "float", "in")
self.component.newPin('homing', "float", "in")
self.component.newPin("goto", "float", "in")
self.component.newPin('goto-enable', "bit", "in")
self.component.newPin("steps", "float", "in")
self.component.newPin('steps-fwd', "bit", "in")
self.component.newPin('steps-rev', "bit", "in")
self.component.newPin('jog-fwd', "bit", "in")
self.component.newPin('jog-rev', "bit", "in")
self.component['home'].valueIncreased.connect(self.home_message)
self.component['homing'].valueIncreased.connect(self.homing_message)
self.component['goto-enable'].valueIncreased.connect(self.goto)
self.component['steps-fwd'].valueIncreased.connect(self.steps_fwd)
self.component['steps-rev'].valueIncreased.connect(self.steps_rev)
self.component['jog-fwd'].valueIncreased.connect(self.jog_fwd)
self.component['jog-rev'].valueIncreased.connect(self.jog_rev)
self.component.ready()
self.engine().rootContext().setContextProperty("atc_spiner", self)
qml_path = os.path.join(WIDGET_PATH, "atc.qml")
url = QUrl.fromLocalFile(qml_path)
self.setSource(url) # Fixme fails on qtdesigner
self.tool_table = None
self.status_tool_table = None
self.pockets = dict()
self.tools = None
self.load_tools()
self.draw_tools()
STATUS.tool_table.notify(self.load_tools)
STATUS.pocket_prepped.notify(self.on_pocket_prepped)
STATUS.tool_in_spindle.notify(self.on_tool_in_spindle)
def hideEvent(self, *args, **kwargs):
pass # hack to prevent animation glitch when we are on another tab
def load_tools(self):
self.tool_table = TOOLTABLE.getToolTable()
self.status_tool_table = STATUS.tool_table
self.pockets = dict()
self.tools = dict()
for i in range(self.pocket_slots):
pin_name = "pocket-{}".format(i+1)
self.pockets[i + 1] = self.component[pin_name].value
def draw_tools(self):
for i in range(1, 13):
self.hideToolSig.emit(i)
for pocket, tool in self.pockets.items():
if 0 < pocket < 13:
if tool != 0:
self.showToolSig.emit(pocket, tool)
def pocket_changed(self):
self.load_tools()
self.draw_tools()
def on_tool_in_spindle(self, tool):
self.load_tools()
self.draw_tools()
def on_pocket_prepped(self, pocket_num):
self.load_tools()
self.draw_tools()
def homing_message(self, *args, **kwargs):
self.homing = args[0]
if self.homing:
self.homingMsgSig.emit("REFERENCING")
else:
self.homingMsgSig.emit("")
def home_message(self, *args, **kwargs):
self.home = args[0]
if self.homing:
self.homeMsgSig.emit("")
else:
self.homeMsgSig.emit("UN REFERENCED")
def goto(self):
self.component["goto-enable"].value = 0
pocket = self.component["goto"].value
if self.pocket > pocket:
steps = self.pocket - pocket
self.rotate_rev(steps)
elif self.pocket < pocket:
steps = pocket - self.pocket
self.rotate_fwd(steps)
def steps_fwd(self):
self.component["steps-fwd"].value = 0
steps = self.component["steps"].value
self.rotate_fwd(steps)
def steps_rev(self):
self.component["steps-rev"].value = 0
steps = self.component["steps"].value
self.rotate_rev(steps)
def rotate_fwd(self, steps):
self.rotateFwdSig.emit(steps)
def rotate_rev(self, steps):
self.rotateRevSig.emit(steps)
def jog_fwd(self, *args, **kwargs):
self.rotateFwdSig.emit(1)
self.command.set_digital_output(5, 0)
def jog_rev(self, *args, **kwargs):
self.rotateRevSig.emit(1)
self.command.set_digital_output(6, 0)
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 5,131 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 80 | filtered:remove_delete_markers: 90 |
| hexsha: 0515dfbce20f8b6db5af0d540ac7d973ccefba31 | size: 603 | ext: py | lang: Python |
| max_stars: path=oreo_backend/memes/migrations/0003_auto_20211108_1250.py, repo=TaipeiTechIAEWorkplace/Website, head=fc962d5f8163c08f901fe4d97af14b8e7b3cfc9c, licenses=["MIT"], count=1, events=2022-02-06T07:08:13.000Z to 2022-02-06T07:08:13.000Z |
| max_issues: path=oreo_backend/memes/migrations/0003_auto_20211108_1250.py, repo=TaipeiTechIAEWorkplace/Website, head=fc962d5f8163c08f901fe4d97af14b8e7b3cfc9c, licenses=["MIT"], count=null, events=null |
| max_forks: path=oreo_backend/memes/migrations/0003_auto_20211108_1250.py, repo=TaipeiTechIAEWorkplace/Website, head=fc962d5f8163c08f901fe4d97af14b8e7b3cfc9c, licenses=["MIT"], count=null, events=null |
| content: |
# Generated by Django 3.2.9 on 2021-11-08 04:50
| avg_line_length: 22.333333 | max_line_length: 58 | alphanum_fraction: 0.557214 |
| original_content: |
# Generated by Django 3.2.9 on 2021-11-08 04:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('memes', '0002_auto_20211108_1233'),
]
operations = [
migrations.RemoveField(
model_name='photo',
name='hashtag',
),
migrations.RemoveField(
model_name='photo',
name='uploader',
),
migrations.AlterField(
model_name='photo',
name='upload_date',
field=models.DateTimeField(auto_now_add=True),
),
]
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 489 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 19 | filtered:remove_delete_markers: 46 |
| hexsha: fd2d2d27a90eb687cfa5ddaaf7a717a930d940df | size: 2,951 | ext: py | lang: Python |
| max_stars: path=ldap_sync/__main__.py, repo=JuKu/pycroft, head=15595f9b4327da5c52c77174def73660226da7dc, licenses=["Apache-2.0"], count=null, events=null |
| max_issues: path=ldap_sync/__main__.py, repo=JuKu/pycroft, head=15595f9b4327da5c52c77174def73660226da7dc, licenses=["Apache-2.0"], count=null, events=null |
| max_forks: path=ldap_sync/__main__.py, repo=JuKu/pycroft, head=15595f9b4327da5c52c77174def73660226da7dc, licenses=["Apache-2.0"], count=null, events=null |
| content: |
import argparse
import logging
from .exporter import logger
logger = logging.getLogger('ldap_sync')
NAME_LEVEL_MAPPING = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
parser = argparse.ArgumentParser(description="Pycroft ldap syncer")
parser.add_argument('--fake', dest='fake', action='store_true', default=False,
help="Use a mocked LDAP backend")
parser.add_argument("-l", "--log", dest='loglevel', type=str,
choices=list(NAME_LEVEL_MAPPING.keys()), default='info',
help="Set the loglevel")
parser.add_argument("-d", "--debug", dest='loglevel', action='store_const',
const='debug', help="Short for --log=debug")
if __name__ == '__main__':
exit(main())
| avg_line_length: 30.42268 | max_line_length: 91 | alphanum_fraction: 0.683497 |
| original_content: |
import argparse
import logging
import os
from .exporter import add_stdout_logging, establish_and_return_ldap_connection, \
establish_and_return_session, fake_connection, fetch_current_ldap_users, \
fetch_users_to_sync, get_config_or_exit, logger, sync_all
logger = logging.getLogger('ldap_sync')
def sync_production():
logger.info("Starting the production sync. See --help for other options.")
config = get_config_or_exit(required_property='ldap')
db_users = fetch_users_to_sync(
session=establish_and_return_session(config.db_uri),
required_property=config.required_property,
)
logger.info("Fetched %s database users", len(db_users))
connection = establish_and_return_ldap_connection(
host=config.host,
port=config.port,
bind_dn=config.bind_dn,
bind_pw=config.bind_pw,
)
ldap_users = fetch_current_ldap_users(connection, base_dn=config.base_dn)
logger.info("Fetched %s ldap users", len(ldap_users))
sync_all(db_users, ldap_users, connection, base_dn=config.base_dn)
def sync_fake():
logger.info("Starting sync using a mocked LDAP backend. See --help for other options.")
try:
db_uri = os.environ['PYCROFT_DB_URI']
except KeyError:
logger.critical('PYCROFT_DB_URI not set')
exit()
db_users = fetch_users_to_sync(
session=establish_and_return_session(db_uri)
)
logger.info("Fetched %s database users", len(db_users))
connection = fake_connection()
BASE_DN = 'ou=pycroft,dc=agdsn,dc=de'
logger.debug("BASE_DN set to %s", BASE_DN)
ldap_users = fetch_current_ldap_users(connection, base_dn=BASE_DN)
logger.info("Fetched %s ldap users", len(ldap_users))
sync_all(db_users, ldap_users, connection, base_dn=BASE_DN)
NAME_LEVEL_MAPPING = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
parser = argparse.ArgumentParser(description="Pycroft ldap syncer")
parser.add_argument('--fake', dest='fake', action='store_true', default=False,
help="Use a mocked LDAP backend")
parser.add_argument("-l", "--log", dest='loglevel', type=str,
choices=list(NAME_LEVEL_MAPPING.keys()), default='info',
help="Set the loglevel")
parser.add_argument("-d", "--debug", dest='loglevel', action='store_const',
const='debug', help="Short for --log=debug")
def main():
args = parser.parse_args()
add_stdout_logging(logger, level=NAME_LEVEL_MAPPING[args.loglevel])
try:
if args.fake:
sync_fake()
else:
sync_production()
except KeyboardInterrupt:
logger.fatal("SIGINT received, stopping.")
logger.info("Re-run the syncer to retain a consistent state.")
return 1
return 0
if __name__ == '__main__':
exit(main())
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 1,821 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 184 | filtered:remove_delete_markers: 91 |
| hexsha: a20fcaf6ccf8820b917742d329e834e07689579f | size: 6,837 | ext: py | lang: Python |
| max_stars: path=visdex/exploratory_graphs/__init__.py, repo=mcraig-ibme/visdex, head=bbf8365e627f6d52fb201ae4ae6fef6775c4d716, licenses=["Apache-2.0"], count=null, events=null |
| max_issues: path=visdex/exploratory_graphs/__init__.py, repo=mcraig-ibme/visdex, head=bbf8365e627f6d52fb201ae4ae6fef6775c4d716, licenses=["Apache-2.0"], count=null, events=null |
| max_forks: path=visdex/exploratory_graphs/__init__.py, repo=mcraig-ibme/visdex, head=bbf8365e627f6d52fb201ae4ae6fef6775c4d716, licenses=["Apache-2.0"], count=null, events=null |
| content: |
"""
visdex: Exploratory graphs
The exploratory graphs section defines specialised data visualisations that
can be generated by the user on request
"""
import logging
from dash import html, dcc
import plotly.graph_objects as go
from visdex.common import plot_style
LOG = logging.getLogger(__name__)
def generate_generic_group(n_clicks, group_type):
"""
The generic builder for each of the component types.
:param n_clicks:
:param group_type:
:param component_list:
:return:
"""
LOG.info(f"generate_generic_group {group_type}")
children = list()
component_list = all_components[group_type]
for component in component_list:
name = component["id"]
args_to_replicate = dict(component)
del args_to_replicate["component_type"]
del args_to_replicate["id"]
del args_to_replicate["label"]
# Generate each component with the correct id, index, and arguments, inside its
# own Div.
children.append(
html.Div(
[
component["label"] + ":",
component["component_type"](
id={"type": group_type + "-" + name, "index": n_clicks},
**args_to_replicate,
),
],
id={"type": "div-" + group_type + "-" + name, "index": n_clicks},
style=plot_style,
)
)
children.append(
dcc.Graph(
id={"type": "gen-" + group_type + "-graph", "index": n_clicks},
figure=go.Figure(data=go.Scatter()),
)
)
LOG.debug(f"{children}")
return html.Div(
id={"type": "filter-graph-group-" + group_type, "index": n_clicks},
children=children,
)
| avg_line_length: 34.356784 | max_line_length: 91 | alphanum_fraction: 0.511482 |
| original_content: |
"""
visdex: Exploratory graphs
The exploratory graphs section defines specialised data visualisations that
can be generated by the user on request
"""
import logging
from dash import html, dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State, MATCH
import plotly.graph_objects as go
from . import (
bar_graph,
histogram_graph,
manhattan_graph,
scatter_graph,
violin_graph,
)
from visdex.common import standard_margin_left, vstack, plot_style
LOG = logging.getLogger(__name__)
def get_layout(app):
@app.callback(
[
Output("explore-collapse", "is_open"),
Output("collapse-explore-button", "children"),
],
[Input("collapse-explore-button", "n_clicks")],
[State("explore-collapse", "is_open")],
prevent_initial_call=True,
)
def toggle_collapse_explore(n, is_open):
"""
Handle click on the 'Explore' expand/collapse button
"""
LOG.info(f"toggle_collapse_explore {n} {is_open}")
if n:
return not is_open, "+" if is_open else "-"
return is_open, "-"
@app.callback(
Output("graph-group-container", "children"),
[Input("add-graph-button", "n_clicks")],
[State("graph-group-container", "children")],
prevent_initial_call=True,
)
def add_graph_group(n_clicks, children):
# Add a new graph group each time the button is clicked. The if None guard stops
# there being an initial graph.
LOG.info(f"add_graph_group")
if n_clicks is not None:
# This dropdown controls what type of graph-group to display next to it.
new_graph_type_dd = html.Div(
[
"Graph type:",
dcc.Dropdown(
id={"type": "graph-type-dd", "index": n_clicks},
options=[
{"label": str(value).capitalize(), "value": value}
for value in all_components.keys()
],
value="scatter",
style={"width": "50%"},
),
# This is a placeholder for the 'filter-graph-group-scatter' or
# 'filter-graph-group-bar' to be placed here.
# Because graph-type-dd above is set to Scatter, this will initially be
# automatically filled with a filter-graph-group-scatter.
# But on the initial generation of this object, we give it type
# 'placeholder' to make it easy to check its value in
# change_graph_group_type()
html.Div(id={"type": "placeholder", "index": n_clicks}),
],
id={"type": "divgraph-type-dd", "index": n_clicks},
style=vstack,
)
children.append(new_graph_type_dd)
return children
@app.callback(
Output({"type": "divgraph-type-dd", "index": MATCH}, "children"),
[Input({"type": "graph-type-dd", "index": MATCH}, "value")],
[
State({"type": "graph-type-dd", "index": MATCH}, "id"),
State({"type": "divgraph-type-dd", "index": MATCH}, "children"),
],
)
def change_graph_group_type(graph_type, id, children):
LOG.info(f"change_graph_group_type {graph_type} {id}")
# Generate a new group of the right type.
if "filter-graph-group-" + str(graph_type) != children[-1]["props"]["id"]["type"]:
children[-1] = generate_generic_group(id["index"], graph_type)
return children
bar_graph.define_cbs(app)
histogram_graph.define_cbs(app)
manhattan_graph.define_cbs(app)
scatter_graph.define_cbs(app)
violin_graph.define_cbs(app)
return html.Div(children=[
html.Div(
[
dbc.Button(
"+",
id="collapse-explore-button",
style={
"display": "inline-block",
"margin-left": "10px",
"width": "40px",
"vertical-align" : "middle",
},
),
html.H2(
"Exploratory graphs",
style={
"display": "inline-block",
"margin-left": standard_margin_left,
"margin-bottom": "0",
"vertical-align" : "middle",
},
),
],
),
dbc.Collapse(
id="explore-collapse",
children=[
# Container to hold all the exploratory graphs
html.Div(id="graph-group-container", children=[]),
# Button at the page bottom to add a new graph
html.Button(
"New Graph",
id="add-graph-button",
style={
"margin-top": "10px",
"margin-left": standard_margin_left,
"margin-bottom": "40px",
},
),
],
is_open=False,
),
])
def generate_generic_group(n_clicks, group_type):
"""
The generic builder for each of the component types.
:param n_clicks:
:param group_type:
:param component_list:
:return:
"""
LOG.info(f"generate_generic_group {group_type}")
children = list()
component_list = all_components[group_type]
for component in component_list:
name = component["id"]
args_to_replicate = dict(component)
del args_to_replicate["component_type"]
del args_to_replicate["id"]
del args_to_replicate["label"]
# Generate each component with the correct id, index, and arguments, inside its
# own Div.
children.append(
html.Div(
[
component["label"] + ":",
component["component_type"](
id={"type": group_type + "-" + name, "index": n_clicks},
**args_to_replicate,
),
],
id={"type": "div-" + group_type + "-" + name, "index": n_clicks},
style=plot_style,
)
)
children.append(
dcc.Graph(
id={"type": "gen-" + group_type + "-graph", "index": n_clicks},
figure=go.Figure(data=go.Scatter()),
)
)
LOG.debug(f"{children}")
return html.Div(
id={"type": "filter-graph-group-" + group_type, "index": n_clicks},
children=children,
)
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 3,069 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 1,717 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 174 | filtered:remove_delete_markers: 90 |
| hexsha: eebf786325342f19a4237a7fea589022310860b1 | size: 4,982 | ext: py | lang: Python |
| max_stars: path=intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/mkl_conv.py, repo=antoinecarme/xeon-phi-data, head=883a6e2f31b2e729715303725f417b2990d923be, licenses=["BSD-3-Clause"], count=1, events=2021-07-22T18:01:28.000Z to 2021-07-22T18:01:28.000Z |
| max_issues: path=intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/mkl_conv.py, repo=antoinecarme/xeon-phi-data, head=883a6e2f31b2e729715303725f417b2990d923be, licenses=["BSD-3-Clause"], count=null, events=null |
| max_forks: path=intel_software/pkg_contents/micperf/CONTENTS/usr/share/micperf/micp/micp/kernels/mkl_conv.py, repo=antoinecarme/xeon-phi-data, head=883a6e2f31b2e729715303725f417b2990d923be, licenses=["BSD-3-Clause"], count=null, events=null |
| content: |
# Copyright 2012-2017, Intel Corporation, All Rights Reserved.
#
# This software is supplied under the terms of a license
# agreement or nondisclosure agreement with Intel Corp.
# and may not be copied or disclosed except in accordance
# with the terms of that agreement.
confParamNames = [ 'groups', 'nImg', 'inpWidth', 'inpHeight', 'nIfm', \
'nOfm', 'kw', 'kh', 'stride', 'pad', 'iters' ]
optimalParamValues = '1 16 224 224 3 64 7 7 2 3 100'
# expected minimal number of parsed scores in output
CONST_expected_perf_scores = 3
# expected number of "|"-separated sections in output
CONST_expected_sections = 2
# expected measurements per row
CONST_expected_meas_per_row = 4
| avg_line_length: 35.585714 | max_line_length: 119 | alphanum_fraction: 0.609193 |
| original_content: |
# Copyright 2012-2017, Intel Corporation, All Rights Reserved.
#
# This software is supplied under the terms of a license
# agreement or nondisclosure agreement with Intel Corp.
# and may not be copied or disclosed except in accordance
# with the terms of that agreement.
import os
import re
import micp.kernel as micp_kernel
import micp.info as micp_info
import micp.common as micp_common
import micp.params as micp_params
from micp.common import mp_print, get_ln, CAT_ERROR
confParamNames = [ 'groups', 'nImg', 'inpWidth', 'inpHeight', 'nIfm', \
'nOfm', 'kw', 'kh', 'stride', 'pad', 'iters' ]
optimalParamValues = '1 16 224 224 3 64 7 7 2 3 100'
# expected minimal number of parsed scores in output
CONST_expected_perf_scores = 3
# expected number of "|"-separated sections in output
CONST_expected_sections = 2
# expected measurements per row
CONST_expected_meas_per_row = 4
class mkl_conv(micp_kernel.Kernel):
def __init__(self):
optimalParamsString = ''
self._categoryParams = {}
info = micp_info.Info()
maxCount = info.num_cores()
self.name = 'mkl_conv'
self.param_validator = micp_params.NO_VALIDATOR
# for ease of use, split params into two lists
self._paramNames = ['omp_num_threads', 'with_padding', 'output']
self._paramNames.extend(confParamNames)
self._paramDefaults = {'omp_num_threads':str(maxCount),
'with_padding':'0',
'output':'--original-output'}
for (idx, val) in enumerate(optimalParamValues.split(' ')):
optimalParamsString += '--{0} {1} '.format(confParamNames[idx], val)
self._paramDefaults[confParamNames[idx]] = val
self._categoryParams['test'] = [ optimalParamsString ]
self._categoryParams['optimal'] = [ optimalParamsString ]
self._categoryParams['optimal_quick'] = self._categoryParams['optimal']
self._categoryParams['scaling'] = self._categoryParams['optimal']
self._categoryParams['scaling_quick'] = self._categoryParams['optimal']
# scale with step 10
coreConfig = range(1, maxCount, 10)
self._categoryParams['scaling_core'] = \
[ ' '.join(['--omp_num_threads {0}'.format(cc), optimalParamsString]) \
for cc in coreConfig]
def path_host_exec(self, offload_method):
if offload_method == 'local':
return self._path_exec(micp_kernel.LIBEXEC_HOST, "std_conv_bench")
else:
return None
def _do_unit_test(self):
return True
def offload_methods(self):
return ['local']
def param_type(self):
return 'pos'
def independent_var(self, category):
return 'omp_num_threads'
def param_for_env(self):
return ['omp_num_threads']
def path_dev_exec(self, offType):
""" Intel Xeon Phi Coprocessors is not supported """
return None
def environment_host(self):
return {'LD_LIBRARY_PATH':self.ld_library_path(),
'KMP_PLACE_THREADS':'1T',
'KMP_AFFINITY':'compact,granularity=fine'}
def get_process_modifiers(self):
info = micp_info.Info()
if info.is_processor_mcdram_available():
return ['numactl', '--membind=1']
else:
return []
def parse_desc(self, raw):
res_line = raw.splitlines()
# get general parameters before '|' character
try:
out_sections = res_line[1].rsplit("|", 1)
except IndexError:
micp_kernel.raise_parse_error(raw)
if len(out_sections) != CONST_expected_sections:
micp_kernel.raise_parse_error(raw)
return out_sections[0].strip()
def parse_perf(self, raw):
res_lines = raw.splitlines()
result = {}
for line in res_lines:
# example one line of output:
# FWD w/ padding in flops min(ms) 0.01; max(gflop/s) 2.70;avg(ms) 0.02; avg(gflop/s) 1.58;
# ex. ( FWD )
propagation = re.search('([F|B]WD[A-Z_]*)', line)
# ex. (avg ) ((gflops/s)) (1.58 )
values = re.findall('([a-zA-Z]*)\(([a-zA-Z/]*)\)\s*([0-9]*\.[0-9]*)', line)
# skip text data lines
if not (propagation and values):
continue
# check syntax (4 measurements per row)
if len(values) != CONST_expected_meas_per_row:
micp_kernel.raise_parse_error(raw)
propag_txt = propagation.group(0)
for (prop, unit, value) in values:
if prop != 'avg':
continue
if unit == 'gflop/s':
result['Computation.Avg.{0}'.format(propag_txt)] = {'value':value, 'units':'GFlops', 'rollup':True}
if len(result) != CONST_expected_perf_scores:
micp_kernel.raise_parse_error(raw)
return result
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 4,059 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 0 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 50 | filtered:remove_delete_markers: 180 |
| hexsha: 5499e89c9e89f497892f031f5a9cc83e7deaabf6 | size: 610 | ext: py | lang: Python |
| max_stars: path=wfsim/utils.py, repo=jmeyers314/wfsim, head=c2ad60c100ec1c4046368801a56a5211499f0c51, licenses=["BSD-3-Clause"], count=null, events=null |
| max_issues: path=wfsim/utils.py, repo=jmeyers314/wfsim, head=c2ad60c100ec1c4046368801a56a5211499f0c51, licenses=["BSD-3-Clause"], count=null, events=null |
| max_forks: path=wfsim/utils.py, repo=jmeyers314/wfsim, head=c2ad60c100ec1c4046368801a56a5211499f0c51, licenses=["BSD-3-Clause"], count=null, events=null |
| content: |
import numpy as np
import galsim
def BBSED(T):
"""(unnormalized) Blackbody SED for temperature T in Kelvin.
"""
waves_nm = np.arange(330.0, 1120.0, 10.0)
flambda = planck(T, waves_nm*1e-9)
return galsim.SED(
galsim.LookupTable(waves_nm, flambda),
wave_type='nm',
flux_type='flambda'
)
| avg_line_length: 27.727273 | max_line_length: 64 | alphanum_fraction: 0.57377 |
| original_content: |
import numpy as np
import galsim
def BBSED(T):
"""(unnormalized) Blackbody SED for temperature T in Kelvin.
"""
waves_nm = np.arange(330.0, 1120.0, 10.0)
def planck(t, w):
# t in K
# w in m
c = 2.99792458e8 # speed of light in m/s
kB = 1.3806488e-23 # Boltzmann's constant J per Kelvin
h = 6.62607015e-34 # Planck's constant in J s
return w**(-5) / (np.exp(h*c/(w*kB*t))-1)
flambda = planck(T, waves_nm*1e-9)
return galsim.SED(
galsim.LookupTable(waves_nm, flambda),
wave_type='nm',
flux_type='flambda'
)
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 249 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 0 | filtered:remove_delete_markers: 26 |
| hexsha: a5f484ac8ab36970a0402fcb7d92a67abbe863f9 | size: 1,495 | ext: py | lang: Python |
| max_stars: path=src/app/search.py, repo=delgadofarid/my-first-search-engine, head=e8ea909030a599bb4bba739fe77747c98395dc29, licenses=["Apache-2.0"], count=1, events=2021-06-05T03:52:21.000Z to 2021-06-05T03:52:21.000Z |
| max_issues: path=src/app/search.py, repo=delgadofarid/my-first-search-engine, head=e8ea909030a599bb4bba739fe77747c98395dc29, licenses=["Apache-2.0"], count=null, events=null |
| max_forks: path=src/app/search.py, repo=delgadofarid/my-first-search-engine, head=e8ea909030a599bb4bba739fe77747c98395dc29, licenses=["Apache-2.0"], count=null, events=null |
| content: |
from elasticsearch import Elasticsearch
# initialize Elasticsearch client
es = Elasticsearch()
| avg_line_length: 30.510204 | max_line_length: 103 | alphanum_fraction: 0.626756 |
| original_content: |
import re
from elasticsearch import Elasticsearch, helpers
from itertools import islice
# initialize Elasticsearch client
es = Elasticsearch()
def first_n(iterable, n):
return islice(iterable, 0, n)
def format_es_response(user_question, es_candidates):
results = list()
for c in es_candidates:
par = dict()
par['questionText'] = user_question
par['bookTitle'] = c['_source']['bookTitle']
par['paragraphText'] = c['_source']['paragraphText']
par['esScore'] = c['_score']
par['paragraphId'] = c['_source']['paragraphId']
par['bookURL'] = c['_source']['bookURL']
par['bookId'] = c['_source']['bookId']
results.append(par)
return results
def search_candidates(user_question, index_name="wikibooks-search-index", size=20, es=Elasticsearch()):
match_queries = [
{"match": {"bookTitle": user_question}},
{"match": {"paragraphText": user_question}}
]
quoted_text = re.findall('"([^"]*)"', user_question)
for text in quoted_text:
match_queries.append({"match_phrase": {"bookTitle": text}})
match_queries.append({"match_phrase": {"paragraphText": text}})
es_query = {
"query": {
"bool": {
"should": match_queries
}
}
}
results = helpers.scan(es, query=es_query, index=index_name, preserve_order=True)
results = first_n(results, size)
return format_es_response(user_question, results)
| filtered:remove_non_ascii: 0 | filtered:remove_decorators: 0 | filtered:remove_async: 0 | filtered:remove_classes: 0 | filtered:remove_generators: 0 | filtered:remove_function_no_docstring: 1,279 | filtered:remove_class_no_docstring: 0 | filtered:remove_unused_imports: 4 | filtered:remove_delete_markers: 113 |
| hexsha: 5786c329b92403e4f8b652789de8bbe26502cea4 | size: 24,221 | ext: py | lang: Python |
| max_stars: path=tests/test_configfetch.py, repo=openandclose/configfetch, head=fc0b329e6861cc73f0a108ddaea636e6956dd56f, licenses=["MIT"], count=null, events=null |
| max_issues: path=tests/test_configfetch.py, repo=openandclose/configfetch, head=fc0b329e6861cc73f0a108ddaea636e6956dd56f, licenses=["MIT"], count=null, events=null |
| max_forks: path=tests/test_configfetch.py, repo=openandclose/configfetch, head=fc0b329e6861cc73f0a108ddaea636e6956dd56f, licenses=["MIT"], count=null, events=null |
| content: |
import functools
import configfetch
fetch_ = configfetch.fetch
fetch = functools.partial(
configfetch.fetch, option_builder=configfetch.FiniOptionBuilder)
# Just checking the standard library's behaviors.
| avg_line_length: 24.842051 | max_line_length: 85 | alphanum_fraction: 0.473886 |
| original_content: |
import argparse
import configparser
import functools
import textwrap
import pytest
import configfetch
fetch_ = configfetch.fetch
fetch = functools.partial(
configfetch.fetch, option_builder=configfetch.FiniOptionBuilder)
def f(string):
return textwrap.dedent(string.strip('\n'))
def _get_action(conf, option_strings):
parser = argparse.ArgumentParser(prog='test')
conf.build_arguments(parser)
# parser.print_help()
for action in parser._get_optional_actions():
if option_strings in action.option_strings:
return action
raise ValueError('No action with option_strings: %r' % option_strings)
class TestEscapedSplit:
def check_comma(self, value, expected):
ret = configfetch._parse_comma(value)
assert ret == expected
def check_line(self, value, expected):
ret = configfetch._parse_line(value)
assert ret == expected
def test_comma(self):
self.check_comma('aaaa', ['aaaa'])
self.check_comma(r'\aaaa', [r'\aaaa'])
self.check_comma(r'aa\aa', [r'aa\aa'])
self.check_comma(r'aaa\a', [r'aaa\a'])
self.check_comma(r'aaaa\\', [r'aaaa\\'])
self.check_comma(r'aa\\aa', [r'aa\\aa'])
self.check_comma(r'aa\\\aa', [r'aa\\\aa'])
self.check_comma('aa, bb', ['aa', 'bb'])
self.check_comma(r'aa\, bb', ['aa, bb'])
self.check_comma(r'aa\\, bb', [r'aa\, bb'])
self.check_comma(r'aa\\\, bb', [r'aa\\, bb'])
self.check_comma(r'aa\a, bb', [r'aa\a', 'bb'])
self.check_comma(r'aa\\a, bb', [r'aa\\a', 'bb'])
self.check_comma(r'aa\\\a, bb', [r'aa\\\a', 'bb'])
self.check_comma(',aa', ['aa'])
self.check_comma('aa,', ['aa'])
self.check_comma('aa,,', ['aa'])
def test_line(self):
self.check_line('aa\nbb', ['aa', 'bb'])
self.check_line('aa\\\nbb', ['aa\nbb'])
self.check_line('aa\\\\\nbb', ['aa\\\nbb'])
self.check_line('aa\\\\\\\nbb', ['aa\\\\\nbb'])
self.check_line('aa\nbb,', ['aa', 'bb,'])
class TestInheritance:
def test_iter(self):
data = f("""
[sec1]
[sec2]
""")
conf = fetch(data)
assert list(conf.__iter__()) == ['DEFAULT', 'sec1', 'sec2']
def test_iter_option(self):
data = f("""
[sec1]
aa = xxx
bb = yyy
""")
conf = fetch(data)
assert list(conf.sec1.__iter__()) == ['aa', 'bb']
def test_contains(self):
data = f("""
[sec1]
[sec2]
""")
conf = fetch(data)
assert 'sec2' in conf
def test_contains_option(self):
data = f("""
[sec1]
aa = xxx
bb = yyy
""")
conf = fetch(data)
assert 'bb' in conf.sec1
class TestParseConfig:
def test_conf_str(self):
data = f("""
[sec1]
aa = xxx
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx'
def test_conf_str_blank(self):
data = f("""
[sec1]
""")
conf = fetch(data)
with pytest.raises(configfetch.NoOptionError):
assert conf.sec1.aa == ''
def test_conf_str_nosection(self):
data = f("""
[sec1]
aa = xxx
""")
conf = fetch(data)
with pytest.raises(configfetch.NoSectionError):
assert conf.sec2
def test_conf_str_default(self):
data = f("""
[DEFAULT]
aa = xxx
[sec1]
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx'
def test_conf_str_default_nosection(self):
data = f("""
[DEFAULT]
aa = xxx
""")
conf = fetch(data)
with pytest.raises(configfetch.NoSectionError):
assert conf.sec1.aa == 'xxx'
def test_conf_str_default_read_section(self):
data = f("""
[DEFAULT]
aa = xxx
""")
conf = fetch(data)
data = f("""
[sec1]
""")
conf._config.read_string(data)
assert conf.sec1.aa == 'xxx'
def test_conf_str_default_blank(self):
data = f("""
[DEFAULT]
[sec1]
""")
conf = fetch(data)
with pytest.raises(configfetch.NoOptionError):
assert conf.sec1.aa == ''
def test_conf_str_default_blank_nosection(self):
data = ''
conf = fetch(data)
with pytest.raises(configfetch.NoSectionError):
assert conf.sec1.aa == ''
def test_conf_bool(self):
data = f("""
[sec1]
aa = :: f: bool
Yes
""")
conf = fetch(data)
assert conf.sec1.aa is True
def test_conf_bool_no(self):
data = f("""
[sec1]
aa = :: f: bool
No
""")
conf = fetch(data)
assert conf.sec1.aa is False
# blank string returns ``None``
def test_conf_bool_blank(self):
data = f("""
[sec1]
aa = :: f: bool
""")
conf = fetch(data)
assert conf.sec1.aa is None
def test_conf_comma(self):
data = f("""
[sec1]
aa = :: f: comma
xxx1, xxx2, xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
def test_conf_comma_indent(self):
data = f("""
[sec1]
aa = :: f: comma
xxx1, xxx2,
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
def test_conf_comma_newline(self):
data = f("""
[sec1]
aa = :: f: comma
xxx1, xxx2
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2\nxxx3']
def test_conf_comma_blank(self):
data = f("""
[sec1]
aa = :: f: comma
""")
conf = fetch(data)
assert conf.sec1.aa == []
def test_conf_line(self):
data = f("""
[sec1]
aa = :: f: line
xxx1
xxx2
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
def test_conf_line_comma(self):
data = f("""
[sec1]
aa = :: f: line
xxx1
xxx2
xxx3, xxx4
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3, xxx4']
def test_conf_line_blank(self):
data = f("""
[sec1]
aa = :: f: line
""")
conf = fetch(data)
assert conf.sec1.aa == []
def test_conf_line_multiblanks(self):
data = f("""
[sec1]
aa = :: f: line
""")
conf = fetch(data)
assert conf.sec1.aa == []
def test_conf_bar_comma(self):
data = f("""
[sec1]
aa = :: f: comma, bar
xxx1, xxx2, xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx1|xxx2|xxx3'
def test_conf_bar_comma_blank(self):
data = f("""
[sec1]
aa = :: f: comma, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_bar_comma_blank_spaces(self):
data = f("""
[sec1]
aa = :: f: comma, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_bar_line(self):
data = f("""
[sec1]
aa = :: f: line, bar
xxx1
xxx2
xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == 'xxx1|xxx2|xxx3'
def test_conf_bar_line_blank(self):
data = f("""
[sec1]
aa = :: f: line, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_bar_line_blank_spaces(self):
data = f("""
[sec1]
aa = :: f: line, bar
""")
conf = fetch(data)
assert conf.sec1.aa == ''
def test_conf_cmd(self):
data = f("""
[sec1]
aa = :: f: cmd
--aaa -b "ccc cc" ddd,dd
""")
conf = fetch(data)
assert conf.sec1.aa == ['--aaa', '-b', 'ccc cc', 'ddd,dd']
def test_conf_cmds(self):
data = f("""
[sec1]
aa = :: f: line, cmds
ls *.txt
find . "aaa"
""")
conf = fetch(data)
assert conf.sec1.aa == [['ls', '*.txt'], ['find', '.', 'aaa']]
def test_conf_fmt(self):
data = f("""
[sec1]
aa = :: f: fmt
{USER}/data/my.css
""")
conf = fetch(data, fmts={'USER': '/home/john'})
assert conf.sec1.aa == '/home/john/data/my.css'
class TestParseContexts:
def test_ctx_default_bool(self):
data = f("""
[DEFAULT]
aa = :: f: bool
no
[sec1]
""")
conf = fetch(data)
assert conf.sec1.aa is False
def test_ctx_default_bool_noop(self):
data = f("""
[DEFAULT]
aa = :: f: bool
[sec1]
aa = no
""")
conf = fetch(data)
assert conf.sec1.aa is False
def test_ctx_default_comma(self):
data = f("""
[DEFAULT]
aa = :: f: comma
[sec1]
aa = xxx1, xxx2, xxx3
""")
conf = fetch(data)
assert conf.sec1.aa == ['xxx1', 'xxx2', 'xxx3']
class TestParseFunc:
def test_func_newline(self):
data = f("""
[sec1]
aa =
:: f: bool
no
""")
conf = fetch(data)
assert conf.sec1.aa is False
# Just checking the standard library's behaviors.
class TestConfigParser:
def test_indent(self):
data = f("""
[sec1]
aa =
xxx
""")
config = configparser.ConfigParser()
config.read_string(data)
assert config['sec1']['aa'] == '\nxxx'
data = f("""
[sec1]
aa =
xxx
""")
config = configparser.ConfigParser()
with pytest.raises(configparser.ParsingError):
config.read_string(data)
def test_allow_no_value(self):
data = f("""
[sec1]
aa =
:: f: bool
no
""")
config = configparser.ConfigParser(allow_no_value=True)
config.read_string(data)
assert config['sec1']['aa'] == '\n:: f: bool\nno'
class TestArgparse:
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aa')
parser.add_argument('-b', '--bb')
parser.add_argument('-c', '--cc', action='store_const', default='', const='yes')
parser.add_argument('-d', '--no-cc', action='store_const', const='no', dest='cc')
parser.add_argument('-e', '--ee-eee')
def get_args(self, cmd):
return self.parser.parse_args(cmd)
def test_args_and_conf(self):
data = f("""
[sec1]
aa = xxx
""")
args = self.get_args(['--aa', 'axxx'])
conf = fetch(data, args=args)
assert conf.sec1.aa == 'axxx'
def test_args_and_conf_short(self):
data = f("""
[sec1]
aa = xxx
""")
args = self.get_args(['-a', 'axxx'])
conf = fetch(data, args=args)
assert conf.sec1.aa == 'axxx'
def test_args_and_conf_none(self):
data = f("""
[sec1]
aa = xxx
""")
args = self.get_args([])
conf = fetch(data, args=args)
assert conf.sec1.aa == 'xxx'
def test_args_and_conf_const(self):
data = f("""
[sec1]
cc = :: f: bool
""")
args = self.get_args(['--cc'])
conf = fetch(data, args=args)
assert conf.sec1.cc is True
def test_args_and_conf_const_false(self):
data = f("""
[sec1]
cc = :: f: bool
true
""")
args = self.get_args(['--no-cc'])
conf = fetch(data, args=args)
assert conf.sec1.cc is False
def test_args_and_conf_dash(self):
data = f("""
[sec1]
ee_eee = xxx
""")
args = self.get_args(['-e', 'axxx'])
conf = fetch(data, args=args)
assert conf.sec1.ee_eee == 'axxx'
class _CustomFunc(configfetch.Func):
"""Used the test below."""
@configfetch.register
def custom(self, value):
return 'test'
class TestCustomize:
def test_customfunc(self):
data = f("""
[sec1]
aa = :: f: custom
xxx
""")
conf = fetch(data, Func=_CustomFunc)
assert conf.sec1.aa == 'test'
class TestDouble:
def test_nooption_nooption(self):
data = f("""
[sec1]
aa = xxx
""")
conf1 = fetch(data)
data = f("""
[sec1]
aa = yyy
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
with pytest.raises(configfetch.NoOptionError):
assert double.bb == 'zzz'
def test_nooption_blank(self):
data = f("""
[sec1]
aa = xxx
""")
conf1 = fetch(data)
data = f("""
[sec1]
bb =
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.bb == ''
def test_blank_nooption(self):
data = f("""
[sec1]
bb =
""")
conf1 = fetch(data)
data = f("""
[sec1]
aa = yyy
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.bb == ''
def test_blank_blank(self):
data = f("""
[sec1]
bb =
""")
conf1 = fetch(data)
data = f("""
[sec1]
bb = :: f: comma
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.bb == ''
def test_plus(self):
data = f("""
[sec1]
aa = :: f: plus
xxx, yyy
""")
conf1 = fetch(data)
data = f("""
[sec1]
aa = :: f: plus
-yyy
""")
conf2 = fetch(data)
double = configfetch.Double(conf2.sec1, conf1.sec1)
assert double.aa == ['xxx']
class TestGetPlusMinusValues:
initial = ['aaa', 'bbb', 'ccc']
def compare(self, adjusts, initial, expected):
values = configfetch._get_plusminus_values(adjusts, initial)
assert values == expected
def test_adjusts_argument(self):
args = (['ddd'], None, ['ddd'])
self.compare(*args)
args = (['+ddd'], None, ['ddd'])
self.compare(*args)
args = (['-bbb'], None, [])
self.compare(*args)
args = (['ddd'], self.initial, ['ddd'])
self.compare(*args)
args = (['+ddd'], self.initial, ['aaa', 'bbb', 'ccc', 'ddd'])
self.compare(*args)
args = (['-bbb'], self.initial, ['aaa', 'ccc'])
self.compare(*args)
args = (['-aaa, -bbb'], self.initial, ['ccc'])
self.compare(*args)
args = (['-aaa, +ddd, +eee'], self.initial,
['bbb', 'ccc', 'ddd', 'eee'])
self.compare(*args)
class TestMinusAdapter:
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aa', action='store_const', const='A')
parser.add_argument('-b', '--bb', action='store_true')
parser.add_argument('-c', '--cc', action='store_false')
parser.add_argument('-d', '--dd', action='append')
parser.add_argument('-e', '--ee', action='append_const', const='E')
parser.add_argument('-f', '--ff', action='count')
parser.add_argument('-x', '--xx')
parser.add_argument('-y', '--yy', nargs=1)
def compare(self, args, new_args, matcher=None):
assert configfetch.minusadapter(self.parser, matcher, args) == new_args
def test(self):
# No Minus argument
args = ['--aa', '--xx', 'xxxx', '--bb']
new_args = ['--aa', '--xx', 'xxxx', '--bb']
self.compare(args, new_args)
# Minus argument
args = ['--aa', '--xx', '-xxxx', '--bb']
new_args = ['--aa', '--xx=-xxxx', '--bb']
self.compare(args, new_args)
# Minus with another StoreAction
args = ['--aa', '--xx', '-xxxx', '--yy', 'yyyy']
new_args = ['--aa', '--xx=-xxxx', '--yy', 'yyyy']
self.compare(args, new_args)
# Minus with AppendAction
args = ['--dd', '-dddd', '--xx', '-xxxx', '--bb']
new_args = ['--dd=-dddd', '--xx=-xxxx', '--bb']
self.compare(args, new_args)
# Minus, short option version
args = ['--aa', '-x', '-xxxx', '--bb']
new_args = ['--aa', '-x-xxxx', '--bb']
self.compare(args, new_args)
class TestParseArgs:
def test_help(self):
data = f("""
[sec1]
aa = : help string
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'help string'
def test_help_multilines(self):
data = f("""
[sec1]
aa = : This
: is a
: help.
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'This\nis a\nhelp.'
def test_help_multilines_blank(self):
# testing both ':' and ': '
data = f("""
[sec1]
aa = : This
: is a
:
:
: help.
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'This\nis a\n\n\nhelp.'
def test_help_and_choices(self):
data = f("""
[sec1]
aa = : help string
:: choices: ss, tt
tt
""")
conf = fetch(data)
args = conf._ctx['aa']['argparse']
assert args['help'] == 'help string'
assert args['choices'] == ['ss', 'tt']
class TestBuildArgs:
def test_help(self):
data = f("""
[sec1]
aa = : help string
:: f: comma
xxx1, xxx2
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.help == 'help string'
def test_help_and_choices(self):
data = f("""
[sec1]
aa = : help string
:: choices: ss, tt
tt
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.choices == ['ss', 'tt']
def test_names(self):
data = f("""
[sec1]
aa = : help string
:: names: a
true
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.option_strings == ['-a', '--aa']
def test_bool(self):
data = f("""
[sec1]
aa = : help string
:: f: bool
yes
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreConstAction)
assert action.const == 'yes'
def test_bool_opposite(self):
data = f("""
[sec1]
aa = : help string
:: f: bool
yes
no_aa = : help string2
:: dest: aa
:: f: bool
no
""")
conf = fetch(data)
parser = argparse.ArgumentParser(prog='test')
conf.build_arguments(parser)
namespace = parser.parse_args(['--aa'])
assert namespace.__dict__['aa'] == 'yes'
namespace = parser.parse_args(['--no-aa'])
assert namespace.__dict__['aa'] == 'no'
def test_bool_default_no(self):
data = f("""
[sec1]
overwrite = : help string
:: f: bool
no
""")
conf = fetch(data)
action = _get_action(conf, '--overwrite')
assert isinstance(action, argparse._StoreConstAction)
assert action.const == 'yes'
def test_bool_opposite_default_no(self):
data = f("""
[sec1]
overwrite = : help string
:: f: bool
no
no_overwrite = : help string2
:: dest: overwrite
:: f: bool
yes
""")
conf = fetch(data)
parser = argparse.ArgumentParser(prog='test')
conf.build_arguments(parser)
namespace = parser.parse_args(['--overwrite'])
assert namespace.__dict__['overwrite'] == 'yes'
namespace = parser.parse_args(['--no-overwrite'])
assert namespace.__dict__['overwrite'] == 'no'
class TestBuildArgsCommandlineOnly:
def test_int(self):
data = f("""
[sec1]
aa = : help string
:: default: 1
xxx
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.default == 1
def test_int_like_string(self):
data = f("""
[sec1]
aa = : help string
:: default: '1'
xxx
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.default == '1'
def test_type(self):
data = f("""
[sec1]
aa = : help string
:: type: int
42
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.type == int
def test_suppress(self):
data = f("""
[DEFAULT]
aa = : argparse.SUPPRESS
:: default: argparse.SUPPRESS
[sec1]
aa = xxx
""")
conf = fetch(data)
action = _get_action(conf, '--aa')
assert isinstance(action, argparse._StoreAction)
assert action.help == argparse.SUPPRESS
assert action.default == argparse.SUPPRESS
assert conf.sec1.aa == 'xxx'
def test_print_data():
data = f("""
[DEFAULT]
aa = aaa
[sec1]
bb = bbb
cc = : help string
:: names: c
:: f: bool
ccc
dd =
""")
dict_string = f("""
{
'DEFAULT': {
'aa': {
'value': 'aaa',
},
},
'sec1': {
'bb': {
'value': 'bbb',
},
'cc': {
'argparse': {
'help': 'help string',
'names': ['c'],
},
'func': ['bool'],
'value': 'ccc',
},
'dd': {
'value': '',
},
},
}
""")
ini_string = f("""
[DEFAULT]
aa= aaa
[sec1]
bb= bbb
cc= ccc
dd=
""")
conf = fetch(data, option_builder=configfetch.FiniOptionBuilder)
printer = configfetch.ConfigPrinter
ret = []
printer(conf, print=ret.append).print_dict()
assert '\n'.join(ret) == dict_string[:-1]
ret = []
printer(conf, print=ret.append).print_ini()
assert '\n'.join(ret) == ini_string[:-1]
dict_ = eval(dict_string)
conf = fetch(dict_, option_builder=configfetch.DictOptionBuilder)
ret = []
printer(conf, print=ret.append).print_dict()
assert '\n'.join(ret) == dict_string[:-1]
ret = []
printer(conf, print=ret.append).print_ini()
assert '\n'.join(ret) == ini_string[:-1]
| 0
| 51
| 0
| 21,633
| 0
| 1,826
| 0
| -22
| 503
|
a5bd7b16ae0ef9281e8935c406154bcc19d183b1
| 10,477
|
py
|
Python
|
pt3/client.py
|
Aerun/pytyle3
|
86876fa7ad652fc99b77f5482559733c95490e84
|
[
"WTFPL"
] | null | null | null |
pt3/client.py
|
Aerun/pytyle3
|
86876fa7ad652fc99b77f5482559733c95490e84
|
[
"WTFPL"
] | null | null | null |
pt3/client.py
|
Aerun/pytyle3
|
86876fa7ad652fc99b77f5482559733c95490e84
|
[
"WTFPL"
] | null | null | null |
import xpybutil
import xpybutil.event as event
import xpybutil.ewmh as ewmh
import xpybutil.motif as motif
import xpybutil.icccm as icccm
import xpybutil.rect as rect
import xpybutil.util as util
import xpybutil.window as window
clients = {}
ignore = [] # Some clients are never gunna make it...
event.connect('PropertyNotify', xpybutil.root, cb_property_notify)
| 34.127036
| 98
| 0.592345
|
import time
import xcffib.xproto
import xpybutil
import xpybutil.event as event
import xpybutil.ewmh as ewmh
import xpybutil.motif as motif
import xpybutil.icccm as icccm
import xpybutil.rect as rect
import xpybutil.util as util
import xpybutil.window as window
from debug import debug
import config
import state
import tile
clients = {}
ignore = [] # Some clients are never gunna make it...
class Client(object):
def __init__(self, wid):
self.wid = wid
self.name = ewmh.get_wm_name(self.wid).reply() or 'N/A'
debug('Connecting to %s' % self)
window.listen(self.wid, 'PropertyChange', 'FocusChange')
event.connect('PropertyNotify', self.wid, self.cb_property_notify)
event.connect('FocusIn', self.wid, self.cb_focus_in)
event.connect('FocusOut', self.wid, self.cb_focus_out)
# This connects to the parent window (decorations)
# We get all resize AND move events... might be too much
self.parentid = window.get_parent_window(self.wid)
window.listen(self.parentid, 'StructureNotify')
event.connect('ConfigureNotify', self.parentid,
self.cb_configure_notify)
# A window should only be floating if that is default
self.floating = getattr(config, 'floats_default', False)
# Not currently in a "moving" state
self.moving = False
# Load some data
self.desk = ewmh.get_wm_desktop(self.wid).reply()
# Add it to this desktop's tilers
tile.update_client_add(self)
# First cut at saving client geometry
self.save()
def remove(self):
tile.update_client_removal(self)
debug('Disconnecting from %s' % self)
event.disconnect('ConfigureNotify', self.parentid)
event.disconnect('PropertyNotify', self.wid)
event.disconnect('FocusIn', self.wid)
event.disconnect('FocusOut', self.wid)
def activate(self):
ewmh.request_active_window_checked(self.wid, source=1).check()
def unmaximize(self):
vatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_VERT')
hatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_HORZ')
ewmh.request_wm_state_checked(self.wid, 0, vatom, hatom).check()
def save(self):
self.saved_geom = window.get_geometry(self.wid)
self.saved_state = ewmh.get_wm_state(self.wid).reply()
def restore(self):
debug('Restoring %s' % self)
if getattr(config, 'remove_decorations', False):
motif.set_hints_checked(self.wid,2,decoration=1).check()
if getattr(config, 'tiles_below', False):
ewmh.request_wm_state_checked(self.wid,0,util.get_atom('_NET_WM_STATE_BELOW')).check()
if self.saved_state:
fullymaxed = False
vatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_VERT')
hatom = util.get_atom('_NET_WM_STATE_MAXIMIZED_HORZ')
if vatom in self.saved_state and hatom in self.saved_state:
fullymaxed = True
ewmh.request_wm_state_checked(self.wid, 1, vatom, hatom).check()
elif vatom in self.saved_state:
ewmh.request_wm_state_checked(self.wid, 1, vatom).check()
elif hatom in self.saved_state:
ewmh.request_wm_state_checked(self.wid, 1, hatom).check()
# No need to continue if we've fully maximized the window
if fullymaxed:
return
mnow = rect.get_monitor_area(window.get_geometry(self.wid),
state.monitors)
mold = rect.get_monitor_area(self.saved_geom, state.monitors)
x, y, w, h = self.saved_geom
# What if the client is on a monitor different than what it was before?
# Use the same algorithm in Openbox to convert one monitor's
# coordinates to another.
if mnow != mold:
nowx, nowy, noww, nowh = mnow
oldx, oldy, oldw, oldh = mold
xrat, yrat = float(noww) / float(oldw), float(nowh) / float(oldh)
x = nowx + (x - oldx) * xrat
y = nowy + (y - oldy) * yrat
w *= xrat
h *= yrat
window.moveresize(self.wid, x, y, w, h)
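    # Worked example for the monitor conversion above (all numbers hypothetical):
    # with the old monitor at (0, 0, 1920, 1080), the new monitor at
    # (1920, 0, 1280, 1024) and a saved geometry of (480, 270, 960, 540),
    # xrat = 1280/1920 = 2/3 and yrat = 1024/1080 ~= 0.948, so the window is
    # restored at roughly x = 1920 + 480*2/3 = 2240, y ~= 256, w = 640, h ~= 512.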
def moveresize(self, x=None, y=None, w=None, h=None):
# Ignore this if the user is moving the window...
if self.moving:
print 'Sorry but %s is moving...' % self
return
try:
window.moveresize(self.wid, x, y, w, h)
except:
pass
def is_button_pressed(self):
try:
pointer = xpybutil.conn.core.QueryPointer(self.wid).reply()
if pointer is None:
return False
if (xcffib.xproto.KeyButMask.Button1 & pointer.mask or
xcffib.xproto.KeyButMask.Button3 & pointer.mask):
return True
except xcffib.xproto.BadWindow:
pass
return False
def cb_focus_in(self, e):
if self.moving and e.mode == xcffib.xproto.NotifyMode.Ungrab:
state.GRAB = None
self.moving = False
tile.update_client_moved(self)
def cb_focus_out(self, e):
if e.mode == xcffib.xproto.NotifyMode.Grab:
state.GRAB = self
def cb_configure_notify(self, e):
if state.GRAB is self and self.is_button_pressed():
self.moving = True
def cb_property_notify(self, e):
aname = util.get_atom_name(e.atom)
try:
if aname == '_NET_WM_DESKTOP':
if should_ignore(self.wid):
untrack_client(self.wid)
return
olddesk = self.desk
self.desk = ewmh.get_wm_desktop(self.wid).reply()
if self.desk is not None and self.desk != olddesk:
tile.update_client_desktop(self, olddesk)
else:
self.desk = olddesk
elif aname == '_NET_WM_STATE':
if should_ignore(self.wid):
untrack_client(self.wid)
return
except xcffib.xproto.BadWindow:
pass # S'ok...
def __str__(self):
return '{%s (%d)}' % (self.name[0:30], self.wid)
def update_clients():
client_list = ewmh.get_client_list_stacking().reply()
client_list = list(reversed(client_list))
for c in client_list:
if c not in clients:
track_client(c)
for c in clients.keys():
if c not in client_list:
untrack_client(c)
def track_client(client):
assert client not in clients
try:
if not should_ignore(client):
if state.PYTYLE_STATE == 'running':
# This is truly unfortunate and only seems to be necessary when
# a client comes back from an iconified state. This causes a
# slight lag when a new window is mapped, though.
time.sleep(0.2)
clients[client] = Client(client)
except xcffib.xproto.BadWindow:
debug('Window %s was destroyed before we could finish inspecting it. '
'Untracking it...' % client)
untrack_client(client)
def untrack_client(client):
if client not in clients:
return
c = clients[client]
del clients[client]
c.remove()
def should_ignore(client):
# Don't waste time on clients we'll never possibly tile
if client in ignore:
return True
nm = ewmh.get_wm_name(client).reply()
wm_class = icccm.get_wm_class(client).reply()
if wm_class is not None:
try:
inst, cls = wm_class
matchNames = set([inst.lower(), cls.lower()])
if matchNames.intersection(config.ignore):
debug('Ignoring %s because it is in the ignore list' % nm)
return True
if hasattr(config, 'tile_only') and config.tile_only:
if not matchNames.intersection(config.tile_only):
debug('Ignoring %s because it is not in the tile_only '
'list' % nm)
return True
except ValueError:
pass
if icccm.get_wm_transient_for(client).reply() is not None:
debug('Ignoring %s because it is transient' % nm)
ignore.append(client)
return True
wtype = ewmh.get_wm_window_type(client).reply()
if wtype:
for atom in wtype:
aname = util.get_atom_name(atom)
if aname in ('_NET_WM_WINDOW_TYPE_DESKTOP',
'_NET_WM_WINDOW_TYPE_DOCK',
'_NET_WM_WINDOW_TYPE_TOOLBAR',
'_NET_WM_WINDOW_TYPE_MENU',
'_NET_WM_WINDOW_TYPE_UTILITY',
'_NET_WM_WINDOW_TYPE_SPLASH',
'_NET_WM_WINDOW_TYPE_DIALOG',
'_NET_WM_WINDOW_TYPE_DROPDOWN_MENU',
'_NET_WM_WINDOW_TYPE_POPUP_MENU',
'_NET_WM_WINDOW_TYPE_TOOLTIP',
'_NET_WM_WINDOW_TYPE_NOTIFICATION',
'_NET_WM_WINDOW_TYPE_COMBO',
'_NET_WM_WINDOW_TYPE_DND'):
debug('Ignoring %s because it has type %s' % (nm, aname))
ignore.append(client)
return True
wstate = ewmh.get_wm_state(client).reply()
if wstate is None:
debug('Ignoring %s because it does not have a state' % nm)
return True
for atom in wstate:
aname = util.get_atom_name(atom)
# For now, while I decide how to handle these guys
if aname == '_NET_WM_STATE_STICKY':
debug('Ignoring %s because it is sticky and they are weird' % nm)
return True
if aname in ('_NET_WM_STATE_SHADED', '_NET_WM_STATE_HIDDEN',
'_NET_WM_STATE_FULLSCREEN', '_NET_WM_STATE_MODAL'):
debug('Ignoring %s because it has state %s' % (nm, aname))
return True
d = ewmh.get_wm_desktop(client).reply()
if d == 0xffffffff:
        debug('Ignoring %s because it\'s on all desktops '
              '(not implemented)' % nm)
return True
return False
def cb_property_notify(e):
aname = util.get_atom_name(e.atom)
if aname == '_NET_CLIENT_LIST_STACKING':
update_clients()
event.connect('PropertyNotify', xpybutil.root, cb_property_notify)
| 0
| 0
| 0
| 5,878
| 0
| 3,995
| 0
| -36
| 273
|
1782765336d1c920b25f3e04b8d6dd09f0344112
| 905
|
py
|
Python
|
index_cli/core/json_type.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
index_cli/core/json_type.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
index_cli/core/json_type.py
|
lishnih/index_cli
|
57f23d5df5168bcc73e23e0eeabbb8317014585b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Stan 2018-09-27
from __future__ import (division, absolute_import,
print_function, unicode_literals)
# class JsonType(UserDefinedType):
# def get_col_spec(self, **kw):
# return "JSON"
#
# def bind_processor(self, dialect):
# def process(value):
# return json.dumps(value, ensure_ascii=False).encode('utf8')
#
# return process
#
# def result_processor(self, dialect, coltype):
# def process(value):
# return json.loads(value)
#
# return process
| 23.815789
| 73
| 0.653039
|
#!/usr/bin/env python
# coding=utf-8
# Stan 2018-09-27
from __future__ import (division, absolute_import,
print_function, unicode_literals)
import json
from sqlalchemy.types import UserDefinedType, TypeDecorator, Text
class JsonType(TypeDecorator):
impl = Text
def process_bind_param(self, value, dialect):
return json.dumps(value, ensure_ascii=False)
def process_result_value(self, value, dialect):
return json.loads(value)
# class JsonType(UserDefinedType):
# def get_col_spec(self, **kw):
# return "JSON"
#
# def bind_processor(self, dialect):
# def process(value):
# return json.dumps(value, ensure_ascii=False).encode('utf8')
#
# return process
#
# def result_processor(self, dialect, coltype):
# def process(value):
# return json.loads(value)
#
# return process
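# A minimal usage sketch for the JsonType decorator above, assuming SQLAlchemy
# is installed. Table and column names are invented for illustration; nothing
# below is part of the index_cli package itself.
if __name__ == '__main__':
    from sqlalchemy import Table, Column, Integer, MetaData, create_engine

    metadata = MetaData()
    records = Table('records', metadata,
                    Column('id', Integer, primary_key=True),
                    Column('meta', JsonType()))

    engine = create_engine('sqlite://')   # throw-away in-memory SQLite
    metadata.create_all(engine)
    with engine.begin() as conn:
        # the dict is dumped to TEXT on insert and parsed back on fetch
        conn.execute(records.insert().values(meta={'size': 3, 'tags': ['a', 'b']}))
        row = conn.execute(records.select()).fetchone()
        print(row.meta['tags'])            # -> ['a', 'b']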
| 0
| 0
| 0
| 215
| 0
| 0
| 0
| 34
| 69
|
ed5e6c0f6c69ec6fdd90183710bf386418d25c66
| 1,563
|
py
|
Python
|
tests/test_settings.py
|
sneJ-/chaostoolkit-lib
|
07b00c8bffe8cda7494b049f9640cdbba3bad8bc
|
[
"Apache-2.0"
] | 1
|
2019-11-18T19:57:42.000Z
|
2019-11-18T19:57:42.000Z
|
tests/test_settings.py
|
sneJ-/chaostoolkit-lib
|
07b00c8bffe8cda7494b049f9640cdbba3bad8bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_settings.py
|
sneJ-/chaostoolkit-lib
|
07b00c8bffe8cda7494b049f9640cdbba3bad8bc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path
settings_dir = os.path.join(os.path.dirname(__file__), "fixtures")
| 28.944444
| 79
| 0.715931
|
# -*- coding: utf-8 -*-
import os.path
from chaoslib.settings import get_loaded_settings, load_settings, save_settings
settings_dir = os.path.join(os.path.dirname(__file__), "fixtures")
def test_do_not_fail_when_settings_do_not_exist():
assert load_settings(
os.path.join(settings_dir, "no_settings.yaml")) is None
def test_load_settings():
settings = load_settings(os.path.join(settings_dir, "settings.yaml"))
assert "notifications" in settings
def test_save_settings():
settings = load_settings(os.path.join(settings_dir, "settings.yaml"))
new_settings_location = os.path.join(settings_dir, "new_settings.yaml")
try:
os.remove(new_settings_location)
except OSError:
pass
save_settings(settings, new_settings_location)
saved_settings = load_settings(new_settings_location)
assert "notifications" in saved_settings
os.remove(new_settings_location)
def test_load_unsafe_settings():
settings = load_settings(
os.path.join(settings_dir, "unsafe-settings.yaml"))
assert settings is None
def test_create_settings_file_on_save():
ghost = os.path.abspath(os.path.join(settings_dir, "bah", "ghost.yaml"))
assert not os.path.exists(ghost)
try:
save_settings({}, ghost)
assert os.path.exists(ghost)
finally:
try:
os.remove(ghost)
except OSError:
pass
def test_get_loaded_settings():
settings = load_settings(os.path.join(settings_dir, "settings.yaml"))
assert get_loaded_settings() is settings
| 0
| 0
| 0
| 0
| 0
| 1,231
| 0
| 58
| 160
|
0da55faa65c939131e74dd60e3f512e40b9acbf0
| 49
|
py
|
Python
|
instance/config.py
|
davideguidobene/cinema-web-app
|
1a83576a1e37ea69bec2b2a80f584912cfc9b264
|
[
"MIT"
] | null | null | null |
instance/config.py
|
davideguidobene/cinema-web-app
|
1a83576a1e37ea69bec2b2a80f584912cfc9b264
|
[
"MIT"
] | null | null | null |
instance/config.py
|
davideguidobene/cinema-web-app
|
1a83576a1e37ea69bec2b2a80f584912cfc9b264
|
[
"MIT"
] | null | null | null |
import os
SECRET_KEY = os.getenv("SECRET_KEY")
| 9.8
| 36
| 0.734694
|
import os
SECRET_KEY = os.getenv("SECRET_KEY")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7a777dd89c577420d917a03e50e383d90d26f239
| 652
|
py
|
Python
|
cit_vipnet/inventory/migrations/0002_auto_20210906_1138.py
|
mr-Marshanskiy/cit-vipnet
|
6a0e56a13cae57252957c82af3d4e98da5d9d6a4
|
[
"BSD-3-Clause"
] | null | null | null |
cit_vipnet/inventory/migrations/0002_auto_20210906_1138.py
|
mr-Marshanskiy/cit-vipnet
|
6a0e56a13cae57252957c82af3d4e98da5d9d6a4
|
[
"BSD-3-Clause"
] | null | null | null |
cit_vipnet/inventory/migrations/0002_auto_20210906_1138.py
|
mr-Marshanskiy/cit-vipnet
|
6a0e56a13cae57252957c82af3d4e98da5d9d6a4
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2 on 2021-09-06 08:38
| 29.636364
| 137
| 0.627301
|
# Generated by Django 2.2 on 2021-09-06 08:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('inventory', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='hardwareplatform',
options={'ordering': ['-name'], 'verbose_name': 'Аппаратная платформа', 'verbose_name_plural': 'Аппаратные платформы'},
),
migrations.AlterModelOptions(
name='modification',
options={'ordering': ['-name'], 'verbose_name': 'Модификация исполненеия', 'verbose_name_plural': 'Модификации исполненеий'},
),
]
| 164
| 0
| 0
| 466
| 0
| 0
| 0
| 11
| 46
|
a218e268b041cea723c99b9e20c6c99c665876db
| 88
|
py
|
Python
|
main.py
|
hailleenvarela/data-2022-1
|
8f92e1325b6fcbf727b426c50ddf32d10e38db89
|
[
"MIT"
] | null | null | null |
main.py
|
hailleenvarela/data-2022-1
|
8f92e1325b6fcbf727b426c50ddf32d10e38db89
|
[
"MIT"
] | 1
|
2022-02-27T23:23:50.000Z
|
2022-02-27T23:23:50.000Z
|
main.py
|
hailleenvarela/data-2022-1
|
8f92e1325b6fcbf727b426c50ddf32d10e38db89
|
[
"MIT"
] | 3
|
2022-02-27T23:14:24.000Z
|
2022-03-02T00:47:12.000Z
|
from source.etl import ETL
x = ETL()
df = x.extract(True)
x.transform(df)
#x.load(df)
| 11
| 26
| 0.670455
|
from source.etl import ETL
x = ETL()
df = x.extract(True)
x.transform(df)
#x.load(df)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
97d4c4d8955d3f56c8e11f52c3ceebef2f337f77
| 2,533
|
py
|
Python
|
2021/advent2021_9.py
|
aatango/Advent-of-Code
|
f229abc7acaaa0a2316839bf11fa7e2fdf9caf2c
|
[
"MIT"
] | null | null | null |
2021/advent2021_9.py
|
aatango/Advent-of-Code
|
f229abc7acaaa0a2316839bf11fa7e2fdf9caf2c
|
[
"MIT"
] | null | null | null |
2021/advent2021_9.py
|
aatango/Advent-of-Code
|
f229abc7acaaa0a2316839bf11fa7e2fdf9caf2c
|
[
"MIT"
] | null | null | null |
"""Advent of Code 2021, day 9: Smoke Basin"""
def main(input_matrix: tuple[str]) -> int:
"""
Find all of the low points on your heightmap.
What is the sum of the risk levels of all low points on your heightmap?
"""
# It's a brute force approach that does not scale to part two,
# but it's what I could think of with very little time.
# Transform string input into usable int values.
for line in input_matrix:
int_line: list[int] = []
for num in line:
int_line.append(int(num))
DEPTH_MAP.append(int_line)
# Find local minima.
low_points: list[int] = []
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
neighbours: list[int] = []
if point_index - 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index - 1])
if point_index + 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index + 1])
if line_index - 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index - 1][point_index])
if line_index + 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index + 1][point_index])
if point < min(neighbours):
low_points.append(point + 1)
return sum(low_points)
def part_two():
"""What do you get if you multiply together the sizes of the three largest basins?
Unlike most other days, this part_two() is dependent on main(),
as it's there that the global DEPTH_MAP is generated.
"""
basins_sizes: list[int] = []
# This loop is to initiate recursive calls, whenever it finds a new basin.
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
if point < 9:
basins_sizes.append(map_basin((line_index, point_index)))
basins_sizes.sort()
return basins_sizes[-3] * basins_sizes[-2] * basins_sizes[-1]
if __name__ == "__main__":
with open("../input", "r") as file:
INPUT_FILE = tuple(file.read().splitlines())
# Global so that it doesn't have to be remade for part two.
DEPTH_MAP: list[list[int]] = []
print(main(INPUT_FILE))
print(part_two())
| 28.784091
| 83
| 0.684959
|
"""Advent of Code 2021, day 9: Smoke Basin"""
def main(input_matrix: tuple[str]) -> int:
"""
Find all of the low points on your heightmap.
What is the sum of the risk levels of all low points on your heightmap?
"""
# It's a brute force approach that does not scale to part two,
# but it's what I could think of with very little time.
# Transform string input into usable int values.
for line in input_matrix:
int_line: list[int] = []
for num in line:
int_line.append(int(num))
DEPTH_MAP.append(int_line)
# Find local minima.
low_points: list[int] = []
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
neighbours: list[int] = []
if point_index - 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index - 1])
if point_index + 1 in range(0, len(line)):
neighbours.append(DEPTH_MAP[line_index][point_index + 1])
if line_index - 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index - 1][point_index])
if line_index + 1 in range(0, len(DEPTH_MAP)):
neighbours.append(DEPTH_MAP[line_index + 1][point_index])
if point < min(neighbours):
low_points.append(point + 1)
return sum(low_points)
def part_two():
"""What do you get if you multiply together the sizes of the three largest basins?
Unlike most other days, this part_two() is dependent on main(),
as it's there that the global DEPTH_MAP is generated.
"""
def map_basin(pos: tuple[int], basin_size: int = 0) -> int:
if DEPTH_MAP[pos[0]][pos[1]] >= 9:
return basin_size
basin_size += 1
DEPTH_MAP[pos[0]][pos[1]] = 9
basin_size += map_basin((max(pos[0] - 1, 0), pos[1]))
basin_size += map_basin((min(pos[0] + 1, len(DEPTH_MAP) - 1), pos[1]))
basin_size += map_basin((pos[0], max(pos[1] - 1, 0)))
basin_size += map_basin((pos[0], min(pos[1] + 1, len(DEPTH_MAP[0]) - 1)))
return basin_size
basins_sizes: list[int] = []
# This loop is to initiate recursive calls, whenever it finds a new basin.
for line_index, line in enumerate(DEPTH_MAP):
for point_index, point in enumerate(line):
if point < 9:
basins_sizes.append(map_basin((line_index, point_index)))
basins_sizes.sort()
return basins_sizes[-3] * basins_sizes[-2] * basins_sizes[-1]
if __name__ == "__main__":
with open("../input", "r") as file:
INPUT_FILE = tuple(file.read().splitlines())
# Global so that it doesn't have to be remade for part two.
DEPTH_MAP: list[list[int]] = []
print(main(INPUT_FILE))
print(part_two())
| 0
| 0
| 0
| 0
| 0
| 430
| 0
| 0
| 24
|
8485ba5f72fd09655120694f54a0ea9e297a8fe8
| 545
|
py
|
Python
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_test/dramsim3_bandwidth2/const_random.py
|
litex-hub/pythondata-cpu-blackparrot
|
ba50883f12d33e1d834640640c84ddc9329bb68a
|
[
"BSD-3-Clause"
] | 3
|
2021-05-12T21:57:55.000Z
|
2021-07-29T19:56:04.000Z
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_test/dramsim3_bandwidth2/const_random.py
|
litex-hub/litex-data-cpu-blackparrot
|
ba50883f12d33e1d834640640c84ddc9329bb68a
|
[
"BSD-3-Clause"
] | 1
|
2020-05-02T02:41:24.000Z
|
2020-05-02T02:44:25.000Z
|
pythondata_cpu_blackparrot/system_verilog/black-parrot/external/basejump_stl/testing/bsg_test/dramsim3_bandwidth2/const_random.py
|
litex-hub/litex-data-cpu-blackparrot
|
ba50883f12d33e1d834640640c84ddc9329bb68a
|
[
"BSD-3-Clause"
] | 2
|
2020-05-01T08:33:19.000Z
|
2021-07-29T19:56:12.000Z
|
import sys
import random
if __name__ == "__main__":
random.seed(0)
num_cache_p = int(sys.argv[1])
block_size_in_words_p = int(sys.argv[2])
tg = TraceGen(block_size_in_words_p)
tg.clear_tags()
#words = (2**18)/num_cache_p # 1MB
words = (2**18)/num_cache_p # 1MB
max_range = (2**14)# 64KB
for i in range(words):
taddr = random.randint(0, max_range-1) << 2
write_not_read = random.randint(0,1)
if write_not_read:
tg.send_write(taddr)
else:
tg.send_read(taddr)
tg.done()
| 20.185185
| 47
| 0.66055
|
import sys
import random
from trace_gen import *
if __name__ == "__main__":
random.seed(0)
num_cache_p = int(sys.argv[1])
block_size_in_words_p = int(sys.argv[2])
tg = TraceGen(block_size_in_words_p)
tg.clear_tags()
#words = (2**18)/num_cache_p # 1MB
words = (2**18)/num_cache_p # 1MB
max_range = (2**14)# 64KB
for i in range(words):
taddr = random.randint(0, max_range-1) << 2
write_not_read = random.randint(0,1)
if write_not_read:
tg.send_write(taddr)
else:
tg.send_read(taddr)
tg.done()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 22
|
74ee5adaad45c0809358f0e7260945651ef42945
| 5,699
|
py
|
Python
|
touchdown/tests/test_aws_vpc_subnet.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 14
|
2015-01-05T18:18:04.000Z
|
2022-02-07T19:35:12.000Z
|
touchdown/tests/test_aws_vpc_subnet.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 106
|
2015-01-06T00:17:13.000Z
|
2019-09-07T00:35:32.000Z
|
touchdown/tests/test_aws_vpc_subnet.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 5
|
2015-01-30T10:18:24.000Z
|
2022-02-07T19:35:13.000Z
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 32.565714
| 87
| 0.589402
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.tests.aws import StubberTestCase
from touchdown.tests.fixtures.aws import (
NetworkAclFixture,
RouteTableFixture,
VpcFixture,
)
from touchdown.tests.stubs.aws import SubnetStubber
class TestSubnetCreation(StubberTestCase):
def test_create_subnet(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"apply",
)
)
)
subnet.add_describe_subnets_empty_response()
subnet.add_create_subnet()
subnet.add_create_tags(Name="test-subnet")
# Wait for the subnet to exist
subnet.add_describe_subnets_empty_response()
subnet.add_describe_subnets_empty_response()
subnet.add_describe_subnets_one_response()
# Call describe_object again to make sure remote state is correctly cached
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
goal.execute()
def test_adding_route_table_to_subnet(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
route_table = self.fixtures.enter_context(
RouteTableFixture(goal, self.aws, vpcf.vpc)
)
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet",
cidr_block="192.168.0.0/25",
route_table=route_table,
),
"apply",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
subnet.add_associate_route_table("rt-52f2381b")
goal.execute()
def test_adding_nacl_table_to_subnet(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
nacl = self.fixtures.enter_context(NetworkAclFixture(goal, self.aws, vpcf.vpc))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet",
cidr_block="192.168.0.0/25",
network_acl=nacl,
),
"apply",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
subnet.add_replace_network_acl_association()
goal.execute()
def test_create_subnet_idempotent(self):
goal = self.create_goal("apply")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"apply",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(subnet.resource)), 0)
class TestSubnetDestroy(StubberTestCase):
def test_destroy_subnet(self):
goal = self.create_goal("destroy")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"destroy",
)
)
)
subnet.add_describe_subnets_one_response()
subnet.add_describe_network_acls()
subnet.add_describe_route_tables()
subnet.add_delete_subnet()
goal.execute()
def test_destroy_subnet_idempotent(self):
goal = self.create_goal("destroy")
vpcf = self.fixtures.enter_context(VpcFixture(goal, self.aws))
subnet = self.fixtures.enter_context(
SubnetStubber(
goal.get_service(
vpcf.vpc.add_subnet(
name="test-subnet", cidr_block="192.168.0.0/25"
),
"destroy",
)
)
)
subnet.add_describe_subnets_empty_response()
self.assertEqual(len(list(goal.plan())), 0)
self.assertEqual(len(goal.get_changes(subnet.resource)), 0)
| 0
| 0
| 0
| 4,866
| 0
| 0
| 0
| 141
| 113
|
62b58d3a59f61b26ea27943ea666bc132820d76e
| 8,597
|
py
|
Python
|
pymbs/processing/loops/fourbar.py
|
brutzl/pymbs
|
fb7c91435f56b5c4d460f82f081d5d1960fea886
|
[
"MIT"
] | null | null | null |
pymbs/processing/loops/fourbar.py
|
brutzl/pymbs
|
fb7c91435f56b5c4d460f82f081d5d1960fea886
|
[
"MIT"
] | null | null | null |
pymbs/processing/loops/fourbar.py
|
brutzl/pymbs
|
fb7c91435f56b5c4d460f82f081d5d1960fea886
|
[
"MIT"
] | null | null | null |
AL = 'FB_%s_AL'
BE = 'FB_%s_BE'
GA = 'FB_%s_GA'
DE = 'FB_%s_DE'
L1 = 'FB_%s_L1'
L2 = 'FB_%s_L2'
L3 = 'FB_%s_L3'
L4 = 'FB_%s_L4'
PHI = 'FB_%s_PHI'
PSI = 'FB_%s_PSI'
THETA = 'FB_%s_THETA'
A = 'FB_%s_A'
B = 'FB_%s_B'
C = 'FB_%s_C'
D = 'FB_%s_D'
E = 'FB_%s_E'
F = 'FB_%s_F'
| 35.378601
| 310
| 0.562289
|
from pymbs.processing.loops.loop import Loop
from pymbs.common.functions import sqrt
from pymbs.processing import Frame
from pymbs.processing.loads.constraint import Constraint
from numpy import pi
from pymbs.symbolics import Matrix, eye, cos, sin, atan, atan2, acos, zeros, transpose
AL = 'FB_%s_AL'
BE = 'FB_%s_BE'
GA = 'FB_%s_GA'
DE = 'FB_%s_DE'
L1 = 'FB_%s_L1'
L2 = 'FB_%s_L2'
L3 = 'FB_%s_L3'
L4 = 'FB_%s_L4'
PHI = 'FB_%s_PHI'
PSI = 'FB_%s_PSI'
THETA = 'FB_%s_THETA'
A = 'FB_%s_A'
B = 'FB_%s_B'
C = 'FB_%s_C'
D = 'FB_%s_D'
E = 'FB_%s_E'
F = 'FB_%s_F'
from pymbs.symbolics import Graph
class FourBar(Loop):
'''
Explicit Treatment of a Four Bar Linkage Mechanism
'''
'''
Sketch:
          B--3--C
         /       \
        2         4
       /           \
      A------1------D
'''
def __init__(self, name, csCa, csCb, posture):
'''
Constructor
        @param posture: Four Bar Linkage has two postures (setups): -1, 1
'''
# Assertions
assert ( isinstance(csCa, Frame) )
assert ( isinstance(csCb, Frame) )
assert ( isinstance(posture, int) )
assert ( (posture == 1) or (posture == -1 ))
# Check parents
if (csCa.parentBody.joint is None):
raise ValueError('Loop "%s": Coordinate System "%s" is directly connected to the world!'%(name,csCa.name))
if (csCb.parentBody.joint is None):
raise ValueError('Loop "%s": Coordinate System "%s" is directly connected to the world!'%(name,csCb.name))
# Build complete FourBarLinkage
jB = csCa.parentBody.joint
jD = csCb.parentBody.joint
if (jB.coordSys.parentBody.joint == None):
jB = csCb.parentBody.joint
jD = csCa.parentBody.joint
jA = jB.coordSys.parentBody.joint
csC3 = csCb
csC4 = csCa
else:
jA = jB.coordSys.parentBody.joint
csC3 = csCa
csC4 = csCb
# Do the Joints have the same axis of Rotation
if (jA.Phi == Matrix([1,0,0])):
self.sign = 1
self.pick = Matrix([[0,1,0],
[0,0,1]])
elif (jA.Phi == Matrix([0,1,0])):
self.sign = -1
self.pick = Matrix([[1,0,0],
[0,0,1]])
elif (jA.Phi == Matrix([0,0,1])):
self.sign = 1
self.pick = Matrix([[1,0,0],
[0,1,0]])
else:
raise ValueError('Loop "%s": Axis of Rotation must be either x,y or z!'%name)
assert( jA.Phi == jB.Phi ), 'jA.Phi(%s): %s, jB.Phi(%s): %s'%(jA.name,jA.Phi,jB.name,jB.Phi)
assert( jA.Phi == jD.Phi ), 'jA.Phi(%s): %s, jD.Phi(%s): %s'%(jA.name,jA.Phi,jD.name,jD.Phi)
assert( jA.Psi.norm() == 0 )
assert( jB.Psi.norm() == 0 )
assert( jD.Psi.norm() == 0 )
# Are All Coordinate Systems aligned like their parentBody?
assert( (jA.coordSys.R - eye(3)) == zeros(3) )
assert( (jB.coordSys.R - eye(3)) == zeros(3) )
assert( (jD.coordSys.R - eye(3)) == zeros(3) )
# Check that bodies between joints are the same
assert( jA.coordSys.parentBody == jD.coordSys.parentBody )
assert( jA.body == jB.coordSys.parentBody )
assert( jB.body == csC3.parentBody )
assert( jD.body == csC4.parentBody )
# Super Constructor
Loop.__init__(self, name)
# Save Parameters
self.jA = jA
self.jB = jB
self.jD = jD
self.csC3 = csC3
self.csC4 = csC4
self.posture = posture
# Independent Coordinates
self.u = [jA.q]
self.ud = [jA.qd]
self.udd = [jA.qdd]
# Dependent Coordinates
self.v = [jB.q, jD.q]
self.vd = [jB.qd, jD.qd]
self.vdd = [jB.qdd, jD.qdd]
def calc(self, graph):
'''
Returns precalculated v(u), Bvu and b_prime, s.t.
q = [u,v]', where
u: independent coordinates
v: dependent coordinates
Starting from the Constraint Equation: Phi(q) = 0,
One Obtains by Differentiation:
(d(Phi)/du)*u_dot + (d(Phi)/dv)*v_dot = 0
Ju*u_dot + Jv+v_dot = 0
Thus, v_dot = -(inv(Jv)*Ju)*u_dot = Bvu*u_dot, with Jv = d(Phi)/dv and Ju = d(Phi)/du
Differentiating once more, yields
Ju*u_ddot + Jv*v_ddot + [Ju_dot, Jv_dot]*[u_dot,v_dot]' = 0
Ju*u_ddot + Jv*v_ddot + J_dot*q_dot = 0
Using this relations, one may obtain an expression for v_ddot
v_ddot = -(inv(Jv)*Ju)*u_ddot - inv(Jv)*J_dot*q_dot
= Bvu*u_ddot + b_prime, with b_prime = -inv(Jv)*J_dot*q_dot
Finally one can transform the Equation of Motion
M*q_ddot + h = f + W'*mu
M*(J*u_ddot + b) + h = f + W'*mu with J = [1, Bvu']' and b = [0,b_prime']'
(J'*M*J)*u_ddot + J'*M*b + J'*h = J'*f, since J'*W' = 0
M_star*u_ddot + h_star = f_star
M_star = (J'*M*J)
h_star = J'*M*b + J'*h
f_star = J'*f
'''
assert isinstance(graph, Graph)
# Abbrevations
s = self.sign
# Generalised Coordinates
q1 = self.jA.q # u[0] # angle between x-axes
q1d = self.jA.qd
q2 = self.jB.q # v[0] # angle between x-axes
q2d = self.jB.qd
q3 = self.jD.q # v[1] # angle between x-axes
q3d = self.jD.qd
# Length of bars and angle between x-axis and bar
l1_vec = self.jD.coordSys.p - self.jA.coordSys.p
l1_vec2 = self.pick*l1_vec
l1 = graph.addEquation(L1%self.name, sqrt((transpose(l1_vec)*l1_vec)))
alpha = graph.addEquation(AL%self.name, s*atan2(l1_vec2[1],l1_vec2[0]))
l2_vec = self.jB.coordSys.p
l2_vec2 = self.pick*l2_vec
l2 = graph.addEquation(L2%self.name, sqrt((transpose(l2_vec)*l2_vec)))
beta = graph.addEquation(BE%self.name, s*atan2(l2_vec2[1],l2_vec2[0]))
l3_vec = self.csC3.p
l3_vec2 = self.pick*l3_vec
l3 = graph.addEquation(L3%self.name, sqrt((transpose(l3_vec)*l3_vec)))
gamma = graph.addEquation(GA%self.name, s*atan2(l3_vec2[1],l3_vec2[0]))
l4_vec = self.csC4.p
l4_vec2 = self.pick*l4_vec
l4 = graph.addEquation(L4%self.name, sqrt((transpose(l4_vec)*l4_vec)))
delta = graph.addEquation(DE%self.name, s*atan2(l4_vec2[1],l4_vec2[0]))
# angle between bars
phi_prime = graph.addEquation(PHI%self.name, q1 + beta - alpha)
# A = P1, B = P2, C = P3
#P1 = graph.addEquation(A%self.name, 2*l4*(l1-l2*cos(phi_prime)))
#P2 = graph.addEquation(B%self.name, -2*l2*l4*sin(phi_prime))
#P3 = graph.addEquation(C%self.name, l1**2+l2**2-l3**2+l4**2-2*l1*l2*cos(phi_prime))
# D = P1, E = P2, F = P3
P4 = graph.addEquation(D%self.name, 2*l3*(l2-l1*cos(-phi_prime)))
P5 = graph.addEquation(E%self.name, -2*l1*l3*sin(-phi_prime))
P6 = graph.addEquation(F%self.name, l2**2+l1**2-l4**2+l3**2-2*l2*l1*cos(-phi_prime))
# Calculate v
theta_prime = graph.addEquation(THETA%self.name, 2*atan((P5-self.posture*sqrt(P4**2+P5**2-P6**2))/(P4-P6)))
psi_prime = graph.addEquation(PSI%self.name, ((l2*sin(phi_prime)+l3*sin(phi_prime+theta_prime))/abs(l2*sin(phi_prime)+l3*sin(phi_prime+theta_prime)))*acos((l2*cos(phi_prime)+l3*cos(phi_prime+theta_prime)-l1)/l4))
v1 = (psi_prime + alpha - delta)
v0 = (theta_prime + beta - gamma)
Bvu = Matrix( [[-l2*sin(phi_prime-psi_prime)/(l3*sin(phi_prime+theta_prime-psi_prime))-1], [(l2*sin(theta_prime))/(l4*sin(phi_prime+theta_prime-psi_prime))]] )
b_prime = Matrix( [-(q1d**2*l2*cos(phi_prime-psi_prime)+l3*cos(phi_prime+theta_prime-psi_prime)*(q1d+q2d)**2-l4*q3d**2)/(l3*sin(phi_prime+theta_prime-psi_prime)) , -(q1d**2*l2*cos(theta_prime)+l3*(q1d+q2d)**2-l4*q3d**2*cos(phi_prime+theta_prime-psi_prime))/(l4*sin(phi_prime+theta_prime-psi_prime)) ] )
return ([v0,v1],Bvu,b_prime)
def applyConstraintLoads(self):
'''
apply Constraint Forces at the end of the cut
'''
# locking all directions perpendicular to axis of rotation
transLock = [0,0,0]
for i in [0,1,2]:
if (self.jA.Phi[i] == 0):
transLock[i] = 1
# apply Constraint
c = Constraint(name='Constraint_%s'%self.name, parent=self.csC3, child=self.csC4, \
transLock=transLock, rotLock=[0,0,0], active=False)
# return load object
return c
| 0
| 0
| 0
| 7,975
| 0
| 0
| 0
| 165
| 181
|
127d839e1bbc55e99f4f321f7c332ef610cb53d8
| 1,812
|
py
|
Python
|
earth_enterprise/src/server/wsgi/wms/ogc/common/image_specs.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 2,661
|
2017-03-20T22:12:50.000Z
|
2022-03-30T09:43:19.000Z
|
earth_enterprise/src/server/wsgi/wms/ogc/common/image_specs.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 1,531
|
2017-03-24T17:20:32.000Z
|
2022-03-16T18:11:14.000Z
|
earth_enterprise/src/server/wsgi/wms/ogc/common/image_specs.py
|
ezeeyahoo/earthenterprise
|
b6cac9e6228946f2f17d1edb75e118aeb3e8e8c9
|
[
"Apache-2.0"
] | 990
|
2017-03-24T11:54:28.000Z
|
2022-03-22T11:51:47.000Z
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Holds meta-information about the image formats we support."""
import collections
ImageSpec = collections.namedtuple(
"ImageSpec", "content_type file_extension pil_format")
IMAGE_SPECS = {"jpg": ImageSpec("image/jpeg", "jpg", "JPEG"),
"png": ImageSpec("image/png", "png", "PNG")
}
def IsKnownFormat(fmt):
"""Checks if the format is supported.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is supported.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return True
return False
def GetImageSpec(fmt):
"""Get the Imagespec.
Args:
fmt: Format of the image.
Returns:
image_spec: image spec.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return spec
return None
def FormatIsPng(fmt):
"""Checks if the format is of type png.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is png or not.
"""
for typ, spec in IMAGE_SPECS.iteritems():
if spec.content_type == fmt:
return typ == "png"
return False
if __name__ == "__main__":
main()
| 22.65
| 74
| 0.679912
|
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Holds meta-information about the image formats we support."""
import collections
ImageSpec = collections.namedtuple(
"ImageSpec", "content_type file_extension pil_format")
IMAGE_SPECS = {"jpg": ImageSpec("image/jpeg", "jpg", "JPEG"),
"png": ImageSpec("image/png", "png", "PNG")
}
def IsKnownFormat(fmt):
"""Checks if the format is supported.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is supported.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return True
return False
def GetImageSpec(fmt):
"""Get the Imagespec.
Args:
fmt: Format of the image.
Returns:
image_spec: image spec.
"""
for spec in IMAGE_SPECS.values():
if spec.content_type == fmt:
return spec
return None
def FormatIsPng(fmt):
"""Checks if the format is of type png.
Args:
fmt: Format of the image.
Returns:
boolean: If the format is png or not.
"""
for typ, spec in IMAGE_SPECS.iteritems():
if spec.content_type == fmt:
return typ == "png"
return False
def main():
is_format = IsKnownFormat("jpeg")
print is_format
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 44
| 0
| 0
| 23
|
9b3205aefcc2508985db4f069099edf5e7dbfa1b
| 662
|
py
|
Python
|
ClassFromQueryGenerator/CRUDPyMacros/Update.py
|
UnstableMutex/ClassFromQueryGenerator
|
5de03f61059d2c61783a9b66ab4e11060343e803
|
[
"MIT"
] | null | null | null |
ClassFromQueryGenerator/CRUDPyMacros/Update.py
|
UnstableMutex/ClassFromQueryGenerator
|
5de03f61059d2c61783a9b66ab4e11060343e803
|
[
"MIT"
] | null | null | null |
ClassFromQueryGenerator/CRUDPyMacros/Update.py
|
UnstableMutex/ClassFromQueryGenerator
|
5de03f61059d2c61783a9b66ab4e11060343e803
|
[
"MIT"
] | null | null | null |
comma=","
result="SET ANSI_NULLS ON\n"
result+="GO\n"
result+="SET QUOTED_IDENTIFIER ON\n"
result+="GO\n"
result+="CREATE PROCEDURE "+Model.TableName+"_Update\n"
result+=mapcols(pars)
result+="AS\n"
result+="BEGIN\n"
result+="SET NOCOUNT ON;\n"
result+="update [dbo].["+Model.TableName+"]\n"
result+=" set ("
result+=mapusual(sqf)
result+=")\n"
result+="WHERE " +Model.PK.Name+"=@"+Model.PK.Name+"\n"
result+="END\n"
result+="GO\n"
| 24.518519
| 55
| 0.669184
|
def sqf(col):
return "["+col.Name+"] = @"+col.Name
def pars(col):
return "@"+col.Name+" "+col.SQLType+"\n"
comma=","
def mapcols(f):
return comma.join(map(f,Model.Columns))
def mapusual(f):
return comma.join(map(f,Model.UsualColumns))
result="SET ANSI_NULLS ON\n"
result+="GO\n"
result+="SET QUOTED_IDENTIFIER ON\n"
result+="GO\n"
result+="CREATE PROCEDURE "+Model.TableName+"_Update\n"
result+=mapcols(pars)
result+="AS\n"
result+="BEGIN\n"
result+="SET NOCOUNT ON;\n"
result+="update [dbo].["+Model.TableName+"]\n"
result+=" set ("
result+=mapusual(sqf)
result+=")\n"
result+="WHERE " +Model.PK.Name+"=@"+Model.PK.Name+"\n"
result+="END\n"
result+="GO\n"
| 0
| 0
| 0
| 0
| 0
| 141
| 0
| 0
| 89
|
e33365306faf8e05ad78b480b5ad8b2e0c36c04f
| 6,338
|
py
|
Python
|
tests/core/testRpg.py
|
rrpg/engine
|
989f701b82aa7c73ea98003eed13077e5d6f15f9
|
[
"MIT"
] | 2
|
2016-04-07T23:36:46.000Z
|
2016-12-20T15:35:17.000Z
|
tests/core/testRpg.py
|
rrpg/engine
|
989f701b82aa7c73ea98003eed13077e5d6f15f9
|
[
"MIT"
] | 5
|
2016-02-04T16:28:33.000Z
|
2016-03-18T17:02:07.000Z
|
tests/core/testRpg.py
|
rrpg/engine
|
989f701b82aa7c73ea98003eed13077e5d6f15f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
| 37.502959
| 297
| 0.736668
|
# -*- coding: utf-8 -*-
import unittest
import tests.common
import core
from core.localisation import _
from core import Rpg
import models.player
from models.saved_game import saved_game
import json
import sqlite3
class rpgTests(tests.common.common):
idSavedGame = 1
idFaultySavedGame = 2
idEmptySavedGame = 3
incorrectIdSavedGame = 42
def test_unknown_world(self):
rpgEngine = Rpg.Rpg()
try:
rpgEngine.initWorld("some/unexisting/world")
except core.exception.exception as e:
self.assertEquals(str(e), _('ERROR_UNKNOWN_SELECTED_WORLD'))
def test_invalid_saved_game_id(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initSavedGame(self.incorrectIdSavedGame)
self.assertEquals(str(raised.exception), _('ERROR_RRPG_INIT_INVALID_SAVED_GAME_ID'))
def test_load_player_with_no_save(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initPlayer()
self.assertEquals(str(raised.exception), _('ERROR_SAVED_GAME_NEEDED_TO_INIT_PLAYER'))
def test_load_player_with_empty_save(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
rpgEngine.initSavedGame(self.idEmptySavedGame)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initPlayer()
self.assertEquals(str(raised.exception), _('ERROR_NON_EMPTY_SAVED_GAME_NEEDED_TO_INIT_PLAYER'))
def test_load_player_with_faulty_save(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
rpgEngine.initSavedGame(self.idFaultySavedGame)
with self.assertRaises(core.exception.exception) as raised:
rpgEngine.initPlayer()
self.assertEquals(str(raised.exception), _('ERROR_CONNECT_INVALID_CREDENTIALS'))
def test_invalid_world(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld("tests/invalidDB")
rpgEngine.initSavedGame(self.idSavedGame)
self.assertRaises(sqlite3.OperationalError, rpgEngine.initPlayer)
def test_invalid_action_format(self):
with self.assertRaises(TypeError) as raised:
self.rpg.setAction("Not list action")
self.assertEquals(str(raised.exception), _('ERROR_INVALID_FORMAT_ACTION'))
def test_invalid_action_text(self):
self.rpg.setAction(["Unknown action"])
output = self.rpg._runAction()
self.assertEquals(output, _('ERROR_UNKNOWN_COMMAND'))
def test_invalid_action_json(self):
self.rpg.setAction(["Unknown action"])
output = self.rpg._runAction(True)
self.assertEquals(output, {'error': {'message': _('ERROR_UNKNOWN_COMMAND'), 'code': 1}})
def compareSavedGamesSaveOk(self):
saves = saved_game.loadAll()
expectedSaves = [
{
'id_saved_game': 1,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER_SOME", "id_character": 4, "id_player": 3, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER_SOME", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 3,
'id_character': 4
},
{
'id_saved_game': 2,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER2bis", "id_character": 3, "id_player": 2, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER2bis", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 2,
'id_character': 3
},
{
'id_saved_game': 3,
'snapshot_player': '',
'id_player': None,
'id_character': None
}
]
self.assertEquals(saves, expectedSaves)
def compareSavedGamesSaveKo(self):
saves = saved_game.loadAll()
expectedSaves = [
{
'id_saved_game': 1,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER", "id_character": 2, "id_player": 1, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 1,
'id_character': 2
},
{
'id_saved_game': 2,
'snapshot_player': '{"id_gender": 1, "name": "TEST_PLAYER2bis", "id_character": 3, "id_player": 2, "stat_defence": 2, "stat_attack": 4, "stat_max_hp": 20, "inventory": null, "id_area": 1, "stat_current_hp": 20, "login": "TEST_PLAYER2bis", "stat_speed": 2, "id_species": 1, "stat_luck": 10}',
'id_player': 2,
'id_character': 3
},
{
'id_saved_game': 3,
'snapshot_player': '',
'id_player': None,
'id_character': None
}
]
self.assertEquals(saves, expectedSaves)
def test_login_already_used(self):
with self.assertRaises(models.player.exception) as raised:
self.rpg.createPlayer('TEST_PLAYER', 1, 1)
self.assertEquals(str(raised.exception), _('ERROR_SIGNUP_LOGIN_ALREADY_USED'))
self.compareSavedGamesSaveKo()
def test_invalid_gender(self):
with self.assertRaises(models.player.exception) as raised:
self.rpg.createPlayer('TEST_PLAYER_SOME', 'some gender', 1)
self.assertEquals(str(raised.exception), _('ERROR_SIGNUP_INVALID_GENDER'))
self.compareSavedGamesSaveKo()
def test_invalid_species(self):
with self.assertRaises(models.player.exception) as raised:
self.rpg.createPlayer('TEST_PLAYER_SOME', 1, 'some species')
self.assertEquals(str(raised.exception), _('ERROR_SIGNUP_INVALID_SPECIES'))
self.compareSavedGamesSaveKo()
def test_ok(self):
self.rpg.createPlayer('TEST_PLAYER_SOME', '1', '1')
self.compareSavedGamesSaveOk()
def test_command_with_no_saved_game(self):
rpgEngine = Rpg.Rpg()
rpgEngine.setAction([_('LOOK_COMMAND')])
with self.assertRaises(core.exception.exception) as raised:
rpgEngine._runAction(True)
self.assertEquals(str(raised.exception), _('ERROR_SAVED_GAME_NEEDED_TO_RUN_ACTION'))
def test_command_with_no_player(self):
rpgEngine = Rpg.Rpg()
rpgEngine.initWorld(self.dbFile)
rpgEngine.initSavedGame(self.idEmptySavedGame)
rpgEngine.setAction([_('LOOK_COMMAND')])
with self.assertRaises(core.exception.exception) as raised:
rpgEngine._runAction(True)
self.assertEquals(str(raised.exception), _('ERROR_CONNECTED_PLAYER_NEEDED_FOR_COMMAND'))
def test_run_action_with_no_action(self):
with self.assertRaises(core.exception.exception) as raised:
self.rpg._runAction()
self.assertEquals(str(raised.exception), _('ERROR_NO_ACTION_SET'))
| 0
| 0
| 0
| 6,098
| 0
| 0
| 0
| -8
| 223
|
b6564c976990b7fa6fc560e5b2308ec16d5f0a89
| 492
|
py
|
Python
|
ABC/210/c.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/210/c.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/210/c.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 18.923077
| 37
| 0.497967
|
from collections import defaultdict
def main():
# input
N, K = map(int, input().split())
cs = [*map(int, input().split())]
# compute
ddict = defaultdict(int)
for i in range(K):
ddict[cs[i]] += 1
ans = len(ddict)
for i in range(N-K):
ddict[cs[i]] -= 1
if ddict[cs[i]] == 0:
del ddict[cs[i]]
ddict[cs[i+K]] += 1
ans = max(ans, len(ddict))
# output
print(ans)
if __name__ == '__main__':
main()
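# A short trace of the sliding-window count above (hypothetical input): with
# N=7, K=3 and cs = [1, 2, 1, 2, 3, 3, 1], the first window [1, 2, 1] holds 2
# distinct values; the remaining windows hold 2, 3, 2 and 2 distinct values
# respectively, so the answer printed is 3.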
| 0
| 0
| 0
| 0
| 0
| 393
| 0
| 14
| 45
|
da6567f46ca5114b211c9251f1de332e436be104
| 1,892
|
py
|
Python
|
code/model/intent_classification/intent_classifier_inference.py
|
vipulraheja/IteraTeR
|
80c1939969de909c39e41e16b8866355c038b6d2
|
[
"Apache-2.0"
] | 11
|
2022-03-23T21:41:54.000Z
|
2022-03-26T13:41:01.000Z
|
code/model/intent_classification/intent_classifier_inference.py
|
vipulraheja/IteraTeR
|
80c1939969de909c39e41e16b8866355c038b6d2
|
[
"Apache-2.0"
] | null | null | null |
code/model/intent_classification/intent_classifier_inference.py
|
vipulraheja/IteraTeR
|
80c1939969de909c39e41e16b8866355c038b6d2
|
[
"Apache-2.0"
] | 1
|
2022-03-24T15:55:16.000Z
|
2022-03-24T15:55:16.000Z
|
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', '-c', required=True,
                        help='path to the RoBERTa intent classifier checkpoint')
args = parser.parse_args()
main(args)
| 35.037037
| 132
| 0.691332
|
import json
import torch
import argparse
import numpy as np
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import Trainer, TrainingArguments, RobertaTokenizer, RobertaModel, RobertaConfig, RobertaForSequenceClassification
def main(args):
checkpoint = args.checkpoint
model_name = 'roberta-large'
model_cache_dir='roberta-large-model-cache/'
model_type = RobertaForSequenceClassification
config_type = RobertaConfig
tokenizer_type = RobertaTokenizer
tokenizer = tokenizer_type.from_pretrained(
model_name,
cache_dir=model_cache_dir
)
id2label = {0: "clarity", 1: "fluency", 2: "coherence", 3: "style", 4: "meaning-changed"}
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
checkpoint = args.checkpoint
model = model_type.from_pretrained(checkpoint)
model.eval()
model.to(device)
before_text = 'I likes coffee.'
after_text = 'I like coffee.'
def score_text(before_text, after_text, tokenizer, model):
input_ids = tokenizer(before_text, after_text, return_tensors='pt', padding=True, truncation=True)
with torch.no_grad():
input_ids = input_ids.to(device)
outputs = model(**input_ids)
softmax_scores = torch.softmax(outputs.logits, dim=1)
softmax_scores = softmax_scores[0].cpu().numpy()
index = np.argmax(softmax_scores)
return index, softmax_scores[index]
index, confidence = score_text([before_text], [after_text], tokenizer, model)
label = id2label[index]
print(label)
print(confidence)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', '-c', required=True,
                        help='path to the RoBERTa intent classifier checkpoint')
args = parser.parse_args()
main(args)
| 0
| 0
| 0
| 0
| 0
| 1,370
| 0
| 134
| 133
|
740141a9f07306e45b5ebf41d61bb31b1b134c05
| 2,035
|
py
|
Python
|
Forms/mengban_seed.py
|
UlordChain/uwallet-client
|
c41f89f34dd17699cb4b285dbba9053f28be5603
|
[
"MIT"
] | 19
|
2018-08-21T06:25:30.000Z
|
2018-08-21T12:34:14.000Z
|
Forms/mengban_seed.py
|
UlordChain/uwallet-client
|
c41f89f34dd17699cb4b285dbba9053f28be5603
|
[
"MIT"
] | 1
|
2018-06-01T09:14:36.000Z
|
2018-06-01T09:20:49.000Z
|
Forms/mengban_seed.py
|
UlordChain/uwallet-client
|
c41f89f34dd17699cb4b285dbba9053f28be5603
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2017/12/18
# @Author : Shu
# @Email : [email protected]
| 40.7
| 89
| 0.584767
|
# -*- coding: utf-8 -*-
# @Time : 2017/12/18
# @Author : Shu
# @Email : [email protected]
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from FormUI.ui_getseed import Ui_getseedWD
class SeedWidget(QWidget, Ui_getseedWD):
def __init__(self, parent=None):
super(SeedWidget, self).__init__(parent)
self.setupUi(self)
self.parent = parent
self.setStyleSheet("""QFrame#frame_left{border-image:url(:/images/heisemengban)}
QFrame#frame_top{border-image:url(:/images/baisemengban)}
QFrame#frame_rigth{background-color:white;}
""")
self.ted_setting_getseed.setReadOnly(True)
self.btn_seed_password.clicked.connect(self.slot_password)
self.frame_left.installEventFilter(self.parent)
self.frame_top.installEventFilter(self.parent)
def slot_password(self):
"""查看seed之前, 需要输入密码"""
if self.led_seed_password.text().isEmpty():
self.led_seed_password.setStyleSheet("""border:1px solid red;""")
else:
self.led_seed_password.setStyleSheet("")
password = unicode(self.led_seed_password.text()).encode('utf-8')
try:
args = ['getseed', '--client']
if password:
args.append('-W')
args.append(password)
rs = self.parent.bwallet_main(*args, thread_safe=True)
except Exception as e:
print (e)
if 'Incorrect password' in str(e):
self.led_seed_password.setStyleSheet("""border:1px solid red;""")
else:
self.led_seed_password.setStyleSheet("""border:1px solid yellow;""")
else:
self.ted_setting_getseed.setText(rs)
self.ted_setting_getseed.setVisible(True)
self.led_seed_password.setVisible(False)
self.btn_seed_password.setVisible(False)
| 30
| 0
| 0
| 1,804
| 0
| 0
| 0
| 30
| 90
|
f52c7e893c3ecdab0771489d791ee3bc29fa08c0
| 325
|
py
|
Python
|
test/test_json_equal.py
|
dakotahawkins/MCSchematicIntersection
|
a5bc130c9f887ca6a253c0a6508fcbca5f164df5
|
[
"MIT"
] | null | null | null |
test/test_json_equal.py
|
dakotahawkins/MCSchematicIntersection
|
a5bc130c9f887ca6a253c0a6508fcbca5f164df5
|
[
"MIT"
] | null | null | null |
test/test_json_equal.py
|
dakotahawkins/MCSchematicIntersection
|
a5bc130c9f887ca6a253c0a6508fcbca5f164df5
|
[
"MIT"
] | null | null | null |
"""Tests two schematic json files to ensure they're equal
"""
import json
import sys
INPUT_A: str = sys.argv[1]
INPUT_B: str = sys.argv[2]
with open(INPUT_A, 'r') as infile_a:
with open(INPUT_B, 'r') as infile_b:
if json.load(infile_a)['nbt'] != json.load(infile_b)['nbt']:
sys.exit(1)
sys.exit(0)
| 21.666667
| 68
| 0.64
|
"""Tests two schematic json files to ensure they're equal
"""
import json
import sys
INPUT_A: str = sys.argv[1]
INPUT_B: str = sys.argv[2]
with open(INPUT_A, 'r') as infile_a:
with open(INPUT_B, 'r') as infile_b:
if json.load(infile_a)['nbt'] != json.load(infile_b)['nbt']:
sys.exit(1)
sys.exit(0)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ae2007157bf4f09f792df527fa386d5e97a2fa2a
| 54
|
py
|
Python
|
kenning/resources/reports/__init__.py
|
antmicro/edge-ai-tester
|
6b145145ed1cec206ae0229c846fb33d272f3ffa
|
[
"Apache-2.0"
] | 20
|
2021-06-24T13:37:21.000Z
|
2022-03-25T10:50:26.000Z
|
kenning/resources/reports/__init__.py
|
antmicro/edge-ai-tester
|
6b145145ed1cec206ae0229c846fb33d272f3ffa
|
[
"Apache-2.0"
] | null | null | null |
kenning/resources/reports/__init__.py
|
antmicro/edge-ai-tester
|
6b145145ed1cec206ae0229c846fb33d272f3ffa
|
[
"Apache-2.0"
] | 1
|
2021-11-09T17:23:04.000Z
|
2021-11-09T17:23:04.000Z
|
"""
Contains the templates for benchmark reports.
"""
| 13.5
| 45
| 0.722222
|
"""
Contains the templates for benchmark reports.
"""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ae71a18b83fe0ab9540c787415a3e73b56ccb447
| 1,884
|
py
|
Python
|
fs.py
|
mission-liao/fin-stmt-additional
|
da9ef5299e6ff10406996d0cb0975b46498d3c39
|
[
"MIT"
] | null | null | null |
fs.py
|
mission-liao/fin-stmt-additional
|
da9ef5299e6ff10406996d0cb0975b46498d3c39
|
[
"MIT"
] | null | null | null |
fs.py
|
mission-liao/fin-stmt-additional
|
da9ef5299e6ff10406996d0cb0975b46498d3c39
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
if __name__ == '__main__':
cli()
| 28.984615
| 75
| 0.685775
|
# -*- coding: utf-8 -*-
import os
import click
from fstmt import TableAdaptorFactory, DashboardFactory, table
def get_data_dir():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def get_database_path():
return os.path.join(get_data_dir(), 'fstmt.sqlite')
@click.group()
def cli():
pass
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--year', type=int)
@click.option('--quarter', type=int, default=4)
@click.option('--col', type=(str, str), multiple=True)
def insert(target, market, symbol, year, quarter, col):
t = TableAdaptorFactory(get_database_path()).by_shortcut(target)
market = market.upper()
if isinstance(t, table.Stock):
if year is not None:
raise Exception("Providing 'year' when creating stocks")
if col :
raise Exception("Providing 'col' when creating stocks")
t.insert(market, symbol)
else:
t.insert(market, symbol, year, quarter, col)
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--year')
@click.option('--quarter', type=int, default=4)
def delete(target, market, symbol, year, quarter):
t = TableAdaptorFactory(get_database_path()).by_shortcut(target)
market = market.upper()
t.delete(market, symbol, year, quarter)
@cli.command()
@click.argument('target')
@click.argument('market')
@click.argument('symbol')
@click.option('--arg', type=(str, str), multiple=True)
def query(target, market, symbol, arg):
d = DashboardFactory(get_database_path()).by_shortcut(target)
market = market.upper()
d.draw(market, symbol, arg)
@cli.command()
@click.argument('target')
def migrate(target):
t = TableAdaptorFactory(get_database_path()).by_shortcut(target)
t.migrate()
if __name__ == '__main__':
cli()
| 0
| 1,441
| 0
| 0
| 0
| 133
| 0
| 20
| 228
|
b3a45dcb40d939002cd6cc74fed37e8c87cd19b8
| 2,539
|
py
|
Python
|
rbtools/clients/tests/test_scanning.py
|
fangwentong/rbtools
|
c09f5c93fd61d447dee19b643ddfcf00ba92f920
|
[
"MIT"
] | null | null | null |
rbtools/clients/tests/test_scanning.py
|
fangwentong/rbtools
|
c09f5c93fd61d447dee19b643ddfcf00ba92f920
|
[
"MIT"
] | null | null | null |
rbtools/clients/tests/test_scanning.py
|
fangwentong/rbtools
|
c09f5c93fd61d447dee19b643ddfcf00ba92f920
|
[
"MIT"
] | 1
|
2020-06-27T23:08:47.000Z
|
2020-06-27T23:08:47.000Z
|
"""Unit tests for client scanning."""
from __future__ import unicode_literals
| 35.760563
| 78
| 0.648681
|
"""Unit tests for client scanning."""
from __future__ import unicode_literals
import os
from rbtools.clients import scan_usable_client
from rbtools.clients.git import GitClient
from rbtools.clients.svn import SVNClient
from rbtools.clients.tests import SCMClientTests
from rbtools.utils.process import execute
class ScanningTests(SCMClientTests):
"""Unit tests for client scanning."""
def test_scanning_nested_repos_1(self):
"""Testing scan_for_usable_client with nested repositories (git inside
svn)
"""
git_dir = os.path.join(self.testdata_dir, 'git-repo')
svn_dir = os.path.join(self.testdata_dir, 'svn-repo')
# Check out SVN first.
clone_dir = self.chdir_tmp()
execute(['svn', 'co', 'file://%s' % svn_dir, 'svn-repo'],
env=None, ignore_errors=False, extra_ignore_errors=())
svn_clone_dir = os.path.join(clone_dir, 'svn-repo')
# Now check out git.
git_clone_dir = os.path.join(svn_clone_dir, 'git-repo')
os.mkdir(git_clone_dir)
execute(['git', 'clone', git_dir, git_clone_dir],
env=None, ignore_errors=False, extra_ignore_errors=())
os.chdir(git_clone_dir)
repository_info, tool = scan_usable_client({}, self.options)
self.assertEqual(repository_info.local_path,
os.path.realpath(git_clone_dir))
self.assertEqual(type(tool), GitClient)
def test_scanning_nested_repos_2(self):
"""Testing scan_for_usable_client with nested repositories (svn inside
git)
"""
git_dir = os.path.join(self.testdata_dir, 'git-repo')
svn_dir = os.path.join(self.testdata_dir, 'svn-repo')
# Check out git first
clone_dir = self.chdir_tmp()
git_clone_dir = os.path.join(clone_dir, 'git-repo')
os.mkdir(git_clone_dir)
execute(['git', 'clone', git_dir, git_clone_dir],
env=None, ignore_errors=False, extra_ignore_errors=())
# Now check out svn.
svn_clone_dir = os.path.join(git_clone_dir, 'svn-repo')
os.chdir(git_clone_dir)
execute(['svn', 'co', 'file://%s' % svn_dir, 'svn-repo'],
env=None, ignore_errors=False, extra_ignore_errors=())
os.chdir(svn_clone_dir)
repository_info, tool = scan_usable_client({}, self.options)
self.assertEqual(repository_info.local_path,
os.path.realpath(svn_clone_dir))
self.assertEqual(type(tool), SVNClient)
| 0
| 0
| 0
| 2,202
| 0
| 0
| 0
| 100
| 157
|
0662e69a71e1cc9d3473c7b9d5a6fe55d4510954
| 2,858
|
py
|
Python
|
tests/test_archive.py
|
lgq2015/ubuntu-isign
|
2b72d0c260d13e1dce4a9438a9b0cb566a0fcdaf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_archive.py
|
lgq2015/ubuntu-isign
|
2b72d0c260d13e1dce4a9438a9b0cb566a0fcdaf
|
[
"Apache-2.0"
] | null | null | null |
tests/test_archive.py
|
lgq2015/ubuntu-isign
|
2b72d0c260d13e1dce4a9438a9b0cb566a0fcdaf
|
[
"Apache-2.0"
] | 1
|
2020-10-26T17:36:54.000Z
|
2020-10-26T17:36:54.000Z
|
import logging
log = logging.getLogger(__name__)
| 32.477273
| 89
| 0.731631
|
from isign_base_test import IsignBaseTest
from isign.archive import archive_factory, Archive, AppArchive, AppZipArchive, IpaArchive
from isign.utils import PY3
import logging
log = logging.getLogger(__name__)
class TestArchive(IsignBaseTest):
def _test_good(self, filename, klass):
archive = archive_factory(filename)
assert archive is not None
assert archive.__class__ is klass
assert isinstance(archive, Archive)
def test_archive_factory_app(self):
self._test_good(self.TEST_APP, AppArchive)
def test_archive_factory_appzip(self):
self._test_good(self.TEST_APPZIP, AppZipArchive)
def test_archive_factory_ipa(self):
self._test_good(self.TEST_IPA, IpaArchive)
def test_archive_factory_nonapp_dir(self):
archive = archive_factory(self.TEST_NONAPP_DIR)
assert archive is None
def test_archive_factory_nonapp_ipa(self):
archive = archive_factory(self.TEST_NONAPP_IPA)
assert archive is None
def test_archive_factory_nonapp_txt(self):
archive = archive_factory(self.TEST_NONAPP_TXT)
assert archive is None
def test_archive_factory_nonapp_simulator_app(self):
archive = archive_factory(self.TEST_SIMULATOR_APP)
assert archive is None
class TestBundleInfo(IsignBaseTest):
def _test_bundle_info(self, filename):
archive = archive_factory(filename)
assert archive is not None
assert archive.bundle_info is not None
if PY3:
assert archive.bundle_info[b'CFBundleName'] == b'isignTestApp'
else:
assert archive.bundle_info['CFBundleName'] == 'isignTestApp'
def test_app_archive_info(self):
self._test_bundle_info(self.TEST_APP)
def test_appzip_archive_info(self):
self._test_bundle_info(self.TEST_APPZIP)
def test_ipa_archive_info(self):
self._test_bundle_info(self.TEST_IPA)
class TestArchivePrecheck(IsignBaseTest):
def test_precheck_app(self):
assert AppArchive.precheck(self.TEST_APP)
def test_precheck_appzip(self):
assert AppZipArchive.precheck(self.TEST_APPZIP)
def test_precheck_ipa(self):
assert IpaArchive.precheck(self.TEST_IPA)
def test_bad_precheck_app(self):
assert AppArchive.precheck(self.TEST_NONAPP_DIR) is False
assert AppArchive.precheck(self.TEST_APPZIP) is False
assert AppArchive.precheck(self.TEST_IPA) is False
def test_bad_precheck_appzip(self):
assert AppZipArchive.precheck(self.TEST_APP) is False
assert AppZipArchive.precheck(self.TEST_IPA) is False
def test_bad_precheck_ipa(self):
assert IpaArchive.precheck(self.TEST_APP) is False
assert IpaArchive.precheck(self.TEST_APPZIP) is False
assert IpaArchive.precheck(self.TEST_NONAPP_IPA) is False
| 0
| 0
| 0
| 2,576
| 0
| 0
| 0
| 94
| 135
|
628390e7b0e104bdccc43edd629d89f2f161d0b5
| 4,769
|
py
|
Python
|
cotrendy/lightcurves.py
|
PLATO-Mission/cotrendy
|
31d03f0cfd8329f72d897e84d2aa6c0ca8865dfe
|
[
"MIT"
] | null | null | null |
cotrendy/lightcurves.py
|
PLATO-Mission/cotrendy
|
31d03f0cfd8329f72d897e84d2aa6c0ca8865dfe
|
[
"MIT"
] | null | null | null |
cotrendy/lightcurves.py
|
PLATO-Mission/cotrendy
|
31d03f0cfd8329f72d897e84d2aa6c0ca8865dfe
|
[
"MIT"
] | null | null | null |
"""
Light curves components for Cotrendy
"""
import sys
import logging
import cotrendy.utils as cuts
def load_photometry(config, apply_object_mask=True):
"""
Read in a photometry file
Parameters
----------
config : dict
Configuration file loaded via TOML
apply_object_mask : boolean
Mask out a subset of stars?
Returns
-------
times : array
Array of times of observation
lightcurves : list
List of Lightcurve objects, one per star
Raises
------
None
"""
root = config['global']['root']
time_file = config['data']['time_file']
flux_file = config['data']['flux_file']
error_file = config['data']['error_file']
times = cuts.depicklify(f"{root}/{time_file}")
if times is None:
logging.critical(f"Could not load {root}/{time_file}...")
sys.exit(1)
fluxes = cuts.depicklify(f"{root}/{flux_file}")
if fluxes is None:
logging.critical(f"Could not load {root}/{flux_file}...")
sys.exit(1)
errors = cuts.depicklify(f"{root}/{error_file}")
if errors is None:
logging.critical(f"Could not load {root}/{error_file}...")
sys.exit(1)
if fluxes.shape != errors.shape or len(times) != len(fluxes[0]):
logging.critical("Data arrays have mismatched shapes...")
sys.exit(1)
# now apply the mask if needed
if apply_object_mask:
objects_mask_file = config['data']['objects_mask_file']
mask = cuts.depicklify(f"{root}/{objects_mask_file}")
fluxes = fluxes[mask]
errors = errors[mask]
# now make list of Lightcurves objects
lightcurves = []
n_stars = len(fluxes)
i = 0
for star, star_err in zip(fluxes, errors):
logging.info(f"{i+1}/{n_stars}")
lightcurves.append(Lightcurve(star, star_err, config['data']['reject_outliers']))
i += 1
return times, lightcurves
| 29.993711
| 89
| 0.585657
|
"""
Light curves components for Cotrendy
"""
import sys
import logging
import numpy as np
from scipy.stats import median_absolute_deviation
import cotrendy.utils as cuts
def load_photometry(config, apply_object_mask=True):
"""
Read in a photometry file
Parameters
----------
config : dict
Configuration file loaded via TOML
apply_object_mask : boolean
Mask out a subset of stars?
Returns
-------
times : array
Array of times of observation
lightcurves : list
List of Lightcurve objects, one per star
Raises
------
None
"""
root = config['global']['root']
time_file = config['data']['time_file']
flux_file = config['data']['flux_file']
error_file = config['data']['error_file']
times = cuts.depicklify(f"{root}/{time_file}")
if times is None:
logging.critical(f"Could not load {root}/{time_file}...")
sys.exit(1)
fluxes = cuts.depicklify(f"{root}/{flux_file}")
if fluxes is None:
logging.critical(f"Could not load {root}/{flux_file}...")
sys.exit(1)
errors = cuts.depicklify(f"{root}/{error_file}")
if errors is None:
logging.critical(f"Could not load {root}/{error_file}...")
sys.exit(1)
if fluxes.shape != errors.shape or len(times) != len(fluxes[0]):
logging.critical("Data arrays have mismatched shapes...")
sys.exit(1)
# now apply the mask if needed
if apply_object_mask:
objects_mask_file = config['data']['objects_mask_file']
mask = cuts.depicklify(f"{root}/{objects_mask_file}")
fluxes = fluxes[mask]
errors = errors[mask]
# now make list of Lightcurves objects
lightcurves = []
n_stars = len(fluxes)
i = 0
for star, star_err in zip(fluxes, errors):
logging.info(f"{i+1}/{n_stars}")
lightcurves.append(Lightcurve(star, star_err, config['data']['reject_outliers']))
i += 1
return times, lightcurves
class Lightcurve():
"""
Lightcurve object of real object
"""
def __init__(self, flux, flux_err, filter_outliers=False):
"""
Initialise the class
Parameters
----------
flux : array-like
list of flux values
flux_err : array-like
list of flux error values
filter_outliers : boolean
turn on PLATO outlier rejection?
default = False
Returns
-------
None
Raises
------
None
"""
# Initialise variables to hold data when trend is applied
self.flux_wtrend = flux
self.fluxerr_wtrend = flux_err
self.median_flux = np.median(flux)
self.outlier_indices = None
# store the lightcurve after removing outliers
if filter_outliers:
self.filter_outliers()
def filter_outliers(self, alpha=5, beta=12):
"""
Filter out data points that are > alpha*local MAD
within a window ±beta around a given data point.
Replace the data point with the local median
so as not to introduce gaps
Parameters
----------
alpha : int
Scaling factor for number of MADs to reject outside
beta : int
Half width of sliding window for MAD rejection
Returns
-------
None
Outliers indices are included in self.outlier_indices
Raises
------
None
"""
# could imagine this having a voting system where each beta*2+1 slice
# votes on an outlier and if >N votes it gets nuked
outlier_indices = []
for i in np.arange(beta, len(self.flux_wtrend)-beta-1):
window = self.flux_wtrend[i-beta: i+beta+1]
med = np.median(window)
mad = median_absolute_deviation(window)
outlier_positions = np.where(((window >= med+alpha*mad) |
(window <= med-alpha*mad)))[0] + i - beta
# gather them up and then correct them with a median
# window centered on them
for outlier_position in outlier_positions:
if outlier_position not in outlier_indices:
outlier_indices.append(outlier_position)
# now go back and fix the outliers
for outlier in outlier_indices:
lower = outlier-beta
upper = outlier+beta+1
if lower < 0:
lower = 0
if upper > len(self.flux_wtrend):
upper = len(self.flux_wtrend)
med = np.median(self.flux_wtrend[lower:upper])
self.flux_wtrend[outlier] = med
self.outlier_indices = outlier_indices
| 2
| 0
| 0
| 2,747
| 0
| 0
| 0
| 25
| 67
|
c4d9e25825d0a67968b72afbc467451be752f281
| 1,954
|
py
|
Python
|
BigQuery_Script.py
|
rezaho/iipp_patstat2018
|
b83e913a124113052dfbfc5d43ef9d9f6a3f7af0
|
[
"Apache-2.0"
] | null | null | null |
BigQuery_Script.py
|
rezaho/iipp_patstat2018
|
b83e913a124113052dfbfc5d43ef9d9f6a3f7af0
|
[
"Apache-2.0"
] | null | null | null |
BigQuery_Script.py
|
rezaho/iipp_patstat2018
|
b83e913a124113052dfbfc5d43ef9d9f6a3f7af0
|
[
"Apache-2.0"
] | null | null | null |
# Script for creating and loading PatStat2018b dataset into Big Query tables
# coding: utf-8
###############################################
###### Importing Libraries and functions ######
from google.cloud import bigquery
from open_patstat.utils.gcp import create_table
from open_patstat.utils.schema import Schema
####################################################
###### Initializing the Client and Job Config ######
# Before running this line, make sure that you have defined the environment variable...
# ..."GOOGLE_APPLICATION_CREDENTIALS" which points to the JSON file containing authentication key
client = bigquery.Client()
# Initializing the Job_config
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 1
job_config.max_bad_records = 10
job_config.source_format = bigquery.SourceFormat.CSV
dataset_ref = client.dataset('patstat')
###########################################
####### Creating and Adding Tables ########
# Tables list to be loaded
tables_list = ['tls201', 'tls209', 'tls204', 'tls207', 'tls206', 'tls211', 'tls212']
# Google Bucket directory address, which contains all data files
gs_add = 'gs://patstat_2018g/data_PATSTAT_Global_2018_Autumn/'
# Loading the tables in the list
for table in tables_list:
# Creating the table
create_table(client,
dataset_id='patstat',
table_id=table,
schema=getattr(Schema(),table))
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
load_job = client.load_table_from_uri(
source_uris=gs_add+table+'_*.gz',
destination=table_ref,
# job_id=job_id,
job_id_prefix='lgs-',
job_config=job_config,
)
load_job.result()
| 34.892857
| 97
| 0.665814
|
# Script for creating and loading PatStat2018b dataset into Big Query tables
# coding: utf-8
###############################################
###### Importing Libraries and functions ######
from google.cloud import bigquery
from open_patstat.utils.gcp import create_table, load_gcs_file, delete_table
from open_patstat.utils.schema import Schema
####################################################
###### Initializing the Client and Job Config ######
# Before running this line, make sure that you have defined the environment variable...
# ..."GOOGLE_APPLICATION_CREDENTIALS" which points to the JSON file containing authentication key
client = bigquery.Client()
# Initializing the Job_config
job_config = bigquery.LoadJobConfig()
job_config.skip_leading_rows = 1
job_config.max_bad_records = 10
job_config.source_format = bigquery.SourceFormat.CSV
dataset_ref = client.dataset('patstat')
###########################################
####### Creating and Adding Tables ########
# Tables list to be loaded
tables_list = ['tls201', 'tls209', 'tls204', 'tls207', 'tls206', 'tls211', 'tls212']
# Google Bucket directory address, which contains all data files
gs_add = 'gs://patstat_2018g/data_PATSTAT_Global_2018_Autumn/'
# Loading the tables in the list
for table in tables_list:
# Creating the table
create_table(client,
dataset_id='patstat',
table_id=table,
schema=getattr(Schema(),table))
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
# Adding files to the table from GCP bucket
table_ref = dataset_ref.table(table)
job_config.schema = getattr(Schema(),table)
load_job = client.load_table_from_uri(
source_uris=gs_add+table+'_*.gz',
destination=table_ref,
# job_id=job_id,
job_id_prefix='lgs-',
job_config=job_config,
)
load_job.result()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0
|
fc24f84cff67f66fdbc72dc2ba547c523b3814fe
| 828
|
py
|
Python
|
external/cclib/bridge/cclib2pyquante.py
|
faribas/RMG-Py
|
6149e29b642bf8da9537e2db98f15121f0e040c7
|
[
"MIT"
] | 1
|
2017-12-18T18:43:22.000Z
|
2017-12-18T18:43:22.000Z
|
external/cclib/bridge/cclib2pyquante.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 72
|
2016-06-06T18:18:49.000Z
|
2019-11-17T03:21:10.000Z
|
external/cclib/bridge/cclib2pyquante.py
|
speth/RMG-Py
|
1d2c2b684580396e984459d9347628a5ceb80e2e
|
[
"MIT"
] | 3
|
2017-09-22T15:47:37.000Z
|
2021-12-30T23:51:47.000Z
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 737 $"
from PyQuante.Molecule import Molecule
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
>>> print int(en * 10) / 10. # Should be around -73.8
-73.8
"""
return Molecule("notitle", zip(atomnos, atomcoords), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29.571429
| 74
| 0.621981
|
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 737 $"
from PyQuante.Molecule import Molecule
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
>>> print int(en * 10) / 10. # Should be around -73.8
-73.8
"""
return Molecule("notitle", zip(atomnos, atomcoords), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f9a937ded3908623f9ea6aa4b476025ff2324f45
| 1,106
|
py
|
Python
|
NBATextAlerts/Alerts.py
|
kevinfjiang/NBATextAlerts
|
0ddd4fc0fa7a272191c422167350d8813581675b
|
[
"MIT"
] | 1
|
2021-03-24T04:39:40.000Z
|
2021-03-24T04:39:40.000Z
|
NBATextAlerts/Alerts.py
|
kevinfjiang/NBATextAlerts
|
0ddd4fc0fa7a272191c422167350d8813581675b
|
[
"MIT"
] | 1
|
2021-03-24T05:33:20.000Z
|
2021-03-24T05:36:28.000Z
|
NBATextAlerts/Alerts.py
|
kevinfjiang/NBATextAlerts
|
0ddd4fc0fa7a272191c422167350d8813581675b
|
[
"MIT"
] | null | null | null |
"""
https://www.twilio.com/
This link is the basis for the text messaging, make sure to sign up!
After registering, press the home button and click "Dashboard", both in the top left
You will see the following lines
"cellphone" -> Paste verified Twilio number as string
"ACCOUNT SID" -> Paste that number into account as string
"AUTH TOKEN" -> click show and paste that into token as string
"PHONE NUMBER" -> Paste that into token as string
Remember to verify your phone number
"""
cellphone = "" #Input the phone number you want to send texts too (the phone number verified by twilio)
twilio_number = ""#Twilio provides a PHONE NUMBER, input it here
account = ""#Input ACCOUNT SID
token = ""#AUTH TOKEN, press show
#Test message if calling alerts. Run Alerts.py to test the system is working
if __name__ == "__main__":
send_message("Test message. Did you receive it?")
| 30.722222
| 103
| 0.711573
|
"""
https://www.twilio.com/
This link is the basis for the text messaging, make sure to sign up!
After registering, press the home button and click "Dashboard", both in the top left
You will see the following lines
"cellphone" -> Paste verified Twilio number as string
"ACCOUNT SID" -> Paste that number into account as string
"AUTH TOKEN" -> click show and paste that into token as string
"PHONE NUMBER" -> Paste that into token as string
Remember to verify your phone number
"""
from twilio.rest import Client
cellphone = "" #Input the phone number you want to send texts too (the phone number verified by twilio)
twilio_number = ""#Twilio provides a PHONE NUMBER, input it here
account = ""#Input ACCOUNT SID
token = ""#AUTH TOKEN, press show
def send_message(message):
client = Client(account, token)
client.messages.create(to=cellphone,
from_=twilio_number,
body=message)
#Test message if calling alerts. Run Alerts.py to test the system is working
if __name__ == "__main__":
send_message("Test message. Did you receive it?")
| 0
| 0
| 0
| 0
| 0
| 170
| 0
| 9
| 46
|
abfd30e1b28d8aa306ca97c0ff99e36c6c64c29c
| 2,546
|
py
|
Python
|
utils/timer.py
|
FanmingL/ESCP
|
518f13f8b002d142f670f52d9ef34778e2c2d59f
|
[
"MIT"
] | null | null | null |
utils/timer.py
|
FanmingL/ESCP
|
518f13f8b002d142f670f52d9ef34778e2c2d59f
|
[
"MIT"
] | null | null | null |
utils/timer.py
|
FanmingL/ESCP
|
518f13f8b002d142f670f52d9ef34778e2c2d59f
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
test_timer()
| 33.064935
| 89
| 0.569521
|
import time
import inspect
import numpy as np
class Timer:
def __init__(self):
self.check_points = {}
self.points_time = {}
self.need_summary = {}
self.init_time = time.time()
def reset(self):
self.check_points = {}
self.points_time = {}
self.need_summary = {}
@staticmethod
def file_func_line(stack=1):
frame = inspect.stack()[stack][0]
info = inspect.getframeinfo(frame)
return info.filename, info.function, info.lineno
@staticmethod
def line(stack=2, short=False):
file, func, lineo = Timer.file_func_line(stack)
if short:
return f"line_{lineo}_func_{func}"
return f"line: {lineo}, func: {func}, file: {file}"
def register_point(self, tag=None, stack=3, short=True, need_summary=True, level=0):
if tag is None:
tag = self.line(stack, short)
if False and not tag.startswith('__'):
print(f'arrive {tag}, time: {time.time() - self.init_time}, level: {level}')
if level not in self.check_points:
self.check_points[level] = []
self.points_time[level] = []
self.need_summary[level] = set()
self.check_points[level].append(tag)
self.points_time[level].append(time.time())
if need_summary:
self.need_summary[level].add(tag)
def register_end(self, stack=4, level=0):
self.register_point('__timer_end_unique', stack, need_summary=False, level=level)
def summary(self):
if len(self.check_points) == 0:
return dict()
res = {}
for level in self.check_points:
self.register_point('__timer_finale_unique', level=level)
res_tmp = {}
for ind, item in enumerate(self.check_points[level][:-1]):
time_now = self.points_time[level][ind]
time_next = self.points_time[level][ind + 1]
if item in res_tmp:
res_tmp[item].append(time_next - time_now)
else:
res_tmp[item] = [time_next - time_now]
for k, v in res_tmp.items():
if k in self.need_summary[level]:
res['period_' + k] = np.mean(v)
self.reset()
return res
def test_timer():
timer = Timer()
for i in range(4):
timer.register_point()
time.sleep(1)
for k, v in timer.summary().items():
print(f'{k}, {v}')
if __name__ == '__main__':
test_timer()
| 0
| 376
| 0
| 1,873
| 0
| 160
| 0
| -20
| 112
|
f7642e021866ac47a0bcd5fd062c3e4fbd79be21
| 4,042
|
py
|
Python
|
src/interface_py/h2o4gpu/util/lightgbm_dynamic.py
|
pnijhara/h2o4gpu
|
6257112c134136471420b68241f57190a445b67d
|
[
"Apache-2.0"
] | 458
|
2017-09-20T08:32:10.000Z
|
2022-02-28T18:40:57.000Z
|
src/interface_py/h2o4gpu/util/lightgbm_dynamic.py
|
Jun-NIBS/h2o4gpu
|
9885416deb3285f5d0f33023d6c07373ac4fc0b7
|
[
"Apache-2.0"
] | 461
|
2017-09-20T11:39:04.000Z
|
2021-11-21T15:51:42.000Z
|
src/interface_py/h2o4gpu/util/lightgbm_dynamic.py
|
Jun-NIBS/h2o4gpu
|
9885416deb3285f5d0f33023d6c07373ac4fc0b7
|
[
"Apache-2.0"
] | 114
|
2017-09-20T12:08:07.000Z
|
2021-11-29T14:15:40.000Z
|
# pylint: skip-file
import os
import importlib.util
got_cpu_lgb = False
got_gpu_lgb = False
from h2o4gpu.util.gpu import device_count
_, ngpus_vis_global = device_count()
enable_lightgbm_import = True
if enable_lightgbm_import:
lgb_loader = importlib.util.find_spec('lightgbm')
lgb_found = lgb_loader is not None
always_do_dynamic_lgb_selection = True # False will take existing lightgbm package if exists, True will always overwrite existing
do_dynamic_lgb_selection = True
link_method = False # False (default now) is to directly load from path
if not lgb_found and do_dynamic_lgb_selection or always_do_dynamic_lgb_selection:
numpy_loader = importlib.util.find_spec('numpy')
found = numpy_loader is not None
if found:
numpy_path = os.path.dirname(numpy_loader.origin)
dirname = "/".join(numpy_path.split("/")[:-1])
lgb_path_gpu = os.path.join(dirname, "lightgbm_gpu")
lgb_path_cpu = os.path.join(dirname, "lightgbm_cpu")
lgb_path_new = os.path.join(dirname, "lightgbm")
got_lgb = False
expt_gpu = ""
expt_cpu = ""
expt_other = ""
# This locally leads to lgb as if did import lightgbm as lgb, but also any other file that imports lgb will immediately return with lgb even though no module name "lightgbm" has a path in site-packages.
try:
if ngpus_vis_global > 0:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_gpu, '__init__.py'))
lgb = loader.load_module()
print("Selected GPU version of lightgbm to import\n")
got_lgb = True
# This locally leads to lgb as if did import lightgbm as lgb, but also any other file that imports lgb will immediately return with lgb even though no module name "lightgbm" has a path in site-packages.
got_gpu_lgb = True
except Exception as e:
expt_gpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_cpu, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_gpu)
else:
print("Selected CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_cpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_new, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected non-dynamic CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_other)
else:
print("Selected non-dynamic CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_other = str(e)
pass
if not got_lgb:
print(
"Unable to dynamically or non-dynamically import either GPU or CPU version of lightgbm: expt_gpu=%s expt_cpu=%s expt_other=%s\n" % (
expt_gpu, expt_cpu, expt_other))
else:
print("Did not find lightgbm or numpy\n")
| 47.552941
| 222
| 0.543295
|
# pylint: skip-file
import os
import importlib.util
got_cpu_lgb = False
got_gpu_lgb = False
from h2o4gpu.util.gpu import device_count
_, ngpus_vis_global = device_count()
enable_lightgbm_import = True
if enable_lightgbm_import:
lgb_loader = importlib.util.find_spec('lightgbm')
lgb_found = lgb_loader is not None
always_do_dynamic_lgb_selection = True # False will take existing lightgbm package if exists, True will always overwrite existing
do_dynamic_lgb_selection = True
link_method = False # False (default now) is to directly load from path
if not lgb_found and do_dynamic_lgb_selection or always_do_dynamic_lgb_selection:
numpy_loader = importlib.util.find_spec('numpy')
found = numpy_loader is not None
if found:
numpy_path = os.path.dirname(numpy_loader.origin)
dirname = "/".join(numpy_path.split("/")[:-1])
lgb_path_gpu = os.path.join(dirname, "lightgbm_gpu")
lgb_path_cpu = os.path.join(dirname, "lightgbm_cpu")
lgb_path_new = os.path.join(dirname, "lightgbm")
got_lgb = False
expt_gpu = ""
expt_cpu = ""
expt_other = ""
# This locally leads to lgb as if did import lightgbm as lgb, but also any other file that imports lgb will immediately return with lgb even though no module name "lightgbm" has a path in site-packages.
try:
if ngpus_vis_global > 0:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_gpu, '__init__.py'))
lgb = loader.load_module()
print("Selected GPU version of lightgbm to import\n")
got_lgb = True
# This locally leads to lgb as if did import lightgbm as lgb, but also any other file that imports lgb will immediately return with lgb even though no module name "lightgbm" has a path in site-packages.
got_gpu_lgb = True
except Exception as e:
expt_gpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_cpu, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_gpu)
else:
print("Selected CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_cpu = str(e)
pass
if not got_lgb:
try:
loader = importlib.machinery.SourceFileLoader('lightgbm',
os.path.join(lgb_path_new, '__init__.py'))
lgb = loader.load_module()
if ngpus_vis_global > 0:
print(
"Selected non-dynamic CPU version of lightgbm to import (GPU selection failed due to %s)\n" % expt_other)
else:
print("Selected non-dynamic CPU version of lightgbm to import\n")
got_lgb = True
got_cpu_lgb = True
except Exception as e:
expt_other = str(e)
pass
if not got_lgb:
print(
"Unable to dynamically or non-dynamically import either GPU or CPU version of lightgbm: expt_gpu=%s expt_cpu=%s expt_other=%s\n" % (
expt_gpu, expt_cpu, expt_other))
else:
print("Did not find lightgbm or numpy\n")
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
188ee1b65907db67dfd917f80e2a5d76fdb2dca5
| 1,967
|
py
|
Python
|
google-cloud-sdk/lib/surface/resource_manager/folders/undelete.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2017-11-29T18:52:27.000Z
|
2017-11-29T18:52:27.000Z
|
google-cloud-sdk/.install/.backup/lib/surface/resource_manager/folders/undelete.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/.install/.backup/lib/surface/resource_manager/folders/undelete.py
|
KaranToor/MA450
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
[
"Apache-2.0"
] | 1
|
2020-07-25T12:09:01.000Z
|
2020-07-25T12:09:01.000Z
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to undelete a folder."""
| 33.338983
| 74
| 0.744281
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to undelete a folder."""
import textwrap
from googlecloudsdk.api_lib.resource_manager import folders
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.resource_manager import flags
from googlecloudsdk.command_lib.resource_manager import folders_base
from googlecloudsdk.core import log
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Undelete(folders_base.FolderCommand):
"""Undelete a folder.
Undeletes the folder with the given folder ID.
This command can fail for the following reasons:
* There is no folder with the given ID.
* The active account does not have Owner or Editor permissions for the
given folder.
* When the folder to be undeleted has the same display name as an active
folder under this folder's parent.
"""
detailed_help = {
'EXAMPLES': textwrap.dedent("""\
The following command undeletes the folder with the ID
`3589215982`:
$ {command} 3589215982
"""),
}
@staticmethod
def Args(parser):
flags.FolderIdArg('you want to undelete.').AddToParser(parser)
def Run(self, args):
service = folders.FoldersService()
messages = folders.FoldersMessages()
restored = service.Undelete(
messages.CloudresourcemanagerFoldersUndeleteRequest(
foldersId=args.id))
log.RestoredResource(restored)
| 0
| 1,026
| 0
| 0
| 0
| 0
| 0
| 152
| 157
|
5a28b79a46e2fcfa07d776568c13a7328fded066
| 417
|
py
|
Python
|
contract/tests/ownership.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 7
|
2018-02-10T22:57:28.000Z
|
2020-11-20T14:46:18.000Z
|
contract/tests/ownership.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 5
|
2020-10-30T18:43:27.000Z
|
2021-02-04T12:39:30.000Z
|
contract/tests/ownership.py
|
ebloc/eBlocBroker
|
52d507835a0fe3c930df2e2c816724d26a3484a7
|
[
"MIT"
] | 5
|
2017-07-06T14:14:13.000Z
|
2019-02-22T14:40:16.000Z
|
#!/usr/bin/python3
import pytest
from utils import ZERO_ADDRESS
from brownie import accounts
def test_ownership(Ebb):
"""Get Owner"""
assert Ebb.getOwner() == accounts[0]
with pytest.reverts(): # transferOwnership should revert
Ebb.transferOwnership(ZERO_ADDRESS, {"from": accounts[0]})
Ebb.transferOwnership(accounts[1], {"from": accounts[0]})
assert Ebb.getOwner() == accounts[1]
| 23.166667
| 66
| 0.695444
|
#!/usr/bin/python3
import pytest
from utils import ZERO_ADDRESS
from brownie import accounts
def test_ownership(Ebb):
"""Get Owner"""
assert Ebb.getOwner() == accounts[0]
with pytest.reverts(): # transferOwnership should revert
Ebb.transferOwnership(ZERO_ADDRESS, {"from": accounts[0]})
Ebb.transferOwnership(accounts[1], {"from": accounts[0]})
assert Ebb.getOwner() == accounts[1]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d4bf808de2a868ba73315da564d256636fe0b32b
| 2,858
|
py
|
Python
|
gd/api/_property.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
gd/api/_property.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
gd/api/_property.py
|
scottwedge/gd.py
|
328c9833abc949b1c9ac0eabe276bd66fead4c2c
|
[
"MIT"
] | null | null | null |
"""Automatic object property code generator."""
from gd.api.enums import (ColorChannelProperties, LevelDataEnum, LevelHeaderEnum, ObjectDataEnum)
__all__ = ("_template", "_create", "_object_code", "_color_code", "_header_code", "_level_code")
_template = """
@property
def {name}(self):
\"\"\":class:`{cls}`: Property ({desc}).\"\"\"
return self.data.get({enum!r})
@{name}.setter
def {name}(self, value):
self.data[{enum!r}] = value
@{name}.deleter
def {name}(self):
try:
del self.data[{enum!r}]
except KeyError:
pass
""".strip()
_container = "_container = {}"
_object_code = _create(ObjectDataEnum, "object")
_color_code = _create(ColorChannelProperties, "color")
_header_code = _create(LevelHeaderEnum, "header")
_level_code = _create(LevelDataEnum, "level")
| 23.816667
| 96
| 0.573828
|
"""Automatic object property code generator."""
from gd.typing import Enum, Union
from gd.api.enums import (
ColorChannelProperties,
LevelDataEnum,
LevelHeaderEnum,
ObjectDataEnum,
PlayerColor,
)
from gd.api.parser import ( # type: ignore
_INT,
_BOOL,
_FLOAT,
_HSV,
_ENUMS,
_TEXT,
_GROUPS,
_COLOR_INT,
_COLOR_BOOL,
_COLOR_PLAYER,
_COLOR_FLOAT,
_COLOR_HSV,
_HEADER_INT,
_HEADER_BOOL,
_HEADER_FLOAT,
_HEADER_COLORS,
_COLORS,
_GUIDELINES,
_HEADER_ENUMS,
)
from gd.api.hsv import HSV
__all__ = ("_template", "_create", "_object_code", "_color_code", "_header_code", "_level_code")
_template = """
@property
def {name}(self):
\"\"\":class:`{cls}`: Property ({desc}).\"\"\"
return self.data.get({enum!r})
@{name}.setter
def {name}(self, value):
self.data[{enum!r}] = value
@{name}.deleter
def {name}(self):
try:
del self.data[{enum!r}]
except KeyError:
pass
""".strip()
_container = "_container = {}"
def _get_type(n: Union[int, str], ts: str = "object") -> str:
t = {
"object": {
n in _INT: int,
n in _BOOL: bool,
n in _FLOAT: float,
n in _HSV: HSV,
n in _ENUMS: _ENUMS.get(n),
n == _TEXT: str,
n == _GROUPS: set,
},
"color": {
n in _COLOR_INT: int,
n in _COLOR_BOOL: bool,
n == _COLOR_PLAYER: PlayerColor,
n == _COLOR_FLOAT: float,
n == _COLOR_HSV: HSV,
},
"header": {
n in _HEADER_INT: int,
n in _HEADER_BOOL: bool,
n == _HEADER_FLOAT: float,
n in _HEADER_COLORS: "ColorChannel",
n == _COLORS: list,
n == _GUIDELINES: list,
n in _HEADER_ENUMS: _HEADER_ENUMS.get(n),
},
"level": {True: "soon"}, # yikes!
}
r = t.get(ts, {}).get(1, str)
try:
return r.__name__
except AttributeError:
return r
def _create(enum: Enum, ts: str) -> str:
final = []
for name, value in enum.as_dict().items():
desc = enum(value).desc
value = str(value)
cls = _get_type(value, ts=ts)
final.append(_template.format(name=name, enum=value, desc=desc, cls=cls))
property_container = {}
for name, value in enum.as_dict().items():
value = str(value) # we are going with str from now on
if value not in property_container:
property_container[value] = name
final.append(_container.format(property_container))
return ("\n\n").join(final)
_object_code = _create(ObjectDataEnum, "object")
_color_code = _create(ColorChannelProperties, "color")
_header_code = _create(LevelHeaderEnum, "header")
_level_code = _create(LevelDataEnum, "level")
| 0
| 0
| 0
| 0
| 0
| 1,575
| 0
| 365
| 113
|
ffc6fc0c01a161fba017b7f74580eecc40db4a94
| 286
|
py
|
Python
|
test.py
|
picturate/picturate
|
9f8e69fef7b600b6d8c1ade41a0ccfc382992e8b
|
[
"Apache-2.0"
] | 4
|
2020-08-03T04:16:53.000Z
|
2020-11-02T20:11:16.000Z
|
test.py
|
picturate/picturate
|
9f8e69fef7b600b6d8c1ade41a0ccfc382992e8b
|
[
"Apache-2.0"
] | 6
|
2020-09-04T12:36:08.000Z
|
2021-06-18T04:31:29.000Z
|
test.py
|
picturate/picturate
|
9f8e69fef7b600b6d8c1ade41a0ccfc382992e8b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T07:29:46.000Z
|
2020-07-24T07:29:46.000Z
|
from picturate.config import CAttnGANConfig
from picturate.nets import CAttnGAN
config = CAttnGANConfig('bird')
gan = CAttnGAN(config, pretrained=True)
caption = "This little bird is blue with short beak and white underbelly"
filename = 'bird'
gan.generate_image(caption, filename)
| 23.833333
| 73
| 0.793706
|
from picturate.config import CAttnGANConfig
from picturate.nets import CAttnGAN
config = CAttnGANConfig('bird')
gan = CAttnGAN(config, pretrained=True)
caption = "This little bird is blue with short beak and white underbelly"
filename = 'bird'
gan.generate_image(caption, filename)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
409729662516480907dfc439cb222223768f41e8
| 14,838
|
py
|
Python
|
tests/unit/test_maxmin.py
|
mzelling/syndata
|
bba1c4a7b142f1da332d6613baae30b8b97c4e9b
|
[
"MIT"
] | null | null | null |
tests/unit/test_maxmin.py
|
mzelling/syndata
|
bba1c4a7b142f1da332d6613baae30b8b97c4e9b
|
[
"MIT"
] | null | null | null |
tests/unit/test_maxmin.py
|
mzelling/syndata
|
bba1c4a7b142f1da332d6613baae30b8b97c4e9b
|
[
"MIT"
] | null | null | null |
import pytest
import numpy as np
from syndata.maxmin import MaxMinClusters, MaxMinCov, MaxMinBal, maxmin_sampler
# Test Cases for maxmin_sampler
def test_maxmin_sampler():
"""
Make sure the sampling mechanism doesn't break when wrong inputs
are supplied.
"""
# Test cases throwing exceptions
args_causing_exception = [ # negative vals
{'n_samples': 10, 'ref': -2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': -1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': -1.5},
# zeros vals
{'n_samples': 0, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 0, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 0, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0},
# ref < min
{'n_samples': 10, 'ref': 1, 'min_val': 2, 'maxmin_ratio': 1.5},
# ref > max
{'n_samples': 10, 'ref': 10, 'min_val': 1, 'maxmin_ratio': 1.5},
# maxmin_ratio < 1
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0.7},
# maxmin_ratio = 1, ref != min_val
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1},
]
with pytest.raises(ValueError):
for args in args_causing_exception:
args['f_constrain'] = lambda x: 2*args['ref'] - x
maxmin_sampler(**args)
# Test cases with appropriate inputs (randomized)
args_appropriate_input = []
max_ref_val = 10; max_min_val = 10
for i in range(100):
min_val = np.random.default_rng(seed=i).uniform(0,max_min_val)
ref = np.random.uniform(min_val, max_ref_val)
maxmin_ratio = np.random.uniform(ref/min_val, 10*(ref/min_val))
args_appropriate_input.append(
{
# Do the first 10 tests on the edge case n_samples=1
'n_samples': np.random.choice(np.arange(2,15)) if i>10 else 1,
'min_val': min_val,
'ref': ref,
'maxmin_ratio': maxmin_ratio,
}
)
print('making the args', 'ref', ref, 'min_val', min_val, 'max_val', min_val*maxmin_ratio)
# Add test case with large sample size
args_appropriate_input.append({'n_samples': 10000, 'ref': 2, \
'min_val': 1, 'maxmin_ratio': 3})
for args in args_appropriate_input:
args['f_constrain'] = lambda x: 2*args['ref'] - x
out = maxmin_sampler(**args)
print(out)
assert check_maxmin_sampler_output(out,
args['f_constrain'])
def check_maxmin_sampler_output(sampled_vals, f_constrain):
"""
Check that output satisfies lower and upper bounds.
Check min, max values are related through the constraint.
Check that output is sorted.
"""
return is_sorted(sampled_vals, order='ascending') \
and (f_constrain(np.max(sampled_vals)) == np.min(sampled_vals)) \
and (f_constrain(np.min(sampled_vals)) == np.max(sampled_vals))
def is_sorted(vals, order='ascending'):
"""
Check if values are sorted.
"""
if order=='ascending':
return np.all(vals[1:] - vals[:-1] >= 0)
elif order=='descending':
return np.all(vals[1:] - vals[:-1] <= 0)
# Test Cases for MaxMinCov
def test_init_maxmincov():
"""
Make sure that no illicit values can be used to construct MaxMinCov.
"""
# appropriate values of attributes
interior_cases = np.random.uniform(1,10,size=(100,3)) # random appropriate values
edge_cases = np.concatenate([2-np.eye(3),np.ones(3)[np.newaxis,:]],axis=0) # edge and corner cases
Z_appropriate = np.concatenate([interior_cases,edge_cases],axis=0)
args_appropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_appropriate]
for args in args_appropriate:
my_maxmincov = MaxMinCov(**args)
for attr in ['ref_aspect','aspect_maxmin','radius_maxmin']:
assert hasattr(my_maxmincov, attr)
# inappropriate values of attributes
Z_inappropriate = np.concatenate([np.ones(3) - 0.5*np.eye(3), (1-0.01)*np.ones(3)[np.newaxis,:]])
args_inappropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_inappropriate]
with pytest.raises(ValueError):
for args in args_inappropriate:
MaxMinCov(**args)
def test_make_cluster_aspects(setup_maxmincov):
"""
Make sure that valid cluster aspect ratios are sampled.
Test the range of acceptable numbers of clusters, and
make sure setting a seed works.
"""
maxmincov = setup_maxmincov
with pytest.raises(ValueError):
maxmincov.make_cluster_aspects(0,seed=None)
maxmincov.make_cluster_aspects(0.99,seed=None)
# test different numbers of clusters
for n_clusters in range(1,100):
cluster_aspects = maxmincov.make_cluster_aspects(n_clusters,seed=None)
assert np.all(cluster_aspects >= 1)
assert np.max(cluster_aspects) >= maxmincov.ref_aspect
assert np.min(cluster_aspects) <= maxmincov.ref_aspect
# test seed
seed = 23
for i in range(10):
cluster_aspects_new = maxmincov.make_cluster_aspects(2,seed=23)
# make sure that each successive output is the same as the previous output
if i >= 1:
assert np.all(cluster_aspects_new == cluster_aspects_prev)
cluster_aspects_prev = cluster_aspects_new
def test_make_cluster_radii(setup_maxmincov):
"""
Make sure valid cluster radii are sampled.
Test the range of acceptable inputs, and make sure setting a seed works.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(1,20+1)[:,np.newaxis],
np.random.uniform(0,10,size=20)[:,np.newaxis],
np.random.choice(np.arange(2,100),size=20)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,1e-3,2], [1,1e-3,1],[2,100,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_clusters': z[0], 'ref_radius': z[1], 'n_dim': z[2]} for z in Z_appropriate]
for args in args_appropriate:
tol = 1e-12
print(args)
cluster_radii = maxmincov.make_cluster_radii(**args)
print(cluster_radii)
assert np.all(cluster_radii > 0)
assert (np.min(cluster_radii) <= args['ref_radius'] + tol) and \
(np.max(cluster_radii) >= args['ref_radius'] - tol)
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_cluster_radii(n_clusters=0, ref_radius=1, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=0, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=1, n_dim=0)
# test seeds
seed = 717
for i in range(10):
cluster_radii_new = maxmincov.make_cluster_radii(n_clusters=5,ref_radius=4,n_dim=25, seed=seed)
if (i >= 1):
assert np.all(cluster_radii_new == cluster_radii_prev)
cluster_radii_prev = cluster_radii_new
def test_make_axis_sd(setup_maxmincov):
"""
Make sure valid standard deviations are sampled (>0).
Ensure ref_sd is between min and max, and that the maxmin ratio
equals the desired aspect ratio.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(2,50+2)[:,np.newaxis],
np.random.uniform(0,10,size=50)[:,np.newaxis],
np.random.uniform(1,10,size=50)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,0.5,1.5], [1,0.5,1], [2,0.1,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_axes': z[0], 'sd': z[1], 'aspect': z[2]} for z in Z_appropriate]
for args in args_appropriate:
out = maxmincov.make_axis_sd(**args)
assert (np.min(out) <= args['sd']) and (np.max(out) >= args['sd'])
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_axis_sd(n_axes=0, sd=1, aspect=2)
maxmincov.make_axis_sd(n_axes=0.5, sd=0, aspect=2)
maxmincov.make_axis_sd(n_axes=1, sd=1, aspect=0.5)
maxmincov.make_axis_sd(n_axes=2, sd=1, aspect=-2)
maxmincov.make_axis_sd(n_axes=2, sd=-1, aspect=2)
# test seed
seed = 123
for i in range(10):
axis_sd_new = maxmincov.make_axis_sd(n_axes=5,sd=4,aspect=25, seed=seed)
if (i >= 1):
assert np.all(axis_sd_new == axis_sd_prev)
axis_sd_prev = axis_sd_new
def test_make_cov(setup_maxmincov, setup_clusterdata):
"""
Make sure axes are orthogonal
Make sure cov = axis * sd**2 * axis', similar for cov_inv
"""
clusterdata = setup_clusterdata
maxmincov = setup_maxmincov
# ensure output makes mathematical sense
for i in range(10):
(axis, sd, cov, cov_inv) = maxmincov.make_cov(clusterdata)
for cluster_idx in range(clusterdata.n_clusters):
# test orthogonality of cluster axes
assert np.all(np.allclose(axis[cluster_idx] @ np.transpose(axis[cluster_idx]),
np.eye(axis[cluster_idx].shape[0])))
# test covariance matrix is correct
assert np.all(np.allclose(cov[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**2) \
@ axis[cluster_idx]))
# test inverse covariance matrix is correct
assert np.all(np.allclose(cov_inv[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**(-2)) \
@ axis[cluster_idx]))
# test seed
seed = 123
for i in range(10):
cov_structure_new = maxmincov.make_cov(clusterdata, seed=seed)
if (i >= 1):
for cluster_idx in range(clusterdata.n_clusters):
for j in range(4): # iterate through axis, sd, cov, cov_inv
assert np.all(np.allclose(cov_structure_prev[j][cluster_idx],
cov_structure_new[j][cluster_idx]))
# set previous covariance structure for next iteration:
cov_structure_prev = cov_structure_new
# Test Cases for MaxMinBal
def test_init_maxminbal(setup_maxminbal):
"""
Ensure imbalance ratio is properly specified.
"""
maxminbal = setup_maxminbal
assert maxminbal.imbal_ratio >= 1
# test input check for inappropriate arguments
with pytest.raises(ValueError):
MaxMinBal(imbal_ratio = 0.5)
MaxMinBal(imbal_ratio = -2)
def test_make_class_sizes(setup_maxminbal,setup_clusterdata):
"""
"""
maxminbal = setup_maxminbal
clusterdata = setup_clusterdata
# test with appropriate input
Z_appropriate = [[500,5],[200,1],[100,2],[1000,10],[1500,3], [100,100]]
args_appropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_appropriate]
for args in args_appropriate:
clusterdata.n_samples = args['n_samples']
clusterdata.n_clusters = args['n_clusters']
out = maxminbal.make_class_sizes(clusterdata)
assert np.issubdtype(out.dtype, np.integer) and np.all(out >= 1) and \
(np.sum(out) == args['n_samples'])
# test with inappropriate input
Z_inappropriate = [[500,0],[0,10],[100,-1],[-0.5,5],[10,11]]
args_inappropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_inappropriate]
for args in args_inappropriate:
with pytest.raises(ValueError):
clusterdata.n_clusters = args['n_clusters']
clusterdata.n_samples = args['n_samples']
maxminbal.make_class_sizes(clusterdata)
def test_float_to_int(setup_maxminbal):
"""
Make sure float class sizes are converted to positive integers that sum to n_samples.
"""
maxminbal = setup_maxminbal
# test appropriate inputs
for float_class_sz, n_samples in [(np.array([23.2, 254.7, 0.1, 35.6]), 100), \
(np.array([0.2, 0.7, 0.1, 0.5]), 10),
(np.array([2.5,1.5,5.2]), 3),
(np.array([0.5]), 1)]:
out = maxminbal.float_to_int(float_class_sz,n_samples)
print(len(float_class_sz), float_class_sz, n_samples)
assert (np.sum(out) == n_samples) and (np.all(out >= 1)) \
and np.issubdtype(out.dtype,np.integer)
# test inputs that should be left unchanged
assert np.all(maxminbal.float_to_int(np.array([5,10,25,7]), 5+10+25+7) \
== np.sort(np.array([5,10,25,7])))
# test inappropriate inputs
for float_class_sz, n_samples in [(np.array([0.5,1.5]), 1),
(np.array([0.5,1.5]), 0),
(np.array([2.5,1.5,5.2]), 2)]:
with pytest.raises(ValueError):
maxminbal.float_to_int(float_class_sz,n_samples)
# Test Cases for MaxMinClusters
def test_init_maxminclusters():
"""
Make sure to throw an error when inappropriate arguments are given.
"""
# edge and interior test cases for n_clusters, n_samples, n_dim
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=1)
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=100,n_dim=2)
MaxMinClusters(n_clusters=10,n_samples=200,n_dim=5)
# edge and interior test cases for testing maxmin ratios
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1,radius_maxmin=1, aspect_ref=1)
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1.1,radius_maxmin=1.1,aspect_ref=1.5)
MaxMinClusters(imbal_maxmin=1.2,aspect_maxmin=1,radius_maxmin=1.5,aspect_ref=7)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=1,aspect_ref=5)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=1)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=4)
# edge and interior test cases for overlap
MaxMinClusters(alpha_max=0.5, alpha_min=0.01)
MaxMinClusters(alpha_max=0.05, alpha_min=0)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
# testing the distributions
MaxMinClusters(dist='exp')
MaxMinClusters(dist='gaussian')
MaxMinClusters(dist='t')
# testing packing and scale
MaxMinClusters(packing=0.5)
MaxMinClusters(packing=0.01)
MaxMinClusters(packing=0.99)
MaxMinClusters(scale=0.01)
MaxMinClusters(scale=0.05)
MaxMinClusters(scale=5)
MaxMinClusters(scale=10)
with pytest.raises(ValueError):
# must have n_dim, n_clusters, n_samples >= 1
# and n_clusters <= n_samples
MaxMinClusters(n_clusters=10,n_samples=100,n_dim=0)
MaxMinClusters(n_clusters=10,n_samples=9,n_dim=10)
MaxMinClusters(n_clusters=0,n_samples=100,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
# maxmin_ratios must be >= 1
MaxMinClusters(imbal_maxmin=0.98)
MaxMinClusters(imbal_maxmin=-1.1)
MaxMinClusters(aspect_maxmin=0.35)
MaxMinClusters(aspect_maxmin=-1.5)
MaxMinClusters(radius_maxmin=0.21)
MaxMinClusters(radius_maxmin=-1)
MaxMinClusters(aspect_ref=0.99)
MaxMinClusters(aspect_ref=-2)
# must have alpha_max > 0, alpha_min >= 0, alpha_max > alpha_min
MaxMinClusters(alpha_max=0, alpha_min=0)
MaxMinClusters(alpha_max=0.05, alpha_min=0.1)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
MaxMinClusters(alpha_max=0.025, alpha_min=-1.0)
MaxMinClusters(alpha_max=-0.5, alpha_min=0.05)
# packing must be strictly between 0 and 1, scale must be >0
MaxMinClusters(packing=0)
MaxMinClusters(packing=1)
MaxMinClusters(scale=0)
MaxMinClusters(scale=-0.5)
# currently only support dist in {'gaussian','exp','t'}
MaxMinClusters(dist='foo')
MaxMinClusters(dist='bar')
| 33.722727
| 100
| 0.709934
|
import pytest
import numpy as np
from syndata.core import ClusterData
from syndata.maxmin import MaxMinClusters, MaxMinCov, MaxMinBal, maxmin_sampler
# Test Cases for maxmin_sampler
def test_maxmin_sampler():
"""
Make sure the sampling mechanism doesn't break when wrong inputs
are supplied.
"""
# Test cases throwing exceptions
args_causing_exception = [ # negative vals
{'n_samples': 10, 'ref': -2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': -1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': -1.5},
# zeros vals
{'n_samples': 0, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 0, 'min_val': 1, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 0, 'maxmin_ratio': 1.5},
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0},
# ref < min
{'n_samples': 10, 'ref': 1, 'min_val': 2, 'maxmin_ratio': 1.5},
# ref > max
{'n_samples': 10, 'ref': 10, 'min_val': 1, 'maxmin_ratio': 1.5},
# maxmin_ratio < 1
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 0.7},
# maxmin_ratio = 1, ref != min_val
{'n_samples': 10, 'ref': 2, 'min_val': 1, 'maxmin_ratio': 1},
]
with pytest.raises(ValueError):
for args in args_causing_exception:
args['f_constrain'] = lambda x: 2*args['ref'] - x
maxmin_sampler(**args)
# Test cases with appropriate inputs (randomized)
args_appropriate_input = []
max_ref_val = 10; max_min_val = 10
for i in range(100):
min_val = np.random.default_rng(seed=i).uniform(0,max_min_val)
ref = np.random.uniform(min_val, max_ref_val)
maxmin_ratio = np.random.uniform(ref/min_val, 10*(ref/min_val))
args_appropriate_input.append(
{
# Do the first 10 tests on the edge case n_samples=1
'n_samples': np.random.choice(np.arange(2,15)) if i>10 else 1,
'min_val': min_val,
'ref': ref,
'maxmin_ratio': maxmin_ratio,
}
)
print('making the args', 'ref', ref, 'min_val', min_val, 'max_val', min_val*maxmin_ratio)
# Add test case with large sample size
args_appropriate_input.append({'n_samples': 10000, 'ref': 2, \
'min_val': 1, 'maxmin_ratio': 3})
for args in args_appropriate_input:
args['f_constrain'] = lambda x: 2*args['ref'] - x
out = maxmin_sampler(**args)
print(out)
assert check_maxmin_sampler_output(out,
args['f_constrain'])
def check_maxmin_sampler_output(sampled_vals, f_constrain):
"""
Check that output satisfies lower and upper bounds.
Check min, max values are related through the constraint.
Check that output is sorted.
"""
return is_sorted(sampled_vals, order='ascending') \
			and np.isclose(f_constrain(np.max(sampled_vals)), np.min(sampled_vals)) \
			and np.isclose(f_constrain(np.min(sampled_vals)), np.max(sampled_vals))
def is_sorted(vals, order='ascending'):
"""
Check if values are sorted.
"""
if order=='ascending':
return np.all(vals[1:] - vals[:-1] >= 0)
elif order=='descending':
return np.all(vals[1:] - vals[:-1] <= 0)
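# Hedged, self-contained illustration (not part of the original test module):
# a direct call to maxmin_sampler with the same kind of arguments the tests
# construct above, using the symmetric constraint f(x) = 2*ref - x. The
# argument values are arbitrary; the __main__ guard keeps pytest from running it.
if __name__ == "__main__":
	_vals = maxmin_sampler(n_samples=5, ref=2.0, min_val=1.0, maxmin_ratio=3.0, f_constrain=lambda x: 4.0 - x)
	print(_vals, check_maxmin_sampler_output(_vals, lambda x: 4.0 - x))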
# Test Cases for MaxMinCov
def test_init_maxmincov():
"""
Make sure that no illicit values can be used to construct MaxMinCov.
"""
# appropriate values of attributes
interior_cases = np.random.uniform(1,10,size=(100,3)) # random appropriate values
edge_cases = np.concatenate([2-np.eye(3),np.ones(3)[np.newaxis,:]],axis=0) # edge and corner cases
Z_appropriate = np.concatenate([interior_cases,edge_cases],axis=0)
args_appropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_appropriate]
for args in args_appropriate:
my_maxmincov = MaxMinCov(**args)
for attr in ['ref_aspect','aspect_maxmin','radius_maxmin']:
assert hasattr(my_maxmincov, attr)
# inappropriate values of attributes
Z_inappropriate = np.concatenate([np.ones(3) - 0.5*np.eye(3), (1-0.01)*np.ones(3)[np.newaxis,:]])
args_inappropriate = [{'ref_aspect': z[0], 'aspect_maxmin': z[1],
'radius_maxmin': z[2]} for z in Z_inappropriate]
with pytest.raises(ValueError):
for args in args_inappropriate:
MaxMinCov(**args)
@pytest.fixture()
def setup_maxmincov():
"""
Initialize a valid MaxMinCov instance to test its methods.
"""
maxmincov = MaxMinCov(ref_aspect=1.5,
aspect_maxmin=1.5,
radius_maxmin=1.5)
yield maxmincov
def test_make_cluster_aspects(setup_maxmincov):
"""
Make sure that valid cluster aspect ratios are sampled.
Test the range of acceptable numbers of clusters, and
make sure setting a seed works.
"""
maxmincov = setup_maxmincov
with pytest.raises(ValueError):
maxmincov.make_cluster_aspects(0,seed=None)
maxmincov.make_cluster_aspects(0.99,seed=None)
# test different numbers of clusters
for n_clusters in range(1,100):
cluster_aspects = maxmincov.make_cluster_aspects(n_clusters,seed=None)
assert np.all(cluster_aspects >= 1)
assert np.max(cluster_aspects) >= maxmincov.ref_aspect
assert np.min(cluster_aspects) <= maxmincov.ref_aspect
# test seed
seed = 23
for i in range(10):
cluster_aspects_new = maxmincov.make_cluster_aspects(2,seed=23)
# make sure that each successive output is the same as the previous output
if i >= 1:
assert np.all(cluster_aspects_new == cluster_aspects_prev)
cluster_aspects_prev = cluster_aspects_new
def test_make_cluster_radii(setup_maxmincov):
"""
Make sure valid cluster radii are sampled.
Test the range of acceptable inputs, and make sure setting a seed works.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(1,20+1)[:,np.newaxis],
np.random.uniform(0,10,size=20)[:,np.newaxis],
np.random.choice(np.arange(2,100),size=20)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,1e-3,2], [1,1e-3,1],[2,100,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_clusters': z[0], 'ref_radius': z[1], 'n_dim': z[2]} for z in Z_appropriate]
for args in args_appropriate:
tol = 1e-12
print(args)
cluster_radii = maxmincov.make_cluster_radii(**args)
print(cluster_radii)
assert np.all(cluster_radii > 0)
assert (np.min(cluster_radii) <= args['ref_radius'] + tol) and \
(np.max(cluster_radii) >= args['ref_radius'] - tol)
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_cluster_radii(n_clusters=0, ref_radius=1, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=0, n_dim=10)
maxmincov.make_cluster_radii(n_clusters=1, ref_radius=1, n_dim=0)
# test seeds
seed = 717
for i in range(10):
cluster_radii_new = maxmincov.make_cluster_radii(n_clusters=5,ref_radius=4,n_dim=25, seed=seed)
if (i >= 1):
assert np.all(cluster_radii_new == cluster_radii_prev)
cluster_radii_prev = cluster_radii_new
def test_make_axis_sd(setup_maxmincov):
"""
Make sure valid standard deviations are sampled (>0).
	Ensure ref_sd is between min and max, and that the maxmin ratio
equals the desired aspect ratio.
"""
maxmincov = setup_maxmincov
# test appropriate inputs
interior_cases = np.concatenate([np.arange(2,50+2)[:,np.newaxis],
np.random.uniform(0,10,size=50)[:,np.newaxis],
np.random.uniform(1,10,size=50)[:,np.newaxis]],
axis=1)
edge_cases = np.array([[1,0.5,1.5], [1,0.5,1], [2,0.1,1]])
Z_appropriate = np.concatenate([interior_cases, edge_cases],axis=0)
args_appropriate = [{'n_axes': z[0], 'sd': z[1], 'aspect': z[2]} for z in Z_appropriate]
for args in args_appropriate:
out = maxmincov.make_axis_sd(**args)
assert (np.min(out) <= args['sd']) and (np.max(out) >= args['sd'])
# test inappropriate inputs
with pytest.raises(ValueError):
maxmincov.make_axis_sd(n_axes=0, sd=1, aspect=2)
maxmincov.make_axis_sd(n_axes=0.5, sd=0, aspect=2)
maxmincov.make_axis_sd(n_axes=1, sd=1, aspect=0.5)
maxmincov.make_axis_sd(n_axes=2, sd=1, aspect=-2)
maxmincov.make_axis_sd(n_axes=2, sd=-1, aspect=2)
# test seed
seed = 123
for i in range(10):
axis_sd_new = maxmincov.make_axis_sd(n_axes=5,sd=4,aspect=25, seed=seed)
if (i >= 1):
assert np.all(axis_sd_new == axis_sd_prev)
axis_sd_prev = axis_sd_new
def test_make_cov(setup_maxmincov, setup_clusterdata):
"""
Make sure axes are orthogonal
Make sure cov = axis * sd**2 * axis', similar for cov_inv
"""
clusterdata = setup_clusterdata
maxmincov = setup_maxmincov
# ensure output makes mathematical sense
for i in range(10):
(axis, sd, cov, cov_inv) = maxmincov.make_cov(clusterdata)
for cluster_idx in range(clusterdata.n_clusters):
# test orthogonality of cluster axes
assert np.all(np.allclose(axis[cluster_idx] @ np.transpose(axis[cluster_idx]),
np.eye(axis[cluster_idx].shape[0])))
# test covariance matrix is correct
assert np.all(np.allclose(cov[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**2) \
@ axis[cluster_idx]))
# test inverse covariance matrix is correct
assert np.all(np.allclose(cov_inv[cluster_idx],
np.transpose(axis[cluster_idx]) @ np.diag(sd[cluster_idx]**(-2)) \
@ axis[cluster_idx]))
# test seed
seed = 123
for i in range(10):
cov_structure_new = maxmincov.make_cov(clusterdata, seed=seed)
if (i >= 1):
for cluster_idx in range(clusterdata.n_clusters):
for j in range(4): # iterate through axis, sd, cov, cov_inv
assert np.all(np.allclose(cov_structure_prev[j][cluster_idx],
cov_structure_new[j][cluster_idx]))
# set previous covariance structure for next iteration:
cov_structure_prev = cov_structure_new
# Test Cases for MaxMinBal
@pytest.fixture(params = np.linspace(1,10,10))
def setup_maxminbal(request):
return MaxMinBal(request.param)
def test_init_maxminbal(setup_maxminbal):
"""
Ensure imbalance ratio is properly specified.
"""
maxminbal = setup_maxminbal
assert maxminbal.imbal_ratio >= 1
# test input check for inappropriate arguments
with pytest.raises(ValueError):
MaxMinBal(imbal_ratio = 0.5)
MaxMinBal(imbal_ratio = -2)
def test_make_class_sizes(setup_maxminbal,setup_clusterdata):
"""
"""
maxminbal = setup_maxminbal
clusterdata = setup_clusterdata
# test with appropriate input
Z_appropriate = [[500,5],[200,1],[100,2],[1000,10],[1500,3], [100,100]]
args_appropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_appropriate]
for args in args_appropriate:
clusterdata.n_samples = args['n_samples']
clusterdata.n_clusters = args['n_clusters']
out = maxminbal.make_class_sizes(clusterdata)
assert np.issubdtype(out.dtype, np.integer) and np.all(out >= 1) and \
(np.sum(out) == args['n_samples'])
# test with inappropriate input
Z_inappropriate = [[500,0],[0,10],[100,-1],[-0.5,5],[10,11]]
args_inappropriate = [{'n_samples': z[0], 'n_clusters': z[1]} for z in Z_inappropriate]
for args in args_inappropriate:
with pytest.raises(ValueError):
clusterdata.n_clusters = args['n_clusters']
clusterdata.n_samples = args['n_samples']
maxminbal.make_class_sizes(clusterdata)
def test_float_to_int(setup_maxminbal):
"""
	Make sure float class sizes are converted to integer class sizes >= 1 that sum to n_samples.
"""
maxminbal = setup_maxminbal
# test appropriate inputs
for float_class_sz, n_samples in [(np.array([23.2, 254.7, 0.1, 35.6]), 100), \
(np.array([0.2, 0.7, 0.1, 0.5]), 10),
(np.array([2.5,1.5,5.2]), 3),
(np.array([0.5]), 1)]:
out = maxminbal.float_to_int(float_class_sz,n_samples)
print(len(float_class_sz), float_class_sz, n_samples)
assert (np.sum(out) == n_samples) and (np.all(out >= 1)) \
and np.issubdtype(out.dtype,np.integer)
# test inputs that should be left unchanged
assert np.all(maxminbal.float_to_int(np.array([5,10,25,7]), 5+10+25+7) \
== np.sort(np.array([5,10,25,7])))
# test inappropriate inputs
for float_class_sz, n_samples in [(np.array([0.5,1.5]), 1),
(np.array([0.5,1.5]), 0),
(np.array([2.5,1.5,5.2]), 2)]:
with pytest.raises(ValueError):
maxminbal.float_to_int(float_class_sz,n_samples)
# Test Cases for MaxMinClusters
def test_init_maxminclusters():
"""
Make sure to throw an error when inappropriate arguments are given.
"""
# edge and interior test cases for n_clusters, n_samples, n_dim
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=1)
MaxMinClusters(n_clusters=1,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=100,n_dim=2)
MaxMinClusters(n_clusters=10,n_samples=200,n_dim=5)
# edge and interior test cases for testing maxmin ratios
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1,radius_maxmin=1, aspect_ref=1)
MaxMinClusters(imbal_maxmin=1,aspect_maxmin=1.1,radius_maxmin=1.1,aspect_ref=1.5)
MaxMinClusters(imbal_maxmin=1.2,aspect_maxmin=1,radius_maxmin=1.5,aspect_ref=7)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=1,aspect_ref=5)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=1)
MaxMinClusters(imbal_maxmin=3,aspect_maxmin=2,radius_maxmin=5,aspect_ref=4)
# edge and interior test cases for overlap
MaxMinClusters(alpha_max=0.5, alpha_min=0.01)
MaxMinClusters(alpha_max=0.05, alpha_min=0)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
# testing the distributions
MaxMinClusters(dist='exp')
MaxMinClusters(dist='gaussian')
MaxMinClusters(dist='t')
# testing packing and scale
MaxMinClusters(packing=0.5)
MaxMinClusters(packing=0.01)
MaxMinClusters(packing=0.99)
MaxMinClusters(scale=0.01)
MaxMinClusters(scale=0.05)
MaxMinClusters(scale=5)
MaxMinClusters(scale=10)
with pytest.raises(ValueError):
# must have n_dim, n_clusters, n_samples >= 1
# and n_clusters <= n_samples
MaxMinClusters(n_clusters=10,n_samples=100,n_dim=0)
MaxMinClusters(n_clusters=10,n_samples=9,n_dim=10)
MaxMinClusters(n_clusters=0,n_samples=100,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
MaxMinClusters(n_clusters=2,n_samples=1,n_dim=10)
# maxmin_ratios must be >= 1
MaxMinClusters(imbal_maxmin=0.98)
MaxMinClusters(imbal_maxmin=-1.1)
MaxMinClusters(aspect_maxmin=0.35)
MaxMinClusters(aspect_maxmin=-1.5)
MaxMinClusters(radius_maxmin=0.21)
MaxMinClusters(radius_maxmin=-1)
MaxMinClusters(aspect_ref=0.99)
MaxMinClusters(aspect_ref=-2)
# must have alpha_max > 0, alpha_min >= 0, alpha_max > alpha_min
MaxMinClusters(alpha_max=0, alpha_min=0)
MaxMinClusters(alpha_max=0.05, alpha_min=0.1)
MaxMinClusters(alpha_max=0.1, alpha_min=0.0001)
MaxMinClusters(alpha_max=0.025, alpha_min=-1.0)
MaxMinClusters(alpha_max=-0.5, alpha_min=0.05)
# packing must be strictly between 0 and 1, scale must be >0
MaxMinClusters(packing=0)
MaxMinClusters(packing=1)
MaxMinClusters(scale=0)
MaxMinClusters(scale=-0.5)
# currently only support dist in {'gaussian','exp','t'}
MaxMinClusters(dist='foo')
MaxMinClusters(dist='bar')
| 0
| 287
| 0
| 0
| 0
| 0
| 0
| 15
| 69
|
f547cb46376f6cd48fe72244973add9c82d457c0
| 122
|
py
|
Python
|
configs/scheduler_cfgs/multi_step_lr_cfg.py
|
slothfulxtx/TransLoc3D
|
0ac324b1dcec456c76d7db2f87d13c076f2d55e4
|
[
"MIT"
] | 5
|
2021-09-30T08:12:26.000Z
|
2022-01-19T16:20:10.000Z
|
configs/scheduler_cfgs/multi_step_lr_cfg.py
|
slothfulxtx/TransLoc3D
|
0ac324b1dcec456c76d7db2f87d13c076f2d55e4
|
[
"MIT"
] | null | null | null |
configs/scheduler_cfgs/multi_step_lr_cfg.py
|
slothfulxtx/TransLoc3D
|
0ac324b1dcec456c76d7db2f87d13c076f2d55e4
|
[
"MIT"
] | null | null | null |
scheduler_type = 'MultiStepLR'
scheduler_cfg = dict(
gamma=0.5,
milestones=(50, 100, 150, 200)
)
end_epoch = 250
| 15.25
| 34
| 0.672131
|
scheduler_type = 'MultiStepLR'
scheduler_cfg = dict(
gamma=0.5,
milestones=(50, 100, 150, 200)
)
end_epoch = 250
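# Hedged illustration (comment only, not part of the original config): with a
# standard MultiStepLR scheduler and gamma = 0.5, a base learning rate lr0
# decays stepwise at each milestone epoch:
#   [0, 50) -> lr0, [50, 100) -> 0.5*lr0, [100, 150) -> 0.25*lr0,
#   [150, 200) -> 0.125*lr0, [200, 250] -> 0.0625*lr0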
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d9f7438220a4ebe74beaea888af37f17f5bfb665
| 721
|
py
|
Python
|
levenshtein_distance.py
|
int2str/catbot
|
d6279845eb51eaa9c9e9f2aef2f7a521432d7851
|
[
"MIT"
] | null | null | null |
levenshtein_distance.py
|
int2str/catbot
|
d6279845eb51eaa9c9e9f2aef2f7a521432d7851
|
[
"MIT"
] | null | null | null |
levenshtein_distance.py
|
int2str/catbot
|
d6279845eb51eaa9c9e9f2aef2f7a521432d7851
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 14:54:14 2020
@author: Mei
"""
| 18.487179
| 74
| 0.468793
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 14:54:14 2020
@author: Mei
"""
def memoize(func):
mem = {}
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in mem:
mem[key] = func(*args, **kwargs)
return mem[key]
return memoizer
@memoize
def levenshtein(s, t):
if s == "":
return len(t)
if t == "":
return len(s)
if s[-1] == t[-1]:
cost = 0
else:
cost = 1
res = min([levenshtein(s[:-1], t) + 1, # char is inserted
levenshtein(s, t[:-1]) + 1, # char is deleted
levenshtein(s[:-1], t[:-1]) + cost]) # char is substituted
return res
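# Hedged usage sketch (illustrative, not part of the original catbot module):
# the memoized recursion above reproduces the classic edit-distance results.
if __name__ == "__main__":
    print(levenshtein("kitten", "sitting"))  # 3: two substitutions + one insertion
    print(levenshtein("flaw", "lawn"))       # 2: delete 'f', insert 'n'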
| 0
| 390
| 0
| 0
| 0
| 201
| 0
| 0
| 46
|
b2e9ce95b9c470541c1124a564f290f253410919
| 9,658
|
py
|
Python
|
applications/FluidDynamicsApplication/tests/embedded_reservoir_test.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 2
|
2020-04-30T19:13:08.000Z
|
2021-04-14T19:40:47.000Z
|
applications/FluidDynamicsApplication/tests/embedded_reservoir_test.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 1
|
2020-04-30T19:19:09.000Z
|
2020-05-02T14:22:36.000Z
|
applications/FluidDynamicsApplication/tests/embedded_reservoir_test.py
|
AndreaVoltan/MyKratos7.0
|
e977752722e8ef1b606f25618c4bf8fd04c434cc
|
[
"BSD-4-Clause"
] | 1
|
2020-06-12T08:51:24.000Z
|
2020-06-12T08:51:24.000Z
|
import KratosMultiphysics.kratos_utilities as KratosUtilities
have_external_solvers = KratosUtilities.IsApplicationAvailable("ExternalSolversApplication")
if __name__ == '__main__':
test = EmbeddedReservoirTest()
test.setUp()
test.distance = 0.5
test.slip_level_set = False
test.print_output = False
test.print_reference_values = False
test.work_folder = "EmbeddedReservoirTest"
test.reference_file = "reference_slip_reservoir_2D"
test.settings = "EmbeddedReservoir2DTest_parameters.json"
test.setUpProblem()
test.setUpDistanceField()
test.runTest()
test.tearDown()
test.checkResults()
| 45.130841
| 203
| 0.657693
|
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
import KratosMultiphysics.kratos_utilities as KratosUtilities
have_external_solvers = KratosUtilities.IsApplicationAvailable("ExternalSolversApplication")
import KratosMultiphysics.KratosUnittest as UnitTest
@UnitTest.skipUnless(have_external_solvers,"Missing required application: ExternalSolversApplication")
class EmbeddedReservoirTest(UnitTest.TestCase):
def testEmbeddedReservoir2D(self):
self.distance = 0.5
self.slip_level_set = False
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_reservoir_2D"
self.settings = "EmbeddedReservoir2DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def testEmbeddedReservoir3D(self):
self.distance = 0.5
self.slip_level_set = False
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_reservoir_3D"
self.settings = "EmbeddedReservoir3DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def testEmbeddedSlipReservoir2D(self):
self.distance = 0.5
self.slip_level_set = True
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_slip_reservoir_2D"
self.settings = "EmbeddedReservoir2DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def testEmbeddedSlipReservoir3D(self):
self.distance = 0.5
self.slip_level_set = True
self.work_folder = "EmbeddedReservoirTest"
self.reference_file = "reference_slip_reservoir_3D"
self.settings = "EmbeddedReservoir3DTest_parameters.json"
self.ExecuteEmbeddedReservoirTest()
def ExecuteEmbeddedReservoirTest(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
self.setUp()
self.setUpProblem()
self.setUpDistanceField()
self.runTest()
self.tearDown()
self.checkResults()
def setUp(self):
self.check_tolerance = 1e-6
self.print_output = False
self.print_reference_values = False
def tearDown(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
KratosUtilities.DeleteFileIfExisting(
self.ProjectParameters["solver_settings"]["model_import_settings"]["input_filename"].GetString()+'.time')
def setUpProblem(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
with open(self.settings, 'r') as parameter_file:
self.ProjectParameters = KratosMultiphysics.Parameters(parameter_file.read())
self.model = KratosMultiphysics.Model()
## Solver construction
import python_solvers_wrapper_fluid
self.solver = python_solvers_wrapper_fluid.CreateSolver(self.model, self.ProjectParameters)
## Set the "is_slip" field in the json settings (to avoid duplication it is set to false in all tests)
if self.slip_level_set and self.solver.settings.Has("is_slip"):
self.ProjectParameters["solver_settings"]["is_slip"].SetBool(True)
self.solver.AddVariables()
## Read the model - note that SetBufferSize is done here
self.solver.ImportModelPart()
self.solver.PrepareModelPart()
## Add AddDofs
self.solver.AddDofs()
## Solver initialization
self.solver.Initialize()
## Processes construction
import process_factory
self.list_of_processes = process_factory.KratosProcessFactory(self.model).ConstructListOfProcesses( self.ProjectParameters["processes"]["gravity"] )
self.list_of_processes += process_factory.KratosProcessFactory(self.model).ConstructListOfProcesses( self.ProjectParameters["processes"]["boundary_conditions_process_list"] )
## Processes initialization
for process in self.list_of_processes:
process.ExecuteInitialize()
self.main_model_part = self.model.GetModelPart(self.ProjectParameters["problem_data"]["model_part_name"].GetString())
def setUpDistanceField(self):
# Set the distance function
if (self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 2):
for node in self.main_model_part.Nodes:
distance = node.Y-self.distance
node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, distance)
elif (self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE] == 3):
for node in self.main_model_part.Nodes:
distance = node.Z-self.distance
node.SetSolutionStepValue(KratosMultiphysics.DISTANCE, 0, distance)
# Set the ELEMENTAL_DISTANCES value
n_nodes = len(self.main_model_part.Elements[1].GetNodes())
for element in self.main_model_part.Elements:
elem_dist = KratosMultiphysics.Vector(n_nodes)
elem_nodes = element.GetNodes()
for i_node in range(0,n_nodes):
elem_dist[i_node] = elem_nodes[i_node].GetSolutionStepValue(KratosMultiphysics.DISTANCE)
element.SetValue(KratosMultiphysics.ELEMENTAL_DISTANCES, elem_dist)
def runTest(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
if (self.print_output):
gid_mode = KratosMultiphysics.GiDPostMode.GiD_PostBinary
multifile = KratosMultiphysics.MultiFileFlag.SingleFile
deformed_mesh_flag = KratosMultiphysics.WriteDeformedMeshFlag.WriteUndeformed
write_conditions = KratosMultiphysics.WriteConditionsFlag.WriteElementsOnly
gid_io = KratosMultiphysics.GidIO(self.ProjectParameters["solver_settings"]["model_import_settings"]["input_filename"].GetString(),gid_mode,multifile,deformed_mesh_flag, write_conditions)
mesh_name = 0.0
gid_io.InitializeMesh( mesh_name)
gid_io.WriteMesh( self.main_model_part.GetMesh() )
gid_io.FinalizeMesh()
gid_io.InitializeResults(mesh_name,(self.main_model_part).GetMesh())
end_time = self.ProjectParameters["problem_data"]["end_time"].GetDouble()
time = 0.0
step = 0
for process in self.list_of_processes:
process.ExecuteBeforeSolutionLoop()
while(time <= end_time):
time = self.solver.AdvanceInTime(time)
for process in self.list_of_processes:
process.ExecuteInitializeSolutionStep()
self.solver.InitializeSolutionStep()
self.solver.Predict()
self.solver.SolveSolutionStep()
self.solver.FinalizeSolutionStep()
for process in self.list_of_processes:
process.ExecuteFinalizeSolutionStep()
for process in self.list_of_processes:
process.ExecuteBeforeOutputStep()
if (self.print_output):
gid_io.WriteNodalResults(KratosMultiphysics.VELOCITY,self.main_model_part.Nodes,time,0)
gid_io.WriteNodalResults(KratosMultiphysics.PRESSURE,self.main_model_part.Nodes,time,0)
gid_io.WriteNodalResults(KratosMultiphysics.DISTANCE,self.main_model_part.Nodes,time,0)
for process in self.list_of_processes:
process.ExecuteAfterOutputStep()
for process in self.list_of_processes:
process.ExecuteFinalize()
if (self.print_output):
gid_io.FinalizeResults()
def checkResults(self):
with UnitTest.WorkFolderScope(self.work_folder, __file__):
if self.print_reference_values:
with open(self.reference_file+'.csv','w') as ref_file:
ref_file.write("#ID, PRESSURE\n")
for node in self.main_model_part.Nodes:
pres = node.GetSolutionStepValue(KratosMultiphysics.PRESSURE)
ref_file.write("{0}, {1}\n".format(node.Id, pres))
else:
with open(self.reference_file+'.csv','r') as reference_file:
reference_file.readline() # skip header
line = reference_file.readline()
for node in self.main_model_part.Nodes:
values = [ float(i) for i in line.rstrip('\n ').split(',') ]
node_id = values[0]
reference_pres = values[1]
pres = node.GetSolutionStepValue(KratosMultiphysics.PRESSURE)
self.assertAlmostEqual(reference_pres, pres, delta = self.check_tolerance)
line = reference_file.readline()
if line != '': # If we did not reach the end of the reference file
self.fail("The number of nodes in the mdpa is smaller than the number of nodes in the output file")
if __name__ == '__main__':
test = EmbeddedReservoirTest()
test.setUp()
test.distance = 0.5
test.slip_level_set = False
test.print_output = False
test.print_reference_values = False
test.work_folder = "EmbeddedReservoirTest"
test.reference_file = "reference_slip_reservoir_2D"
test.settings = "EmbeddedReservoir2DTest_parameters.json"
test.setUpProblem()
test.setUpDistanceField()
test.runTest()
test.tearDown()
test.checkResults()
| 0
| 8,846
| 0
| 0
| 0
| 0
| 0
| 79
| 90
|
68e1ed0ef59a3040f7e29f35297d861200c09805
| 454
|
py
|
Python
|
tests/conftest.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | 1
|
2021-01-18T09:38:53.000Z
|
2021-01-18T09:38:53.000Z
|
tests/conftest.py
|
BradleyKirton/ice3x
|
7a289b6b208a0bd07112744923cf5d315982ee31
|
[
"MIT"
] | 1
|
2021-01-15T05:15:08.000Z
|
2021-01-15T05:15:08.000Z
|
import pytest
def pytest_collection_modifyitems(config, items):
"""If async dependencies is not available skip async tests."""
try:
skip_async = False
except ImportError:
skip_async = True
    skip_async_marker = pytest.mark.skip(reason="async dependencies not installed")
    for item in items:
        if "requires_async" in item.keywords and skip_async is True:
            item.add_marker(skip_async_marker)
| 23.894737
| 71
| 0.665198
|
import pytest
def pytest_collection_modifyitems(config, items):
"""If async dependencies is not available skip async tests."""
try:
import treq # noqa
skip_async = False
except ImportError:
skip_async = True
    skip_async_marker = pytest.mark.skip(reason="async dependencies not installed")
    for item in items:
        if "requires_async" in item.keywords and skip_async is True:
            item.add_marker(skip_async_marker)
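# Hedged usage sketch (illustrative, not part of the original conftest): a test
# opts into the skip behaviour above by carrying the "requires_async" marker.
# pytest does not collect tests from conftest.py, so this example stays inert.
@pytest.mark.requires_async
def test_requires_async_example():
    assert True  # real async tests would exercise the treq-based client here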
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -10
| 38
|
1f318af426ba6effdcc824c35b1410a508967992
| 605
|
py
|
Python
|
python/train_model.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
python/train_model.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
python/train_model.py
|
bfakhri/dml_custom
|
1e908b10890df11e510d72c21f3125e3069a0eac
|
[
"CC-BY-4.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sys
print('PYTHON VERSION - ', sys.version)
# For the DML random agent dataset
import random_dataset
# For the model that we will train
import model
# For debugging
import os
for i in range(10):
print(os.getcwd())
ds = random_dataset.dml_dataset()
model = model.Model(ds.shape)
for i in range(1000000):
batch = ds.get_batch()
model.train_step(batch, i)
| 18.90625
| 39
| 0.771901
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import argparse
import random
import numpy as np
import deepmind_lab
import tensorflow as tf
import sys
print('PYTHON VERSION - ', sys.version)
# For the DML random agent dataset
import random_dataset
# For the model that we will train
import model
# For debugging
import os
for i in range(10):
print(os.getcwd())
ds = random_dataset.dml_dataset()
model = model.Model(ds.shape)
for i in range(1000000):
batch = ds.get_batch()
model.train_step(batch, i)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -17
| 111
|
61db0562dc232d4ff5aad924e5350c8b5a68b06a
| 503
|
py
|
Python
|
hub/models/types.py
|
harenlewis/api-hub
|
f79cd8b82e95c039269765a4542866286803a322
|
[
"MIT"
] | null | null | null |
hub/models/types.py
|
harenlewis/api-hub
|
f79cd8b82e95c039269765a4542866286803a322
|
[
"MIT"
] | 2
|
2020-06-05T19:41:09.000Z
|
2021-06-10T21:07:30.000Z
|
hub/models/types.py
|
harenlewis/api-hub
|
f79cd8b82e95c039269765a4542866286803a322
|
[
"MIT"
] | null | null | null |
GET = 100
POST = 200
PUT = 300
DELETE = 400
METHOD_TYPES = (
(GET, 'GET'),
(POST, 'POST'),
(PUT, 'PUT'),
(DELETE, 'DELETE'),
)
METHOD_TYPES_DICT = {
'GET': GET,
'POST': POST,
'PUT': PUT,
'DELETE': DELETE,
}
JSON = 500
HTML = 600
TEXT = 700
RESP_TYPES = (
(JSON, 'JSON'),
(HTML, 'HTML'),
(TEXT, 'TEXT'),
)
RESP_TYPES_DICT = {
'JSON': 'application/json; charset=utf-8',
'HTML': 'text/html; charset=utf-8',
'TEXT': 'text/plain; charset=utf-8',
}
| 14.794118
| 46
| 0.532803
|
GET = 100
POST = 200
PUT = 300
DELETE = 400
METHOD_TYPES = (
(GET, 'GET'),
(POST, 'POST'),
(PUT, 'PUT'),
(DELETE, 'DELETE'),
)
METHOD_TYPES_DICT = {
'GET': GET,
'POST': POST,
'PUT': PUT,
'DELETE': DELETE,
}
JSON = 500
HTML = 600
TEXT = 700
RESP_TYPES = (
(JSON, 'JSON'),
(HTML, 'HTML'),
(TEXT, 'TEXT'),
)
RESP_TYPES_DICT = {
'JSON': 'application/json; charset=utf-8',
'HTML': 'text/html; charset=utf-8',
'TEXT': 'text/plain; charset=utf-8',
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
69dfa3f3f3c61dd8f1cd49fd9d62071055662676
| 3,962
|
py
|
Python
|
genome_designer/tests/integration/test_pipeline_integration.py
|
churchlab/millstone
|
ddb5d003a5b8a7675e5a56bafd5c432d9642b473
|
[
"MIT"
] | 45
|
2015-09-30T14:55:33.000Z
|
2021-06-28T02:33:30.000Z
|
genome_designer/tests/integration/test_pipeline_integration.py
|
churchlab/millstone
|
ddb5d003a5b8a7675e5a56bafd5c432d9642b473
|
[
"MIT"
] | 261
|
2015-06-03T20:41:56.000Z
|
2022-03-07T08:46:10.000Z
|
genome_designer/tests/integration/test_pipeline_integration.py
|
churchlab/millstone
|
ddb5d003a5b8a7675e5a56bafd5c432d9642b473
|
[
"MIT"
] | 22
|
2015-06-04T20:43:10.000Z
|
2022-02-27T08:27:34.000Z
|
"""Alignment pipeline integration tests.
"""
import os
from django.conf import settings
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'test_genome.fa')
TEST_FASTQ1 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.1.fq')
TEST_FASTQ2 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.2.fq')
| 38.096154
| 80
| 0.69687
|
"""Alignment pipeline integration tests.
"""
import os
import time
from django.conf import settings
from djcelery_testworker.testcase import CeleryWorkerTestCase
from main.models import AlignmentGroup
from main.models import Dataset
from main.models import ExperimentSample
from main.testing_util import create_common_entities
from pipeline.pipeline_runner import run_pipeline
from utils.import_util import copy_and_add_dataset_source
from utils.import_util import import_reference_genome_from_local_file
from utils.import_util import import_reference_genome_from_ncbi
from utils import internet_on
TEST_FASTA = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'test_genome.fa')
TEST_FASTQ1 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.1.fq')
TEST_FASTQ2 = os.path.join(settings.PWD, 'test_data', 'fake_genome_and_reads',
'38d786f2', 'test_genome_1.snps.simLibrary.2.fq')
class TestAlignmentPipeline(CeleryWorkerTestCase):
def setUp(self):
common_entities = create_common_entities()
self.project = common_entities['project']
self.reference_genome = import_reference_genome_from_local_file(
self.project, 'ref_genome', TEST_FASTA, 'fasta')
self.experiment_sample = ExperimentSample.objects.create(
project=self.project, label='sample1')
copy_and_add_dataset_source(self.experiment_sample, Dataset.TYPE.FASTQ1,
Dataset.TYPE.FASTQ1, TEST_FASTQ1)
copy_and_add_dataset_source(self.experiment_sample, Dataset.TYPE.FASTQ2,
Dataset.TYPE.FASTQ2, TEST_FASTQ2)
def test_run_pipeline(self):
"""Tests running the full pipeline.
"""
sample_list = [self.experiment_sample]
alignment_group_obj, async_result = run_pipeline('name_placeholder',
self.reference_genome, sample_list)
# Block until pipeline finishes.
while not async_result.ready():
time.sleep(1)
if async_result.status == 'FAILURE':
self.fail('Async task failed.')
# Refresh the object.
alignment_group_obj = AlignmentGroup.objects.get(
id=alignment_group_obj.id)
# Verify the AlignmentGroup object is created.
self.assertEqual(1,
len(alignment_group_obj.experimentsampletoalignment_set.all()))
self.assertEqual(AlignmentGroup.STATUS.COMPLETED,
alignment_group_obj.status)
# Make sure the initial JBrowse config has been created.
jbrowse_dir = self.reference_genome.get_jbrowse_directory_path()
self.assertTrue(os.path.exists(jbrowse_dir))
self.assertTrue(os.path.exists(os.path.join(jbrowse_dir,
'indiv_tracks')))
def test_run_pipeline__genbank_from_ncbi_with_spaces_in_label(self):
"""Tests the pipeline where the genome is imported from NCBI with
spaces in the name.
"""
if not internet_on():
return
MG1655_ACCESSION = 'NC_000913.3'
MG1655_LABEL = 'mg1655 look a space'
ref_genome = import_reference_genome_from_ncbi(self.project,
MG1655_LABEL, MG1655_ACCESSION, 'genbank')
sample_list = [self.experiment_sample]
alignment_group_obj, async_result = run_pipeline('name_placeholder',
ref_genome, sample_list)
# Block until pipeline finishes.
while not async_result.ready():
time.sleep(1)
if async_result.status == 'FAILURE':
self.fail('Async task failed.')
alignment_group_obj = AlignmentGroup.objects.get(
id=alignment_group_obj.id)
self.assertEqual(1,
len(alignment_group_obj.experimentsampletoalignment_set.all()))
self.assertEqual(AlignmentGroup.STATUS.COMPLETED,
alignment_group_obj.status)
| 0
| 0
| 0
| 2,954
| 0
| 0
| 0
| 269
| 266
|
c03319542f2244c2d4ef46ea8722b2475a06c15b
| 793
|
py
|
Python
|
topics/Array/Best_Time_to_Buy_and_Sell_Stock_121/Best_Time_to_Buy_and_Sell_Stock_121.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | 1
|
2019-10-31T11:06:23.000Z
|
2019-10-31T11:06:23.000Z
|
topics/Array/Best_Time_to_Buy_and_Sell_Stock_121/Best_Time_to_Buy_and_Sell_Stock_121.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | null | null | null |
topics/Array/Best_Time_to_Buy_and_Sell_Stock_121/Best_Time_to_Buy_and_Sell_Stock_121.py
|
DmitryNaimark/leetcode-solutions-python
|
16af5f3a9cb8469d82b14c8953847f0e93a92324
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
# ---------------------------------------------------
# Runtime Complexity: O(N)
# Space Complexity: O(1)
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# 5
print(solution.maxProfit([7, 1, 5, 3, 6, 4]))
# 0
print(solution.maxProfit([7, 6, 4, 3, 1]))
| 27.344828
| 64
| 0.461538
|
# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/
# ---------------------------------------------------
from typing import List
# Runtime Complexity: O(N)
# Space Complexity: O(1)
class Solution:
def maxProfit(self, prices: List[int]) -> int:
if len(prices) == 0:
return 0
cur_min = prices[0]
max_diff = 0
for i in range(1, len(prices)):
cur_min = min(prices[i], cur_min)
max_diff = max(prices[i] - cur_min, max_diff)
return max_diff
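# Hedged worked trace (added for illustration, not in the original solution):
# prices = [7, 1, 5, 3, 6, 4]
#   i=1: cur_min=1, max_diff=0   i=2: cur_min=1, max_diff=4
#   i=3: cur_min=1, max_diff=4   i=4: cur_min=1, max_diff=5   i=5: unchanged
# Final answer: 5 (buy at 1, sell at 6), matching the first test case below.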
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# 5
print(solution.maxProfit([7, 1, 5, 3, 6, 4]))
# 0
print(solution.maxProfit([7, 6, 4, 3, 1]))
| 0
| 0
| 0
| 314
| 0
| 0
| 0
| 2
| 44
|
651987d7de3aff6142ce2f122b6b368e0940755f
| 6,839
|
py
|
Python
|
main.py
|
GunnarHolwerda/PiWallGuiController
|
cc90e5f6fd6f13fdfdcabcc8e6b195bf01cb440f
|
[
"MIT"
] | 5
|
2017-03-29T20:44:42.000Z
|
2020-06-26T23:11:34.000Z
|
main.py
|
GunnarHolwerda/PiWallGuiController
|
cc90e5f6fd6f13fdfdcabcc8e6b195bf01cb440f
|
[
"MIT"
] | null | null | null |
main.py
|
GunnarHolwerda/PiWallGuiController
|
cc90e5f6fd6f13fdfdcabcc8e6b195bf01cb440f
|
[
"MIT"
] | 1
|
2021-03-08T14:57:09.000Z
|
2021-03-08T14:57:09.000Z
|
"""
GUI Application to control the PiWall
"""
#!/usr/bin/python3
# Author: Gunnar Holwerda
# GUI to control a PiWall
from tkinter import Tk
# Run the GUI
if __name__ == "__main__":
tk_window = Tk(className="PiWall")
frame = SelectorWindow(master=tk_window)
tk_window.mainloop()
frame.get_controller().stop_wall()
| 33.360976
| 90
| 0.619389
|
"""
GUI Application to control the PiWall
"""
#!/usr/bin/python3
# Author: Gunnar Holwerda
# GUI to control a PiWall
from tkinter import Frame, StringVar, OptionMenu, Listbox, Button, Label, Tk, END
from piwallcontroller.piwallcontroller import PiWallController
from piwallcontroller.playlist import Playlist
from threading import Thread
class SelectorWindow(Frame):
"""
GUI Class extending the tkinter.Frame class
"""
TIMEOUTS = {
'1 hour ': 3600,
'2 hours': 7200,
'3 hours': 10800,
'Infinite': -1,
}
def __init__(self, master=None):
Frame.__init__(self, master)
self.__playlist = Playlist()
self.__controller = PiWallController()
self.__dropdown_selection = StringVar()
self.__timeout_selection = StringVar()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.grid()
self.create_video_file_dropdown()
self.create_timeout_dropdown()
self.create_display_box()
self.create_add_button()
self.create_delete_button()
self.create_play_button()
self.create_reboot_button()
self.create_status_label()
self.create_stop_button()
def create_video_file_dropdown(self):
"""
Creates the dropdown to display the video files from
"""
videos = self.__controller.get_video_file_list()
if videos:
self.__dropdown_selection.set(videos[0])
else:
videos.append(None)
self.video_dropdown = OptionMenu(
None, self.__dropdown_selection, *videos)
self.video_dropdown.config(width=10)
self.video_dropdown.grid(row=0, column=0)
def create_timeout_dropdown(self):
"""
Creates the dropdown that displays the timeouts
"""
timeouts = list(self.TIMEOUTS.keys())
timeouts.sort()
self.__timeout_selection.set(timeouts[0])
self.timeout_dropdown = OptionMenu(
None, self.__timeout_selection, *timeouts)
self.timeout_dropdown.config(width=5)
self.timeout_dropdown.grid(row=0, column=1)
def create_display_box(self):
"""
Creates display box that displays all current items in the playlist
"""
self.display_box = Listbox(width=30, height=10)
self.display_box.grid(row=0, column=2, columnspan=2)
def create_play_button(self):
"""
Creates the play button
"""
self.submit_button = Button(text="Play", width=10)
self.submit_button['command'] = self.play_wall
self.submit_button.grid(row=1, column=2, pady=5)
def create_add_button(self):
"""
Creates the button to add the current values in the video and timeout dropdown
into the playlist
"""
self.add_button = Button(text='Add', fg='green', width=10)
self.add_button['command'] = self.update_display_box
self.add_button.grid(row=1, column=0, pady=5)
def create_delete_button(self):
"""
		Creates delete button to delete items from the display box
"""
self.delete_button = Button(text='Delete', fg='red', width=10)
self.delete_button['command'] = self.delete_selected_item
self.delete_button.grid(row=1, column=1, pady=5)
def create_reboot_button(self):
"""
		Creates button that reboots the Pis
"""
self.reboot_button = Button(text='Reboot Tiles', fg='red', width=10)
self.reboot_button['command'] = self.reboot_pressed
self.reboot_button.grid(row=1, column=3, pady=5)
def create_status_label(self):
"""
Creates label to display current status of the wall
"""
self.status_label = Label(relief="ridge", width=11)
self.set_status_label(0)
self.status_label.grid(row=2, column=3, pady=5)
def create_stop_button(self):
"""
Creates stop button to stop PiWall
"""
self.stop_button = Button(text='Stop Playing')
self.set_status_label(0)
self.stop_button['command'] = self.stop_pressed
self.stop_button.grid(row=2, column=2, pady=5)
def delete_selected_item(self):
"""
Deletes the currently selected item from the displaybox
"""
self.__playlist.remove_playlist_item(self.display_box.curselection())
self.display_box.delete(self.display_box.curselection())
def play_wall(self):
"""
		Submits the playlist to be played on the Pis
"""
if self.__playlist.is_empty():
return
self.set_status_label(1)
self.display_box.delete(0, END)
# If there is a thread running, we need to stop the wall, which will
# end the thread
		if self.__command_thread.is_alive():
print("Stopping Wall")
self.__controller.stop_wall()
self.__command_thread.join()
self.__command_thread = Thread(
target=self.__controller.run_commands, args=(self.__playlist,))
self.__command_thread.start()
def update_display_box(self):
"""
Button listener for the Add Button (create_add_button)
"""
video_file = self.__dropdown_selection.get()
timeout = self.__timeout_selection.get()
self.__playlist.add_playlist_item(video_file, self.TIMEOUTS[timeout])
self.display_box.insert(END, "{0} {1}".format(timeout, video_file))
def stop_pressed(self):
"""
Button listener for the Stop Button (create_stop_button)
"""
self.__controller.stop_wall()
self.set_status_label(0)
def reboot_pressed(self):
"""
Button listener for the Reboot Button (create_reboot_button)
"""
self.set_status_label(0)
self.__controller.reboot_pis()
return True
def set_status_label(self, state):
"""
Updates the status label to the current status of the PiWall
"""
if state == 1:
self.status_label.config(text='Playing', fg='green')
return True
elif state == 0:
self.status_label.config(text='Not Playing', fg='red')
return True
else:
			raise Exception(
				'Status label state {0} not supported. Try 0 or 1'.format(state))
def get_controller(self):
"""
		Returns the PiWall controller
"""
return self.__controller
# Run the GUI
if __name__ == "__main__":
tk_window = Tk(className="PiWall")
frame = SelectorWindow(master=tk_window)
tk_window.mainloop()
frame.get_controller().stop_wall()
| 0
| 0
| 0
| 6,276
| 0
| 0
| 0
| 132
| 89
|
4e411687a292bc56a0037b2e523555237471ea26
| 765
|
py
|
Python
|
libraries/botbuilder-schema/botbuilder/schema/_sign_in_enums.py
|
victor-kironde/botbuilder-python
|
e893d9b036d7cf33cf9c9afd1405450c354cdbcd
|
[
"MIT"
] | 1
|
2020-07-12T21:04:08.000Z
|
2020-07-12T21:04:08.000Z
|
libraries/botbuilder-schema/botbuilder/schema/_sign_in_enums.py
|
Fortune-Adekogbe/botbuilder-python
|
4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4
|
[
"MIT"
] | null | null | null |
libraries/botbuilder-schema/botbuilder/schema/_sign_in_enums.py
|
Fortune-Adekogbe/botbuilder-python
|
4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4
|
[
"MIT"
] | 1
|
2020-10-01T07:34:07.000Z
|
2020-10-01T07:34:07.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
| 40.263158
| 94
| 0.60915
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from enum import Enum
class SignInConstants(str, Enum):
# Name for the signin invoke to verify the 6-digit authentication code as part of sign-in.
verify_state_operation_name = "signin/verifyState"
# Name for signin invoke to perform a token exchange.
token_exchange_operation_name = "signin/tokenExchange"
# The EventActivity name when a token is sent to the bot.
token_response_event_name = "tokens/response"
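# Hedged usage sketch (illustrative, not part of the original module): because
# SignInConstants subclasses str, members compare equal to their raw wire values,
# which is how an incoming invoke activity name can be matched directly.
if __name__ == "__main__":
    assert SignInConstants.verify_state_operation_name == "signin/verifyState"
    assert SignInConstants.token_exchange_operation_name == "signin/tokenExchange"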
| 0
| 0
| 0
| 392
| 0
| 0
| 0
| 0
| 46
|
cedce4854061d9a8c9e7cb1c10204a423754caa1
| 220
|
py
|
Python
|
verifyage.py
|
cheesyc/basicpython
|
9a055e4f813c6caa601ba00da939439b0bc82a3f
|
[
"MIT"
] | null | null | null |
verifyage.py
|
cheesyc/basicpython
|
9a055e4f813c6caa601ba00da939439b0bc82a3f
|
[
"MIT"
] | null | null | null |
verifyage.py
|
cheesyc/basicpython
|
9a055e4f813c6caa601ba00da939439b0bc82a3f
|
[
"MIT"
] | null | null | null |
from datetime import datetime
# def days (d):
# now = datetime.now
if __name__ == "__main__":
# u = int(input("What is your age?"))
	# d = int(input("What month were you born in?"))
	print(datetime.now())
| 22
| 52
| 0.613636
|
from datetime import datetime
# def days (d):
# now = datetime.now
if __name__ == "__main__":
# u = int(input("What is your age?"))
	# d = int(input("What month were you born in?"))
	print(datetime.now())
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7d5e808698d08d5b754ad10b30667e0affcf369b
| 9,023
|
py
|
Python
|
predictive-horizontal-pod-autoscaler/short/analyse.py
|
jthomperoo/custom-pod-autoscaler-experiments
|
f065bee72391dff008a388d46cba40df3fb23c98
|
[
"Apache-2.0"
] | 4
|
2020-02-26T14:00:01.000Z
|
2022-02-25T15:23:09.000Z
|
predictive-horizontal-pod-autoscaler/short/analyse.py
|
jthomperoo/custom-pod-autoscaler-experiments
|
f065bee72391dff008a388d46cba40df3fb23c98
|
[
"Apache-2.0"
] | 1
|
2021-06-12T09:40:56.000Z
|
2021-06-12T09:51:45.000Z
|
predictive-horizontal-pod-autoscaler/short/analyse.py
|
jthomperoo/custom-pod-autoscaler-experiments
|
f065bee72391dff008a388d46cba40df3fb23c98
|
[
"Apache-2.0"
] | 1
|
2021-07-07T09:58:23.000Z
|
2021-07-07T09:58:23.000Z
|
# Copyright 2020 Jamie Thompson.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if __name__ == "__main__":
main()
| 54.355422
| 161
| 0.729469
|
# Copyright 2020 Jamie Thompson.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
from tabulate import tabulate
from matplotlib import pyplot as plt
def plot_replica_comparison(horizontal_replicas, predictive_replicas):
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_replicas, "r", list(np.arange(0, 30, 0.5)), predictive_replicas, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("number of replicas")
plt.savefig("results/predictive_vs_horizontal_replicas.svg")
def plot_avg_latency_comparison(horizontal_latencies, predictive_latencies):
horizontal_avg_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("avg_response_time"))
predictive_avg_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("avg_response_time"))
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_avg_latencies, "r", list(np.arange(0, 30, 0.5)), predictive_avg_latencies, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("average latency")
plt.savefig("results/avg_latency_comparison.svg")
def plot_max_latency_comparison(horizontal_latencies, predictive_latencies):
horizontal_max_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("max_response_time"))
predictive_max_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("max_response_time"))
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_max_latencies, "r", list(np.arange(0, 30, 0.5)), predictive_max_latencies, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("maximum latency")
plt.savefig("results/max_latency_comparison.svg")
def plot_failed_to_success_request_percentage(horizontal_latencies, predictive_latencies):
horizontal_fail_percentages = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
predictive_fail_percentages = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
plt.figure(figsize=[6, 6])
plt.plot(list(np.arange(0, 30, 0.5)), horizontal_fail_percentages, "r", list(np.arange(0, 30, 0.5)), predictive_fail_percentages, "b")
plt.legend(["K8s HPA", "CPA Predictive HPA"])
plt.xlabel("time (minutes)")
plt.ylabel("failed requests (%)")
plt.savefig("results/fail_percentage_comparison.svg")
def create_table(horizontal_replicas, predictive_replicas, horizontal_latencies, predictive_latencies):
horizontal_num_requests = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_num_requests.append(result["num_requests"])
predictive_num_requests = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_num_requests.append(result["num_requests"])
horizontal_avg_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("avg_response_time"))
predictive_avg_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_avg_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("avg_response_time"))
horizontal_max_latencies = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//").get("max_response_time"))
predictive_max_latencies = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_max_latencies.append(result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//").get("max_response_time"))
horizontal_fail_percentages = []
for result in horizontal_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/horizontal-deployment/proxy//") is None:
continue
horizontal_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
predictive_fail_percentages = []
for result in predictive_latencies:
if result["requests"].get("GET_/api/v1/namespaces/default/services/predictive-deployment/proxy//") is None:
continue
predictive_fail_percentages.append(result["num_requests_fail"] / result["num_requests"] * 100)
table = {
"time (mins)": list(np.arange(0, 30, 0.5)),
"hpa num requests": horizontal_num_requests,
"phpa num requests": predictive_num_requests,
"hpa replicas": horizontal_replicas,
"phpa replicas": predictive_replicas,
"hpa avg latencies": horizontal_avg_latencies,
"phpa avg latencies": predictive_avg_latencies,
"hpa max latencies": horizontal_max_latencies,
"phpa max latencies": predictive_max_latencies,
"hpa fail requests (%)": horizontal_fail_percentages,
"phpa fail requests (%)": predictive_fail_percentages
}
with open("results/predictive_vs_horizontal_table.md", "w") as table_file:
table_file.write(tabulate(table, tablefmt="pipe", headers="keys"))
def main():
with open("results/results.json") as json_file:
results = json.load(json_file)
horizontal_replicas = results["horizontal"]["replicas"]
predictive_replicas = results["predictive"]["replicas"]
horizontal_latencies = results["horizontal"]["latency"]
predictive_latencies = results["predictive"]["latency"]
horizontal_latencies = sorted(horizontal_latencies, key=lambda k: k["start_time"])
predictive_latencies = sorted(predictive_latencies, key=lambda k: k["start_time"])
create_table(horizontal_replicas, predictive_replicas, horizontal_latencies, predictive_latencies)
plot_replica_comparison(horizontal_replicas, predictive_replicas)
plot_avg_latency_comparison(horizontal_latencies, predictive_latencies)
plot_max_latency_comparison(horizontal_latencies, predictive_latencies)
plot_failed_to_success_request_percentage(horizontal_latencies, predictive_latencies)
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 8,167
| 0
| 10
| 226
|
21699970a803f9a1e84a84d986852609b75c11f8
| 2,747
|
py
|
Python
|
fmformatter/Sites2Query.py
|
wassermanlab/OpenFlexTyper
|
35edbf2c29f20ccec20baaaf46cc2382b7defda6
|
[
"MIT"
] | 7
|
2019-11-26T00:01:58.000Z
|
2021-04-03T05:31:44.000Z
|
fmformatter/Sites2Query.py
|
wassermanlab/OpenFlexTyper_restore
|
f599011a8f856bd81e73e5472d50980b4695055c
|
[
"MIT"
] | 33
|
2019-10-22T22:23:51.000Z
|
2020-10-02T20:14:17.000Z
|
fmformatter/Sites2Query.py
|
wassermanlab/OpenFlexTyper_restore
|
f599011a8f856bd81e73e5472d50980b4695055c
|
[
"MIT"
] | 4
|
2019-11-29T23:16:57.000Z
|
2020-03-07T19:04:26.000Z
|
# Function which takes in a sites file and produces a query file.
# Sites file looks like (these are 1-based coords):
# 22:50988105:G:A
#
# Query file looks like:
# #Index Reference Alternate Chrom Pos Ref Alt Identifier DataType
#0 TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACATTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACACTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT 16 27557749 T C rs7198785_S-3AAAA cytoscan
# given 1-based pos coordinate, extract seqs and return the 2 seqs for query, one with the ref and one with the alt alleles
# reftest,alttest = Site2Seqs(22,50988105,'G','A',ARGS.Fasta)
# print(reftest)
# print(alttest)
if __name__=="__main__":
Main()
| 41.621212
| 649
| 0.790681
|
import pybedtools
import sys
import argparse
def GetArgs():
parser = argparse.ArgumentParser()
parser.add_argument("-I","--Infile",help="Infile in the format of Sites: Chrom:position:ref:alt", required=True)
parser.add_argument("-F","--Fasta",help="Input fasta file corresponding to the positions", required=True)
parser.add_argument("-O","--Outfile",help="Output file for the queries for FlexTyper", required=True)
parser.add_argument("-S","--Source",help="Source acquired from,e.g. PeddyGRCh37Sites", required=True)
args = parser.parse_args()
return args
# Function which takes in a sites file and produces a query file.
# Sites file looks like (these are 1-based coords):
# 22:50988105:G:A
#
# Query file looks like:
# #Index Reference Alternate Chrom Pos Ref Alt Identifier DataType
#0 TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACATTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT TTTCTCCAAATACAGATCCAATGTCTTCACTTGTCTATTAAATGCCTCCCATTCCAAATATGATTACCTCTCCCCAGCTCCAATTAAGTCCCTTCTTTCCCCTCTTACTACCGCTTTCTTCCATGTGCCTCTTACAACACCATGGAGACACTTTTCATTTGTGCTTCTTTCATGCAGTTAGCCAAGCTTGTCAAGTTTTTTTTTTTTTGAAAAAAAAAAAAAATACATACATATATATATATATAATTTTTTTTCCCCTCACTATGTTGCCCAGATTGGTCTTGAACTACCGGGCTCAAGT 16 27557749 T C rs7198785_S-3AAAA cytoscan
def ParseSitesGetQuery(SitesInfile,Fasta,QueryOutfile,Source):
infile = open(SitesInfile,'r')
outfile = open(QueryOutfile,'w')
counter = 0
outfile.write("#Index\tReference\tAlternate\tChrom\tPos\tRef\tAlt\tIdentifier\tDataType\n")
for line in infile:
line = line.strip('\n')
cols=line.split(':')
chrom = cols[0]
pos = int(cols[1]) - 1 # 1-based transition
ref = cols[2]
alt = cols[3]
Source='PeddySitesGRCh37'
refSeq,altSeq = Site2Seqs(chrom,pos,ref,alt,Fasta)
outfile.write("%d\t%s\t%s\t%s\t%d\t%s\t%s\t%s\t%s\n"%(counter,refSeq,altSeq,chrom,pos,ref,alt,line,Source))
counter += 1
# given 1-based pos coordinate, extract seqs and return the 2 seqs for query, one with the ref and one with the alt alleles
def Site2Seqs(chrom,pos,ref,alt,fasta):
pos = pos-1
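    # Extract roughly 150 bp of flanking sequence on each side of the (0-based) site position.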
refSeq = pybedtools.BedTool.seq((chrom,pos-150,pos+151),fasta)
altSeqleft = pybedtools.BedTool.seq((chrom,pos-150,pos),fasta)
altSeqright = pybedtools.BedTool.seq((chrom,pos+1,pos+151),fasta)
altSeq = altSeqleft + alt + altSeqright
return refSeq,altSeq
def Main():
ARGS = GetArgs()
ParseSitesGetQuery(ARGS.Infile,ARGS.Fasta,ARGS.Outfile,ARGS.Source)
# test Site2Seqs
# reftest,alttest = Site2Seqs(22,50988105,'G','A',ARGS.Fasta)
# print(reftest)
# print(alttest)
if __name__=="__main__":
Main()
| 0
| 0
| 0
| 0
| 0
| 1,466
| 0
| -21
| 157
|
fac204b97e11e17794e1161b7bf560750117f3ce
| 49
|
py
|
Python
|
src/thekpi_node/__init__.py
|
keeplerteam/thekpi
|
082258c26909254caf46caec1da89438a43548c3
|
[
"MIT"
] | 2
|
2022-01-21T14:37:50.000Z
|
2022-01-21T16:06:27.000Z
|
src/thekpi_node/__init__.py
|
keeplerteam/thekpi
|
082258c26909254caf46caec1da89438a43548c3
|
[
"MIT"
] | null | null | null |
src/thekpi_node/__init__.py
|
keeplerteam/thekpi
|
082258c26909254caf46caec1da89438a43548c3
|
[
"MIT"
] | null | null | null |
from .node import KpiNode
__all__ = ["KpiNode"]
| 12.25
| 25
| 0.714286
|
from .node import KpiNode
__all__ = ["KpiNode"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1fa0e3b8383b8f9f172b6decfb3c6c2eff282ed3
| 4,727
|
py
|
Python
|
python/Tests/TestStatic.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/Tests/TestStatic.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
python/Tests/TestStatic.py
|
ugirumurera/ta_solver
|
c3bd83633aca4db785a4d0dc554f924bb26754e1
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
#from Solvers.Decomposition_Solver import Decomposition_Solver
| 41.464912
| 121
| 0.675904
|
import unittest
import numpy as np
from Solvers.Frank_Wolfe_Solver_Static import Frank_Wolfe_Solver
from Solvers.Path_Based_Frank_Wolfe_Solver import Path_Based_Frank_Wolfe_Solver
#from Solvers.Decomposition_Solver import Decomposition_Solver
from Model_Manager.Link_Model_Manager import Link_Model_Manager_class
from Java_Connection import Java_Connection
from Data_Types.Demand_Assignment_Class import Demand_Assignment_class
import os
import inspect
class TestStatic(unittest.TestCase):
@classmethod
def setUpClass(cls):
# make Java connection
cls.connection = Java_Connection()
# create a static/bpr model manager
this_folder = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
configfile = os.path.join(this_folder, os.path.pardir, 'configfiles', 'seven_links.xml')
bpr_coefficients = {0L: [1, 0, 0, 0, 1], 1L: [1, 0, 0, 0, 1], 2L: [5, 0, 0, 0, 5], 3L: [2, 0, 0, 0, 2],
4L: [2, 0, 0, 0, 2], 5L: [1, 0, 0, 0, 1], 6L: [5, 0, 0, 0, 5]}
cls.model_manager = Link_Model_Manager_class(configfile, "static", cls.connection, None, "bpr", bpr_coefficients)
# create a demand assignment
api = TestStatic.model_manager.beats_api
time_period = 1 # Only have one time period for static model
paths_list = list(api.get_path_ids())
commodity_list = list(api.get_commodity_ids())
route_list = {}
for path_id in paths_list:
route_list[path_id] = api.get_subnetwork_with_id(path_id).get_link_ids()
# Creating the demand assignment for initialization
cls.demand_assignments = Demand_Assignment_class(route_list, commodity_list, time_period, dt=time_period)
demands = {}
demand_value = np.zeros(time_period)
demand_value1 = np.zeros(time_period)
demand_value[0] = 2
demand_value1[0] = 2
demands[(1L, 1L)] = demand_value
demands[(2L, 1L)] = demand_value1
demands[(3L, 1L)] = demand_value
cls.demand_assignments.set_all_demands(demands)
def check_manager(self):
self.assertTrue(TestStatic.model_manager.is_valid())
def test_model_run(self):
traffic_model = TestStatic.model_manager.traffic_model
link_states = traffic_model.Run_Model(TestStatic.demand_assignments)
self.assertTrue(self.check_assignments(link_states))
def test_link_cost(self):
traffic_model = TestStatic.model_manager.traffic_model
link_states = traffic_model.Run_Model(TestStatic.demand_assignments)
link_costs = TestStatic.model_manager.cost_function.evaluate_Cost_Function(link_states)
self.assertTrue(self.check_link_costs(link_costs))
def test_link_based_fw(self):
frank_sol = Frank_Wolfe_Solver(self.model_manager)
def test_path_based_fw(self):
num_steps = 1
eps = 1e-2
frank_sol = Frank_Wolfe_Solver(self.model_manager)
assignment_seq = Path_Based_Frank_Wolfe_Solver(self.model_manager, num_steps)
# Cost resulting from the path_based Frank-Wolfe
link_states = self.model_manager.traffic_model.Run_Model(assignment_seq)
cost_path_based = self.model_manager.cost_function.evaluate_BPR_Potential(link_states)
# Cost resulting from link-based Frank-Wolfe
cost_link_based = self.model_manager.cost_function.evaluate_BPR_Potential_FW(frank_sol)
self.assertTrue(np.abs(cost_link_based-cost_path_based) < eps)
'''
def test_decomposition_solver(self):
number_of_subproblems = 1
start_time1 = timeit.default_timer()
assignment_dec, error = Decomposition_Solver(self.traffic_scenario, self.Cost_Function, number_of_subproblems)
print "Decomposition finished with error ", error
elapsed1 = timeit.default_timer() - start_time1
print ("Decomposition Path-based took %s seconds" % elapsed1)
'''
def check_assignments(self, link_states):
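        # Expected link flows, keyed by (link id, commodity id), for the seven-link test network.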
links_flows = {(0L,1L): [6], (1L,1L): [4], (2L,1L): [2], (3L,1L): [2],
(4L,1L): [2], (5L,1L): [2], (6L,1L): [4]}
states = link_states.get_all_states()
for key in states.keys():
if states[key][0].get_flow() != links_flows[key][0]:
return False
return True
def check_link_costs(self, link_costs):
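        # Expected BPR link costs, keyed the same way as the flows above.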
cost_links = {(0L,1L): [1297], (1L,1L): [257], (2L,1L): [85], (3L,1L): [34],
(4L,1L): [34], (5L,1L): [17], (6L,1L): [1285]}
states = link_costs.get_all_costs()
for key in states.keys():
if states[key][0] != cost_links[key][0]:
return False
return True
| 0
| 1,578
| 0
| 2,670
| 0
| 0
| 0
| 192
| 224
|
7c364bc32aba99d22e5967788cc363abdd9e9b31
| 484
|
py
|
Python
|
api/setup.py
|
jim8786453/kiln_share
|
2d70c8863f7db18069d13cdea319cd113a2d0bbb
|
[
"BSD-3-Clause"
] | 1
|
2018-03-21T12:27:56.000Z
|
2018-03-21T12:27:56.000Z
|
api/setup.py
|
jim8786453/kiln_share
|
2d70c8863f7db18069d13cdea319cd113a2d0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
api/setup.py
|
jim8786453/kiln_share
|
2d70c8863f7db18069d13cdea319cd113a2d0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup
from pip.req import parse_requirements
req_file = 'requirements.txt'
install_reqs = parse_requirements(req_file, session=False)
reqs = [str(ir.req) for ir in install_reqs]
del os.link
setup(
author='Jim Kennedy',
author_email='[email protected]',
description='Api for kilnshare.co.uk',
install_requires=reqs,
name='kiln_share',
packages=['kiln_share'],
version='0.0.1',
)
| 22
| 58
| 0.727273
|
#!/usr/bin/env python
import os
import platform
from setuptools import setup
from pip.req import parse_requirements
req_file = 'requirements.txt'
install_reqs = parse_requirements(req_file, session=False)
reqs = [str(ir.req) for ir in install_reqs]
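# Drop os.link so setuptools copies files instead of hard-linking (hard links can fail on some filesystems).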
del os.link
setup(
author='Jim Kennedy',
author_email='[email protected]',
description='Api for kilnshare.co.uk',
install_requires=reqs,
name='kiln_share',
packages=['kiln_share'],
version='0.0.1',
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -6
| 22
|
a7da0dc79993ceee28e11231a75e0d28a5195097
| 784
|
py
|
Python
|
back/infolica/alembic/versions/20210906_5a8069c68433.py
|
maltaesousa/infolica
|
9b510b706daba8f8a04434d281c1f8730651f25f
|
[
"MIT"
] | null | null | null |
back/infolica/alembic/versions/20210906_5a8069c68433.py
|
maltaesousa/infolica
|
9b510b706daba8f8a04434d281c1f8730651f25f
|
[
"MIT"
] | 327
|
2019-10-29T13:35:25.000Z
|
2022-03-03T10:01:46.000Z
|
back/infolica/alembic/versions/20210906_5a8069c68433.py
|
maltaesousa/infolica
|
9b510b706daba8f8a04434d281c1f8730651f25f
|
[
"MIT"
] | 5
|
2019-11-07T15:49:05.000Z
|
2021-03-08T08:59:56.000Z
|
"""fix affaire abandon default value
Revision ID: 5a8069c68433
Revises: ee79f1259c77
Create Date: 2021-09-06 16:28:58.437853
"""
# revision identifiers, used by Alembic.
revision = '5a8069c68433'
down_revision = 'ee79f1259c77'
branch_labels = None
depends_on = None
| 25.290323
| 65
| 0.655612
|
"""fix affaire abandon default value
Revision ID: 5a8069c68433
Revises: ee79f1259c77
Create Date: 2021-09-06 16:28:58.437853
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a8069c68433'
down_revision = 'ee79f1259c77'
branch_labels = None
depends_on = None
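# Upgrade makes affaire.abandon NOT NULL; downgrade allows NULL again.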
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('affaire', 'abandon',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('affaire', 'abandon',
existing_type=sa.BOOLEAN(),
nullable=True)
# ### end Alembic commands ###
| 0
| 0
| 0
| 0
| 0
| 421
| 0
| 3
| 90
|
36df2a65cfecf0f2d8cef146751f1d40789fd2ae
| 1,128
|
py
|
Python
|
Code_Python/Exercicio-03/Leia-três-numeros.py
|
gabrielf7/code-exercises
|
b3a8661fadc133395f3c6fb7e926317acf7fa539
|
[
"MIT"
] | null | null | null |
Code_Python/Exercicio-03/Leia-três-numeros.py
|
gabrielf7/code-exercises
|
b3a8661fadc133395f3c6fb7e926317acf7fa539
|
[
"MIT"
] | null | null | null |
Code_Python/Exercicio-03/Leia-três-numeros.py
|
gabrielf7/code-exercises
|
b3a8661fadc133395f3c6fb7e926317acf7fa539
|
[
"MIT"
] | null | null | null |
#questão 4
num1 = float(input("Digite o primeiro valor: \n"))
num2 = float(input("Digite o segundo valor: \n"))
num3 = float(input("Digite o terceiro valor: \n"))
if(num1 > num2 > num3 or num1 == num2 > num3 or num1 > num2 == num3):
maior = num1
segundo = num2
menor = num3
elif num1 > num2 < num3 or num1 == num2 < num3 or num1 > num2 == num3:
maior = num1
segundo = num3
menor = num2
if(num2 > num1 > num3 or num2 == num1 > num3 or num2 > num1 == num3):
maior = num2
segundo = num1
menor = num3
elif (num2 > num1 < num3 or num2 == num1 < num3 or num2 > num1 == num3):
maior = num2
segundo = num3
menor = num1
if(num3 > num1 > num2 or num3 == num1 > num2 or num3 > num1 == num2):
maior = num3
segundo = num1
menor = num2
elif (num3 > num1 < num2 or num3 == num1 < num2 or num3 > num1 == num2):
maior = num3
segundo = num2
menor = num1
if num1 == num2 == num3:
maior = num1
iguais = maior
print("Iguais: [", iguais,"]")
exit()
print("Maior: [", maior, "] | Segundo: [", segundo, "] | Menor: ", [menor])
| 31.333333
| 75
| 0.565603
|
#questão 4
num1 = float(input("Digite o primeiro valor: \n"))
num2 = float(input("Digite o segundo valor: \n"))
num3 = float(input("Digite o terceiro valor: \n"))
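# Classify the three values as maior/segundo/menor via pairwise comparisons; the all-equal case is handled at the end.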
if(num1 > num2 > num3 or num1 == num2 > num3 or num1 > num2 == num3):
maior = num1
segundo = num2
menor = num3
elif num1 > num2 < num3 or num1 == num2 < num3 or num1 > num2 == num3:
maior = num1
segundo = num3
menor = num2
if(num2 > num1 > num3 or num2 == num1 > num3 or num2 > num1 == num3):
maior = num2
segundo = num1
menor = num3
elif (num2 > num1 < num3 or num2 == num1 < num3 or num2 > num1 == num3):
maior = num2
segundo = num3
menor = num1
if(num3 > num1 > num2 or num3 == num1 > num2 or num3 > num1 == num2):
maior = num3
segundo = num1
menor = num2
elif (num3 > num1 < num2 or num3 == num1 < num2 or num3 > num1 == num2):
maior = num3
segundo = num2
menor = num1
if num1 == num2 == num3:
maior = num1
iguais = maior
print("Iguais: [", iguais,"]")
exit()
print("Maior: [", maior, "] | Segundo: [", segundo, "] | Menor: ", [menor])
| 2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ec43363c255f6adb5d1411a40a6f397b07037274
| 383
|
py
|
Python
|
Economic_Dispatch/plot_results.py
|
asuncionjc/Pyomo_Playground
|
b81a12905fb6cdd041b11f89ee4bbbc20168d4d2
|
[
"Apache-2.0"
] | 1
|
2019-04-12T14:47:58.000Z
|
2019-04-12T14:47:58.000Z
|
Economic_Dispatch/plot_results.py
|
asuncionjc/Pyomo_Playground
|
b81a12905fb6cdd041b11f89ee4bbbc20168d4d2
|
[
"Apache-2.0"
] | null | null | null |
Economic_Dispatch/plot_results.py
|
asuncionjc/Pyomo_Playground
|
b81a12905fb6cdd041b11f89ee4bbbc20168d4d2
|
[
"Apache-2.0"
] | 1
|
2021-02-14T18:40:13.000Z
|
2021-02-14T18:40:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 15:53:54 2019
@author: Asun
"""
| 25.533333
| 99
| 0.665796
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 15:53:54 2019
@author: Asun
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_results(model):
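    # Plot each generating unit's flow in period 1 and save the figure as flow_plot.pdf.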
x = np.arange(0, 3)
y = [model.flow[generating_unit,1].value for generating_unit in model.indexes_generating_units]
plt.plot(x, y, color = 'red', marker = 'o', linestyle = "--")
plt.savefig('flow_plot.pdf')
| 0
| 0
| 0
| 0
| 0
| 226
| 0
| 7
| 68
|
de05130838373479be28ff8059892d8eb6a14633
| 1,787
|
py
|
Python
|
download_hype.py
|
woctezuma/steam-hype
|
cb885f8c1c2a4e7b8d344401207e3a7634f52317
|
[
"MIT"
] | 1
|
2019-08-15T18:52:55.000Z
|
2019-08-15T18:52:55.000Z
|
download_hype.py
|
woctezuma/steam-hype
|
cb885f8c1c2a4e7b8d344401207e3a7634f52317
|
[
"MIT"
] | 10
|
2019-08-15T19:05:10.000Z
|
2020-07-24T05:07:28.000Z
|
download_hype.py
|
woctezuma/steam-hype
|
cb885f8c1c2a4e7b8d344401207e3a7634f52317
|
[
"MIT"
] | 1
|
2019-08-20T03:32:25.000Z
|
2019-08-20T03:32:25.000Z
|
if __name__ == '__main__':
main()
| 20.078652
| 75
| 0.604365
|
import time
import requests
from utils import save_results
def get_steam_hype_url():
# This is not my API. Please use with moderation!
url = 'https://steamhype-api.herokuapp.com/calendar'
return url
def get_time_stamp():
time_stamp = int(time.time() * 1000)
return time_stamp
def get_steam_hype_params(num_followers=0):
params = dict()
params['start'] = get_time_stamp()
params['current'] = 0
params['followers'] = num_followers
params['includedlc'] = 'false'
params['price'] = 100
params['discount'] = 0
params['reviews'] = 0
params['score'] = 0
return params
def request_data(params=None):
if params is None:
params = get_steam_hype_params()
resp_data = requests.get(url=get_steam_hype_url(),
params=params)
result = resp_data.json()
return result
def batch_request_data(params,
save_results_to_disk=True,
verbose=False):
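    # Page through the calendar endpoint, accumulating games keyed by app id, until an empty page is returned.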
results = dict()
while True:
print('Request n°{}'.format(params['current'] + 1))
result = request_data(params)
if len(result) == 0:
break
else:
for game in result:
app_id = game['id']
results[app_id] = game
params['current'] += 1
if verbose:
print(results)
if save_results_to_disk:
save_results(results=results)
return results
def main(num_followers=5000,
save_results_to_disk=True):
params = get_steam_hype_params(num_followers=num_followers)
results = batch_request_data(params=params,
save_results_to_disk=save_results_to_disk)
return True
if __name__ == '__main__':
main()
| 2
| 0
| 0
| 0
| 0
| 1,541
| 0
| -7
| 206
|
538fa6ef11f1d9c920a5d631b5035786fcade951
| 2,881
|
py
|
Python
|
examples/sine.py
|
bjodah/finitediff
|
bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8
|
[
"BSD-2-Clause"
] | 27
|
2016-09-14T11:40:35.000Z
|
2022-03-05T18:48:26.000Z
|
examples/sine.py
|
tutoushaonian/finitediff
|
bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8
|
[
"BSD-2-Clause"
] | 4
|
2016-04-08T03:55:14.000Z
|
2018-06-27T11:18:58.000Z
|
examples/sine.py
|
tutoushaonian/finitediff
|
bfb1940cf5c7ce5c9a3b440d1efd8f8c4128fed8
|
[
"BSD-2-Clause"
] | 5
|
2017-05-25T06:50:40.000Z
|
2021-09-13T14:16:59.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function # Python 3 behaviour in Py2
import numpy as np
from finitediff import derivatives_at_point_by_finite_diff, interpolate_by_finite_diff
def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0):
"""
Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot
"""
x0, xend = 0, 5
# shaky linspace -5% to +5% noise
x_data = (
np.linspace(x0, xend, n_data)
+ np.random.rand(n_data) * (xend - x0) / n_data / 1.5
)
y_data = np.sin(x_data) * (1.0 + 0.1 * (np.random.rand(n_data) - 0.5))
x_fit = np.linspace(x0, xend, n_fit)
# Edges behave badly, work around:
x_fit[0] = x_fit[0] + (x_fit[1] - x_fit[0]) / 2
x_fit[-1] = x_fit[-2] + (x_fit[-1] - x_fit[-2]) / 2
if alt:
y_fit = np.empty(n_fit)
dydx_fit = np.empty(n_fit)
for i, xf in enumerate(x_fit):
# get index j of first data point beyond xf
j = np.where(x_data > xf)[0][0]
lower_bound = max(0, j - alt)
upper_bound = min(n_data - 1, j + alt)
y_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 0
)
dydx_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 1
)[1]
else:
interp = interpolate_by_finite_diff(x_data, y_data, x_fit, 1, nhead, ntail)
y_fit = interp[:, 0]
dydx_fit = interp[:, 1]
if plot:
import matplotlib.pyplot as plt
plt.subplot(221)
plt.plot(x_data, y_data, "x", label="Data points (sin)")
plt.plot(x_fit, y_fit, "-", label="Fitted curve (order=0)")
plt.plot(x_data, np.sin(x_data), "-", label="Analytic sin(x)")
plt.legend()
plt.subplot(222)
plt.plot(x_fit, y_fit - np.sin(x_fit), label="Error in order=0")
plt.legend()
plt.subplot(223)
plt.plot(x_fit, dydx_fit, "-", label="Fitted derivative (order=1)")
plt.plot(x_data, np.cos(x_data), "-", label="Analytic cos(x)")
plt.legend()
plt.subplot(224)
plt.plot(x_fit, dydx_fit - np.cos(x_fit), label="Error in order=1")
plt.legend()
plt.show()
if __name__ == "__main__":
try:
from argh import dispatch_command
except ImportError:
dispatch_command(demo_usage)
| 30.648936
| 87
| 0.596321
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function # Python 3 behaviour in Py2
import numpy as np
from finitediff import derivatives_at_point_by_finite_diff, interpolate_by_finite_diff
def demo_usage(n_data=50, n_fit=537, nhead=5, ntail=5, plot=False, alt=0):
"""
Plots a noisy sine curve and the fitting to it.
Also presents the error and the error in the
approximation of its first derivative (cosine curve)
Usage example for benchmarking:
$ time python sine.py --nhead 3 --ntail 3 --n-fit 500000 --n-data 50000
Usage example for plotting:
$ python sine.py --nhead 1 --ntail 1 --plot
"""
x0, xend = 0, 5
# shaky linspace -5% to +5% noise
x_data = (
np.linspace(x0, xend, n_data)
+ np.random.rand(n_data) * (xend - x0) / n_data / 1.5
)
y_data = np.sin(x_data) * (1.0 + 0.1 * (np.random.rand(n_data) - 0.5))
x_fit = np.linspace(x0, xend, n_fit)
# Edges behave badly, work around:
x_fit[0] = x_fit[0] + (x_fit[1] - x_fit[0]) / 2
x_fit[-1] = x_fit[-2] + (x_fit[-1] - x_fit[-2]) / 2
if alt:
y_fit = np.empty(n_fit)
dydx_fit = np.empty(n_fit)
for i, xf in enumerate(x_fit):
# get index j of first data point beyond xf
j = np.where(x_data > xf)[0][0]
lower_bound = max(0, j - alt)
upper_bound = min(n_data - 1, j + alt)
y_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 0
)
dydx_fit[i] = derivatives_at_point_by_finite_diff(
x_data[lower_bound:upper_bound], y_data[lower_bound:upper_bound], xf, 1
)[1]
else:
interp = interpolate_by_finite_diff(x_data, y_data, x_fit, 1, nhead, ntail)
y_fit = interp[:, 0]
dydx_fit = interp[:, 1]
if plot:
import matplotlib.pyplot as plt
plt.subplot(221)
plt.plot(x_data, y_data, "x", label="Data points (sin)")
plt.plot(x_fit, y_fit, "-", label="Fitted curve (order=0)")
plt.plot(x_data, np.sin(x_data), "-", label="Analytic sin(x)")
plt.legend()
plt.subplot(222)
plt.plot(x_fit, y_fit - np.sin(x_fit), label="Error in order=0")
plt.legend()
plt.subplot(223)
plt.plot(x_fit, dydx_fit, "-", label="Fitted derivative (order=1)")
plt.plot(x_data, np.cos(x_data), "-", label="Analytic cos(x)")
plt.legend()
plt.subplot(224)
plt.plot(x_fit, dydx_fit - np.cos(x_fit), label="Error in order=1")
plt.legend()
plt.show()
if __name__ == "__main__":
try:
from argh import dispatch_command
except ImportError:
def dispatch_command(cb):
return cb()
dispatch_command(demo_usage)
| 0
| 0
| 0
| 0
| 0
| 28
| 0
| 0
| 31
|
9271f1a5455a7ecdd71cc83dbca5ba4c204b255a
| 1,173
|
py
|
Python
|
packages/mdspan/package.py
|
pdidev/spack
|
32151f29738895e1f7d96e496c084d6349a9277b
|
[
"Apache-2.0",
"MIT"
] | 2
|
2020-04-09T11:39:41.000Z
|
2021-12-10T17:45:42.000Z
|
packages/mdspan/package.py
|
pdidev/spack
|
32151f29738895e1f7d96e496c084d6349a9277b
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-08-12T10:03:26.000Z
|
2021-08-12T10:03:26.000Z
|
packages/mdspan/package.py
|
pdidev/spack
|
32151f29738895e1f7d96e496c084d6349a9277b
|
[
"Apache-2.0",
"MIT"
] | 3
|
2020-03-27T15:41:45.000Z
|
2022-02-01T15:03:11.000Z
|
# Copyright (C) 2020 Commissariat a l'energie atomique et aux energies alternatives (CEA)
# and others. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
| 35.545455
| 115
| 0.695652
|
# Copyright (C) 2020 Commissariat a l'energie atomique et aux energies alternatives (CEA)
# and others. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mdspan(CMakePackage):
"""Reference implementation of mdspan targeting C++23."""
homepage = "https://github.com/Kokkos/mdspan"
git = "https://github.com/Kokkos/mdspan.git"
url = "https://github.com/kokkos/mdspan/archive/refs/tags/mdspan-0.2.0.tar.gz"
maintainers = ['crtrott']
version('stable', branch='stable', preferred=True)
version('0.2.0', sha256='1ce8e2be0588aa6f2ba34c930b06b892182634d93034071c0157cb78fa294212', extension='tar.gz')
version('0.1.0', sha256='24c1e4be4870436c6c5e80d38870721b0b6252185b8288d00d8f3491dfba754b', extension='tar.gz')
depends_on("[email protected]:", type='build')
variant('cxx_standard', default='DETECT', description="Override the default CXX_STANDARD to compile with.",
values=('DETECT', '14', '17', '20'))
def cmake_args(self):
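        # Forward the selected C++ standard to CMake via MDSPAN_CXX_STANDARD.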
args = [
self.define_from_variant('MDSPAN_CXX_STANDARD', 'cxx_standard')
]
return args
| 0
| 0
| 0
| 929
| 0
| 0
| 0
| -2
| 46
|
cdb3f49fb732beb3ef7f5d4eef3c47dfc48b1951
| 307
|
py
|
Python
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/solids_pipelines/linear_pipeline.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-07-03T09:05:58.000Z
|
2021-07-03T09:05:58.000Z
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/solids_pipelines/linear_pipeline.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-06-21T18:30:02.000Z
|
2021-06-25T21:18:39.000Z
|
examples/docs_snippets_crag/docs_snippets_crag/concepts/solids_pipelines/linear_pipeline.py
|
dbatten5/dagster
|
d76e50295054ffe5a72f9b292ef57febae499528
|
[
"Apache-2.0"
] | 1
|
2021-09-26T07:29:17.000Z
|
2021-09-26T07:29:17.000Z
|
# pylint: disable=unused-argument
# start_marker
# end_marker
| 13.347826
| 43
| 0.710098
|
# pylint: disable=unused-argument
# start_marker
from dagster import pipeline, solid
@solid
def return_one(context) -> int:
return 1
@solid
def add_one(context, number: int) -> int:
return number + 1
@pipeline
def linear_pipeline():
add_one(add_one(add_one(return_one())))
# end_marker
| 0
| 134
| 0
| 0
| 0
| 0
| 0
| 14
| 91
|
ac9f99f6f60b9becd44d5f1c6fefe4639be389b0
| 474
|
py
|
Python
|
xastropy/relativity/__init__.py
|
bpholden/xastropy
|
66aff0995a84c6829da65996d2379ba4c946dabe
|
[
"BSD-3-Clause"
] | 3
|
2015-08-23T00:32:58.000Z
|
2020-12-31T02:37:52.000Z
|
xastropy/relativity/__init__.py
|
Kristall-WangShiwei/xastropy
|
723fe56cb48d5a5c4cdded839082ee12ef8c6732
|
[
"BSD-3-Clause"
] | 104
|
2015-07-17T18:31:54.000Z
|
2018-06-29T17:04:09.000Z
|
xastropy/relativity/__init__.py
|
Kristall-WangShiwei/xastropy
|
723fe56cb48d5a5c4cdded839082ee12ef8c6732
|
[
"BSD-3-Clause"
] | 16
|
2015-07-17T15:50:37.000Z
|
2019-04-21T03:42:47.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" astropy.cosmology contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<http://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
| 36.461538
| 69
| 0.767932
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" astropy.cosmology contains classes and functions for cosmological
distance measures and other cosmology-related calculations.
See the `Astropy documentation
<http://docs.astropy.org/en/latest/cosmology/index.html>`_ for more
detailed usage examples and references.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .velocities import *
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 23
|
4bfa262067e0d0cd970b7cd29211db1db46e96fe
| 651
|
py
|
Python
|
app/migrations/0003_auto_20181022_1601.py
|
Evohmike/Nyumba-Kumi-App
|
80ba9ded12bda6e41c9395a4e439e80f8840d295
|
[
"MIT"
] | null | null | null |
app/migrations/0003_auto_20181022_1601.py
|
Evohmike/Nyumba-Kumi-App
|
80ba9ded12bda6e41c9395a4e439e80f8840d295
|
[
"MIT"
] | null | null | null |
app/migrations/0003_auto_20181022_1601.py
|
Evohmike/Nyumba-Kumi-App
|
80ba9ded12bda6e41c9395a4e439e80f8840d295
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-22 13:01
from __future__ import unicode_literals
| 25.038462
| 71
| 0.605223
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-22 13:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_neighbourhood_hood_photo'),
]
operations = [
migrations.AddField(
model_name='neighbourhood',
name='health',
field=models.CharField(default='071000000', max_length=15),
),
migrations.AddField(
model_name='neighbourhood',
name='police',
field=models.CharField(default='9999', max_length=15),
),
]
| 0
| 0
| 0
| 474
| 0
| 0
| 0
| 19
| 46
|
30d1cfa49c2d708d5f169d7bff5b66ab9dc3fbca
| 2,138
|
py
|
Python
|
test_laylib/test_default_engine.py
|
Layto888/laylib-1.0.1
|
c7317c29659a476adf6e90eb729b09ce4c49e219
|
[
"MIT"
] | 1
|
2018-08-04T14:44:42.000Z
|
2018-08-04T14:44:42.000Z
|
test_laylib/test_default_engine.py
|
Layto888/laylib-1.0
|
c7317c29659a476adf6e90eb729b09ce4c49e219
|
[
"MIT"
] | null | null | null |
test_laylib/test_default_engine.py
|
Layto888/laylib-1.0
|
c7317c29659a476adf6e90eb729b09ce4c49e219
|
[
"MIT"
] | null | null | null |
# test module default_engine.py
import logging
import os
import inspect
import sys
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
"""
@pytest.fixture
def surface_env(scope="function"):
pg.init()
if not pg.display.get_init():
logging.info('unable to init display pygame')
set_env = pg.display.set_mode((200, 200))
yield set_env
# pg.quit()
"""
| 26.395062
| 87
| 0.755379
|
# test module default_engine.py
import pytest
import logging
import os
import inspect
import sys
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from laylib import default_engine
logging.basicConfig(level=logging.INFO,
format='%(levelname)s: %(message)s')
"""
@pytest.fixture
def surface_env(scope="function"):
pg.init()
if not pg.display.get_init():
logging.info('unable to init display pygame')
set_env = pg.display.set_mode((200, 200))
yield set_env
# pg.quit()
"""
class _ObjClass(default_engine.DefaultEngine):
pass
@pytest.fixture
def class_default_engine():
new_class = _ObjClass()
return new_class
@pytest.mark.skip(reason="unskip this test if you're not using travis CI.")
def test_surface_env(surface_env):
# the screen should not be none.
assert surface_env is not None
assert surface_env.get_size() == (200, 200)
def test_default_engine_attr(class_default_engine):
assert isinstance(class_default_engine, default_engine.DefaultEngine)
assert class_default_engine.running is True
assert class_default_engine.playing is False
assert class_default_engine._time_unit == 1000.0
def test_time_setget(class_default_engine):
class_default_engine.time_unit = 20.0
assert class_default_engine.time_unit == 20.0
class_default_engine.time_unit = -50.0
assert class_default_engine.time_unit == 1000.0
@pytest.mark.skip(reason="We can't exit the main_loop this way")
def test_delta_time_main_loop(class_default_engine):
pass
@pytest.mark.skip(reason="will not be tested. User interaction")
def test_event_listener():
pass
@pytest.mark.skip(reason="will be tested with resources module.")
def test_load_game():
pass
def test_destroy_game(class_default_engine):
class_default_engine._destroy_game()
assert class_default_engine.all_sprites is not None
assert class_default_engine.img is None
assert class_default_engine.snd is None
assert class_default_engine.fnt is None
| 0
| 539
| 0
| 34
| 0
| 715
| 0
| 4
| 252
|
e0296db2c64142c0262d853517a11e247c329f34
| 3,886
|
py
|
Python
|
lingvo/core/base_decoder.py
|
pizzahan/lingvo
|
9b85b7ba5d037701302efa807841c05223bc7d1d
|
[
"Apache-2.0"
] | 4
|
2019-06-08T00:19:06.000Z
|
2020-08-03T16:28:53.000Z
|
lingvo/core/base_decoder.py
|
pizzahan/lingvo
|
9b85b7ba5d037701302efa807841c05223bc7d1d
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/base_decoder.py
|
pizzahan/lingvo
|
9b85b7ba5d037701302efa807841c05223bc7d1d
|
[
"Apache-2.0"
] | 5
|
2018-12-11T08:05:16.000Z
|
2020-05-30T03:40:13.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common decoder interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 37.728155
| 80
| 0.717962
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common decoder interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo.core import base_layer
from lingvo.core import beam_search_helper
from lingvo.core import target_sequence_sampler
class BaseDecoder(base_layer.BaseLayer):
"""Base class for all decoders."""
@classmethod
def Params(cls):
p = super(BaseDecoder, cls).Params()
p.Define(
'packed_input', False, 'If True, decoder and all layers support '
'multiple examples in a single sequence.')
return p
def FProp(self, theta, encoder_outputs, targets):
"""Decodes `targets` given encoded source.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
encoder_outputs: a NestedMap computed by encoder.
targets: A dict of string to tensors representing the targets one try to
predict.
Returns:
A map from metric name (a python string) to a tuple (value, weight).
Both value and weight are scalar Tensors.
"""
predictions = self.ComputePredictions(theta, encoder_outputs, targets)
return self.ComputeLoss(theta, predictions, targets)[0]
def ComputePredictions(self, theta, encoder_outputs, targets):
raise NotImplementedError('Abstract method: %s' % type(self))
def ComputeLoss(self, theta, predictions, targets):
raise NotImplementedError('Abstract method: %s' % type(self))
class BaseBeamSearchDecoder(BaseDecoder):
"""Decoder that does beam search."""
@classmethod
def Params(cls):
p = super(BaseBeamSearchDecoder, cls).Params()
p.Define('target_sos_id', 1, 'Id of the target sequence sos symbol.')
p.Define('target_eos_id', 2, 'Id of the target sequence eos symbol.')
# TODO(rpang): remove target_seq_len and use beam_search.target_seq_len
# instead.
p.Define('target_seq_len', 0, 'Target seq length.')
p.Define('beam_search', beam_search_helper.BeamSearchHelper.Params(),
'BeamSearchHelper params.')
p.Define('target_sequence_sampler',
target_sequence_sampler.TargetSequenceSampler.Params(),
'TargetSequenceSampler params.')
return p
@base_layer.initializer
def __init__(self, params):
super(BaseBeamSearchDecoder, self).__init__(params)
p = self.params
p.beam_search.target_seq_len = p.target_seq_len
p.beam_search.target_sos_id = p.target_sos_id
p.beam_search.target_eos_id = p.target_eos_id
self.CreateChild('beam_search', p.beam_search)
p.target_sequence_sampler.target_seq_len = p.target_seq_len
p.target_sequence_sampler.target_sos_id = p.target_sos_id
p.target_sequence_sampler.target_eos_id = p.target_eos_id
self.CreateChild('target_sequence_sampler', p.target_sequence_sampler)
def BeamSearchDecode(self, encoder_outputs):
# pylint: disable=line-too-long
"""Performs beam search based decoding.
Args:
encoder_outputs: the outputs of the encoder.
returns:
`.BeamSearchDecodeOutput`, A namedtuple whose elements are tensors.
"""
# pylint: enable=line-too-long
raise NotImplementedError('Abstract method')
| 0
| 1,416
| 0
| 1,464
| 0
| 0
| 0
| 60
| 113
|
85d102b6cba4ef055e73d753952668f328b5a301
| 1,225
|
py
|
Python
|
tests/runtime/asset/test_persistent.py
|
formlio/forml
|
fd070da74a0107e37c0c643dd8df8680618fef74
|
[
"Apache-2.0"
] | 78
|
2020-11-04T18:27:20.000Z
|
2022-02-07T03:32:53.000Z
|
tests/runtime/asset/test_persistent.py
|
formlio/forml
|
fd070da74a0107e37c0c643dd8df8680618fef74
|
[
"Apache-2.0"
] | 3
|
2020-11-05T20:42:15.000Z
|
2021-01-13T19:57:01.000Z
|
tests/runtime/asset/test_persistent.py
|
formlio/forml
|
fd070da74a0107e37c0c643dd8df8680618fef74
|
[
"Apache-2.0"
] | 7
|
2020-11-18T17:18:15.000Z
|
2021-03-24T05:14:29.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
ForML persistent unit tests.
"""
# pylint: disable=no-self-use
| 38.28125
| 120
| 0.75102
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
ForML persistent unit tests.
"""
# pylint: disable=no-self-use
from forml.runtime import asset
class TestRegistry:
"""Registry unit tests."""
def test_get(self, registry: asset.Registry, project_name: asset.Project.Key, populated_lineage: asset.Lineage.Key):
"""Test lineage get."""
lineage = asset.Directory(registry).get(project_name).get(populated_lineage)
assert lineage.key == populated_lineage
| 0
| 0
| 0
| 316
| 0
| 0
| 0
| 10
| 45
|
6929622484867a36adedfe910766d009df4df761
| 491
|
py
|
Python
|
teste_requests.py
|
stevillis/gu-escola
|
2b26ec53e63fb70447c7a0eb13ab9c6e473122e0
|
[
"MIT"
] | null | null | null |
teste_requests.py
|
stevillis/gu-escola
|
2b26ec53e63fb70447c7a0eb13ab9c6e473122e0
|
[
"MIT"
] | null | null | null |
teste_requests.py
|
stevillis/gu-escola
|
2b26ec53e63fb70447c7a0eb13ab9c6e473122e0
|
[
"MIT"
] | null | null | null |
import requests
BASE_URL = 'http://localhost:8000/api/v2/'
# GET Avaliacoes
"""
response = requests.get(f'{BASE_URL}avaliacoes')
print(response)
print(response.status_code)
avaliacoes = response.json()
print(avaliacoes)
print(avaliacoes.get('count'))
print(avaliacoes.get('results'))
"""
# GET Cursos
headers = {
'Authorization': 'Token 6e6ab3885e67fcc06fabc926a277b07c3bd86be8'
}
response = requests.get(f'{BASE_URL}cursos', headers=headers)
print(response.json().get('results'))
| 19.64
| 69
| 0.745418
|
import requests
BASE_URL = 'http://localhost:8000/api/v2/'
# GET Avaliacoes
"""
response = requests.get(f'{BASE_URL}avaliacoes')
print(response)
print(response.status_code)
avaliacoes = response.json()
print(avaliacoes)
print(avaliacoes.get('count'))
print(avaliacoes.get('results'))
"""
# GET Cursos
headers = {
'Authorization': 'Token 6e6ab3885e67fcc06fabc926a277b07c3bd86be8'
}
response = requests.get(f'{BASE_URL}cursos', headers=headers)
print(response.json().get('results'))
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d5b5b6fef388dc9909b4b8f5f7507dcc08300c41
| 4,852
|
py
|
Python
|
submissions/aartiste/myKMeans.py
|
dillonpoff/aima-python
|
2eadb43f6ede9c7a2e211ea38dff3fa5fd5c91df
|
[
"MIT"
] | 1
|
2018-08-24T14:04:18.000Z
|
2018-08-24T14:04:18.000Z
|
submissions/aartiste/myKMeans.py
|
dillonpoff/aima-python
|
2eadb43f6ede9c7a2e211ea38dff3fa5fd5c91df
|
[
"MIT"
] | null | null | null |
submissions/aartiste/myKMeans.py
|
dillonpoff/aima-python
|
2eadb43f6ede9c7a2e211ea38dff3fa5fd5c91df
|
[
"MIT"
] | null | null | null |
from sklearn.cluster import KMeans
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The Naive Bayesian network is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Make a customn classifier,
'''
km = KMeans(
n_clusters=2,
# max_iter=300,
# n_init=10,
# init='k-means++',
# algorithm='auto',
# precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=0,
# copy_x=True,
)
Examples = {
'Trump': {
'frame': trumpScaled,
},
'TrumpCustom': {
'frame': trumpScaled,
'kmeans': km
},
}
| 27.568182
| 99
| 0.620569
|
from sklearn.cluster import KMeans
import traceback
from submissions.aartiste import election
from submissions.aartiste import county_demographics
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
trumpECHP = DataFrame()
'''
Extract data from the CORGIS elections, and merge it with the
CORGIS demographics. Both data sets are organized by county and state.
'''
joint = {}
elections = election.get_results()
for county in elections:
try:
st = county['Location']['State Abbreviation']
countyST = county['Location']['County'] + st
trump = county['Vote Data']['Donald Trump']['Percent of Votes']
joint[countyST] = {}
joint[countyST]['ST']= st
joint[countyST]['Trump'] = trump
except:
traceback.print_exc()
demographics = county_demographics.get_all_counties()
for county in demographics:
try:
countyNames = county['County'].split()
cName = ' '.join(countyNames[:-1])
st = county['State']
countyST = cName + st
# elderly =
# college =
# home =
# poverty =
if countyST in joint:
joint[countyST]['Elderly'] = county['Age']["Percent 65 and Older"]
joint[countyST]['HighSchool'] = county['Education']["High School or Higher"]
joint[countyST]['College'] = county['Education']["Bachelor's Degree or Higher"]
joint[countyST]['White'] = county['Ethnicities']["White Alone, not Hispanic or Latino"]
joint[countyST]['Persons'] = county['Housing']["Persons per Household"]
joint[countyST]['Home'] = county['Housing']["Homeownership Rate"]
joint[countyST]['Income'] = county['Income']["Median Houseold Income"]
joint[countyST]['Poverty'] = county['Income']["Persons Below Poverty Level"]
joint[countyST]['Sales'] = county['Sales']["Retail Sales per Capita"]
except:
traceback.print_exc()
'''
Remove the counties that did not appear in both samples.
'''
intersection = {}
for countyST in joint:
if 'College' in joint[countyST]:
intersection[countyST] = joint[countyST]
trumpECHP.data = []
'''
Build the input frame, row by row.
'''
for countyST in intersection:
# choose the input values
row = []
for key in intersection[countyST]:
if key in ['ST', 'Trump']:
continue
row.append(intersection[countyST][key])
trumpECHP.data.append(row)
firstCounty = next(iter(intersection.keys()))
firstRow = intersection[firstCounty]
trumpECHP.feature_names = list(firstRow.keys())
trumpECHP.feature_names.remove('ST')
trumpECHP.feature_names.remove('Trump')
'''
Build the target list,
one entry for each row in the input frame.
The Naive Bayesian network is a classifier,
i.e. it sorts data points into bins.
The best it can do to estimate a continuous variable
is to break the domain into segments, and predict
the segment into which the variable's value will fall.
In this example, I'm breaking Trump's % into two
arbitrary segments.
'''
trumpECHP.target = []
def trumpTarget(percentage):
if percentage > 45:
return 1
return 0
for countyST in intersection:
# choose the target
tt = trumpTarget(intersection[countyST]['Trump'])
trumpECHP.target.append(tt)
trumpECHP.target_names = [
'Trump <= 45%',
'Trump > 45%',
]
'''
Try scaling the data.
'''
trumpScaled = DataFrame()
def setupScales(grid):
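    # Record each column's minimum and maximum (used by scaleGrid for min-max scaling).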
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
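    # Min-max scale every cell to [0, 1] using the column bounds found by setupScales.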
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(trumpECHP.data)
trumpScaled.data = scaleGrid(trumpECHP.data)
trumpScaled.feature_names = trumpECHP.feature_names
trumpScaled.target = trumpECHP.target
trumpScaled.target_names = trumpECHP.target_names
'''
Make a customn classifier,
'''
km = KMeans(
n_clusters=2,
# max_iter=300,
# n_init=10,
# init='k-means++',
# algorithm='auto',
# precompute_distances='auto',
# tol=1e-4,
# n_jobs=-1,
# random_state=numpy.RandomState,
# verbose=0,
# copy_x=True,
)
Examples = {
'Trump': {
'frame': trumpScaled,
},
'TrumpCustom': {
'frame': trumpScaled,
'kmeans': km
},
}
| 0
| 0
| 0
| 70
| 0
| 759
| 0
| 0
| 92
|
f45d5ecb43560f81497d317a23712bf1eaf8d15f
| 603
|
py
|
Python
|
initialize_points_w.py
|
NCBI-Hackathons/McDiff
|
43037967e65e8dbdda18c891175c93537b98a238
|
[
"MIT"
] | 3
|
2018-06-21T15:16:25.000Z
|
2018-06-21T22:42:17.000Z
|
initialize_points_w.py
|
NCBI-Hackathons/McDiff
|
43037967e65e8dbdda18c891175c93537b98a238
|
[
"MIT"
] | null | null | null |
initialize_points_w.py
|
NCBI-Hackathons/McDiff
|
43037967e65e8dbdda18c891175c93537b98a238
|
[
"MIT"
] | 1
|
2018-06-25T16:17:04.000Z
|
2018-06-25T16:17:04.000Z
|
# import random
# import numpy as np
# numParticles = 120
# point_list = [[0,0],[0,1],[1,1],[1,0]]
# poly = geometry.Polygon(point_list)
# print generate_random_points(numParticles, poly)
| 25.125
| 71
| 0.706468
|
from shapely import geometry
# import random
import numpy as np  # numpy is required below (np.zeros, np.random.uniform)
# numParticles = 120
# point_list = [[0,0],[0,1],[1,1],[1,0]]
# poly = geometry.Polygon(point_list)
def generate_random_points(N, poly):
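    # Rejection sampling: draw uniform points from the polygon's bounding box and keep the first N that fall inside it.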
list_of_points = np.zeros((2, N))
minx,miny,maxx,maxy = poly.bounds
counter = 0
while counter < N:
punto = (np.random.uniform(minx, maxx), np.random.uniform(miny,maxy))
p = geometry.Point(punto)
if poly.contains(p):
list_of_points[0,counter] = punto[0]
list_of_points[1,counter] = punto[1]
counter += 1
return list_of_points
# print generate_random_points(numParticles, poly)
| 0
| 0
| 0
| 0
| 0
| 360
| 0
| 7
| 45
|
c67cc3624a702cafd7e7246abe8b88132e111d61
| 53
|
py
|
Python
|
modules/__init__.py
|
richardHaw/nagare
|
4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
richardHaw/nagare
|
4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c
|
[
"MIT"
] | null | null | null |
modules/__init__.py
|
richardHaw/nagare
|
4909c4ba8833e7cf5152e39a7bc58a558aaa2c7c
|
[
"MIT"
] | null | null | null |
# this file is needed for python2, delete for python3
| 53
| 53
| 0.792453
|
# this file is needed for python2, delete for python3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
54cd06ce2ea0585ac5ee273e70cb010a30aa3f06
| 9,713
|
py
|
Python
|
python/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS_test.py
|
docebo/aws-config-rules
|
75f92bcad644bd71f19bbc15cf99e6d6de6b8227
|
[
"CC0-1.0"
] | 1,295
|
2016-03-01T23:06:33.000Z
|
2022-03-31T07:17:53.000Z
|
python/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS_test.py
|
tied/aws-config-rules
|
7c66e109c1225111d2ab8d1811d6e80dea0affcb
|
[
"CC0-1.0"
] | 287
|
2016-03-01T19:51:43.000Z
|
2022-01-06T04:59:55.000Z
|
python/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS/SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS_test.py
|
tied/aws-config-rules
|
7c66e109c1225111d2ab8d1811d6e80dea0affcb
|
[
"CC0-1.0"
] | 744
|
2016-03-01T18:33:00.000Z
|
2022-03-31T18:46:44.000Z
|
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import sys
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
SAGEMAKER_CLIENT_MOCK = MagicMock()
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS')
####################
# Helper Functions #
####################
| 50.853403
| 182
| 0.724699
|
# Copyright 2017-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import sys
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::::Account'
#############
# Main Code #
#############
CONFIG_CLIENT_MOCK = MagicMock()
STS_CLIENT_MOCK = MagicMock()
SAGEMAKER_CLIENT_MOCK = MagicMock()
class Boto3Mock():
@staticmethod
def client(client_name, *args, **kwargs):
if client_name == 'config':
return CONFIG_CLIENT_MOCK
if client_name == 'sts':
return STS_CLIENT_MOCK
if client_name == 'sagemaker':
return SAGEMAKER_CLIENT_MOCK
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
RULE = __import__('SAGEMAKER_NOTEBOOK_NO_DIRECT_INTERNET_ACCESS')
class ComplianceTest(unittest.TestCase):
notebook_instances_list = [{'NotebookInstances': [{'NotebookInstanceName': 'trial12'}, {'NotebookInstanceName': 'trial123'}]}]
notebooks_direct_internet = [{'NotebookInstanceName': 'trial12', 'DirectInternetAccess': 'Enabled'}, {'NotebookInstanceName': 'trial123', 'DirectInternetAccess': 'Enabled'}]
notebooks_no_direct_internet = [{'NotebookInstanceName': 'trial12', 'DirectInternetAccess': 'Disabled'}, {'NotebookInstanceName': 'trial123', 'DirectInternetAccess': 'Disabled'}]
notebooks_both = [{'NotebookInstanceName': 'trial12', 'DirectInternetAccess': 'Disabled'}, {'NotebookInstanceName': 'trial123', 'DirectInternetAccess': 'Enabled'}]
#SCENARIO 1: No Amazon SageMaker notebook instances exist
def test_scenario_1_no_notebooks(self):
notebook_instances_list = [{'NotebookInstances': []}]
RULE.ASSUME_ROLE_MODE = False
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": notebook_instances_list})
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('NOT_APPLICABLE', '123456789012', 'AWS::::Account')]
assert_successful_evaluation(self, response, resp_expected)
#SCENARIO 2: DirectInternetAccess is set to Enabled for the Amazon SageMaker notebook instances
def test_scenario_2_direct_internet_access(self):
RULE.ASSUME_ROLE_MODE = False
annotation = "This Amazon SageMaker Notebook Instance has direct internet access."
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": self.notebook_instances_list})
SAGEMAKER_CLIENT_MOCK.describe_notebook_instance = MagicMock(side_effect=self.notebooks_direct_internet)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('NON_COMPLIANT', compliance_resource_id='trial12', annotation=annotation),
build_expected_response('NON_COMPLIANT', compliance_resource_id='trial123', annotation=annotation)]
assert_successful_evaluation(self, response, resp_expected, evaluations_count=2)
#SCENARIO 3: DirectInternetAccess is set to Disabled for the Amazon SageMaker notebook instances
def test_scenario_3_no_direct_internet_access(self):
RULE.ASSUME_ROLE_MODE = False
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": self.notebook_instances_list})
SAGEMAKER_CLIENT_MOCK.describe_notebook_instance = MagicMock(side_effect=self.notebooks_no_direct_internet)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('COMPLIANT', compliance_resource_id='trial12'),
build_expected_response('COMPLIANT', compliance_resource_id='trial123')]
assert_successful_evaluation(self, response, resp_expected, evaluations_count=2)
#Test for a mix of compliance types
def test_scenario_2_and_3(self):
RULE.ASSUME_ROLE_MODE = False
annotation = "This Amazon SageMaker Notebook Instance has direct internet access."
SAGEMAKER_CLIENT_MOCK.configure_mock(**{
"get_paginator.return_value": SAGEMAKER_CLIENT_MOCK,
"paginate.return_value": self.notebook_instances_list})
SAGEMAKER_CLIENT_MOCK.describe_notebook_instance = MagicMock(side_effect=self.notebooks_both)
response = RULE.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = [build_expected_response('COMPLIANT', compliance_resource_id='trial12'),
build_expected_response('NON_COMPLIANT', compliance_resource_id='trial123', annotation=annotation)]
assert_successful_evaluation(self, response, resp_expected, evaluations_count=2)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(test_class, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
test_class.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
test_class.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
test_class.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
test_class.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
test_class.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
test_class.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
test_class.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
test_class.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
test_class.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
test_class.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
test_class.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(test_class, response, customer_error_code=None, customer_error_message=None):
if customer_error_code:
test_class.assertEqual(customer_error_code, response['customerErrorCode'])
if customer_error_message:
test_class.assertEqual(customer_error_message, response['customerErrorMessage'])
test_class.assertTrue(response['customerErrorCode'])
test_class.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
test_class.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
test_class.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
STS_CLIENT_MOCK.reset_mock(return_value=True)
STS_CLIENT_MOCK.assume_role = MagicMock(return_value=assume_role_response)
| 0
| 326
| 0
| 4,029
| 0
| 4,000
| 0
| -6
| 232
|
0e295a939cb3bb447622e932af4f06083d13ea4b
| 75
|
py
|
Python
|
starling_sim/basemodel/topology/__init__.py
|
tellae/starling
|
56121c728eb5de3dfc77cdf08da89548f3315c87
|
[
"CECILL-B"
] | 19
|
2021-02-16T12:32:22.000Z
|
2022-01-06T11:16:44.000Z
|
starling_sim/basemodel/topology/__init__.py
|
tellae/starling
|
56121c728eb5de3dfc77cdf08da89548f3315c87
|
[
"CECILL-B"
] | 20
|
2021-01-13T20:58:07.000Z
|
2022-03-21T15:53:07.000Z
|
starling_sim/basemodel/topology/__init__.py
|
tellae/starling
|
56121c728eb5de3dfc77cdf08da89548f3315c87
|
[
"CECILL-B"
] | null | null | null |
"""
This package contains the modules related to simulation topologies
"""
| 18.75
| 66
| 0.773333
|
"""
This package contains the modules related to simulation topologies
"""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7a9c4005ae9ed6fcb141368f64486d286ecf01ed
| 3,288
|
py
|
Python
|
networking_onos/extensions/callback.py
|
sanghoshin/networking-onos
|
2baec5f74e2721e5f8dffd57b3ef7a27034fa54a
|
[
"Apache-2.0"
] | null | null | null |
networking_onos/extensions/callback.py
|
sanghoshin/networking-onos
|
2baec5f74e2721e5f8dffd57b3ef7a27034fa54a
|
[
"Apache-2.0"
] | null | null | null |
networking_onos/extensions/callback.py
|
sanghoshin/networking-onos
|
2baec5f74e2721e5f8dffd57b3ef7a27034fa54a
|
[
"Apache-2.0"
] | 1
|
2017-10-19T04:23:14.000Z
|
2017-10-19T04:23:14.000Z
|
# Copyright (c) 2017 SK Telecom Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import resources
from networking_onos.extensions import constant as onos_const
_OPERATION_MAPPING = {
events.PRECOMMIT_CREATE: onos_const.ONOS_CREATE,
events.PRECOMMIT_UPDATE: onos_const.ONOS_UPDATE,
events.PRECOMMIT_DELETE: onos_const.ONOS_DELETE,
events.AFTER_CREATE: onos_const.ONOS_CREATE,
events.AFTER_UPDATE: onos_const.ONOS_UPDATE,
events.AFTER_DELETE: onos_const.ONOS_DELETE,
}
_RESOURCE_MAPPING = {
resources.SECURITY_GROUP: onos_const.ONOS_SG,
resources.SECURITY_GROUP_RULE: onos_const.ONOS_SG_RULE,
}
| 39.614458
| 79
| 0.680961
|
# Copyright (c) 2017 SK Telecom Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from networking_onos.extensions import constant as onos_const
_OPERATION_MAPPING = {
events.PRECOMMIT_CREATE: onos_const.ONOS_CREATE,
events.PRECOMMIT_UPDATE: onos_const.ONOS_UPDATE,
events.PRECOMMIT_DELETE: onos_const.ONOS_DELETE,
events.AFTER_CREATE: onos_const.ONOS_CREATE,
events.AFTER_UPDATE: onos_const.ONOS_UPDATE,
events.AFTER_DELETE: onos_const.ONOS_DELETE,
}
_RESOURCE_MAPPING = {
resources.SECURITY_GROUP: onos_const.ONOS_SG,
resources.SECURITY_GROUP_RULE: onos_const.ONOS_SG_RULE,
}
class OnosSecurityGroupHandler(object):
def __init__(self, precommit, postcommit):
assert postcommit is not None
self._precommit = precommit
self._postcommit = postcommit
self._subscribe()
def _subscribe(self):
if self._precommit is not None:
for event in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE):
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP, event)
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP_RULE, event)
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP,
events.PRECOMMIT_UPDATE)
for event in (events.AFTER_CREATE, events.AFTER_DELETE):
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP, event)
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP_RULE, event)
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP, events.AFTER_UPDATE)
def _sg_callback(self, callback, resource, event, trigger, **kwargs):
context = kwargs['context']
res = kwargs.get(resource)
res_id = kwargs.get("%s_id" % resource)
if res_id is None:
res_id = res.get('id')
ops = _OPERATION_MAPPING[event]
res_type = _RESOURCE_MAPPING[resource]
res_dict = res
callback(context, ops, res_type, res_id, res_dict)
def sg_callback_precommit(self, resource, event, trigger, **kwargs):
self._sg_callback(self._precommit, resource, event, trigger, **kwargs)
def sg_callback_postcommit(self, resource, event, trigger, **kwargs):
self._sg_callback(self._postcommit, resource, event, trigger, **kwargs)
| 0
| 0
| 0
| 1,972
| 0
| 0
| 0
| 21
| 45
|
2e38813849e7b8d4b409de57f658a7d182ad66aa
| 3,682
|
py
|
Python
|
play.py
|
ksu-is/guesswordgame
|
65478e24c1fc834e43ab9dd3d00c8429fbe96f22
|
[
"Apache-2.0"
] | 7
|
2015-10-03T04:10:57.000Z
|
2021-04-02T14:43:21.000Z
|
play.py
|
ksu-is/guesswordgame
|
65478e24c1fc834e43ab9dd3d00c8429fbe96f22
|
[
"Apache-2.0"
] | 1
|
2016-04-20T17:11:22.000Z
|
2016-04-26T18:08:23.000Z
|
play.py
|
ksu-is/guesswordgame
|
65478e24c1fc834e43ab9dd3d00c8429fbe96f22
|
[
"Apache-2.0"
] | 5
|
2016-02-26T09:42:48.000Z
|
2021-05-09T17:32:04.000Z
|
import game.main as game
import time
import sys
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print "\n Recieved Interrupt Signal. Bye...."
import sys
sys.exit()
| 32.017391
| 110
| 0.560565
|
import game.main as game
import time
import sys
def main():
play = "--++playtheguesswordgame++--"
if len(sys.argv) > 1 and sys.argv[1] == "tut":
print """
Enter your guess that must be containing 4 letters:
"""
time.sleep(3)
print """
# now the player types the word 'buff'
"""
time.sleep(5)
print """
Enter your guess that must be containing 4 letters: buff
_ _ _ _ **
"""
time.sleep(6)
print """
# the above is the clues for the player from his word buff
# that is, the computer is saying that there are two characters
# in the word 'buff' that exactly exists (and buff wasn't that
# word) in the word the computer has in it's mind.
# Now the player tries to find which are those two characters
# were exactly in its place and which two aren't part of the word
# that computer have in its mind.
loading .......
"""
time.sleep(20)
print """
# Now again the user tries the word 'lube'
Enter your guess that must be containing 4 letters: lube
_ _ _ _ *!!
"""
time.sleep(6)
print """
# from the above clue the player gets to know that the character 'u'
# lies exactly at the second position on the word that he has to guess
# and 'b' should be at the first position, from the previous clue (no 'f' here).
    # The player has now only one ! to figure out, i.e. either 'l' or 'b' exists in the
# word but misplaced. now he is going to figure it out by trying the word 'bulk'.
"""
time.sleep(10)
print """
Enter your guess that must be containing 4 letters: bulk
_ _ _ _ ***
"""
print """
# Here, the player knows, one '*' for 'b', one '*' for 'u' and the last star for 'l' (from
# previous clue). Now, he knows first three chars and he thinks the word might be 'bulb'
"""
print """
Enter your guess that must be containing 4 letters: bulb
Congrats! you've got the right word. To continue playing the game please enter 1 and to quit enter 2:
1. play
2. quit
# so, that's it we guess!
"""
play = raw_input("Do you want to play the game now! (y/n) :")
while play != 'y' and play != 'Y' and play != 'n' and play != 'N':
print "please type either 'y' or 'n' without single quote"
play = raw_input("Do you want to play the game now! (y/n) :")
if play == "--++playtheguesswordgame++--" or play == 'y' or play == 'Y':
print """
Welcome to Guess Word game
Game: Computer will think a word and you should guess it. It would be easy to win
the game if you apply the basic logic.
Play the game by typing your guess word.
    For each word you type, the game will output the number of characters that exactly
    match the word that the computer has in its mind (yes! the mind) as the number
    of stars, and the number of characters that exist in the word but not in the appropriate
    position as the number of exclamation symbols.
"""
guess_word = game.GuessWord()
guess_word.start_game()
else:
print "Good bye!"
if __name__ == "__main__":
try:
main()
except (KeyboardInterrupt, SystemExit):
print "\n Recieved Interrupt Signal. Bye...."
import sys
sys.exit()
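# --- Added illustration (not part of the original script) ---
# The tutorial text printed above describes the clue format: one '*' per letter
# that matches the secret word at the same position, and one '!' per letter that
# appears in the secret word but at a different position. The real logic lives in
# game/main.py, which is not shown here, so the helper below is only a hedged
# sketch of that description; clue() is a hypothetical name, not project code.
def clue(secret, guess):
    stars = sum(1 for s, g in zip(secret, guess) if s == g)
    misplaced = sum(1 for i, g in enumerate(guess)
                    if g != secret[i] and g in secret)
    return "_ _ _ _ " + "*" * stars + "!" * misplaced

# With the secret 'bulb' from the walkthrough above:
# clue("bulb", "buff") -> "_ _ _ _ **"
# clue("bulb", "lube") -> "_ _ _ _ *!!"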
| 0
| 0
| 0
| 0
| 0
| 3,423
| 0
| 0
| 23
|
b7325eaebdbd28f2ed8cbfb180708a24650dee3d
| 5,258
|
py
|
Python
|
mysite/users/models.py
|
2021fallCMPUT404/group-cmput404-project
|
985b76dc6c554caf77e7cf5788355cca22a26e74
|
[
"Apache-2.0"
] | 2
|
2021-12-06T06:42:41.000Z
|
2022-03-29T21:40:14.000Z
|
mysite/users/models.py
|
2021fallCMPUT404/group-cmput404-project
|
985b76dc6c554caf77e7cf5788355cca22a26e74
|
[
"Apache-2.0"
] | 7
|
2021-10-29T20:31:44.000Z
|
2021-12-05T06:55:58.000Z
|
mysite/users/models.py
|
2021fallCMPUT404/group-cmput404-project
|
985b76dc6c554caf77e7cf5788355cca22a26e74
|
[
"Apache-2.0"
] | null | null | null |
SITE_URL = "https://cmput404-socialdist-project.herokuapp.com"
'''
#TODO: MERGE USER_PROFILE INTO USER
class User(AbstractUser):
pass
'''
# Create your models here.
| 38.661765
| 105
| 0.621909
|
from django.db import models
from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from django.http import HttpResponse
import uuid
from django import forms
from django.forms.widgets import Textarea
import datetime
from posts.models import Post, Like, CommentLike#, InboxLike
from django.urls import reverse
SITE_URL = "https://cmput404-socialdist-project.herokuapp.com"
'''
#TODO: MERGE USER_PROFILE INTO USER
class User(AbstractUser):
pass
'''
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT / user_<id>/<filename>
#return 'user_{0}/{1}'.format(instance.user.id, filename)
return 'images/users/user_{0}/{1}'.format(instance.user.id, filename)
# Create your models here.
class Create_user(forms.Form):
username = forms.CharField(initial='')
password = forms.CharField(widget=forms.PasswordInput())
confirm_password = forms.CharField(widget=forms.PasswordInput())
class User_Profile(models.Model):
type = "author"
user = models.OneToOneField(User,
on_delete=models.CASCADE,
related_name='user_profile')
host = SITE_URL + '/'
url = SITE_URL
displayName = models.CharField(max_length=60, blank=True)
email = models.CharField(max_length=60, blank=True)
first_name = models.CharField(max_length=69, blank=True)
last_name = models.CharField(max_length=69, blank=True)
profileImage = models.ImageField(
upload_to='profile_picture',
blank=True,
default='profile_picture/default_picture.png')
github = models.CharField(blank=True, default="", max_length=100)
#user = models.ForeignKey(User, on_delete=models.CASCADE)
bio = models.CharField(max_length=256, unique=False)
#user_posts = models.ForeignKey(Post, on_delete=models.CASCADE, null=True)
def __str__(self):
return ', '.join((self.displayName, str(self.id), str(self.user.id)))
def get_absolute_url(self):
return SITE_URL + reverse('users:user_crud', args=[str(self.user.id)])
class Inbox(models.Model):
type = 'inbox'
author = models.ForeignKey(User, on_delete=models.CASCADE)
post = models.ManyToManyField(Post, null=True, blank=True)
follow = models.ManyToManyField("users.FriendRequest", null=True, blank=True)
like = models.ManyToManyField(Like, null=True, blank=True)
#comment_like = models.ManyToManyField(CommentLike, null=True, blank=True, on_delete=models.CASCADE)
#inbox_like = models.ManyToManyField(InboxLike, null=True, blank=True, on_delete=models.CASCADE)
class UserFollows(models.Model):
#following
actor = models.ForeignKey(User_Profile,
related_name="following",
on_delete=models.CASCADE,
default='')
#Got followed
object = models.ForeignKey(User_Profile,
related_name="followers",
on_delete=models.CASCADE,
default='')
#Creates new instance of Userfollow with the actor following the object
#Parameters are User_Profile objects
def create_user_follow(actor, object):
UserFollows.objects.get_or_create(actor=actor, object=object)
#The actor will stop following the object
def delete_user_follow(actor, object):
instance = UserFollows.objects.filter(actor=actor, object=object)
if instance.exists():
instance.delete()
return None
class FriendRequest(models.Model):
type = "Follow"
actor = models.ForeignKey(User_Profile,
on_delete=models.CASCADE,
related_name="actor",
default='')
object = models.ForeignKey(User_Profile,
on_delete=models.CASCADE,
related_name="object",
default='')
def create_friend_request(actor, object):
        '''Creates a friend request instance with the actor being the person who follows
        and the object being the person who is being followed. The actor and object parameters
        are user_profile objects.'''
print(actor, object)
if UserFollows.objects.filter(actor=object, object=actor).exists(
): #Checks if the object is already following the actor
# Returns so it doesn't create constant friend requests
print("{} is already following {}".format(object.displayName,
actor.displayName))
return
f_request, created = FriendRequest.objects.get_or_create(actor=actor,
object=object)
print("Friend request created")
print(f_request.summary())
return f_request
def summary(self):
return '{} wants to follow {}'.format(self.actor.displayName,
self.object.displayName)
| 0
| 0
| 0
| 4,322
| 0
| 229
| 0
| 118
| 393
|
ff134e64e57b7ca7080b40af0e3f390aa9a3db33
| 1,305
|
py
|
Python
|
.env/lib/python2.7/site-packages/skimage/viewer/tests/test_utils.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 1
|
2019-01-12T13:17:32.000Z
|
2019-01-12T13:17:32.000Z
|
.env/lib/python2.7/site-packages/skimage/viewer/tests/test_utils.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
.env/lib/python2.7/site-packages/skimage/viewer/tests/test_utils.py
|
ViduraPrasangana/faster-rcnn-caffe
|
af6f5ee89c6e82d295bddd192d9dfcebd60d7c52
|
[
"MIT"
] | 2
|
2020-03-12T23:20:22.000Z
|
2021-02-15T21:54:02.000Z
|
# -*- coding: utf-8 -*-
| 31.071429
| 76
| 0.724904
|
# -*- coding: utf-8 -*-
from skimage.viewer import utils
from skimage.viewer.utils import dialogs
from skimage.viewer.qt import QtCore, QtWidgets, has_qt
from skimage._shared import testing
@testing.skipif(not has_qt, reason="Qt not installed")
def test_event_loop():
utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(10, QtWidgets.QApplication.quit)
utils.start_qtapp()
@testing.skipif(not has_qt, reason="Qt not installed")
def test_format_filename():
fname = dialogs._format_filename(('apple', 2))
assert fname == 'apple'
fname = dialogs._format_filename('')
assert fname is None
@testing.skipif(True, reason="Can't automatically close window. See #3081.")
@testing.skipif(not has_qt, reason="Qt not installed")
def test_open_file_dialog():
QApp = utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QApp.quit())
filename = dialogs.open_file_dialog()
assert filename is None
@testing.skipif(True, reason="Can't automatically close window. See #3081.")
@testing.skipif(not has_qt, reason="Qt not installed")
def test_save_file_dialog():
QApp = utils.init_qtapp()
timer = QtCore.QTimer()
timer.singleShot(100, lambda: QApp.quit())
filename = dialogs.save_file_dialog()
assert filename is None
| 0
| 1,019
| 0
| 0
| 0
| 0
| 0
| 78
| 180
|
e4eed4150a0020f361e02176075753236176288a
| 269
|
py
|
Python
|
RasaNLU/pending_actions.py
|
naikshubham/Rasa-Introduction
|
93b1c6428879e49ddd93d7a5ec5a4eb52fb9bab2
|
[
"BSD-2-Clause"
] | 1
|
2021-06-15T09:58:15.000Z
|
2021-06-15T09:58:15.000Z
|
RasaNLU/pending_actions.py
|
naikshubham/Rasa-Introduction
|
93b1c6428879e49ddd93d7a5ec5a4eb52fb9bab2
|
[
"BSD-2-Clause"
] | null | null | null |
RasaNLU/pending_actions.py
|
naikshubham/Rasa-Introduction
|
93b1c6428879e49ddd93d7a5ec5a4eb52fb9bab2
|
[
"BSD-2-Clause"
] | null | null | null |
# Pending actions
# we can improve the user experience of our bot by asking the user simple yes or no follow-up questions
# one easy way to handle these follow-ups is to define pending actions, which get executed as soon as the user says "yes"
# and are wiped if the user says "no"
| 44.833333
| 115
| 0.769517
|
# Pending actions
# we can improve the user experience of our bot by asking the user simple yes or no follow-up questions
# one easy way to handle these follow-ups is to define pending actions, which get executed as soon as the user says "yes"
# and are wiped if the user says "no" (a minimal sketch of this pattern follows below)
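# A minimal sketch of the pending-action pattern described above, assuming a
# plain in-memory tracker. This is not the Rasa SDK API; every name here
# (PendingActionTracker, schedule, handle_affirmation, handle_denial) is a
# hypothetical illustration of the idea, not library code.
class PendingActionTracker(object):
    def __init__(self):
        self.pending_action = None

    def schedule(self, action):
        # remember a follow-up callable to run if the user confirms
        self.pending_action = action

    def handle_affirmation(self):
        # user said "yes": execute the pending action, then clear it
        result = None
        if self.pending_action is not None:
            result = self.pending_action()
            self.pending_action = None
        return result

    def handle_denial(self):
        # user said "no": wipe the pending action without running it
        self.pending_action = None

# Usage: tracker.schedule(some_follow_up); a "yes" intent triggers
# handle_affirmation(), a "no" intent triggers handle_denial().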
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
28ea666798dad6da46886eee004f74017eb3e201
| 2,573
|
py
|
Python
|
get_ip_pool.py
|
vbertcen/ajk_sp_sale_rent_ratio
|
bd477441fde3ccbe396b68dba2418ec0b9aa558e
|
[
"Apache-2.0"
] | 1
|
2019-08-30T10:54:06.000Z
|
2019-08-30T10:54:06.000Z
|
get_ip_pool.py
|
vbertcen/ajk_sp_sale_rent_ratio
|
bd477441fde3ccbe396b68dba2418ec0b9aa558e
|
[
"Apache-2.0"
] | null | null | null |
get_ip_pool.py
|
vbertcen/ajk_sp_sale_rent_ratio
|
bd477441fde3ccbe396b68dba2418ec0b9aa558e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import sys
import datetime
reload(sys)
sys.setdefaultencoding('utf8')
now_str = datetime.datetime.now().strftime('%Y-%m-%d')
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116", }
if __name__ == '__main__':
init_ip_pool()
| 34.306667
| 156
| 0.539059
|
# coding=utf-8
import sys
import pymysql
import requests
import datetime
from lxml import etree
reload(sys)
sys.setdefaultencoding('utf8')
now_str = datetime.datetime.now().strftime('%Y-%m-%d')
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 OPR/57.0.3098.116", }
def init_ip_pool():
conn = pymysql.connect(host='localhost', user='root', password='Scholl7fcb', database='house_spider')
cursor = conn.cursor()
cursor.execute("truncate ip_pool")
conn.commit()
cursor.close()
index = 1
while True:
print "当前查询到第{}页".format(index)
url = 'http://www.66ip.cn/{}.html'.format(index)
html = requests.get(url=url, headers=headers)
selector = etree.HTML(html.content)
page_count = len(selector.xpath('//*[@id="main"]/div/div[1]/table/tr'))
if page_count == 0:
break
print page_count
ip = '//*[@id="main"]/div/div[1]/table/tr[{}]/td[1]'
port = '//*[@id="main"]/div/div[1]/table/tr[{}]/td[2]'
location = '//*[@id="main"]/div/div[1]/table/tr[{}]/td[3]'
for i in range(2, page_count):
ip_text = selector.xpath(ip.format(i))[0].text
port_text = selector.xpath(port.format(i))[0].text
location_text = selector.xpath(location.format(i))[0].text
cursor = conn.cursor()
if verify_available(ip_text, port_text):
cursor.execute(
"insert into ip_pool values(null,'{}','{}','{}',1,'{}')".format(ip_text, port_text, location_text,
now_str))
print "ip={},available={}".format(ip_text, "true")
else:
cursor.execute(
"insert into ip_pool values(null,'{}','{}','{}',0,'{}')".format(ip_text, port_text, location_text,
now_str))
print "ip={},available={}".format(ip_text, "false")
cursor.close()
conn.commit()
index += 1
conn.close()
def verify_available(ip, port):
pro = dict()
pro['http'] = "http://{}:{}".format(ip, port)
try:
html = requests.get(url='http://www.baidu.com', headers=headers, proxies=pro, timeout=2)
except Exception:
return False
else:
return html.content.count('百度') > 0
if __name__ == '__main__':
init_ip_pool()
| 27
| 0
| 0
| 0
| 0
| 2,103
| 0
| -12
| 113
|
dd8bdd0ca9cd34cb385f46afc75b4e9cf95ab521
| 476
|
py
|
Python
|
jython/jython/java_usage_examples.py
|
JohannesDienst/polyglot_integration
|
ee0936539282e82d4d0605ed564389c0539ede40
|
[
"MIT"
] | null | null | null |
jython/jython/java_usage_examples.py
|
JohannesDienst/polyglot_integration
|
ee0936539282e82d4d0605ed564389c0539ede40
|
[
"MIT"
] | null | null | null |
jython/jython/java_usage_examples.py
|
JohannesDienst/polyglot_integration
|
ee0936539282e82d4d0605ed564389c0539ede40
|
[
"MIT"
] | null | null | null |
from java.lang import System as javasystem
javasystem.out.println("Hello")
from java.util import Random
r = rand(100, 23)
for i in range(10):
print r.nextDouble()
| 23.8
| 56
| 0.653361
|
from java.lang import System as javasystem
javasystem.out.println("Hello")
from java.util import Random
class rand(Random):
def __init__(self, multiplier=1.0, seed=None):
self.multiplier = multiplier
if seed is None:
Random.__init__(self)
else:
Random.__init__(self, seed)
def nextDouble(self):
return Random.nextDouble(self) * self.multiplier
r = rand(100, 23)
for i in range(10):
print r.nextDouble()
| 0
| 0
| 0
| 284
| 0
| 0
| 0
| 0
| 23
|
da4b679b11109485dccab6378be56da7adfaca21
| 321
|
py
|
Python
|
5-loops/exercise_3.1.py
|
wgatharia/csci131
|
50d76603863c9a9932634fdf2e48594f8dc673d2
|
[
"MIT"
] | null | null | null |
5-loops/exercise_3.1.py
|
wgatharia/csci131
|
50d76603863c9a9932634fdf2e48594f8dc673d2
|
[
"MIT"
] | null | null | null |
5-loops/exercise_3.1.py
|
wgatharia/csci131
|
50d76603863c9a9932634fdf2e48594f8dc673d2
|
[
"MIT"
] | null | null | null |
"""
File: exercise_3.1.py
Author: William Gatharia
This code demonstrates using a for loop.
"""
#loop and print numbers from 1 to 10 using a for loop and range
# range creates a list of numbers
# starting from 1 to 10.
# Note that 11 = 10 + 1 is the upper limit for range
for i in range(1, 11):
print(i)
| 24.692308
| 63
| 0.682243
|
"""
File: exercise_3.1.py
Author: William Gatharia
This code demonstrates using a for loop.
"""
#loop and print numbers from 1 to 10 using a for loop and range
# range creates a list of numbers
# starting from 1 to 10.
# Note that 11 = 10 + 1 is the upper limit for range
for i in range(1, 11):
print(i)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
cd438ed3e070272f3b23e2778ba9493fd02837f8
| 1,919
|
py
|
Python
|
Data_preparation/test/video_subcrop.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T05:57:32.000Z
|
2020-11-07T05:57:32.000Z
|
Data_preparation/test/video_subcrop.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T00:30:22.000Z
|
2021-01-26T02:22:16.000Z
|
Data_preparation/test/video_subcrop.py
|
Rukaume/LRCN
|
0d1928cc72544f59a4335fea7febc561d3dfc118
|
[
"MIT"
] | 1
|
2020-11-07T05:57:52.000Z
|
2020-11-07T05:57:52.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 21:19:37 2020
@author: miyazakishinichi
"""
from tkinter import messagebox
from tkinter import filedialog
import tkinter
import os, sys, cv2
from tqdm import tqdm
####Tk root generate####
root = tkinter.Tk()
root.withdraw()
####ROI setting####
messagebox.showinfo('selectfiles', 'select csvfile for ROI setting')
ROI_file_path = tkinter.filedialog.askopenfilename(initialdir = dir)
if ROI_file_path == "":
messagebox.showinfo('cancel', 'stop before ROI setting')
sys.exit()
roi_data = csv_file_read(ROI_file_path)
roi_data['left'] = roi_data['BX']
roi_data['right'] = roi_data['BX'] + roi_data['Width']
roi_data['low'] = roi_data['BY']
roi_data['high'] = roi_data['BY'] + roi_data['Height']
roi = roi_data.loc[3]['left':'high']
####file select & directory setting####
messagebox.showinfo('selectfiles', 'select image files')
path = filedialog.askopenfilename()
if path != False:
pass
else:
messagebox.showinfo('quit', 'stop the script')
sys.exit()
folderpath = os.path.dirname(path)
os.chdir(folderpath)
imlist = os.listdir("./")
os.makedirs("../chamber3", exist_ok = True)
for i in tqdm(range(len(imlist))):
tempimage = cv2.imread(imlist[i])
left, right, low, high = int(roi['left']),\
int(roi['right']),int(roi['low']),int(roi['high'])
subimage = tempimage[low:high,left:right]
cv2.imwrite("../chamber3/{}.jpg".format(str(i).zfill(5)), subimage)
| 27.028169
| 71
| 0.668056
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 21:19:37 2020
@author: miyazakishinichi
"""
import pandas as pd
from tkinter import messagebox
from tkinter import filedialog
import tkinter
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import os, sys, cv2
from tqdm import tqdm
def csv_file_read(filepath):
file_dir, file_name = os.path.split(filepath)
base, ext = os.path.splitext(file_name)
if ext == '.csv':
data = pd.read_csv(filepath, index_col = 0)
return data
else:
return messagebox.showinfo('error',
'selected file is not csv file')
####Tk root generate####
root = tkinter.Tk()
root.withdraw()
####ROI setting####
messagebox.showinfo('selectfiles', 'select csvfile for ROI setting')
ROI_file_path = tkinter.filedialog.askopenfilename(initialdir = dir)
if ROI_file_path == "":
messagebox.showinfo('cancel', 'stop before ROI setting')
sys.exit()
roi_data = csv_file_read(ROI_file_path)
roi_data['left'] = roi_data['BX']
roi_data['right'] = roi_data['BX'] + roi_data['Width']
roi_data['low'] = roi_data['BY']
roi_data['high'] = roi_data['BY'] + roi_data['Height']
roi = roi_data.loc[3]['left':'high']
####file select & directory setting####
messagebox.showinfo('selectfiles', 'select image files')
path = filedialog.askopenfilename()
if path != False:
pass
else:
messagebox.showinfo('quit', 'stop the script')
sys.exit()
folderpath = os.path.dirname(path)
os.chdir(folderpath)
imlist = os.listdir("./")
os.makedirs("../chamber3", exist_ok = True)
for i in tqdm(range(len(imlist))):
tempimage = cv2.imread(imlist[i])
left, right, low, high = int(roi['left']),\
int(roi['right']),int(roi['low']),int(roi['high'])
subimage = tempimage[low:high,left:right]
cv2.imwrite("../chamber3/{}.jpg".format(str(i).zfill(5)), subimage)
| 0
| 0
| 0
| 0
| 0
| 310
| 0
| 7
| 112
|
aedaa1eb60c8454a5adaa3d060aa87eba4684ba7
| 207
|
py
|
Python
|
getFrame.py
|
divakar-lakhera/Partial-Encryption
|
0fc6537b4a23848b21618e906a22920bd00b7c41
|
[
"MIT"
] | null | null | null |
getFrame.py
|
divakar-lakhera/Partial-Encryption
|
0fc6537b4a23848b21618e906a22920bd00b7c41
|
[
"MIT"
] | null | null | null |
getFrame.py
|
divakar-lakhera/Partial-Encryption
|
0fc6537b4a23848b21618e906a22920bd00b7c41
|
[
"MIT"
] | null | null | null |
import cv2
INPUT_FILE='input_encode.avi'
FRAME_NUMBER=70
cap=cv2.VideoCapture(INPUT_FILE)
cap.set(cv2.CAP_PROP_POS_FRAMES, FRAME_NUMBER)
ret,frame=cap.read()
cv2.imwrite("frame_"+INPUT_FILE+".png",frame)
| 18.818182
| 46
| 0.797101
|
import cv2
INPUT_FILE='input_encode.avi'
FRAME_NUMBER=70
cap=cv2.VideoCapture(INPUT_FILE)
cap.set(cv2.CAP_PROP_POS_FRAMES, FRAME_NUMBER)
ret,frame=cap.read()
cv2.imwrite("frame_"+INPUT_FILE+".png",frame)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
78169f18b371e12087115a1c033f6919a0a32815
| 27,978
|
py
|
Python
|
brainda/algorithms/decomposition/csp.py
|
TBC-TJU/MetaBCI-brainda
|
d2dc655163b771ca22e43432d886ece3d98235c8
|
[
"MIT"
] | null | null | null |
brainda/algorithms/decomposition/csp.py
|
TBC-TJU/MetaBCI-brainda
|
d2dc655163b771ca22e43432d886ece3d98235c8
|
[
"MIT"
] | null | null | null |
brainda/algorithms/decomposition/csp.py
|
TBC-TJU/MetaBCI-brainda
|
d2dc655163b771ca22e43432d886ece3d98235c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Authors: Swolf <[email protected]>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from typing import Tuple
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh
from .base import robust_pattern
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel in CSP algorithm based on paper [1]_.
Parameters
----------
X: ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y: ndarray
labels of X, shape (n_trials,).
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
if len(labels) != 2:
raise ValueError("the current kernel is for 2-class problem.")
C1 = covariances(X[y==labels[0]])
C2 = covariances(X[y==labels[1]])
# # trace normalization
# # this operation equals to trial normalization
# C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
# C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C1 = np.mean(C1, axis=0)
C2 = np.mean(C2, axis=0)
Cc = C1 + C2
# check positive-definiteness
Cc = nearestPD(Cc)
# generalized eigenvalue problem
D, W = eigh(C1, Cc)
ix = np.argsort(D)[::-1]
W = W[:, ix]
D = D[ix]
A = robust_pattern(W, C1, W.T@C1@W)
return W, D, A
def csp_feature(W: ndarray, X: ndarray,
n_components: int = 2) -> ndarray:
"""Return CSP features in paper [1]_.
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
the first k components to use, usually even number, by default 2
Returns
-------
ndarray
features of shape (n_trials, n_features)
Raises
------
ValueError
        n_components should be less than the number of channels
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
raise ValueError("n_components should less than the number of channels")
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
# normalized variance
features = np.mean(np.square(np.matmul(W[:, :n_components].T, X)), axis=-1)
features = features / (np.sum(features, axis=-1, keepdims=True) + eps)
# log-transformation
features = np.log(np.clip(features, eps, None))
return features
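# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how csp_kernel and csp_feature defined above combine on synthetic
# two-class trials. The data shapes and the class-dependent scaling are
# illustrative assumptions, not project defaults.
def _csp_usage_example(n_trials=40, n_channels=8, n_samples=256):
    """Fit CSP filters on random two-class trials and return log-variance features."""
    rng = np.random.RandomState(42)
    X = rng.randn(n_trials, n_channels, n_samples)
    y = np.array([0] * (n_trials // 2) + [1] * (n_trials // 2))
    # make the classes separable by boosting one channel's power for class 1
    X[y == 1, 0, :] *= 3
    W, D, A = csp_kernel(X, y)                     # filters, eigenvalues, patterns
    features = csp_feature(W, X, n_components=2)   # shape (n_trials, 2)
    return features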
def _rjd(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on jacobi angle.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
JADE. The code is a translation of the matlab code provided in the author
website.
References
----------
.. [1] Cardoso, Jean-Francois, and Antoine Souloumiac. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
"""
# reshape input matrix
A = np.concatenate(X, 0).T
# init variables
m, nm = A.shape
V = np.eye(m)
encore = True
k = 0
while encore:
encore = False
k += 1
if k > n_iter_max:
break
for p in range(m - 1):
for q in range(p + 1, m):
Ip = np.arange(p, nm, m)
Iq = np.arange(q, nm, m)
# computation of Givens angle
g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
gg = np.dot(g, g.T)
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton +
np.sqrt(ton * ton + toff * toff))
c = np.cos(theta)
s = np.sin(theta)
encore = encore | (np.abs(s) > eps)
if (np.abs(s) > eps):
tmp = A[:, Ip].copy()
A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
A[:, Iq] = c * A[:, Iq] - s * tmp
tmp = A[p, :].copy()
A[p, :] = c * A[p, :] + s * A[q, :]
A[q, :] = c * A[q, :] - s * tmp
tmp = V[:, p].copy()
V[:, p] = c * V[:, p] + s * V[:, q]
V[:, q] = c * V[:, q] - s * tmp
D = np.reshape(A, (m, int(nm / m), m)).transpose(1, 0, 2)
return V, D
def _ajd_pham(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on pham's algorithm.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the PHAM's AJD algorithm [1]_.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive definite Hermitian matrices." SIAM Journal on Matrix Analysis and Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(n_iter_max):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V.T, D
def _uwedge(X, init=None, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization algorithm UWEDGE.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
init : None | ndarray, optional
Initialization for the diagonalizer, shape (n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
W_est : ndarray
The diagonalizer, shape (n_filters, n_channels), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
Uniformly Weighted Exhaustive Diagonalization using Gauss iteration
(U-WEDGE). Implementation of the AJD algorithm by Tichavsky and Yeredor [1]_ [2]_.
This is a translation from the matlab code provided by the authors.
References
----------
.. [1] P. Tichavsky, A. Yeredor and J. Nielsen, "A Fast Approximate Joint Diagonalization Algorithm Using a Criterion with a Block Diagonal Weight Matrix", ICASSP 2008, Las Vegas.
.. [2] P. Tichavsky and A. Yeredor, "Fast Approximate Joint Diagonalization Incorporating Weight Matrices" IEEE Transactions of Signal Processing, 2009.
"""
L, d, _ = X.shape
# reshape input matrix
M = np.concatenate(X, 0).T
# init variables
d, Md = M.shape
iteration = 0
improve = 10
if init is None:
E, H = np.linalg.eig(M[:, 0:d])
W_est = np.dot(np.diag(1. / np.sqrt(np.abs(E))), H.T)
else:
W_est = init
Ms = np.array(M)
Rs = np.zeros((d, L))
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
M[:, Il] = 0.5*(M[:, Il] + M[:, Il].T)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit = np.sum(Ms**2) - np.sum(Rs**2)
while (improve > eps) & (iteration < n_iter_max):
B = np.dot(Rs, Rs.T)
C1 = np.zeros((d, d))
for i in range(d):
C1[:, i] = np.sum(Ms[:, i:Md:d]*Rs, axis=1)
D0 = B*B.T - np.outer(np.diag(B), np.diag(B))
A0 = (C1 * B - np.dot(np.diag(np.diag(B)), C1.T)) / (D0 + np.eye(d))
A0 += np.eye(d)
W_est = np.linalg.solve(A0, W_est)
Raux = np.dot(np.dot(W_est, M[:, 0:d]), W_est.T)
aux = 1./np.sqrt(np.abs(np.diag(Raux)))
W_est = np.dot(np.diag(aux), W_est)
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit_new = np.sum(Ms**2) - np.sum(Rs**2)
improve = np.abs(crit_new - crit)
crit = crit_new
iteration += 1
D = np.reshape(Ms, (d, L, d)).transpose(1, 0, 2)
return W_est.T, D
ajd_methods = {
'rjd': _rjd,
'ajd_pham': _ajd_pham,
'uwedge': _uwedge
}
def _check_ajd_method(method):
"""Check if a given method is valid.
Parameters
----------
method : callable object or str
Could be the name of ajd_method or a callable method itself.
Returns
-------
method: callable object
A callable ajd method.
"""
if callable(method):
pass
elif method in ajd_methods.keys():
method = ajd_methods[method]
else:
raise ValueError(
"""%s is not an valid method ! Valid methods are : %s or a
callable function""" % (method, (' , ').join(ajd_methods.keys())))
return method
def ajd(X: ndarray, method: str ='uwedge') -> Tuple[ndarray, ndarray]:
"""Wrapper of AJD methods.
Parameters
----------
X : ndarray
Input covariance matrices, shape (n_trials, n_channels, n_channels)
method : str, optional
AJD method (default uwedge).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The mean of quasi diagonal matrices, shape (n_channels,).
"""
method = _check_ajd_method(method)
V, D = method(X)
D = np.diag(np.mean(D, axis=0))
ind = np.argsort(D)[::-1]
D = D[ind]
V = V[:, ind]
return V, D
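# Hedged usage sketch (added for illustration; not part of the original module).
# A set of symmetric positive definite matrices sharing one mixing matrix is
# built, and ajd() defined above recovers an approximate joint diagonalizer.
def _ajd_usage_example(n_matrices=5, n_channels=6, method='uwedge'):
    """Return V, D and one transformed matrix V.T @ C @ V (quasi-diagonal)."""
    rng = np.random.RandomState(0)
    mixing = rng.randn(n_channels, n_channels)
    Cs = np.stack([mixing @ np.diag(rng.rand(n_channels) + 0.1) @ mixing.T
                   for _ in range(n_matrices)])
    V, D = ajd(Cs, method=method)     # diagonalizer and sorted mean diagonal
    quasi_diag = V.T @ Cs[0] @ V      # off-diagonal entries should be small
    return V, D, quasi_diag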
def gw_csp_kernel(X: ndarray, y: ndarray,
ajd_method: str = 'uwedge') -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""Grosse-Wentrup AJD method based on paper [1]_.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y : ndarray
labels, shape (n_trials).
ajd_method : str, optional
ajd methods, 'uwedge' 'rjd' and 'ajd_pham', by default 'uwedge'.
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
mutual_info: ndarray
        Mutual information values, shape (n_filters).
References
----------
.. [1] Grosse-Wentrup, Moritz, and Martin Buss. "Multiclass common spatial patterns and information theoretic feature extraction." Biomedical Engineering, IEEE Transactions on 55, no. 8 (2008): 1991-2000.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
Cx = []
for label in labels:
C = covariances(X[y==label])
# trace normalization
C = C / np.trace(C, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
Cx.append(np.mean(C, axis=0))
Cx = np.stack(Cx)
W, D = ajd(Cx, method=ajd_method)
# Ctot = np.mean(Cx, axis=0)
# W = W / np.sqrt(np.diag(W.T@Ctot@W))
W = W / np.sqrt(D)
# compute mutual information values
Pc = [np.mean(y == label) for label in labels]
mutual_info = []
for j in range(W.shape[-1]):
a = 0
b = 0
for i in range(len(labels)):
# tmp = np.dot(np.dot(W[j], self.C_[i]), W[j].T)
tmp = W[:, j].T@Cx[i]@W[:, j]
a += Pc[i] * np.log(np.sqrt(tmp))
b += Pc[i] * (tmp ** 2 - 1)
mi = - (a + (3.0 / 16) * (b ** 2))
mutual_info.append(mi)
mutual_info = np.array(mutual_info)
ix = np.argsort(mutual_info)[::-1]
W = W[:, ix]
mutual_info = mutual_info[ix]
D = D[ix]
A = robust_pattern(W, Cx[0], W.T@Cx[0]@W)
return W, D, A, mutual_info
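# Hedged usage sketch (added for illustration; not part of the original module).
# gw_csp_kernel defined above handles more than two classes; here three
# synthetic classes each get a different dominant channel so that the
# mutual-information ranking has structure to find.
def _gw_csp_usage_example(n_trials_per_class=20, n_channels=8, n_samples=200):
    """Return the four most informative spatial filters and their scores."""
    rng = np.random.RandomState(7)
    n_classes = 3
    X = rng.randn(n_classes * n_trials_per_class, n_channels, n_samples)
    y = np.repeat(np.arange(n_classes), n_trials_per_class)
    for c in range(n_classes):
        X[y == c, c, :] *= 2.5        # class-specific channel power
    W, D, A, mutual_info = gw_csp_kernel(X, y, ajd_method='uwedge')
    return W[:, :4], mutual_info[:4]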
def spoc_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""Source Power Comodulation (SPoC) based on paper [1]_.
    It is a continuous CSP-like method.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
y : ndarray
labels, shape (n_trials)
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
    .. [1] Sven Dähne, Frank C. Meinecke, Stefan Haufe, Johannes Höhne, Michael Tangermann, Klaus-Robert Müller, and Vadim V. Nikulin. SPoC: a novel framework for relating the amplitude of neuronal oscillations to behaviorally relevant parameters. NeuroImage, 86:111-122, 2014. doi:10.1016/j.neuroimage.2013.07.079.
"""
X, weights = np.copy(X), np.copy(y)
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
weights = weights - np.mean(weights)
weights = weights / np.std(weights)
Cx = covariances(X)
# trace normalization
Cx = Cx / np.trace(Cx, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C = np.mean(Cx, axis=0)
Cz = np.mean(weights[:, np.newaxis, np.newaxis]*Cx, axis=0)
# check positive-definiteness
C = nearestPD(C)
Cz = nearestPD(Cz)
# TODO: direct copy from pyriemann, need verify
D, W = eigh(Cz, C)
ind = np.argsort(D)[::-1]
D = D[ind]
W = W[:, ind]
A = robust_pattern(W, Cz, W.T@Cz@W)
return W, D, A
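# Hedged usage sketch (added for illustration; not part of the original module).
# Unlike CSP, spoc_kernel defined above takes a continuous target, so y is a
# per-trial scalar whose value co-modulates with the power of one channel.
def _spoc_usage_example(n_trials=60, n_channels=8, n_samples=200):
    """Fit SPoC and return the per-trial power of the first component."""
    rng = np.random.RandomState(1)
    X = rng.randn(n_trials, n_channels, n_samples)
    z = rng.rand(n_trials)                       # continuous behavioural target
    X[:, 0, :] *= (1 + z)[:, np.newaxis]         # channel 0 power tracks z
    W, D, A = spoc_kernel(X, z)
    comp = np.matmul(W[:, 0], X)                 # first component, (n_trials, n_samples)
    power = np.mean(comp ** 2, axis=-1)
    return power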
| 36.240933
| 315
| 0.589356
|
# -*- coding: utf-8 -*-
#
# Authors: Swolf <[email protected]>
# Date: 2021/1/07
# License: MIT License
"""
Common Spatial Patterns and his happy little buddies!
"""
from copy import deepcopy
from typing import Union, Optional, List, Dict, Tuple
from functools import partial
import numpy as np
from numpy import ndarray
from scipy.linalg import eigh, pinv, solve
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV, StratifiedKFold, ShuffleSplit
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from sklearn.pipeline import make_pipeline
from .base import robust_pattern, FilterBank
from ..utils.covariance import nearestPD, covariances
def csp_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""The kernel in CSP algorithm based on paper [1]_.
Parameters
----------
X: ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y: ndarray
labels of X, shape (n_trials,).
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
if len(labels) != 2:
raise ValueError("the current kernel is for 2-class problem.")
C1 = covariances(X[y==labels[0]])
C2 = covariances(X[y==labels[1]])
# # trace normalization
# # this operation equals to trial normalization
# C1 = C1 / np.trace(C1, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
# C2 = C2 / np.trace(C2, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C1 = np.mean(C1, axis=0)
C2 = np.mean(C2, axis=0)
Cc = C1 + C2
# check positive-definiteness
Cc = nearestPD(Cc)
# generalized eigenvalue problem
D, W = eigh(C1, Cc)
ix = np.argsort(D)[::-1]
W = W[:, ix]
D = D[ix]
A = robust_pattern(W, C1, W.T@C1@W)
return W, D, A
def csp_feature(W: ndarray, X: ndarray,
n_components: int = 2) -> ndarray:
"""Return CSP features in paper [1]_.
Parameters
----------
W : ndarray
spatial filters from csp_kernel, shape (n_channels, n_filters)
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
n_components : int, optional
the first k components to use, usually even number, by default 2
Returns
-------
ndarray
features of shape (n_trials, n_features)
Raises
------
ValueError
        n_components should be less than the number of channels
References
----------
.. [1] Ramoser H, Muller-Gerking J, Pfurtscheller G. Optimal spatial filtering of single trial EEG during imagined hand movement[J]. IEEE transactions on rehabilitation engineering, 2000, 8(4): 441-446.
"""
W, X = np.copy(W), np.copy(X)
max_components = W.shape[1]
if n_components > max_components:
raise ValueError("n_components should less than the number of channels")
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
# normalized variance
features = np.mean(np.square(np.matmul(W[:, :n_components].T, X)), axis=-1)
features = features / (np.sum(features, axis=-1, keepdims=True) + eps)
# log-transformation
features = np.log(np.clip(features, eps, None))
return features
def _rjd(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on jacobi angle.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the Cardoso AJD algorithm [1]_ used in
JADE. The code is a translation of the matlab code provided in the author
website.
References
----------
.. [1] Cardoso, Jean-Francois, and Antoine Souloumiac. Jacobi angles for simultaneous diagonalization. SIAM journal on matrix analysis and applications 17.1 (1996): 161-164.
"""
# reshape input matrix
A = np.concatenate(X, 0).T
# init variables
m, nm = A.shape
V = np.eye(m)
encore = True
k = 0
while encore:
encore = False
k += 1
if k > n_iter_max:
break
for p in range(m - 1):
for q in range(p + 1, m):
Ip = np.arange(p, nm, m)
Iq = np.arange(q, nm, m)
# computation of Givens angle
g = np.array([A[p, Ip] - A[q, Iq], A[p, Iq] + A[q, Ip]])
gg = np.dot(g, g.T)
ton = gg[0, 0] - gg[1, 1]
toff = gg[0, 1] + gg[1, 0]
theta = 0.5 * np.arctan2(toff, ton +
np.sqrt(ton * ton + toff * toff))
c = np.cos(theta)
s = np.sin(theta)
encore = encore | (np.abs(s) > eps)
if (np.abs(s) > eps):
tmp = A[:, Ip].copy()
A[:, Ip] = c * A[:, Ip] + s * A[:, Iq]
A[:, Iq] = c * A[:, Iq] - s * tmp
tmp = A[p, :].copy()
A[p, :] = c * A[p, :] + s * A[q, :]
A[q, :] = c * A[q, :] - s * tmp
tmp = V[:, p].copy()
V[:, p] = c * V[:, p] + s * V[:, q]
V[:, q] = c * V[:, q] - s * tmp
D = np.reshape(A, (m, int(nm / m), m)).transpose(1, 0, 2)
return V, D
def _ajd_pham(X, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization based on pham's algorithm.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int, optional
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
This is a direct implementation of the PHAM's AJD algorithm [1]_.
References
----------
.. [1] Pham, Dinh Tuan. "Joint approximate diagonalization of positive definite Hermitian matrices." SIAM Journal on Matrix Analysis and Applications 22, no. 4 (2001): 1136-1152.
"""
# Adapted from http://github.com/alexandrebarachant/pyRiemann
n_epochs = X.shape[0]
# Reshape input matrix
A = np.concatenate(X, axis=0).T
# Init variables
n_times, n_m = A.shape
V = np.eye(n_times)
epsilon = n_times * (n_times - 1) * eps
for it in range(n_iter_max):
decr = 0
for ii in range(1, n_times):
for jj in range(ii):
Ii = np.arange(ii, n_m, n_times)
Ij = np.arange(jj, n_m, n_times)
c1 = A[ii, Ii]
c2 = A[jj, Ij]
g12 = np.mean(A[ii, Ij] / c1)
g21 = np.mean(A[ii, Ij] / c2)
omega21 = np.mean(c1 / c2)
omega12 = np.mean(c2 / c1)
omega = np.sqrt(omega12 * omega21)
tmp = np.sqrt(omega21 / omega12)
tmp1 = (tmp * g12 + g21) / (omega + 1)
tmp2 = (tmp * g12 - g21) / max(omega - 1, 1e-9)
h12 = tmp1 + tmp2
h21 = np.conj((tmp1 - tmp2) / tmp)
decr += n_epochs * (g12 * np.conj(h12) + g21 * h21) / 2.0
tmp = 1 + 1.j * 0.5 * np.imag(h12 * h21)
tmp = np.real(tmp + np.sqrt(tmp ** 2 - h12 * h21))
tau = np.array([[1, -h12 / tmp], [-h21 / tmp, 1]])
A[[ii, jj], :] = np.dot(tau, A[[ii, jj], :])
tmp = np.c_[A[:, Ii], A[:, Ij]]
tmp = np.reshape(tmp, (n_times * n_epochs, 2), order='F')
tmp = np.dot(tmp, tau.T)
tmp = np.reshape(tmp, (n_times, n_epochs * 2), order='F')
A[:, Ii] = tmp[:, :n_epochs]
A[:, Ij] = tmp[:, n_epochs:]
V[[ii, jj], :] = np.dot(tau, V[[ii, jj], :])
if decr < epsilon:
break
D = np.reshape(A, (n_times, -1, n_times)).transpose(1, 0, 2)
return V.T, D
def _uwedge(X, init=None, eps=1e-9, n_iter_max=1000):
"""Approximate joint diagonalization algorithm UWEDGE.
Parameters
----------
X : ndarray
A set of covariance matrices to diagonalize, shape (n_trials, n_channels, n_channels).
init : None | ndarray, optional
Initialization for the diagonalizer, shape (n_channels, n_channels).
eps : float, optional
        Tolerance for stopping criterion (default 1e-9).
n_iter_max : int
The maximum number of iteration to reach convergence (default 1000).
Returns
-------
W_est : ndarray
The diagonalizer, shape (n_filters, n_channels), usually n_filters == n_channels.
D : ndarray
The set of quasi diagonal matrices, shape (n_trials, n_channels, n_channels).
Notes
-----
Uniformly Weighted Exhaustive Diagonalization using Gauss iteration
(U-WEDGE). Implementation of the AJD algorithm by Tichavsky and Yeredor [1]_ [2]_.
This is a translation from the matlab code provided by the authors.
References
----------
.. [1] P. Tichavsky, A. Yeredor and J. Nielsen, "A Fast Approximate Joint Diagonalization Algorithm Using a Criterion with a Block Diagonal Weight Matrix", ICASSP 2008, Las Vegas.
.. [2] P. Tichavsky and A. Yeredor, "Fast Approximate Joint Diagonalization Incorporating Weight Matrices" IEEE Transactions of Signal Processing, 2009.
"""
L, d, _ = X.shape
# reshape input matrix
M = np.concatenate(X, 0).T
# init variables
d, Md = M.shape
iteration = 0
improve = 10
if init is None:
E, H = np.linalg.eig(M[:, 0:d])
W_est = np.dot(np.diag(1. / np.sqrt(np.abs(E))), H.T)
else:
W_est = init
Ms = np.array(M)
Rs = np.zeros((d, L))
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
M[:, Il] = 0.5*(M[:, Il] + M[:, Il].T)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit = np.sum(Ms**2) - np.sum(Rs**2)
while (improve > eps) & (iteration < n_iter_max):
B = np.dot(Rs, Rs.T)
C1 = np.zeros((d, d))
for i in range(d):
C1[:, i] = np.sum(Ms[:, i:Md:d]*Rs, axis=1)
D0 = B*B.T - np.outer(np.diag(B), np.diag(B))
A0 = (C1 * B - np.dot(np.diag(np.diag(B)), C1.T)) / (D0 + np.eye(d))
A0 += np.eye(d)
W_est = np.linalg.solve(A0, W_est)
Raux = np.dot(np.dot(W_est, M[:, 0:d]), W_est.T)
aux = 1./np.sqrt(np.abs(np.diag(Raux)))
W_est = np.dot(np.diag(aux), W_est)
for k in range(L):
ini = k*d
Il = np.arange(ini, ini + d)
Ms[:, Il] = np.dot(np.dot(W_est, M[:, Il]), W_est.T)
Rs[:, k] = np.diag(Ms[:, Il])
crit_new = np.sum(Ms**2) - np.sum(Rs**2)
improve = np.abs(crit_new - crit)
crit = crit_new
iteration += 1
D = np.reshape(Ms, (d, L, d)).transpose(1, 0, 2)
return W_est.T, D
ajd_methods = {
'rjd': _rjd,
'ajd_pham': _ajd_pham,
'uwedge': _uwedge
}
def _check_ajd_method(method):
"""Check if a given method is valid.
Parameters
----------
method : callable object or str
Could be the name of ajd_method or a callable method itself.
Returns
-------
method: callable object
A callable ajd method.
"""
if callable(method):
pass
elif method in ajd_methods.keys():
method = ajd_methods[method]
else:
raise ValueError(
"""%s is not an valid method ! Valid methods are : %s or a
callable function""" % (method, (' , ').join(ajd_methods.keys())))
return method
def ajd(X: ndarray, method: str ='uwedge') -> Tuple[ndarray, ndarray]:
"""Wrapper of AJD methods.
Parameters
----------
X : ndarray
Input covariance matrices, shape (n_trials, n_channels, n_channels)
method : str, optional
AJD method (default uwedge).
Returns
-------
V : ndarray
The diagonalizer, shape (n_channels, n_filters), usually n_filters == n_channels.
D : ndarray
The mean of quasi diagonal matrices, shape (n_channels,).
"""
method = _check_ajd_method(method)
V, D = method(X)
D = np.diag(np.mean(D, axis=0))
ind = np.argsort(D)[::-1]
D = D[ind]
V = V[:, ind]
return V, D
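# NOTE: illustrative usage sketch added for clarity; it is not part of the
# original API. The matrix sizes, seed and method choice below are arbitrary
# assumptions, and the input is just a random symmetric positive-definite stack.
def _demo_ajd_usage():
    rng = np.random.RandomState(42)
    base = rng.randn(4, 6, 6)
    covs = base @ base.transpose(0, 2, 1) + 6 * np.eye(6)  # make each matrix SPD
    V, D = ajd(covs, method='uwedge')
    # V holds one spatial filter per column; D holds the sorted mean diagonal values
    return V.shape, D.shape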
def gw_csp_kernel(X: ndarray, y: ndarray,
ajd_method: str = 'uwedge') -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""Grosse-Wentrup AJD method based on paper [1]_.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples).
y : ndarray
labels, shape (n_trials).
ajd_method : str, optional
        AJD method, one of 'uwedge', 'rjd' or 'ajd_pham' (default 'uwedge').
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
mutual_info: ndarray
        Mutual information values, shape (n_filters,).
References
----------
.. [1] Grosse-Wentrup, Moritz, and Martin Buss. "Multiclass common spatial patterns and information theoretic feature extraction." Biomedical Engineering, IEEE Transactions on 55, no. 8 (2008): 1991-2000.
"""
X, y = np.copy(X), np.copy(y)
labels = np.unique(y)
X = X - np.mean(X, axis=-1, keepdims=True)
Cx = []
for label in labels:
C = covariances(X[y==label])
# trace normalization
C = C / np.trace(C, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
Cx.append(np.mean(C, axis=0))
Cx = np.stack(Cx)
W, D = ajd(Cx, method=ajd_method)
# Ctot = np.mean(Cx, axis=0)
# W = W / np.sqrt(np.diag(W.T@Ctot@W))
W = W / np.sqrt(D)
# compute mutual information values
Pc = [np.mean(y == label) for label in labels]
mutual_info = []
for j in range(W.shape[-1]):
a = 0
b = 0
for i in range(len(labels)):
# tmp = np.dot(np.dot(W[j], self.C_[i]), W[j].T)
tmp = W[:, j].T@Cx[i]@W[:, j]
a += Pc[i] * np.log(np.sqrt(tmp))
b += Pc[i] * (tmp ** 2 - 1)
mi = - (a + (3.0 / 16) * (b ** 2))
mutual_info.append(mi)
mutual_info = np.array(mutual_info)
ix = np.argsort(mutual_info)[::-1]
W = W[:, ix]
mutual_info = mutual_info[ix]
D = D[ix]
A = robust_pattern(W, Cx[0], W.T@Cx[0]@W)
return W, D, A, mutual_info
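# NOTE: illustrative usage sketch added for clarity; not part of the original
# module. Trial counts, channel counts and the three-class labels are assumptions
# chosen only so the call runs on synthetic data.
def _demo_gw_csp_usage():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 8, 128)      # 30 trials, 8 channels, 128 samples
    y = np.arange(30) % 3          # three balanced classes
    W, D, A, mi = gw_csp_kernel(X, y, ajd_method='uwedge')
    # filters come back sorted by mutual information with the labels
    return W.shape, mi.shape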
class CSP(BaseEstimator, TransformerMixin):
"""Common Spatial Pattern.
    If n_components is None, the best number of components is selected automatically with a grid search over SVC accuracy. The upper search limit is set by max_components; when max_components is None, every available filter (up to the number of channels) is searched.
"""
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None):
self.n_components = n_components
self.max_components = max_components
def fit(self, X: ndarray, y: ndarray):
self.classes_ = np.unique(y)
self.W_, self.D_, self.A_ = csp_kernel(X, y)
# resorting with 0.5 threshold
self.D_ = np.abs(self.D_ - 0.5)
ind = np.argsort(self.D_, axis=-1)[::-1]
self.W_, self.D_, self.A_ = self.W_[:, ind], self.D_[ind], self.A_[:, ind]
# auto-tuning
if self.n_components is None:
estimator = make_pipeline(*[CSP(n_components=self.n_components), SVC()])
if self.max_components is None:
params = {'csp__n_components': np.arange(1, self.W_.shape[1]+1)}
else:
params = {'csp__n_components': np.arange(1, self.max_components+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(X, y)
self.best_n_components_ = gs.best_params_['csp__n_components']
return self
def transform(self, X: ndarray):
n_components = self.best_n_components_ if self.n_components is None else self.n_components
return csp_feature(self.W_, X, n_components=n_components)
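# NOTE: illustrative usage sketch, not part of the original file. n_components is
# fixed so that the internal grid search is skipped; the data is random and the
# exact feature values depend on the module's csp_kernel/csp_feature helpers.
def _demo_csp_usage():
    rng = np.random.RandomState(1)
    X = rng.randn(40, 8, 100)
    y = np.arange(40) % 2          # two balanced classes
    csp = CSP(n_components=4).fit(X, y)
    return csp.transform(X).shape  # one feature vector per trial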
class MultiCSP(BaseEstimator, TransformerMixin):
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None,
multiclass: str = 'ovr', ajd_method: str ='uwedge'):
self.n_components = n_components
self.max_components = max_components
self.multiclass = multiclass
self.ajd_method = ajd_method
def fit(self, X: ndarray, y: ndarray):
self.classes_ = np.unique(y)
if self.multiclass == 'ovr':
self.estimator_ = OneVsRestClassifier(
make_pipeline(*[
CSP(n_components=self.n_components, max_components=self.max_components), SVC()
]), n_jobs=-1)
self.estimator_.fit(X, y)
elif self.multiclass == 'ovo':
self.estimator_ = OneVsOneClassifier(
make_pipeline(*[
CSP(n_components=self.n_components, max_components=self.max_components), SVC()
]), n_jobs=-1)
# patching avoiding 2d array check
self.estimator_._validate_data = partial(self.estimator_._validate_data, allow_nd=True)
self.estimator_.fit(X, y)
elif self.multiclass == 'grosse-wentrup':
self.W_, _, self.A_, self.mutualinfo_values_ = gw_csp_kernel(
X, y, ajd_method=self.ajd_method)
if self.n_components is None:
estimator = make_pipeline(*[
MultiCSP(n_components=self.n_components, multiclass='grosse-wentrup', ajd_method=self.ajd_method), SVC()
])
if self.max_components is None:
params = {'multicsp__n_components': np.arange(1, self.W_.shape[1]+1)}
else:
params = {'multicsp__n_components': np.arange(1, self.max_components+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(X, y)
self.best_n_components_ = gs.best_params_['multicsp__n_components']
else:
raise ValueError("not a valid multiclass strategy")
return self
def transform(self, X: ndarray):
if self.multiclass == 'grosse-wentrup':
n_components = self.best_n_components_ if self.n_components is None else self.n_components
features = csp_feature(self.W_, X, n_components=n_components)
else:
features = np.concatenate([est[0].transform(X) for est in self.estimator_.estimators_], axis=-1)
return features
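# NOTE: illustrative usage sketch, not part of the original file. It drives the
# Grosse-Wentrup strategy with a fixed n_components so no grid search is run;
# shapes and labels are arbitrary assumptions.
def _demo_multicsp_usage():
    rng = np.random.RandomState(2)
    X = rng.randn(45, 8, 100)
    y = np.arange(45) % 3
    mcsp = MultiCSP(n_components=4, multiclass='grosse-wentrup').fit(X, y)
    return mcsp.transform(X).shape  # one row of features per trial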
def spoc_kernel(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray, ndarray]:
"""Source Power Comodulation (SPoC) based on paper [1]_.
    It is a continuous CSP-like method.
Parameters
----------
X : ndarray
eeg data, shape (n_trials, n_channels, n_samples)
y : ndarray
labels, shape (n_trials)
Returns
-------
W: ndarray
Spatial filters, shape (n_channels, n_filters).
D: ndarray
Eigenvalues of spatial filters, shape (n_filters,).
A: ndarray
Spatial patterns, shape (n_channels, n_patterns).
References
----------
.. [1] Sven Dähne, Frank C. Meinecke, Stefan Haufe, Johannes Höhne, Michael Tangermann, Klaus-Robert Müller, and Vadim V. Nikulin. SPoC: a novel framework for relating the amplitude of neuronal oscillations to behaviorally relevant parameters. NeuroImage, 86:111–122, 2014. doi:10.1016/j.neuroimage.2013.07.079.
"""
X, weights = np.copy(X), np.copy(y)
eps = np.finfo(X.dtype).eps
X = X - np.mean(X, axis=-1, keepdims=True)
weights = weights - np.mean(weights)
weights = weights / np.std(weights)
Cx = covariances(X)
# trace normalization
Cx = Cx / np.trace(Cx, axis1=-1, axis2=-2)[:, np.newaxis, np.newaxis]
C = np.mean(Cx, axis=0)
Cz = np.mean(weights[:, np.newaxis, np.newaxis]*Cx, axis=0)
# check positive-definiteness
C = nearestPD(C)
Cz = nearestPD(Cz)
# TODO: direct copy from pyriemann, need verify
D, W = eigh(Cz, C)
ind = np.argsort(D)[::-1]
D = D[ind]
W = W[:, ind]
A = robust_pattern(W, Cz, W.T@Cz@W)
return W, D, A
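# NOTE: illustrative usage sketch, not part of the original file. SPoC expects a
# continuous target, so a random behavioural variable is used; all shapes and the
# seed are assumptions made only for the demo.
def _demo_spoc_kernel_usage():
    rng = np.random.RandomState(3)
    X = rng.randn(50, 8, 100)
    z = rng.randn(50)              # continuous target, e.g. reaction time
    W, D, A = spoc_kernel(X, z)
    return W.shape, D.shape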
class SPoC(BaseEstimator, TransformerMixin):
"""Source Power Comodulation (SPoC).
For continuous data, not verified.
"""
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None):
self.n_components = n_components
self.max_components = max_components
def fit(self, X: ndarray, y: ndarray):
self.W_, self.D_, self.A_ = spoc_kernel(X, y)
# auto-tuning
if self.n_components is None:
estimator = make_pipeline(*[SPoC(n_components=self.n_components), Ridge(alpha=0.5)])
if self.max_components is None:
params = {'spoc__n_components': np.arange(1, self.W_.shape[1]+1)}
else:
params = {'spoc__n_components': np.arange(1, self.max_components+1)}
test_size = 0.2 if len(y) > 5 else 1/len(y)
gs = GridSearchCV(estimator,
param_grid=params, scoring='neg_root_mean_squared_error',
cv=ShuffleSplit(n_splits=5, test_size=test_size), refit=False, n_jobs=-1, verbose=False)
gs.fit(X, y)
            self.best_n_components_ = gs.best_params_['spoc__n_components']
        return self
def transform(self, X: ndarray):
n_components = self.best_n_components_ if self.n_components is None else self.n_components
return csp_feature(self.W_, X, n_components=n_components)
class FBCSP(FilterBank):
"""FBCSP.
FilterBank CSP based on paper [1]_.
References
----------
.. [1] Ang K K, Chin Z Y, Zhang H, et al. Filter bank common spatial pattern (FBCSP) in brain-computer interface[C]//2008 IEEE International Joint Conference on Neural Networks (IEEE World Congress on Computational Intelligence). IEEE, 2008: 2390-2397.
"""
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None,
n_mutualinfo_components: Optional[int] = None,
filterbank: Optional[List[ndarray]] = None):
self.n_components = n_components
self.max_components = max_components
self.n_mutualinfo_components = n_mutualinfo_components
self.filterbank = filterbank
super().__init__(CSP(n_components=n_components, max_components=max_components), filterbank=filterbank)
def fit(self, X: ndarray, y: ndarray):
super().fit(X, y)
features = super().transform(X)
if self.n_mutualinfo_components is None:
estimator = make_pipeline(*[
SelectKBest(score_func=mutual_info_classif, k='all'),
SVC()
])
params = {'selectkbest__k': np.arange(1, features.shape[1]+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(features, y)
self.best_n_mutualinfo_components_ = gs.best_params_['selectkbest__k']
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.best_n_mutualinfo_components_)
else:
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.n_mutualinfo_components)
self.selector_.fit(features, y)
return self
def transform(self, X: ndarray):
features = super().transform(X)
features = self.selector_.transform(features)
return features
class FBMultiCSP(FilterBank):
def __init__(self,
n_components: Optional[int] = None,
max_components: Optional[int] = None,
multiclass: str = 'ovr', ajd_method: str ='uwedge',
n_mutualinfo_components: Optional[int] = None,
filterbank: Optional[List[ndarray]] = None):
self.n_components = n_components
self.max_components = max_components
self.multiclass = multiclass
self.ajd_method = ajd_method
self.n_mutualinfo_components = n_mutualinfo_components
self.filterbank = filterbank
self.n_mutualinfo_components = n_mutualinfo_components
super().__init__(MultiCSP(n_components=n_components, max_components=max_components, multiclass=multiclass, ajd_method=ajd_method))
def fit(self, X: ndarray, y: ndarray):
super().fit(X, y)
features = super().transform(X)
if self.n_mutualinfo_components is None:
estimator = make_pipeline(*[
SelectKBest(score_func=mutual_info_classif, k='all'),
SVC()
])
params = {'selectkbest__k': np.arange(1, features.shape[1]+1)}
n_splits = np.min(np.unique(y, return_counts=True)[1])
n_splits = 5 if n_splits > 5 else n_splits
gs = GridSearchCV(estimator,
param_grid=params, scoring='accuracy',
cv=StratifiedKFold(n_splits=n_splits, shuffle=True), refit=False, n_jobs=-1, verbose=False)
gs.fit(features, y)
self.best_n_mutualinfo_components_ = gs.best_params_['selectkbest__k']
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.best_n_mutualinfo_components_)
else:
self.selector_ = SelectKBest(
score_func=mutual_info_classif, k=self.n_mutualinfo_components)
self.selector_.fit(features, y)
return self
def transform(self, X: ndarray):
features = super().transform(X)
features = self.selector_.transform(features)
return features
| 9
| 0
| 0
| 10,331
| 0
| 0
| 0
| 301
| 313
|
a213ac945ac3eff393596fccbd49623779d35895
| 16,917
|
py
|
Python
|
teller/explainer/explainer.py
|
Techtonique/teller
|
3571353b843179335e3995a0128d4a0c54c2b905
|
[
"BSD-3-Clause-Clear"
] | 5
|
2021-07-14T11:57:36.000Z
|
2022-03-26T19:47:54.000Z
|
teller/explainer/explainer.py
|
Techtonique/teller
|
3571353b843179335e3995a0128d4a0c54c2b905
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-21T17:53:37.000Z
|
2022-01-26T11:36:32.000Z
|
teller/explainer/explainer.py
|
Techtonique/teller
|
3571353b843179335e3995a0128d4a0c54c2b905
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-12-21T17:51:00.000Z
|
2021-12-21T17:51:00.000Z
|
import matplotlib.style as style
| 32.284351
| 131
| 0.435952
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.style as style
from sklearn.base import BaseEstimator
from ..utils import (
is_factor,
numerical_gradient,
numerical_gradient_jackknife,
numerical_gradient_gaussian,
numerical_interactions,
numerical_interactions_jackknife,
numerical_interactions_gaussian,
Progbar,
score_regression,
score_classification,
)
class Explainer(BaseEstimator):
"""Class Explainer: effects of features on the response.
Attributes:
obj: an object;
fitted object containing methods `fit` and `predict`
n_jobs: an integer;
number of jobs for parallel computing
y_class: an integer;
class whose probability has to be explained (for classification only)
normalize: a boolean;
whether the features must be normalized or not (changes the effects)
"""
def __init__(self, obj, n_jobs=None, y_class=0, normalize=False):
self.obj = obj
self.n_jobs = n_jobs
self.y_mean_ = None
self.effects_ = None
self.residuals_ = None
self.r_squared_ = None
self.adj_r_squared_ = None
self.effects_ = None
self.ci_ = None
self.ci_inters_ = {}
self.type_fit = None
self.y_class = y_class # classification only
self.normalize = normalize
self.type_ci = None
def fit(
self,
X,
y,
X_names,
method="avg",
type_ci="jackknife",
scoring=None,
level=95,
col_inters=None,
):
"""Fit the explainer's attribute `obj` to training data (X, y).
Args:
X: array-like, shape = [n_samples, n_features];
Training vectors, where n_samples is the number
of samples and n_features is the number of features.
y: array-like, shape = [n_samples, ]; Target values.
X_names: {array-like}, shape = [n_features, ];
Column names (strings) for training vectors.
method: str;
Type of summary requested for effects. Either `avg`
(for average effects), `inters` (for interactions)
or `ci` (for effects including confidence intervals
around them).
type_ci: str;
Type of resampling for `method == 'ci'` (confidence
intervals around effects). Either `jackknife`
                bootstrapping or `gaussian` (Gaussian white noise with
standard deviation equal to `0.01` applied to the
features).
scoring: str;
measure of errors must be in ("explained_variance",
"neg_mean_absolute_error", "neg_mean_squared_error",
"neg_mean_squared_log_error", "neg_median_absolute_error",
"r2", "rmse") (default: "rmse").
level: int; Level of confidence required for
`method == 'ci'` (in %).
col_inters: str; Name of column for computing interactions.
"""
assert method in (
"avg",
"ci",
"inters",
), "must have: `method` in ('avg', 'ci', 'inters')"
n, p = X.shape
self.X_names = X_names
self.level = level
self.method = method
self.type_ci = type_ci
if is_factor(y): # classification ---
self.n_classes = len(np.unique(y))
assert (
self.y_class <= self.n_classes
), "self.y_class must be <= number of classes"
assert hasattr(
self.obj, "predict_proba"
), "`self.obj` must be a classifier and have a method `predict_proba`"
self.type_fit = "classification"
if scoring is None:
self.scoring = "accuracy"
self.score_ = score_classification(self.obj, X, y, scoring=self.scoring)
def predict_proba(x):
return self.obj.predict_proba(x)[:, self.y_class]
y_hat = predict_proba(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
predict_proba,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=predict_proba,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
else: # is_factor(y) == False # regression ---
self.type_fit = "regression"
if scoring is None:
self.scoring = "rmse"
self.score_ = score_regression(self.obj, X, y, scoring=self.scoring)
y_hat = self.obj.predict(X)
# heterogeneity of effects
if method == "avg":
self.grad_ = numerical_gradient(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
)
# confidence intervals
if method == "ci":
if type_ci=="jackknife":
self.ci_ = numerical_gradient_jackknife(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
if type_ci=="gaussian":
self.ci_ = numerical_gradient_gaussian(
self.obj.predict,
X,
normalize=self.normalize,
n_jobs=self.n_jobs,
level=level,
)
# interactions
if method == "inters":
assert col_inters is not None, "`col_inters` must be provided"
self.col_inters = col_inters
ix1 = np.where(X_names == col_inters)[0][0]
pbar = Progbar(p)
if type_ci=="jackknife":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_jackknife(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
if type_ci=="gaussian":
for ix2 in range(p):
self.ci_inters_.update(
{
X_names[ix2]: numerical_interactions_gaussian(
f=self.obj.predict,
X=X,
ix1=ix1,
ix2=ix2,
verbose=0,
)
}
)
pbar.update(ix2)
pbar.update(p)
print("\n")
self.y_mean_ = np.mean(y)
ss_tot = np.sum((y - self.y_mean_) ** 2)
ss_reg = np.sum((y_hat - self.y_mean_) ** 2)
ss_res = np.sum((y - y_hat) ** 2)
self.residuals_ = y - y_hat
self.r_squared_ = 1 - ss_res / ss_tot
self.adj_r_squared_ = 1 - (1 - self.r_squared_) * (n - 1) / (
n - p - 1
)
# classification and regression ---
if method == "avg":
res_df = pd.DataFrame(data=self.grad_, columns=X_names)
res_df_mean = res_df.mean()
res_df_std = res_df.std()
res_df_median = res_df.median()
res_df_min = res_df.min()
res_df_max = res_df.max()
data = pd.concat(
[res_df_mean, res_df_std, res_df_median, res_df_min, res_df_max],
axis=1
)
df_effects = pd.DataFrame(
data=data.values,
columns=["mean", "std", "median", "min", "max"],
index=X_names,
)
# heterogeneity of effects
self.effects_ = df_effects.sort_values(by=["mean"], ascending=False)
return self
def summary(self):
"""Summarise results
a method in class Explainer
Args:
None
"""
assert (
(self.ci_ is not None)
| (self.effects_ is not None)
| (self.ci_inters_ is not None)
), "object not fitted, fit the object first"
if (self.ci_ is not None) & (self.method == "ci"):
# (mean_est, se_est,
# mean_est + qt*se_est, mean_est - qt*se_est,
# p_values, signif_codes)
df_mean = pd.Series(data=self.ci_[0], index=self.X_names)
df_se = pd.Series(data=self.ci_[1], index=self.X_names)
df_ubound = pd.Series(data=self.ci_[2], index=self.X_names)
df_lbound = pd.Series(data=self.ci_[3], index=self.X_names)
df_pvalue = pd.Series(data=self.ci_[4], index=self.X_names)
df_signif = pd.Series(data=self.ci_[5], index=self.X_names)
data = pd.concat(
[df_mean, df_se, df_lbound, df_ubound, df_pvalue, df_signif],
axis=1,
)
self.ci_summary_ = pd.DataFrame(
data=data.values,
columns=[
"Estimate",
"Std. Error",
str(self.level) + "% lbound",
str(self.level) + "% ubound",
"Pr(>|t|)",
"",
],
index=self.X_names,
).sort_values(by=["Estimate"], ascending=False)
print("\n")
print(f"Score ({self.scoring}): \n {np.round(self.score_, 3)}")
if self.type_fit == "regression":
print("\n")
print("Residuals: ")
self.residuals_dist_ = pd.DataFrame(
pd.Series(
data=np.quantile(
self.residuals_, q=[0, 0.25, 0.5, 0.75, 1]
),
index=["Min", "1Q", "Median", "3Q", "Max"],
)
).transpose()
print(self.residuals_dist_.to_string(index=False))
print("\n")
if self.type_ci=="jackknife":
print("Tests on marginal effects (Jackknife): ")
if self.type_ci=="gaussian":
print("Tests on marginal effects (Gaussian noise): ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.ci_summary_)
print("\n")
print(
"Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘-’ 1"
)
if self.type_fit == "regression":
print("\n")
print(
f"Multiple R-squared: {np.round(self.r_squared_, 3)}, Adjusted R-squared: {np.round(self.adj_r_squared_, 3)}"
)
if (self.effects_ is not None) & (self.method == "avg"):
print("\n")
print("Heterogeneity of marginal effects: ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(self.effects_)
print("\n")
if (self.ci_inters_ is not None) & (self.method == "inters"):
print("\n")
print("Interactions with " + self.col_inters + ": ")
with pd.option_context(
"display.max_rows", None, "display.max_columns", None
):
print(
pd.DataFrame(
self.ci_inters_,
index=[
"Estimate",
"Std. Error",
str(95) + "% lbound",
str(95) + "% ubound",
"Pr(>|t|)",
"",
],
).transpose()
)
def plot(self, what):
"""Plot average effects, heterogeneity of effects, ...
Args:
            what: a string;
                either 'average_effects' (bar plot of mean effects) or
                'hetero_effects' (box plots of the individual effect distributions).
"""
assert self.effects_ is not None, "Call method 'fit' before plotting"
assert self.grad_ is not None, "Call method 'fit' before plotting"
# For method == "avg"
if (self.method == "avg"):
if(what == "average_effects"):
sns.set(style="darkgrid")
fi = pd.DataFrame()
fi['features'] = self.effects_.index.values
fi['effect'] = self.effects_['mean'].values
sns.barplot(x='effect', y='features',
data=fi.sort_values(by='effect', ascending=False))
if(what == "hetero_effects"):
grads_df = pd.DataFrame(data=self.grad_, columns=self.X_names)
sorted_columns = list(self.effects_.index.values) # by mean
sorted_columns.reverse()
grads_df = grads_df.reindex(sorted_columns, axis=1)
sns.set(style="darkgrid")
grads_df.boxplot(vert=False)
# For method == "ci"
if (self.method == "ci"):
assert self.ci_ is not None, "Call method 'fit' before plotting"
raise NotImplementedError("No plot for method == 'ci' yet")
def get_individual_effects(self):
assert self.grad_ is not None, "Call method 'fit' before calling this method"
if self.method == "avg":
return pd.DataFrame(data=self.grad_, columns=self.X_names)
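# NOTE: hypothetical usage sketch appended for clarity; it is not part of the
# original module. The model choice, feature names and data below are assumptions.
# from sklearn.ensemble import RandomForestRegressor
# X = np.random.randn(100, 3)
# y = X[:, 0] + 0.1 * np.random.randn(100)
# model = RandomForestRegressor(n_estimators=50).fit(X, y)
# expl = Explainer(obj=model)
# expl.fit(X, y, X_names=np.array(["x1", "x2", "x3"]), method="avg")
# expl.summary()
# expl.plot(what="average_effects")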
| 30
| 0
| 0
| 16,418
| 0
| 0
| 0
| 294
| 159
|
70d25e9deb9ce5482aecfe92367ea925fc132f5b
| 4,271
|
py
|
Python
|
script/extract_spotting_area.py
|
jingyonghou/XY_QByE_STD
|
ca2a07c70ea7466ee363cd0b81808c6794a400e5
|
[
"Apache-2.0"
] | null | null | null |
script/extract_spotting_area.py
|
jingyonghou/XY_QByE_STD
|
ca2a07c70ea7466ee363cd0b81808c6794a400e5
|
[
"Apache-2.0"
] | null | null | null |
script/extract_spotting_area.py
|
jingyonghou/XY_QByE_STD
|
ca2a07c70ea7466ee363cd0b81808c6794a400e5
|
[
"Apache-2.0"
] | 1
|
2020-07-28T06:02:03.000Z
|
2020-07-28T06:02:03.000Z
|
import sys
if __name__=="__main__":
if len(sys.argv) < 7:
print("USAGE: python %s result_dir keywordlist testlist testscp textfile ourdir"%sys.argv[0])
exit(1)
result_dir = sys.argv[1]
keywordlist = open(sys.argv[2]).readlines()
testlist = open(sys.argv[3]).readlines()
doc_scp_file = sys.argv[4]
relevant_dict = build_relevant_dict(sys.argv[5])
out_dir = sys.argv[6]
scorelist_all = []
arealist_all = []
for keyword in keywordlist:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
resultlist = result_fid.readlines()
result_fid.close()
scorelist = []
arealist = []
for res in resultlist:
fields =res.strip().split()
score = float(fields[0])
start_point = int(fields[1])
end_point = int(fields[2])
scorelist.append(score)
arealist.append((start_point, end_point))
scorelist_all.append(scorelist)
arealist_all.append(arealist)
extract_list_all = extract_spotting_area(scorelist_all, arealist_all, keywordlist, testlist, relevant_dict)
write_spot_wave(extract_list_all, doc_scp_file, out_dir)
| 36.818966
| 199
| 0.618356
|
import numpy as np
import sys
import wavedata
import random
import os
def relevant(query, text_id, relevant_dict):
if text_id in relevant_dict[query]:
return True
return False
def build_relevant_dict(text_file):
relevant_dict = {}
for line in open(text_file).readlines():
fields = line.strip().split()
text_id = fields[0]
for i in range(1, len(fields)):
keyword_id = fields[i]
            if keyword_id not in relevant_dict:
relevant_dict[keyword_id]=set()
relevant_dict[keyword_id].add(text_id)
return relevant_dict
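# Illustrative note (assumed format, inferred from the parsing above rather than
# taken from the original corpus): a line such as
#   "utt_001 keyword_a keyword_b"
# adds "utt_001" to both relevant_dict["keyword_a"] and relevant_dict["keyword_b"].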
def extract_spotting_area(scorelist_all, arealist_all, querylist, doclist, relevant_dict):
extract_list_all = []
for i in range(len(querylist)):
true_list=[]
false_list=[]
extract_list=[]
ranklist = np.array(scorelist_all[i]).argsort()
for j in range(len(ranklist)):
j_r = ranklist[j]
keyword_id = querylist[i].strip()
keyword = querylist[i].strip().split("_")[0]
utt_id = doclist[j_r].strip()
doc_id = "_".join(doclist[j_r].strip().split("_")[:-1])
if relevant(keyword, doc_id, relevant_dict):
true_list.append([ keyword_id, utt_id, 1, scorelist_all[i][j_r], j, arealist_all[i][j_r] ])
else:
false_list.append([ keyword_id, utt_id, 0, scorelist_all[i][j_r], j, arealist_all[i][j_r] ])
true_num = len(true_list)
extract_list = true_list + false_list[0:true_num]
extract_list_all.append(extract_list)
return extract_list_all
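# Descriptive note added for clarity (inferred, not from the original authors):
# the constants below assume a 10 ms frame shift, a 25 ms frame length and
# 8 kHz audio, so frame indices are mapped to sample offsets in the wav file.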
def frame_to_point(frame_pair):
return (frame_pair[0]*10*8, frame_pair[1]*10*8+25*8)
def write_spot_wave(extract_list_all, doc_scp, out_dir):
doc_dic = {}
for line in open(doc_scp).readlines():
fields = line.strip().split()
if len(fields) != 2:
print("Error: the fields of doc scp file is not 2\n")
exit(1)
doc_id = fields[0]
wav_path = fields[1]
        if doc_id in doc_dic:
print("Error: repeat key in doc scp file\n")
doc_dic[doc_id] = wav_path
for extract_list in extract_list_all:
keyword_id = extract_list[0][0]
keyword_out_dir = out_dir + "-".join(keyword_id.split("'"))
cmd = "mkdir -p " + keyword_out_dir
os.system(cmd)
for item in extract_list:
doc_id = item[1]
has_keyword = item[2]
score = item[3]
rank_position = item[4]
extract_point = frame_to_point(item[5])
inputfilename = doc_dic[doc_id]
data = wavedata.readwave(inputfilename)
spotting_data = data[extract_point[0]:extract_point[1]]
outputfilename = keyword_out_dir + "/%s_%s_%s_%s_%s_%s_%s.wav"%(str(rank_position).zfill(4), str(has_keyword), str(score), str(extract_point[0]), str(extract_point[1]),keyword_id, doc_id)
wavedata.writewave(outputfilename, spotting_data, 1, 2, 8000)
if __name__=="__main__":
if len(sys.argv) < 7:
print("USAGE: python %s result_dir keywordlist testlist testscp textfile ourdir"%sys.argv[0])
exit(1)
result_dir = sys.argv[1]
keywordlist = open(sys.argv[2]).readlines()
testlist = open(sys.argv[3]).readlines()
doc_scp_file = sys.argv[4]
relevant_dict = build_relevant_dict(sys.argv[5])
out_dir = sys.argv[6]
scorelist_all = []
arealist_all = []
for keyword in keywordlist:
result_fid = open(result_dir + keyword.strip() + ".RESULT")
resultlist = result_fid.readlines()
result_fid.close()
scorelist = []
arealist = []
for res in resultlist:
fields =res.strip().split()
score = float(fields[0])
start_point = int(fields[1])
end_point = int(fields[2])
scorelist.append(score)
arealist.append((start_point, end_point))
scorelist_all.append(scorelist)
arealist_all.append(arealist)
extract_list_all = extract_spotting_area(scorelist_all, arealist_all, keywordlist, testlist, relevant_dict)
write_spot_wave(extract_list_all, doc_scp_file, out_dir)
| 0
| 0
| 0
| 0
| 0
| 2,873
| 0
| -29
| 204
|
271deb29b66fe4e4014e52baf2d9509cf8f631f6
| 427
|
py
|
Python
|
src/smallest_integer.py
|
marco-zangari/code-katas
|
1dfda1cfbbe8687b17e97e414358b38d964df675
|
[
"MIT"
] | null | null | null |
src/smallest_integer.py
|
marco-zangari/code-katas
|
1dfda1cfbbe8687b17e97e414358b38d964df675
|
[
"MIT"
] | null | null | null |
src/smallest_integer.py
|
marco-zangari/code-katas
|
1dfda1cfbbe8687b17e97e414358b38d964df675
|
[
"MIT"
] | null | null | null |
"""Find the smallest integer in the array, Kata in Codewars."""
def smallest(alist):
"""Return the smallest integer in the list.
input: a list of integers
output: a single integer
    ex: [34, 15, 88, 2] should return 2
ex: [34, -345, -1, 100] should return -345
"""
res = [alist[0]]
for num in alist:
if res[0] > num:
res.pop()
res.append(num)
return res[0]
| 23.722222
| 63
| 0.569087
|
"""Find the smallest integer in the array, Kata in Codewars."""
def smallest(alist):
"""Return the smallest integer in the list.
input: a list of integers
output: a single integer
    ex: [34, 15, 88, 2] should return 2
ex: [34, -345, -1, 100] should return -345
"""
res = [alist[0]]
for num in alist:
if res[0] > num:
res.pop()
res.append(num)
return res[0]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e3f4e7de367fe4adbb1c08ed45342cc24a82354b
| 1,810
|
py
|
Python
|
extract-code.py
|
aaw/commafree
|
6ee17fdf1e7858546782f81b1f004659c03661d3
|
[
"Unlicense"
] | null | null | null |
extract-code.py
|
aaw/commafree
|
6ee17fdf1e7858546782f81b1f004659c03661d3
|
[
"Unlicense"
] | null | null | null |
extract-code.py
|
aaw/commafree
|
6ee17fdf1e7858546782f81b1f004659c03661d3
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
# Extracts a commafree code from a CNF file created by commafree.py and
# the output of a SAT solver on that CNF file. Only works on satisfiable
# instances.
#
# Usage: extract-code.py <cnf-file> <sat-solver-output-file>
import sys
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: %s <cnf-file> <sat-solver-output-file>' % sys.argv[0])
sys.exit(1)
mapping = strip_cnf_mapping(sys.argv[1])
solution = strip_sat_solution(sys.argv[2])
code = [mapping[code_id] for code_id in solution
if mapping.get(code_id) is not None]
assert verify_commafree(code)
print('{' + ', '.join(sorted(code)) + '}')
print('')
print('size: %s' % len(code))
| 31.754386
| 78
| 0.570718
|
#!/usr/bin/python3
# Extracts a commafree code from a CNF file created by commafree.py and
# the output of a SAT solver on that CNF file. Only works on satisfiable
# instances.
#
# Usage: extract-code.py <cnf-file> <sat-solver-output-file>
import re
import sys
def strip_cnf_mapping(filename):
# lines look like 'c var 1 == 000001 chosen'
mapping = {}
pattern = re.compile('c var ([^\\s]+) == ([^\\s]+) chosen')
with open(filename) as f:
for line in f:
if line.startswith('p'): continue
if not line.startswith('c'): return mapping
m = re.match(pattern, line)
if m is None: continue
mapping[int(m.groups()[0])] = m.groups()[1]
return mapping
def strip_sat_solution(filename):
pos = []
with open(filename) as f:
for line in f:
if not line.startswith('v'): continue
pos += [int(x) for x in line[1:].strip().split(' ') if int(x) > 0]
return pos
def verify_commafree(codewords):
n = len(codewords[0])
cws = set(c for c in codewords)
for x in codewords:
for y in codewords:
for i in range(1,n):
cw = x[i:]+y[:i]
if cw in cws:
print("CONFLICT: %s, %s, and %s." % (x,y,cw))
return False
return True
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: %s <cnf-file> <sat-solver-output-file>' % sys.argv[0])
sys.exit(1)
mapping = strip_cnf_mapping(sys.argv[1])
solution = strip_sat_solution(sys.argv[2])
code = [mapping[code_id] for code_id in solution
if mapping.get(code_id) is not None]
assert verify_commafree(code)
print('{' + ', '.join(sorted(code)) + '}')
print('')
print('size: %s' % len(code))
| 0
| 0
| 0
| 0
| 0
| 1,004
| 0
| -12
| 92
|
6496c26b86b5e1c0f0f3e63c148cc42bb42f3e84
| 22,361
|
py
|
Python
|
chrome/test/functional/autofill.py
|
meego-tablet-ux/meego-app-browser
|
0f4ef17bd4b399c9c990a2f6ca939099495c2b9c
|
[
"BSD-3-Clause"
] | 1
|
2015-10-12T09:14:22.000Z
|
2015-10-12T09:14:22.000Z
|
chrome/test/functional/autofill.py
|
meego-tablet-ux/meego-app-browser
|
0f4ef17bd4b399c9c990a2f6ca939099495c2b9c
|
[
"BSD-3-Clause"
] | null | null | null |
chrome/test/functional/autofill.py
|
meego-tablet-ux/meego-app-browser
|
0f4ef17bd4b399c9c990a2f6ca939099495c2b9c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T07:22:28.000Z
|
2020-11-04T07:22:28.000Z
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import pyauto_functional # Must be imported before pyauto
if __name__ == '__main__':
pyauto_functional.Main()
| 45.449187
| 80
| 0.653414
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import re
import autofill_dataset_converter
import autofill_dataset_generator
import pyauto_functional # Must be imported before pyauto
import pyauto
class AutofillTest(pyauto.PyUITest):
"""Tests that autofill works correctly"""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
while True:
raw_input('Hit <enter> to dump info.. ')
info = self.GetAutofillProfile()
pp.pprint(info)
def testFillProfile(self):
"""Test filling profiles and overwriting with new profiles."""
profiles = [{'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith', 'ADDRESS_HOME_ZIP': '94043',},
{'EMAIL_ADDRESS': '[email protected]',
'COMPANY_NAME': 'Company X',}]
credit_cards = [{'CREDIT_CARD_NUMBER': '6011111111111117',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2011'},
{'CREDIT_CARD_NAME': 'Bob C. Smith'}]
self.FillAutofillProfile(profiles=profiles, credit_cards=credit_cards)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
profiles = [ {'NAME_FIRST': 'Larry'}]
self.FillAutofillProfile(profiles=profiles)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
def testFillProfileCrazyCharacters(self):
"""Test filling profiles with unicode strings and crazy characters."""
# Adding autofill profiles.
file_path = os.path.join(self.DataDir(), 'autofill', 'crazy_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
self.assertEqual(profiles, self.GetAutofillProfile()['profiles'])
# Adding credit cards.
file_path = os.path.join(self.DataDir(), 'autofill',
'crazy_creditcards.txt')
test_data = self.EvalDataFrom(file_path)
credit_cards_input = test_data['input']
self.FillAutofillProfile(credit_cards=credit_cards_input)
self.assertEqual(test_data['expected'],
self.GetAutofillProfile()['credit_cards'])
def testGetProfilesEmpty(self):
"""Test getting profiles when none have been filled."""
profile = self.GetAutofillProfile()
self.assertEqual([], profile['profiles'])
self.assertEqual([], profile['credit_cards'])
def testAutofillInvalid(self):
"""Test filling in invalid values for profiles."""
# First try profiles with invalid input.
without_invalid = {'NAME_FIRST': u'Will',
'ADDRESS_HOME_CITY': 'Sunnyvale',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': 'my_zip',
'ADDRESS_HOME_COUNTRY': 'United States'}
# Add some invalid fields.
with_invalid = without_invalid.copy()
with_invalid['PHONE_HOME_WHOLE_NUMBER'] = 'Invalid_Phone_Number'
with_invalid['PHONE_FAX_WHOLE_NUMBER'] = 'Invalid_Fax_Number'
self.FillAutofillProfile(profiles=[with_invalid])
self.assertEqual([without_invalid],
self.GetAutofillProfile()['profiles'])
def testAutofillPrefsStringSavedAsIs(self):
"""Test invalid credit card numbers typed in prefs should be saved as-is."""
credit_card = {'CREDIT_CARD_NUMBER': 'Not_0123-5Checked'}
self.FillAutofillProfile(credit_cards=[credit_card])
self.assertEqual([credit_card],
self.GetAutofillProfile()['credit_cards'],
msg='Credit card number in prefs not saved as-is.')
def _LuhnCreditCardNumberValidator(self, number):
"""Validates whether a number is valid or invalid using the Luhn test.
Validation example:
1. Example number: 49927398716
2. Reverse the digits: 61789372994
3. Sum the digits in the odd-numbered position for s1:
6 + 7 + 9 + 7 + 9 + 4 = 42
4. Take the digits in the even-numbered position: 1, 8, 3, 2, 9
4.1. Two times each digit in the even-numbered position: 2, 16, 6, 4, 18
4.2. For each resulting value that is now 2 digits, add the digits
together: 2, 7, 6, 4, 9
(0 + 2 = 2, 1 + 6 = 7, 0 + 6 = 6, 0 + 4 = 4, 1 + 8 = 9)
4.3. Sum together the digits for s2: 2 + 7 + 6 + 4 + 9 = 28
5. Sum together s1 + s2 and if the sum ends in zero, the number passes the
Luhn test: 42 + 28 = 70 which is a valid credit card number.
Args:
number: the credit card number being validated, as a string.
Return:
boolean whether the credit card number is valid or not.
"""
# Filters out non-digit characters.
number = re.sub('[^0-9]', '', number)
reverse = [int(ch) for ch in str(number)][::-1]
    # divmod splits each doubled digit into its two decimal digits, ready for
    # summing.
return ((sum(reverse[0::2]) + sum(sum(divmod(d*2, 10))
for d in reverse[1::2])) % 10 == 0)
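  # Hypothetical sanity check of the Luhn helper above (not part of the original
  # suite): the docstring example 49927398716 should validate, while changing its
  # last digit should not, e.g.
  #   self._LuhnCreditCardNumberValidator('49927398716')  -> True
  #   self._LuhnCreditCardNumberValidator('49927398717')  -> False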
def testInvalidCreditCardNumberIsNotAggregated(self):
"""Test credit card info with an invalid number is not aggregated.
When filling out a form with an invalid credit card number (one that
does not pass the Luhn test) the credit card info should not be saved into
Autofill preferences.
"""
invalid_cc_info = {'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408 0412 3456 7890',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
cc_number = invalid_cc_info['CREDIT_CARD_NUMBER']
self.assertFalse(self._LuhnCreditCardNumberValidator(cc_number),
msg='This test requires an invalid credit card number.')
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'autofill_creditcard_form.html'))
self.NavigateToURL(url)
for key, value in invalid_cc_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until the form is submitted and the page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
self.assertFalse(
cc_infobar, msg='Save credit card infobar offered to save CC info.')
def testWhitespacesAndSeparatorCharsStrippedForValidCCNums(self):
"""Test whitespaces and separator chars are stripped for valid CC numbers.
The credit card numbers used in this test pass the Luhn test.
For reference: http://www.merriampark.com/anatomycc.htm
"""
credit_card_info = [{'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408 0412 3456 7893',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'},
{'CREDIT_CARD_NAME': 'Jane Doe',
'CREDIT_CARD_NUMBER': '4417-1234-5678-9113',
'CREDIT_CARD_EXP_MONTH': '10',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2013'}]
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'autofill_creditcard_form.html'))
for cc_info in credit_card_info:
self.NavigateToURL(url)
for key, value in cc_info.iteritems():
cc_number = cc_info['CREDIT_CARD_NUMBER']
self.assertTrue(self._LuhnCreditCardNumberValidator(cc_number),
msg='This test requires a valid credit card number.')
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until form is submitted and page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
self.PerformActionOnInfobar('accept', infobar_index=0)
# Verify the filled-in credit card number against the aggregated number.
aggregated_cc_1 = (
self.GetAutofillProfile()['credit_cards'][0]['CREDIT_CARD_NUMBER'])
aggregated_cc_2 = (
self.GetAutofillProfile()['credit_cards'][1]['CREDIT_CARD_NUMBER'])
self.assertFalse((' ' in aggregated_cc_1 or ' ' in aggregated_cc_2 or
'-' in aggregated_cc_1 or '-' in aggregated_cc_2),
msg='Whitespaces or separator chars not stripped.')
def testProfilesNotAggregatedWithNoAddress(self):
"""Test Autofill does not aggregate profiles with no address info."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': '[email protected]',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '650-123-4567',}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("merge_dup").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
self.assertFalse(self.GetAutofillProfile()['profiles'],
msg='Profile with no address info was aggregated.')
def testProfilesNotAggregatedWithInvalidEmail(self):
"""Test Autofill does not aggregate profiles with an invalid email."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'garbage',
'ADDRESS_HOME_LINE1': '1234 H St.',
'ADDRESS_HOME_CITY': 'San Jose',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': '95110',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '408-123-4567',}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("merge_dup").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
self.assertFalse(self.GetAutofillProfile()['profiles'],
msg='Profile with invalid email was aggregated.')
def _SendKeyEventsToPopulateForm(self, tab_index=0, windex=0):
"""Send key events to populate a web form with Autofill profile data.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
TAB_KEYPRESS = 0x09 # Tab keyboard key press.
DOWN_KEYPRESS = 0x28 # Down arrow keyboard key press.
RETURN_KEYPRESS = 0x0D # Return keyboard key press.
self.SendWebkitKeypressEvent(TAB_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(DOWN_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(DOWN_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(RETURN_KEYPRESS, tab_index, windex)
def testComparePhoneNumbers(self):
"""Test phone fields parse correctly from a given profile.
The high level key presses execute the following: Select the first text
field, invoke the autofill popup list, select the first profile within the
list, and commit to the profile to populate the form.
"""
profile_path = os.path.join(self.DataDir(), 'autofill',
'phone_pinput_autofill.txt')
profile_expected_path = os.path.join(self.DataDir(), 'autofill',
'phone_pexpected_autofill.txt')
profiles = self.EvalDataFrom(profile_path)
profiles_expected = self.EvalDataFrom(profile_expected_path)
self.FillAutofillProfile(profiles=profiles)
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'form_phones.html'))
for profile_expected in profiles_expected:
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
form_values = {}
for key, value in profile_expected.iteritems():
js_returning_field_value = (
'var field_value = document.getElementById("%s").value;'
'window.domAutomationController.send(field_value);'
) % key
form_values[key] = self.ExecuteJavascript(
js_returning_field_value, 0, 0)
self.assertEqual(
form_values[key], value,
msg=('Original profile not equal to expected profile at key: "%s"\n'
'Expected: "%s"\nReturned: "%s"' % (
key, value, form_values[key])))
def testCCInfoNotStoredWhenAutocompleteOff(self):
"""Test CC info not offered to be saved when autocomplete=off for CC field.
If the credit card number field has autocomplete turned off, then the credit
card infobar should not offer to save the credit card info. The credit card
number must be a valid Luhn number.
"""
credit_card_info = {'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408041234567893',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'cc_autocomplete_off_test.html'))
self.NavigateToURL(url)
for key, value in credit_card_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until form is submitted and page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
self.assertFalse(cc_infobar,
msg='Save credit card infobar offered to save CC info.')
def testNoAutofillForReadOnlyFields(self):
"""Test that Autofill does not fill in read-only fields."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': '[email protected]',
'ADDRESS_HOME_LINE1': '1234 H St.',
'ADDRESS_HOME_CITY': 'San Jose',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': '95110',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '408-123-4567',}
self.FillAutofillProfile(profiles=[profile])
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'read_only_field_test.html'))
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
js_return_readonly_field = (
'var field_value = document.getElementById("email").value;'
'window.domAutomationController.send(field_value);')
readonly_field_value = self.ExecuteJavascript(
js_return_readonly_field, 0, 0)
js_return_addrline1_field = (
'var field_value = document.getElementById("address").value;'
'window.domAutomationController.send(field_value);')
addrline1_field_value = self.ExecuteJavascript(
js_return_addrline1_field, 0, 0)
self.assertNotEqual(
readonly_field_value, profile['EMAIL_ADDRESS'],
'Autofill filled in value "%s" for a read-only field.'
% readonly_field_value)
self.assertEqual(
addrline1_field_value, profile['ADDRESS_HOME_LINE1'],
'Unexpected value "%s" in the Address field.' % addrline1_field_value)
def FormFillLatencyAfterSubmit(self):
"""Test latency time on form submit with lots of stored Autofill profiles.
This test verifies when a profile is selected from the Autofill dictionary
that consists of thousands of profiles, the form does not hang after being
submitted.
The high level key presses execute the following: Select the first text
field, invoke the autofill popup list, select the first profile within the
list, and commit to the profile to populate the form.
This test is partially automated. The bulk of the work is done, such as
generating 1500 plus profiles, inserting those profiles into Autofill,
selecting a profile from the list. The tester will need to click on the
submit button and check if the browser hangs.
"""
# HTML file needs to be run from a http:// url.
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'latency_after_submit_test.html'))
# Run the generator script to generate the dictionary list needed for the
# profiles.
gen = autofill_dataset_generator.DatasetGenerator(
logging_level=logging.ERROR)
list_of_dict = gen.GenerateDataset(num_of_dict_to_generate=1501)
self.FillAutofillProfile(profiles=list_of_dict)
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
# TODO(dyu): add automated form hang or crash verification.
raw_input(
'Verify the test manually. Test hang time after submitting the form.')
def AutofillCrowdsourcing(self):
"""Test able to send POST request of web form to Autofill server.
The Autofill server processes the data offline, so it can take a few days
for the result to be detectable. Manual verification is required.
"""
# HTML file needs to be run from a specific http:// url to be able to verify
# the results a few days later by visiting the same url.
url = 'http://www.corp.google.com/~dyu/autofill/crowdsourcing-test.html'
# Adding crowdsourcing Autofill profile.
file_path = os.path.join(self.DataDir(), 'autofill',
'crowdsource_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
# Autofill server captures 2.5% of the data posted.
# Looping 1000 times is a safe minimum to exceed the server's threshold or
# noise.
for i in range(1000):
fname = self.GetAutofillProfile()['profiles'][0]['NAME_FIRST']
lname = self.GetAutofillProfile()['profiles'][0]['NAME_LAST']
email = self.GetAutofillProfile()['profiles'][0]['EMAIL_ADDRESS']
# Submit form to collect crowdsourcing data for Autofill.
self.NavigateToURL(url, 0, 0)
fname_field = ('document.getElementById("fn").value = "%s"; '
'window.domAutomationController.send("done");') % fname
lname_field = ('document.getElementById("ln").value = "%s"; '
'window.domAutomationController.send("done");') % lname
email_field = ('document.getElementById("em").value = "%s"; '
'window.domAutomationController.send("done");') % email
self.ExecuteJavascript(fname_field, 0, 0);
self.ExecuteJavascript(lname_field, 0, 0);
self.ExecuteJavascript(email_field, 0, 0);
self.ExecuteJavascript('document.getElementById("frmsubmit").submit();'
'window.domAutomationController.send("done");',
0, 0)
def MergeDuplicateProfilesInAutofill(self):
"""Test Autofill ability to merge duplicate profiles and throw away junk."""
# HTML file needs to be run from a http:// url.
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
# Run the parser script to generate the dictionary list needed for the
# profiles.
c = autofill_dataset_converter.DatasetConverter(
os.path.join(self.DataDir(), 'autofill', 'dataset.txt'),
logging_level=logging.INFO) # Set verbosity to INFO, WARNING, ERROR.
list_of_dict = c.Convert()
for profile in list_of_dict:
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
self.ExecuteJavascript('document.getElementById("merge_dup").submit();'
'window.domAutomationController.send("done");',
0, 0)
# Verify total number of inputted profiles is greater than the final number
# of profiles after merging.
self.assertTrue(
len(list_of_dict) > len(self.GetAutofillProfile()['profiles']))
# Write profile dictionary to a file.
merged_profile = os.path.join(self.DataDir(), 'autofill',
'merged-profiles.txt')
profile_dict = self.GetAutofillProfile()['profiles']
output = open(merged_profile, 'wb')
pickle.dump(profile_dict, output)
output.close()
if __name__ == '__main__':
pyauto_functional.Main()
| 0
| 0
| 0
| 21,905
| 0
| 0
| 0
| -23
| 179
|
d1839a3279a5cf65bd5fa7efd4fde3026ed8d45c
| 9,871
|
py
|
Python
|
ke/images/python/cluster_tool.py
|
justasabc/kubernetes-ubuntu
|
afc670297a5becb2fcb4404c3ee1e02c99b5eaf4
|
[
"Apache-2.0"
] | 1
|
2020-10-18T01:34:39.000Z
|
2020-10-18T01:34:39.000Z
|
ke/images/python/cluster_tool.py
|
justasabc/kubernetes-ubuntu
|
afc670297a5becb2fcb4404c3ee1e02c99b5eaf4
|
[
"Apache-2.0"
] | null | null | null |
ke/images/python/cluster_tool.py
|
justasabc/kubernetes-ubuntu
|
afc670297a5becb2fcb4404c3ee1e02c99b5eaf4
|
[
"Apache-2.0"
] | null | null | null |
"""
Class Hierarchy
G{classtree: BaseTool}
Package tree
G{packagetree: cluster_tool}
Import Graph
G{importgraph: cluster_tool}
"""
#/usr/bin/python
# -*- coding:utf-8 -*-
DOCKER_SERVER_URL = 'tcp://master:2375'
if __name__=="__main__":
main()
| 34.757042
| 141
| 0.668727
|
"""
Class Hierarchy
G{classtree: BaseTool}
Package tree
G{packagetree: cluster_tool}
Import Graph
G{importgraph: cluster_tool}
"""
#/usr/bin/python
# -*- coding:utf-8 -*-
import subprocess
from json_generator import JsonGenerator
from container_client import ContainerClient
DOCKER_SERVER_URL = 'tcp://master:2375'
class BaseTool:
"""
base tool
"""
def __init__(self,name):
self.name = name
""" @type: C{string} """
def execute_command(self,command_str):
#print "[BaseTool] {0}".format(command_str)
p = subprocess.Popen(command_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
value = ""
for line in p.stdout.readlines():
value += line
return_code = p.wait()
return value.rstrip()
class KubernetesTool(BaseTool):
"""
kubernetes tool
"""
def __init__(self):
#print "[KubernetesTool] init..."
BaseTool.__init__(self,"KubernetesTool")
self.container_client = ContainerClient(DOCKER_SERVER_URL)
""" @type: L{ContainerClient} """
#print "[KubernetesTool] OK"
def __create(self,type_name,config_file):
command_str = "kubecfg -c {0} create {1}".format(config_file,type_name)
return BaseTool.execute_command(self,command_str)
def __list(self,type_name):
command_str = "kubecfg list {0}".format(type_name)
return BaseTool.execute_command(self,command_str)
def __delete(self,type_name,type_id):
command_str = "kubecfg delete {0}/{1}".format(type_name,type_id)
return BaseTool.execute_command(self,command_str)
#=====================================================================
# create pod/service/replicationController/node/minion/event
#=====================================================================
def create_pod(self,config_file):
type_name = "pods"
return self.__create(type_name,config_file)
def create_service(self,config_file):
type_name = "services"
return self.__create(type_name,config_file)
def create_replication_controller(self,config_file):
type_name = "replicationControllers"
return self.__create(type_name,config_file)
#=====================================================================
# list pod/service/replicationController/node/minion/event
#=====================================================================
def list_pods(self):
type_name = "pods"
return self.__list(type_name)
def list_services(self):
type_name = "services"
return self.__list(type_name)
def list_replication_controller(self):
type_name = "replicationControllers"
return self.__list(type_name)
#=====================================================================
# delete pod/service/replicationController/node/minion/event
#=====================================================================
def delete_pod(self,type_id):
type_name = "pods"
return self.__delete(type_name,type_id)
def delete_service(self,type_id):
type_name = "services"
return self.__delete(type_name,type_id)
def delete_replication_controller(self,type_id):
type_name = "replicationControllers"
return self.__delete(type_name,type_id)
#=====================================================================
# get pod hostname
#=====================================================================
def get_pod_hostname(self,pod_id):
command_str = "kubecfg list pods | grep "+pod_id+ " | awk '{print $3;}' | cut -f1 -d/"
return BaseTool.execute_command(self,command_str)
def hostname_to_ip(self,hostname):
if hostname == "":
print "*"*50
print "[KubernetesTool] hostname is empty! "
print "[KubernetesTool] use master node instead! "
print "*"*50
hostname = "master"
command_str = "resolveip -s {0}".format(hostname)
return BaseTool.execute_command(self,command_str)
def get_pod_ip(self,pod_id):
hostname = self.get_pod_hostname(pod_id)
return self.hostname_to_ip(hostname)
def stats_container(self,container):
command_str = "docker stats {0}".format(container)
return BaseTool.execute_command(self,command_str)
def get_host_ip(self):
command_str = "/sbin/ifconfig $ETH0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'"
return BaseTool.execute_command(self,command_str)
def get_container_ip(self,container_name):
command_str = "docker inspect -f '{{ .NetworkSettings.IPAddress }}' {0}".format(container_name)
return BaseTool.execute_command(self,command_str)
def copy_region_xml_to_minions(self,minions):
# scp -r xml/* minion1:/volumes/var/www/region_load/
for minion in minions:
print "copying xml to {0}...".format(minion)
command_str = "scp -r xml/* {0}:/volumes/var/www/region_load/".format(minion)
BaseTool.execute_command(self,command_str)
def save_json_to_file(self,dict_data,file_path):
generator = JsonGenerator('generator')
generator.generate(dict_data,file_path)
#=====================================================================
# resize replicationController
#=====================================================================
def resize_replication_controller(self,controller_id,replicas):
command_str = "kubecfg resize {0} {1}".format(controller_id,replicas)
return BaseTool.execute_command(self,command_str)
class IptablesTool(BaseTool):
    """
    Thin wrapper around iptables -t nat for adding, deleting, flushing and
    listing NAT rules on the PREROUTING/POSTROUTING/INPUT/OUTPUT chains.
    """
def __init__(self):
#print "[IptablesTool] init..."
BaseTool.__init__(self,"IptablesTool")
#print "[IptablesTool] OK"
#==========================================================
# nat add rules to PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_add_rule_to_prerouting_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
command_str = "iptables -t nat -A PREROUTING -p {0} --dport {1} -j DNAT --to-destination {2}:{3}".format(protocol,dst_port,dst_ip,dst_port)
return BaseTool.execute_command(self,command_str)
def nat_add_rule_to_postrouting_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
command_str = "iptables -t nat -A POSTROUTING -p {0} -d {1} --dport {2} -j SNAT --to-source {3}".format(protocol,dst_ip,dst_port,src_ip)
return BaseTool.execute_command(self,command_str)
    def nat_add_rule_to_input_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
        # Not implemented yet: placeholder command so callers get a harmless no-op.
        command_str = "ls"
        return BaseTool.execute_command(self,command_str)
    def nat_add_rule_to_output_chain(self,protocol,src_port,dst_port,src_ip,dst_ip):
        # Not implemented yet: placeholder command so callers get a harmless no-op.
        command_str = "ls"
        return BaseTool.execute_command(self,command_str)
#==========================================================
# nat delete rules to PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_delete_rule_from_prerouting_chain(self,rule_number):
command_str = "iptables -t nat -D PREROUTING {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
def nat_delete_rule_from_postrouting_chain(self,rule_number):
command_str = "iptables -t nat -D POSTROUTING {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
def nat_delete_rule_from_input_chain(self,rule_number):
command_str = "iptables -t nat -D INPUT {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
def nat_delete_rule_from_output_chain(self,rule_number):
command_str = "iptables -t nat -D OUTPUT {0}".format(rule_number)
return BaseTool.execute_command(self,command_str)
#==========================================================
# nat flush PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_flush_prerouting_chain(self):
command_str = "iptables -t nat -F PREROUTING"
return BaseTool.execute_command(self,command_str)
def nat_flush_postrouting_chain(self):
command_str = "iptables -t nat -F POSTROUTING"
return BaseTool.execute_command(self,command_str)
def nat_flush_input_chain(self):
command_str = "iptables -t nat -F INPUT"
return BaseTool.execute_command(self,command_str)
def nat_flush_output_chain(self):
command_str = "iptables -t nat -F OUTPUT"
return BaseTool.execute_command(self,command_str)
def nat_flush_all_chains(self):
self.nat_flush_prerouting_chain()
self.nat_flush_postrouting_chain()
self.nat_flush_input_chain()
self.nat_flush_output_chain()
#==========================================================
# nat list PREROUTING/POSTROUTING/INPUT/OUTPUT chains
#==========================================================
def nat_list_prerouting_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L PREROUTING"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_postrouting_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L POSTROUTING"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_input_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L INPUT"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_output_chain(self,with_line_numbers=False):
command_str = "iptables -t nat -L OUTPUT"
if with_line_numbers:
command_str += " --line-numbers"
return BaseTool.execute_command(self,command_str)
def nat_list_all_chains(self):
result = ""
result += (self.nat_list_prerouting_chain() + "\n")
result += (self.nat_list_postrouting_chain() + "\n")
result += (self.nat_list_input_chain() + "\n")
result += (self.nat_list_output_chain() + "\n")
return result.rstrip()
class ToolTesting(KubernetesTool,IptablesTool):
pass
def test():
    cmd = IptablesTool()
    cmd.nat_flush_prerouting_chain()
    print(cmd.nat_list_all_chains())
    print("OK")
    cmd = KubernetesTool()
    hostname = cmd.get_pod_hostname("apache-pod")
    print(cmd.hostname_to_ip(hostname))
    print("OK")
def main():
test()
if __name__=="__main__":
main()
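A minimal usage sketch of how the two wrappers above might be combined to expose a pod on the local host; the module name kube_iptables_tools, the pod name and the port are assumptions, and the commands require kubecfg, iptables and root privileges on the machine running them.

from kube_iptables_tools import KubernetesTool, IptablesTool

k8s = KubernetesTool()
nat = IptablesTool()

# Resolve the minion hosting the pod and this host's own address ($ETH0 must be set).
pod_ip = k8s.get_pod_ip("apache-pod")
host_ip = k8s.get_host_ip()

# Forward TCP port 80 on this host to port 80 on the pod's minion
# (the src_port argument is accepted but unused by the generated rules).
nat.nat_add_rule_to_prerouting_chain("tcp", 80, 80, host_ip, pod_ip)
nat.nat_add_rule_to_postrouting_chain("tcp", 80, 80, host_ip, pod_ip)
print(nat.nat_list_all_chains())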
| 0
| 0
| 0
| 9166
| 0
| 209
| 0
| 38
| 205
|
4311beaaf96391f1dec77dcb15a6c9c8eec39f67
| 239
|
py
|
Python
|
cli/__init__.py
|
Polsaker/throat
|
39fd66efb7251f1607d9bf9e407e0cbbdfc10c57
|
[
"MIT"
] | 8
|
2019-05-27T19:34:25.000Z
|
2020-03-01T19:06:48.000Z
|
cli/__init__.py
|
Polsaker/throat
|
39fd66efb7251f1607d9bf9e407e0cbbdfc10c57
|
[
"MIT"
] | null | null | null |
cli/__init__.py
|
Polsaker/throat
|
39fd66efb7251f1607d9bf9e407e0cbbdfc10c57
|
[
"MIT"
] | 7
|
2019-05-29T17:12:40.000Z
|
2020-05-01T16:41:16.000Z
|
from .recount import recount
from .admin import admin
from .default import default
from .migration import migration
from .translations import translations
commands = [
migration,
recount,
admin,
default,
translations
]
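A minimal sketch of how a commands list like this is typically attached to a Flask application's CLI, assuming each entry is a click command or group and that the package is importable as cli:

from flask import Flask
from cli import commands

app = Flask(__name__)
for command in commands:
    # app.cli is Flask's built-in click group, so each registered command
    # becomes available as `flask <command-name>`.
    app.cli.add_command(command)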
| 18.384615
| 38
| 0.74477
|
from .recount import recount
from .admin import admin
from .default import default
from .migration import migration
from .translations import translations
commands = [
migration,
recount,
admin,
default,
translations
]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
500d465798a7caedef8ae7ce212b2a7ab666165d
| 1,078
|
py
|
Python
|
madrona/common/assets.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 9
|
2015-03-09T11:04:21.000Z
|
2022-01-16T09:45:36.000Z
|
madrona/common/assets.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 1
|
2020-04-24T14:38:43.000Z
|
2020-04-24T14:38:43.000Z
|
madrona/common/assets.py
|
movermeyer/madrona
|
fcdced0a03408754b88a3d88f416e04d500c32d4
|
[
"BSD-3-Clause"
] | 2
|
2016-12-06T15:31:35.000Z
|
2018-03-04T20:04:44.000Z
|
from elementtree import ElementTree as et
import os
ROOT_PATH = ''
def get_js_files():
"""Returns a list of all the javascript files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_js_test_files():
"""Returns a list of all the javascript test files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('test'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_css_files():
"""Returns a list of all css files listed in
media/css_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/css_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
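The three helpers above only read top-level <file> and <test> elements carrying a path attribute; a minimal sketch of the XML layout they appear to expect (the root tag name and paths are assumptions), parsed here with the standard-library ElementTree rather than the old standalone elementtree package:

from xml.etree import ElementTree as et

sample = """<includes>
    <file path="media/js/app.js"/>
    <file path="media/js/map.js"/>
    <test path="media/js/tests/test_app.js"/>
</includes>"""

root = et.fromstring(sample)
js_files = [f.get('path') for f in root.findall('file')]    # what get_js_files() collects
test_files = [t.get('path') for t in root.findall('test')]  # what get_js_test_files() collects
print(js_files, test_files)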
| 30.8
| 65
| 0.646568
|
from elementtree import ElementTree as et
import os
ROOT_PATH = ''
def get_js_files():
"""Returns a list of all the javascript files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_js_test_files():
"""Returns a list of all the javascript test files listed in
media/js_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/js_includes.xml')
for f in tree.findall('test'):
files.append(ROOT_PATH + f.get('path'))
return files
def get_css_files():
"""Returns a list of all css files listed in
media/css_includes.xml"""
files = []
path = os.path.dirname(os.path.abspath(__file__))
tree = et.parse(path + '/../media/css_includes.xml')
for f in tree.findall('file'):
files.append(ROOT_PATH + f.get('path'))
return files
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
48a887f207778a7c4e05b2e0a8a7e32643674841
| 1,018
|
py
|
Python
|
tests/conf/config.py
|
robert-werner/fastapi-crudrouter
|
4f924307b53e5ea1adaa509302800c060ee7d06a
|
[
"MIT"
] | null | null | null |
tests/conf/config.py
|
robert-werner/fastapi-crudrouter
|
4f924307b53e5ea1adaa509302800c060ee7d06a
|
[
"MIT"
] | null | null | null |
tests/conf/config.py
|
robert-werner/fastapi-crudrouter
|
4f924307b53e5ea1adaa509302800c060ee7d06a
|
[
"MIT"
] | null | null | null |
import pathlib
ENV_FILE_PATH = pathlib.Path(__file__).parent / "dev.env"
assert ENV_FILE_PATH.exists()
| 27.513514
| 150
| 0.574656
|
import os
import pathlib
ENV_FILE_PATH = pathlib.Path(__file__).parent / "dev.env"
assert ENV_FILE_PATH.exists()
class BaseConfig:
POSTGRES_HOST = ""
POSTGRES_USER = ""
POSTGRES_PASSWORD = ""
POSTGRES_DB = ""
POSTGRES_PORT = ""
def __init__(self):
self._apply_dot_env()
self._apply_env_vars()
self.POSTGRES_URI = f"postgresql://{self.POSTGRES_USER}:{self.POSTGRES_PASSWORD}@{self.POSTGRES_HOST}:{self.POSTGRES_PORT}/{self.POSTGRES_DB}"
print(self.POSTGRES_URI)
def _apply_dot_env(self):
with open(ENV_FILE_PATH) as fp:
for line in fp.readlines():
line = line.strip(" \n")
if not line.startswith("#"):
k, v = line.split("=", 1)
if hasattr(self, k) and not getattr(self, k):
setattr(self, k, v)
def _apply_env_vars(self):
for k, v in os.environ.items():
if hasattr(self, k):
setattr(self, k, v)
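A minimal usage sketch for the class above, assuming a dev.env file exists next to the module and showing that process environment variables override values read from it (the values are hypothetical):

import os

os.environ["POSTGRES_HOST"] = "localhost"   # applied after dev.env, so it wins
os.environ["POSTGRES_PORT"] = "5432"

config = BaseConfig()
# POSTGRES_URI is assembled as postgresql://user:password@host:port/db
print(config.POSTGRES_URI)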
| 0
| 0
| 0
| 879
| 0
| 0
| 0
| -12
| 45
|
530a06fbf60cdea98dfb1c9085cf498b370520c5
| 3,958
|
py
|
Python
|
.2lanemdr/2lanemdr.py
|
hemidactylus/nbws1
|
282cc2f0d5c04f5fc818f3e411dfb5b549ea47f6
|
[
"Apache-2.0"
] | null | null | null |
.2lanemdr/2lanemdr.py
|
hemidactylus/nbws1
|
282cc2f0d5c04f5fc818f3e411dfb5b549ea47f6
|
[
"Apache-2.0"
] | null | null | null |
.2lanemdr/2lanemdr.py
|
hemidactylus/nbws1
|
282cc2f0d5c04f5fc818f3e411dfb5b549ea47f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import re
import os
import sys
import json
import subprocess
DEF_FILE = '.2lane.info'
DIRECTIVE_TEMPLATE = '<!-- 2L {body} -->'
TYPO_WARNING_FINDER = re.compile(r'\W2L\W', re.IGNORECASE)
MESSAGE_TEMPLATE = '** 2lanemdr {kind} on {filename}:{linenumber} "{message}"'
def parseDirective(line, wrcs):
"""
Return (kind, target):
('endif', None)
('if', <fn>)
('elif', <fn>)
(None, None)
"""
if line == DIRECTIVE_TEMPLATE.format(body='ENDIF'):
return ('endif', None)
else:
for fn in wrcs.keys():
if line == DIRECTIVE_TEMPLATE.format(body='IF %s' % fn):
return ('if', fn)
elif line == DIRECTIVE_TEMPLATE.format(body='ELIF %s' % fn):
return ('elif', fn)
#
return None, None
def mkFiles(src, prescr, warner, errorer):
"""
Return a list with the path to all files created
"""
inContents = [
li.replace('\n', '')
for li in open(src).readlines()
]
# open files
oFiles = {
fn: open(fp, 'w')
for fn, fp in prescr.items()
}
# cursor setting
writing = {
fn: True
for fn in oFiles.keys()
}
# process lines
for lineNumber, line in enumerate(inContents):
# directive or content line?
directive, dTarget = parseDirective(line, writing)
if directive is not None:
# validate and process
if directive == 'endif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ENDIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = True
elif directive == 'if':
if sum(int(c) for c in writing.values()) != len(writing):
errorer('Misplaced IF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
elif directive == 'elif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ELIF', lineNumber)
elif writing[dTarget]:
errorer('Repeated target in ELIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
else:
errorer('Unknown directive', lineNumber)
else:
#
if TYPO_WARNING_FINDER.search(line):
warner('check line', lineNumber)
# write serially on all active cursors
for fn, fh in oFiles.items():
if writing[fn]:
fh.write('%s\n' % line)
# close files
for fn, fh in oFiles.items():
fh.close()
return [fp for fp in prescr.values()]
if __name__ == '__main__':
if os.path.isfile(DEF_FILE):
defs = json.load(open(DEF_FILE))
files = defs.get('sources', {})
#
allCreatedFiles = []
#
        for origF, dests in files.items():
            def warner(msg, intLineno):
                wmsg = MESSAGE_TEMPLATE.format(
                    kind='WARNING',
                    filename=origF,
                    linenumber=intLineno+1,
                    message=msg,
                )
                print(wmsg)
            def errorer(msg, intLineno):
                emsg = MESSAGE_TEMPLATE.format(
                    kind='ERROR',
                    filename=origF,
                    linenumber=intLineno+1,
                    message=msg,
                )
                print(emsg)
                sys.exit(1)
            createdFiles = mkFiles(origF, dests, warner=warner, errorer=errorer)
allCreatedFiles += createdFiles
# we git add the created files
subprocess.call(['git', 'add'] + allCreatedFiles)
| 29.984848
| 80
| 0.490904
|
#!/usr/bin/python
import re
import os
import sys
import json
import subprocess
DEF_FILE = '.2lane.info'
DIRECTIVE_TEMPLATE = '<!-- 2L {body} -->'
TYPO_WARNING_FINDER = re.compile('\W2L\W', re.IGNORECASE)
MESSAGE_TEMPLATE = '** 2lanemdr {kind} on {filename}:{linenumber} "{message}"'
def parseDirective(line, wrcs):
"""
Return (kind, target):
('endif', None)
('if', <fn>)
('elif', <fn>)
(None, None)
"""
if line == DIRECTIVE_TEMPLATE.format(body='ENDIF'):
return ('endif', None)
else:
for fn in wrcs.keys():
if line == DIRECTIVE_TEMPLATE.format(body='IF %s' % fn):
return ('if', fn)
elif line == DIRECTIVE_TEMPLATE.format(body='ELIF %s' % fn):
return ('elif', fn)
#
return None, None
def mkFiles(src, prescr, warner, errorer):
"""
Return a list with the path to all files created
"""
inContents = [
li.replace('\n', '')
for li in open(src).readlines()
]
# open files
oFiles = {
fn: open(fp, 'w')
for fn, fp in prescr.items()
}
# cursor setting
writing = {
fn: True
for fn in oFiles.keys()
}
# process lines
for lineNumber, line in enumerate(inContents):
# directive or content line?
directive, dTarget = parseDirective(line, writing)
if directive is not None:
# validate and process
if directive == 'endif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ENDIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = True
elif directive == 'if':
if sum(int(c) for c in writing.values()) != len(writing):
errorer('Misplaced IF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
elif directive == 'elif':
if sum(int(c) for c in writing.values()) != 1:
errorer('Misplaced ELIF', lineNumber)
elif writing[dTarget]:
errorer('Repeated target in ELIF', lineNumber)
else:
for fn in writing.keys():
writing[fn] = fn == dTarget
else:
errorer('Unknown directive', lineNumber)
else:
#
if TYPO_WARNING_FINDER.search(line):
warner('check line', lineNumber)
# write serially on all active cursors
for fn, fh in oFiles.items():
if writing[fn]:
fh.write('%s\n' % line)
# close files
for fn, fh in oFiles.items():
fh.close()
return [fp for fp in prescr.values()]
if __name__ == '__main__':
if os.path.isfile(DEF_FILE):
defs = json.load(open(DEF_FILE))
files = defs.get('sources', {})
#
allCreatedFiles = []
#
for origF, dests in files.items():
def warner(msg, intLineno):
wmsg = MESSAGE_TEMPLATE.format(
kind='WARNING',
filename=origF,
linenumber=intLineno+1,
message=msg,
)
print(wmsg)
def errorer(msg, intLineno):
emsg = MESSAGE_TEMPLATE.format(
kind='ERROR',
filename=origF,
linenumber=intLineno+1,
message=msg,
)
print(emsg)
sys.exit(1)
createdFiles = mkFiles(origF, dests, warner=warner, errorer=errorer)
allCreatedFiles += createdFiles
# we git add the created files
subprocess.call(['git', 'add'] + allCreatedFiles)
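A minimal sketch of the inputs this hook consumes: a .2lane.info file mapping each annotated source to its per-lane outputs, and a source document using the IF/ELIF/ENDIF directives (the file names and lane names are hypothetical):

import json

# .2lane.info: {"sources": {<annotated source>: {<lane name>: <output path>, ...}}}
with open(".2lane.info", "w") as f:
    json.dump({"sources": {"README.src.md": {"github": "README.md",
                                             "gitlab": "README.gitlab.md"}}}, f)

with open("README.src.md", "w") as f:
    f.write("Shared introduction for both lanes.\n"
            "<!-- 2L IF github -->\n"
            "GitHub-only instructions.\n"
            "<!-- 2L ELIF gitlab -->\n"
            "GitLab-only instructions.\n"
            "<!-- 2L ENDIF -->\n")

# Running the hook (or calling mkFiles directly) then writes README.md containing
# only the github lane and README.gitlab.md containing only the gitlab lane.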
| 0
| 0
| 0
| 0
| 0
| 525
| 0
| -11
| 92
|