| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 6 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 247 |
| max_stars_repo_name | string | lengths 4 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 368k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 247 |
| max_issues_repo_name | string | lengths 4 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 247 |
| max_forks_repo_name | string | lengths 4 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 1 to 1.04M |
| avg_line_length | float64 | 1.53 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | lengths 6 to 1.04M |
| filtered:remove_non_ascii | int64 | 0 to 538k |
| filtered:remove_decorators | int64 | 0 to 917k |
| filtered:remove_async | int64 | 0 to 722k |
| filtered:remove_classes | int64 | -45 to 1M |
| filtered:remove_generators | int64 | 0 to 814k |
| filtered:remove_function_no_docstring | int64 | -102 to 850k |
| filtered:remove_class_no_docstring | int64 | -3 to 5.46k |
| filtered:remove_unused_imports | int64 | -1,350 to 52.4k |
| filtered:remove_delete_markers | int64 | 0 to 59.6k |
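The columns above describe one row per source file, with `content` apparently holding the filtered code, `original_content` the unfiltered code, and the `filtered:*` counters recording how much each filtering pass changed. The sketch below shows, under those assumptions, how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the `data_files` pattern is a placeholder, since the dataset's real location is not given here.

```python
# Hedged sketch: load rows with the schema above and keep small, ASCII-clean files.
# "data/*.jsonl" is a placeholder path, not the dataset's actual location.
from datasets import load_dataset

ds = load_dataset("json", data_files="data/*.jsonl", split="train")

small_clean = ds.filter(
    lambda row: row["size"] < 10_000 and row["filtered:remove_non_ascii"] == 0
)

for row in small_clean.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```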
| 5108e36e1b24e0722a49cfe5996dd84987d18722 | 1,337 | py | Python | ex105.py | ArthurCorrea/python-exercises | 0c2ac46b8c40dd9868b132e847cfa42e025095e3 | ["MIT"] | null | null | null | ex105.py | ArthurCorrea/python-exercises | 0c2ac46b8c40dd9868b132e847cfa42e025095e3 | ["MIT"] | null | null | null | ex105.py | ArthurCorrea/python-exercises | 0c2ac46b8c40dd9868b132e847cfa42e025095e3 | ["MIT"] | null | null | null |
# Faa um programa que tenha um funo notas() que pode receber vrias
# notas de alunos e vai retornar um dicionrio com as seguintes informaes:
# - Quantidade de notas;
# - A maior nota;
# - A menor nota;
# - A mdia da turma;
# - A situao(opcional);
# Adicione tambm as docstrings da funo.
def notas(show=False):
"""
:param show: mostra a situao da turma de acordo com o escolhido: True ou False
:return: sem retorno
"""
somanotas = 0
d = dict()
lista = list()
qtdvalores = 0
while True:
n1 = float(input(f'Nota do aluno {qtdvalores}: '))
somanotas += n1
lista.append(n1)
qtdvalores += 1
d['Qtd notas'] = qtdvalores
resp = str(input('Quer continuar: [S/N] ')).upper().strip()[0]
while resp != 'S' and resp != 'N':
resp = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
if resp == 'N':
break
d['Maior nota'] = max(lista)
d['Menor nota'] = min(lista)
d['Mdia da turma'] = somanotas / qtdvalores
if show:
if d['Mdia da turma'] < 5:
d['Situao'] = 'Ruim'
elif 5 <= d['Mdia da turma'] < 7:
d['Situao'] = 'Razovel'
else:
d['Situao'] = 'Boa'
print(d)
else:
print(d)
notas()
notas(show=True)
| 27.854167 | 84 | 0.554226 |
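The three cells just above are, per the schema, avg_line_length (27.854167), max_line_length (84), and alphanum_fraction (0.554226) for this row's filtered `content`. A hedged sketch of how such statistics could be computed is shown below; the dump does not include the pipeline's own code, and details such as newline handling and what counts as alphanumeric are our assumptions.

```python
# Hedged sketch: per-file statistics in the spirit of the three cells above.
# Exact conventions (line splitting, alphanumeric definition) are assumed.
def file_stats(text: str) -> tuple[float, int, float]:
    lines = text.splitlines()
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in text) / len(text)
    return avg_line_length, max_line_length, alphanum_fraction
```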
# Faça um programa que tenha um função notas() que pode receber várias
# notas de alunos e vai retornar um dicionário com as seguintes informações:
# - Quantidade de notas;
# - A maior nota;
# - A menor nota;
# - A média da turma;
# - A situação(opcional);
# Adicione também as docstrings da função.
def notas(show=False):
"""
:param show: mostra a situação da turma de acordo com o escolhido: True ou False
:return: sem retorno
"""
somanotas = 0
d = dict()
lista = list()
qtdvalores = 0
while True:
n1 = float(input(f'Nota do aluno {qtdvalores}: '))
somanotas += n1
lista.append(n1)
qtdvalores += 1
d['Qtd notas'] = qtdvalores
resp = str(input('Quer continuar: [S/N] ')).upper().strip()[0]
while resp != 'S' and resp != 'N':
resp = str(input('Quer continuar? [S/N] ')).upper().strip()[0]
if resp == 'N':
break
d['Maior nota'] = max(lista)
d['Menor nota'] = min(lista)
d['Média da turma'] = somanotas / qtdvalores
if show:
if d['Média da turma'] < 5:
d['Situação'] = 'Ruim'
elif 5 <= d['Média da turma'] < 7:
d['Situação'] = 'Razoável'
else:
d['Situação'] = 'Boa'
print(d)
else:
print(d)
notas()
notas(show=True)
| 50 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
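In the row above, `content` is the same ex105.py file as `original_content` but with the accented characters of its Portuguese comments stripped, and the first filter cell (`filtered:remove_non_ascii`) reads 50. Assuming that cell counts characters dropped by an ASCII-only pass (an inference, not stated in the dump), a number like that could be reproduced roughly as sketched here:

```python
# Hedged sketch: count the characters an ASCII-only filter would drop.
# The assumption that "filtered:remove_non_ascii" is computed this way is ours.
def non_ascii_delta(original: str) -> int:
    stripped = original.encode("ascii", errors="ignore").decode("ascii")
    return len(original) - len(stripped)

# First comment line of the row's original_content:
line = "# Faça um programa que tenha um função notas() que pode receber várias"
print(non_ascii_delta(line))  # 4: ç, ç, ã and á are dropped
```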
| 78baa060484aee5b5791c697ecb50180a17dcae1 | 15,613 | py | Python | core/models/group.py | agolibroda/PyTorWiki | 678a2ae13d0027c61af36e61b72e4e54493a29ac | ["Apache-2.0"] | null | null | null | core/models/group.py | agolibroda/PyTorWiki | 678a2ae13d0027c61af36e61b72e4e54493a29ac | ["Apache-2.0"] | null | null | null | core/models/group.py | agolibroda/PyTorWiki | 678a2ae13d0027c61af36e61b72e4e54493a29ac | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# Copyright 2016 Alec Goliboda
#
# group.py
from __future__ import print_function
# import markdown
# import pymysql
# from _overlapped import NULL
##############
from .. import WikiException
# from core.models.template import Template
| 40.343669 | 193 | 0.547172 |
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
#
# Copyright 2016 Alec Goliboda
#
# group.py
from __future__ import print_function
import logging
import json
import zlib
# import markdown
from datetime import datetime
import tornado.options
# import pymysql
import hashlib
import bcrypt
import base64
# from _overlapped import NULL
##############
import config
from . import Model, CipherWrapper
from .. import WikiException
from core.models.author import Author
from ..constants.data_base import *
# from core.models.template import Template
from ..constants.data_base import *
class Group(Model):
"""
модель - для Группы
внутри будут:
- список участников
- библиотека
Просмотр:
- список всех групп
- одну группу Описание
- список участников группы
- список статей (библиотека)
- создавать группы
- "удалять группы" - о... нужен флаг - "группа удалена"???
- добавлять (удаять) участников в группу
- по приглашению - нужен список приглашений - соотв, у каждого автора может быть список приглашений "вступить в группу"
- нужен список заявок на вступление - это инструмент админа группы "список заявок на вступление"
- добавлять (удалять) статьи в библиотеку
- статья моет ажодится в библиотеке и иметь флаг
"pbl" - для всеобщего доступа
"grp" - только для группы, такие стаьи будут ЗАКРЫТЫМИ!!!!
Видимость групп (group_status)
- публичная - 'pbl' - любой посетитель может читать публичные материалы группы
- закрытая - 'shut' ??? - что - то я пока не знаю.... может, в закрытых группах не может быть "публичных статей"??
Процедура создания новой группы:
При создании новой группы, Создатель группы становится ее Администратором.
Запись о создании группы размещается в таблицах "dt_headers" и "groups"
Запись о вступлении в группу Администратора добавляется в таблицу "members";
Процедура работы с Ключами:
Создается уникальная пара RSA-ключей,
Публичный ключ помещается в заголовок группы,
персональный - размещается в списке "members",
Приватный ключ группы закрывается Публичным ключем Создателя группы,
и добавляется в соответствующее поле таблицы "members"
Когда Участник Группы открывает страницу группы (переходит на рабочий стол группы)
в профиль Участника добавляется значение его копии приватного ключа группы;
После этого пользователь сможет читать и редактировать все статьи из групповой библиотеки, имеющие флаг "grp"
"""
def __init__(self, group_title = '', group_annotation = '', group_status = 'pbl'):
Model.__init__(self)
self.dt_header_id = 0
# self.author_id = 0
self.group_title = group_title
self.group_annotation = group_annotation
self.group_status = group_status
self.public_key = ''
self.private_key = ''
self.private_key_hash = ''
# self.group_create_date = datetime.now()
self.setDataStruct(Model.TableDef( tabName='groups',
idFieldName=None,
mainPrimaryList =['dt_header_id'],
listAttrNames=['dt_header_id', 'group_title', 'group_annotation', 'group_status']))
self.setHeadStruct(Model.TableDef( tabName='dt_headers',
idFieldName='dt_header_id',
mainPrimaryList =['dt_header_id'],
listAttrNames=['dt_header_type', 'public_key']))
class Member(Model):
def __init__(self):
Model.__init__(self)
self.group_id = 0
self.author_id = 0
self.member_role_type = 'M'
self.setDataStruct(Model.TableDef( tabName='members',
idFieldName=None,
mainPrimaryList =None,
listAttrNames=['group_id', 'author_id', 'member_role_type', 'private_key']))
def save(self, authorId ):
operationFlag = 'I'
revisions_sha_hash_sou = str(self.group_id) + str(self.author_id) + self.member_role_type
logging.info(' Member save:: self = ' + str(self))
Model.save(self, authorId, operationFlag, revisions_sha_hash_sou)
def getGroupMembersleList(self, groupId):
"""
Получить список всех соучастников одной группы
"""
getRez = self.select(
'dt_headers.dt_header_id, author_name, author_surname, author_role, author_phon, author_email, author_create, dt_headers.public_key ',
'authors, dt_headers',
{
'whereStr': " members.group_id = authors.dt_header_id AND dt_headers.dt_header_id = authors.dt_header_id AND " +\
" members.actual_flag = 'A' AND authors.actual_flag = 'A' AND "
" members.group_id = " + str(groupId) , # строка набор условий для выбора строк
'orderStr': ' author_name, author_surname ', # строка порядок строк
}
)
# 'whereStr': " groups.author_id = authors.author_id AND groups.group_id = " + str(group_id)
# logging.info( 'getGroupMembersleList:: getRez = ' + str(getRez))
if len(getRez) == 0:
# raise WikiException( ARTICLE_NOT_FOUND )
return []
authorList = []
author = Author()
for autorStruct in getRez:
authorList.append(author.parsingAuthor(self, autorStruct))
return authorList
class Library(Model):
def __init__(self, groupId = 0, articleId=0, libraryPermissionType = 'W' ):
Model.__init__(self)
self.group_id = groupId
self.article_id = articleId
self.library_permission_type = libraryPermissionType
self.setDataStruct(Model.TableDef( tabName='librarys',
idFieldName=None,
mainPrimaryList =['group_id','article_id' ],
listAttrNames=['group_id', 'author_id', 'library_permission_type']))
def save(self, autorId):
operationFlag = 'I'
revisionsShaHashSou = str(self.group_id) + str(self.article_id) + self.library_permission_type
# logging.info(' Library save:: self = ' + str(self))
Model.save(self, autorId, operationFlag, revisionsShaHashSou)
# self.dt_header_id = Model.save(self, self.dt_header_id, operationFlag, sha_hash_sou)
def getGroupArticleList(self, groupId):
"""
Получить список всех статей одной группы
"""
getRez = self.select(
' articles.article_id, articles.article_title, articles.article_link, ' +
' articles.article_annotation, articles.article_category_id, ' +
' articles.article_template_id, ' +
' null AS group_title, null AS group_annotation, librarys AS group_id, librarys.library_permission_type ',
'articles',
{
'whereStr': " librarys.article_id = articles.article_id AND " +\
" articles.actual_flag = 'A' AND librarys.actual_flag = 'A' AND " +\
" librarys.group_id = " + str(groupId) , # строка набор условий для выбора строк
'orderStr': ' articles.article_id ', # строка порядок строк
}
)
# 'whereStr': " groups.dt_header_id = authors.dt_header_id AND groups.group_id = " + str(group_id)
# for item in getRez:
# logging.info( 'getGroupArticleList:: getRez = ' + str(item))
if len(getRez) == 0:
# raise WikiException( ARTICLE_NOT_FOUND )
return []
return getRez
def get(self, groupId):
"""
загрузить ОДНО значение - по ИД группы
"""
resList = self.select(
'dt_headers.dt_header_id, group_title, group_annotation ' , # строка - чего хотим получить из селекта
'dt_headers', #'authors', # строка - список таблиц
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id AND dt_headers.dt_header_id = " + str(groupId)
} # все остальные секции селекта
)
# for item in resList:
# logging.info('Author:: get:: resList = ' + str(item))
if len(resList) == 1:
# return resList[0]
objValuesNameList = list(resList[0].__dict__.keys())
for objValue in objValuesNameList:
if objValue.find('_') != 0:
self.__setattr__(objValue,resList[0].__getattribute__(objValue) )
return self
else:
raise WikiException(LOAD_ONE_VALUE_ERROR)
def list(self):
"""
загрузить список всех групп
"""
resList = self.select(
'dt_headers.dt_header_id, group_title, group_annotation, group_status ' , # строка - чего хотим получить из селекта
'dt_headers', #'authors', # строка - список таблиц
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id "
} # все остальные секции селекта
)
# logging.info('Author:: get:: resList = ')
# logging.info(resList)
return resList
def grouplistForAutor(self, authorId):
"""
Получить список групп для одного автора - все руппы, которые АВТОР создал,
и в которых АВТОР является участником
вот тут возможно, надо будет все поправить -
и показывать только ПАБЛИК группы, и/или приватные группы,
в которых участвуют оба - и зритель, и автор
"""
try:
resList = self.select(
' DISTINCT dt_headers.dt_header_id, groups.group_title, groups.group_annotation, groups.group_status, ' +
' members.member_role_type ' , # строка - чего хотим получить из селекта
' members, dt_headers ', #'authors', # строка - список таблиц
{
'whereStr': " groups.actual_flag = 'A' AND groups.dt_header_id = dt_headers.dt_header_id AND " +
" members.author_id = " + str(authorId) +
" AND members.group_id = groups.dt_header_id ",
'orderStr': ' groups.group_title '
} # все остальные секции селекта
)
# logging.info( 'grouplistForAutor:: resList = ' + str(resList))
return resList
except Exception as e:
# except WikiException as e:
# WikiException( ARTICLE_NOT_FOUND )
logging.info( 'grouplistForAutor::Have ERROR!!! ' + str(e))
if not article: raise tornado.web.HTTPError(404)
else: return (article, [])
def getGroupArticleList(self, groupId):
"""
Получить список всех статей одной группы
"""
libControl = self.Library ()
return libControl.getGroupArticleList( groupId)
def getGroupMembersleList(self, groupId):
"""
Получить список всех Участников одной группы
"""
memberControl = self.Member ()
return memberControl.getGroupMembersleList( groupId)
def save(self, authorId ):
"""
сохранить группу,
пользователя, который создал группу надо воткнуть не только в авторы группы,
но, и в "members" да еще и АДМИНОМ!!!
"""
bbsalt = config.options.salt.encode()
cip = CipherWrapper()
logging.info(' save:: before SAVE = ' + str(self))
if self.dt_header_id == 0:
# self.group_create_date = datetime.now()
operationFlag = 'I'
autotControl = Author()
creator = autotControl.get(authorId)
cip.rsaInit() # сделать пару ключей
self.public_key = cip.rsaPubSerialiation(cip.getPublicKey())
pKey = cip.getPrivateKey() # поучить незакрытый приватный ключ
# self.private_key_hash = bcrypt.hashpw(cip.rsaPrivateSerialiation(pKey), bbsalt).decode('utf-8') # получим ХЕш приватного ключа - для последуюей проверки при восстановлении пароля
# logging.info(' save:: before SAVE creator.publicKey() = ' + str(creator.publicKey()))
pkTmp = cip.rsaEncrypt(creator.publicKey(), cip.rsaPrivateSerialiation(pKey))
# logging.info(' save:: before SAVE pkTmp = ' + str(pkTmp))
self.private_key = pkTmp
else:
operationFlag = 'U'
self.begin()
revisions_sha_hash_sou = str(self.group_title) + str(self.group_annotation) + str(self.group_status)
# self.dt_header_id =
Model.save(self, authorId, operationFlag, revisions_sha_hash_sou )
# теперь сохранить автора группы как ее админа.
# logging.info(' SAVE:: GROUPPPPP authorId = ' + str(authorId))
# logging.info(' SAVE:: GROUPPPPP 2 = ' + str(self))
if operationFlag == 'I':
memberControl = self.Member()
memberControl.author_id = authorId
memberControl.group_id = self.dt_header_id
memberControl.member_role_type = 'A'
memberControl.private_key = self.private_key
# bbWrk = (bytePass+bbsalt)[0:32]
# cipher_aes = AES.new(bbWrk, AES.MODE_EAX) # закроем приватный ключ на пароль пользователя.
# ciphertext = cipher_aes.encrypt(pKey)
# self.private_key = pickle.dumps({'cipherKey': ciphertext, 'nonce': cipher_aes.nonce})
memberControl.save(authorId)
self.commit()
return True
def librarySave(self, authorId = 0, groupId = 0, article_id=0, library_permission_type = 'W'):
"""
Добавить статью к группе
"""
libControl = self.Library(groupId, authorId, library_permission_type)
libControl.save(authorId)
| 4,704 | 0 | 0 | 12,575 | 0 | 0 | 0 | 14 | 341 |
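For the row above, `content` keeps only the module header of core/models/group.py: the class definitions present in `original_content` are gone, and the fourth filter cell (`filtered:remove_classes`) reads 12,575. A minimal sketch of how a class-removal pass could be written with the standard `ast` module follows; it is illustrative only, not the pipeline's actual code (for one thing, `ast.unparse` discards comments, while the `content` cell above clearly keeps them).

```python
# Hedged sketch of a "remove_classes" style filter using only the standard library.
# Illustrative only; the real pipeline's implementation is not shown in this dump.
import ast

def remove_classes(source: str) -> str:
    """Drop every top-level class definition and re-emit the remaining code."""
    tree = ast.parse(source)
    tree.body = [node for node in tree.body if not isinstance(node, ast.ClassDef)]
    return ast.unparse(tree)  # requires Python 3.9+

example = "import json\n\nclass Gone:\n    pass\n\nx = 1\n"
print(remove_classes(example))  # prints only "import json" and "x = 1"
```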
| 70b6c80341def36320aeb56eea498bea8fda840e | 4,327 | py | Python | spug_api/libs/parser.py | atompi/spug | 88ebd46e47c88731b40cb82a6c7a360511b703fa | ["MIT"] | null | null | null | spug_api/libs/parser.py | atompi/spug | 88ebd46e47c88731b40cb82a6c7a360511b703fa | ["MIT"] | null | null | null | spug_api/libs/parser.py | atompi/spug | 88ebd46e47c88731b40cb82a6c7a360511b703fa | ["MIT"] | null | null | null |
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <[email protected]>
# Released under the AGPL-3.0 License.
#
#
#
# Json
| 33.030534 | 105 | 0.554657 |
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <[email protected]>
# Released under the AGPL-3.0 License.
import json
from .utils import AttrDict
# 自定义的解析异常
class ParseError(BaseException):
def __init__(self, message):
self.message = message
# 需要校验的参数对象
class Argument(object):
"""
:param name: name of option
:param default: default value if the argument if absent
:param bool required: is required
"""
def __init__(self, name, default=None, handler=None, required=True, type=str, filter=None, help=None,
nullable=False):
self.name = name
self.default = default
self.type = type
self.required = required
self.nullable = nullable
self.filter = filter
self.help = help
self.handler = handler
if not isinstance(self.name, str):
raise TypeError('Argument name must be string')
if filter and not callable(self.filter):
raise TypeError('Argument filter is not callable')
def parse(self, has_key, value):
if not has_key:
if self.required and self.default is None:
raise ParseError(
self.help or 'Required Error: %s is required' % self.name)
else:
return self.default
elif value in [u'', '', None]:
if self.default is not None:
return self.default
elif not self.nullable and self.required:
raise ParseError(
self.help or 'Value Error: %s must not be null' % self.name)
else:
return None
try:
if self.type:
if self.type in (list, dict) and isinstance(value, str):
value = json.loads(value)
assert isinstance(value, self.type)
elif self.type == bool and isinstance(value, str):
assert value.lower() in ['true', 'false']
value = value.lower() == 'true'
elif not isinstance(value, self.type):
value = self.type(value)
except (TypeError, ValueError, AssertionError):
raise ParseError(self.help or 'Type Error: %s type must be %s' % (
self.name, self.type))
if self.filter:
if not self.filter(value):
raise ParseError(
self.help or 'Value Error: %s filter check failed' % self.name)
if self.handler:
value = self.handler(value)
return value
# 解析器基类
class BaseParser(object):
def __init__(self, *args):
self.args = []
for e in args:
if isinstance(e, str):
e = Argument(e)
elif not isinstance(e, Argument):
raise TypeError('%r is not instance of Argument' % e)
self.args.append(e)
def _get(self, key):
raise NotImplementedError
def _init(self, data):
raise NotImplementedError
def add_argument(self, **kwargs):
self.args.append(Argument(**kwargs))
def parse(self, data=None, clear=False):
rst = AttrDict()
try:
self._init(data)
for e in self.args:
has_key, value = self._get(e.name)
if clear and has_key is False and e.required is False:
continue
rst[e.name] = e.parse(has_key, value)
except ParseError as err:
return None, err.message
return rst, None
# Json解析器
class JsonParser(BaseParser):
def __init__(self, *args):
self.__data = None
super(JsonParser, self).__init__(*args)
def _get(self, key):
return key in self.__data, self.__data.get(key)
def _init(self, data):
try:
if isinstance(data, (str, bytes)):
data = data.decode('utf-8')
self.__data = json.loads(data) if data else {}
else:
assert hasattr(data, '__contains__')
assert hasattr(data, 'get')
assert callable(data.get)
self.__data = data
except (ValueError, AssertionError):
raise ParseError('Invalid data type for parse')
| 75 | 0 | 0 | 3,999 | 0 | 0 | 0 | -4 | 133 |
| ca3c214980bb966e02bee0584e6a700a068fc2b7 | 3,954 | py | Python | mBugTranslations/Chrome.py | SkyLined/mBugId | 781bfe9a120e55630a91ce1e86b39ad0dee031ec | ["CC-BY-4.0"] | 22 | 2016-08-11T14:50:55.000Z | 2021-06-06T09:39:26.000Z | mBugTranslations/Chrome.py | SkyLined/mBugId | 781bfe9a120e55630a91ce1e86b39ad0dee031ec | ["CC-BY-4.0"] | 19 | 2016-09-07T05:54:40.000Z | 2020-07-02T07:46:38.000Z | mBugTranslations/Chrome.py | SkyLined/mBugId | 781bfe9a120e55630a91ce1e86b39ad0dee031ec | ["CC-BY-4.0"] | 11 | 2016-09-03T22:42:50.000Z | 2018-10-01T18:28:59.000Z |
from .cBugTranslation import cBugTranslation;
aoBugTranslations = [
# ASan build related -> Ignored
cBugTranslation(
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".*!`anonymous namespace'::Create", # Part of skia
rb".*!base::debug::BreakDebugger",
rb".*!base::debug::CollectGDIUsageAndDie",
rb".*!blink::ReportFatalErrorInMainThread",
rb".*!blink::V8ScriptRunner::CallExtraOrCrash(<.+>)?",
rb".*!crash_reporter::internal::CrashForExceptionInNonABICompliantCodeRange",
rb".*!CrashForException_ExportThunk",
rb".*!crashpad::`anonymous namespace'::UnhandledExceptionHandler",
rb".*!crashpad::CrashpadClient::DumpAndCrash",
rb".*!raise",
rb".*!sk_abort_no_print",
rb".*!SkMallocPixelRef::MakeUsing",
rb".*!v8::Utils::ApiCheck",
rb".*!WTF::Deque<.+>::ExpandCapacity(IfNeeded)",
rb".*!WTF::Deque<.+>::push_back",
],
),
# Breakpoint -> Ignored
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!__sanitizer_cov",
],
s0zTranslatedBugTypeId = None, # This is apparently triggered by ASAN builds to determine EIP/RIP.
s0zTranslatedBugDescription = None,
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!base::internal::SchedulerWorkerPoolImpl::Start", # CHECK() on thread start
rb".*!base::PartitionRecommitSystemPages",
rb".*!blink::MemoryRegion::Commit",
rb".*!content::`anonymous namespace'::CrashOnMapFailure",
rb".*!skia::CreateHBitmap",
rb".*!ui::ClientGpuMemoryBufferManager::ClientGpuMemoryBufferManager", # std::vector throws breakpoint
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application triggered a breakpoint to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!blink::reportFatalErrorInMainThread",
rb".*!v8::Utils::ReportApiFailure",
rb".*!logging::LogMessage::~LogMessage",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered an exception to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# AVW@NULL -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"AVW@NULL",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::win::`anonymous namespace'::ForceCrashOnSigAbort",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered a NULL pointer access violation to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# Various -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"0xE0000008|Assert|AVW@NULL", # 0xE0000008 (win::kOomExceptionCode) -> OOM
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!(?:base|WTF)::[Pp]artitions?(?:ExcessiveAllocationSize|OutOfMemory(Using\w+)?)",
rb".*!blink::(?:BlinkGCOutOfMemory|ReportOOMErrorInMainThread)",
rb".*!FX_OutOfMemoryTerminate",
rb".*!SkBitmap::allocPixels",
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application caused an access violation by writing to NULL to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# OOM -> hide irrelevant frames
cBugTranslation(
srzOriginalBugTypeId = r"OOM",
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".+!(.+::)?(Win)?CallNewHandler",
rb".+!(.+::)?\w+_malloc(_\w+)?",
rb".+!(.+::)?\w*(Alloc|alloc|OutOfMemory)\w*(<.+>)?",
],
),
];
| 40.762887 | 151 | 0.686899 |
import re;
from .cBugTranslation import cBugTranslation;
aoBugTranslations = [
# ASan build related -> Ignored
cBugTranslation(
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".*!`anonymous namespace'::Create", # Part of skia
rb".*!base::debug::BreakDebugger",
rb".*!base::debug::CollectGDIUsageAndDie",
rb".*!blink::ReportFatalErrorInMainThread",
rb".*!blink::V8ScriptRunner::CallExtraOrCrash(<.+>)?",
rb".*!crash_reporter::internal::CrashForExceptionInNonABICompliantCodeRange",
rb".*!CrashForException_ExportThunk",
rb".*!crashpad::`anonymous namespace'::UnhandledExceptionHandler",
rb".*!crashpad::CrashpadClient::DumpAndCrash",
rb".*!raise",
rb".*!sk_abort_no_print",
rb".*!SkMallocPixelRef::MakeUsing",
rb".*!v8::Utils::ApiCheck",
rb".*!WTF::Deque<.+>::ExpandCapacity(IfNeeded)",
rb".*!WTF::Deque<.+>::push_back",
],
),
# Breakpoint -> Ignored
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!__sanitizer_cov",
],
s0zTranslatedBugTypeId = None, # This is apparently triggered by ASAN builds to determine EIP/RIP.
s0zTranslatedBugDescription = None,
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!base::internal::SchedulerWorkerPoolImpl::Start", # CHECK() on thread start
rb".*!base::PartitionRecommitSystemPages",
rb".*!blink::MemoryRegion::Commit",
rb".*!content::`anonymous namespace'::CrashOnMapFailure",
rb".*!skia::CreateHBitmap",
rb".*!ui::ClientGpuMemoryBufferManager::ClientGpuMemoryBufferManager", # std::vector throws breakpoint
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application triggered a breakpoint to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# Breakpoint -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"Breakpoint",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!blink::reportFatalErrorInMainThread",
rb".*!v8::Utils::ReportApiFailure",
rb".*!logging::LogMessage::~LogMessage",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered an exception to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# AVW@NULL -> Assert
cBugTranslation(
srzOriginalBugTypeId = r"AVW@NULL",
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::win::`anonymous namespace'::ForceCrashOnSigAbort",
],
s0zTranslatedBugTypeId = "Assert",
s0zTranslatedBugDescription = "The application triggered a NULL pointer access violation to indicate an assertion failed.",
s0zTranslatedSecurityImpact = None,
),
# Various -> OOM
cBugTranslation(
srzOriginalBugTypeId = r"0xE0000008|Assert|AVW@NULL", # 0xE0000008 (win::kOomExceptionCode) -> OOM
azs0rbAppliesOnlyToTopStackFrame = [
rb".*!base::`anonymous namespace'::OnNoMemory",
rb".*!(?:base|WTF)::[Pp]artitions?(?:ExcessiveAllocationSize|OutOfMemory(Using\w+)?)",
rb".*!blink::(?:BlinkGCOutOfMemory|ReportOOMErrorInMainThread)",
rb".*!FX_OutOfMemoryTerminate",
rb".*!SkBitmap::allocPixels",
],
s0zTranslatedBugTypeId = "OOM",
s0zTranslatedBugDescription = "The application caused an access violation by writing to NULL to indicate it was unable to allocate enough memory.",
s0zTranslatedSecurityImpact = None,
),
# OOM -> hide irrelevant frames
cBugTranslation(
srzOriginalBugTypeId = r"OOM",
azs0rbAdditionalIrrelevantStackFrameSymbols = [
rb".+!(.+::)?(Win)?CallNewHandler",
rb".+!(.+::)?\w+_malloc(_\w+)?",
rb".+!(.+::)?\w*(Alloc|alloc|OutOfMemory)\w*(<.+>)?",
],
),
];
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | -12 | 23 |
| f7b84e0119a5bb7d3e69b3fc77fd9952daf83b18 | 2,975 | py | Python | ProjetoMercado/mercado/models.py | LucasRodriguesDaPaixao/ProjetoMercado | 7a086ab0af800b15ef090520c9c81a0cd83dd650 | ["MIT"] | null | null | null | ProjetoMercado/mercado/models.py | LucasRodriguesDaPaixao/ProjetoMercado | 7a086ab0af800b15ef090520c9c81a0cd83dd650 | ["MIT"] | null | null | null | ProjetoMercado/mercado/models.py | LucasRodriguesDaPaixao/ProjetoMercado | 7a086ab0af800b15ef090520c9c81a0cd83dd650 | ["MIT"] | null | null | null |
# Create your models here.
| 33.806818 | 103 | 0.736807 |
from django.db import models
# Create your models here.
class Cliente(models.Model):
ID_cliente = models.AutoField(primary_key=True)
nome_cliente = models.CharField(max_length=100, verbose_name="Nome:")
cpf = models.CharField(max_length=14, verbose_name="CPF:")
def __str__(self):
return self.nome_cliente
class Fornecedor(models.Model):
ID_fornecedor = models.AutoField(primary_key=True)
nome_fornecedor = models.CharField(max_length=100, verbose_name="Nome:")
email_fornecedor = models.CharField(max_length=100, verbose_name="Email:")
cnpj= models.CharField(max_length=18, verbose_name="CNPJ:")
telefone = models.CharField(max_length=13, verbose_name="Telefone:")
def __str__(self):
return self.nome_fornecedor
class Meta:
verbose_name_plural="Fornecedores"
class Categoria(models.Model):
ID_categoria = models.AutoField(primary_key=True)
nome_categoria = models.CharField(max_length=45, verbose_name="Nome Categoria:")
def __str__(self):
return self.nome_categoria
class Produto(models.Model):
ID_produto = models.AutoField(primary_key=True)
nome_produto = models.CharField(max_length=100, verbose_name="Nome:")
data_validade = models.DateField(verbose_name="Data de validade:")
preco = models.DecimalField(max_digits=5, decimal_places=2, verbose_name="Preço:")
quantidade_produto = models.IntegerField(verbose_name="Quantidade de produtos:")
FK_categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, verbose_name="Categoria:")
FK_fornecedor = models.ForeignKey(Fornecedor, on_delete=models.CASCADE, verbose_name="Fornecedor:")
def __str__(self):
return self.nome_produto
class Setor(models.Model):
ID_setor = models.AutoField(primary_key=True)
nome_setor = models.CharField(max_length=45, verbose_name="Setor:")
FK_categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE, verbose_name="Categoria:")
def __str__(self):
return self.nome_setor
class Meta:
verbose_name_plural="Setores"
class Funcionario(models.Model):
ID_funcionario = models.AutoField(primary_key=True)
nome_funcionario = models.CharField(max_length=45, verbose_name="Nome:")
rg = models.CharField(max_length=12, verbose_name="RG:")
cpf = models.CharField(max_length=14, verbose_name="CPF:")
FK_setor = models.ForeignKey(Setor, on_delete=models.CASCADE, verbose_name="Setor:")
def __str__(self):
return self.nome_funcionario
class Compra(models.Model):
ID_compra = models.AutoField(primary_key=True)
valor_total = models.DecimalField(max_digits=5, decimal_places=2, verbose_name="Valor total:")
FK_cliente = models.ForeignKey(Cliente, on_delete=models.CASCADE, verbose_name="Cliente:")
compra_produto = models.ManyToManyField(Produto)
def __str__(self):
return "Compra: {} <--> {}".format(self.ID_compra, self.FK_cliente)
| 2 | 0 | 0 | 2,744 | 0 | 0 | 0 | 7 | 182 |
| e682d03323f99fc860ddd405e81e02079d38b903 | 2,979 | py | Python | macrokit/_validator.py | hanjinliu/macro-kit | 61ebc38ea1086337d5a7477c6e896af0220f8a71 | ["BSD-3-Clause"] | 2 | 2021-11-02T09:53:49.000Z | 2021-11-10T10:33:05.000Z | macrokit/_validator.py | hanjinliu/macro-kit | 61ebc38ea1086337d5a7477c6e896af0220f8a71 | ["BSD-3-Clause"] | null | null | null | macrokit/_validator.py | hanjinliu/macro-kit | 61ebc38ea1086337d5a7477c6e896af0220f8a71 | ["BSD-3-Clause"] | null | null | null |
from typing import Hashable, TypeVar
_T = TypeVar("_T", bound=Hashable)
_A = TypeVar("_A")
validator = Validator()
| 22.568182 | 75 | 0.627727 |
from typing import Callable, Hashable, TypeVar, Iterable, Union
from ._symbol import Symbol
from .head import Head
_T = TypeVar("_T", bound=Hashable)
_A = TypeVar("_A")
class Validator:
"""A validator class that will be used for Expr argument validation."""
def __init__(self):
self._map: dict[_T, Callable[[_A], _A]] = {}
def register(self, value: _T):
"""Register value for validation."""
def wrapper(func):
self._map[value] = func
return func
return wrapper
def __call__(self, arg: _T, *args: _A) -> Union[_A, Iterable[_A]]:
"""Run validation."""
try:
func = self._map[arg]
except KeyError:
return args
try:
out = func(*args)
except ValidationError as e:
e.args = (f"{args} is incompatible with {arg}",)
raise e
return out
class ValidationError(ValueError):
"""Raised when validation failed."""
validator = Validator()
@validator.register(Head.empty)
def _no_arg(args):
if len(args) != 0:
raise ValidationError()
return args
@validator.register(Head.del_)
@validator.register(Head.raise_)
def _single_arg(args):
if len(args) != 1:
raise ValidationError()
return args
@validator.register(Head.comment)
def _single_str(args):
if len(args) != 1:
raise ValidationError()
k = args[0]
if isinstance(k, Symbol):
k.name = k.name.strip("'")
return args
@validator.register(Head.assert_)
@validator.register(Head.getitem)
@validator.register(Head.unop)
def _two_args(args):
if len(args) != 2:
raise ValidationError()
return args
@validator.register(Head.getattr)
def _getattr(args):
if len(args) != 2:
raise ValidationError()
k = args[1]
if isinstance(k, Symbol):
k.name = k.name.strip("'")
return args
@validator.register(Head.assign)
@validator.register(Head.kw)
@validator.register(Head.annotate)
def _symbol_and_any(args):
if len(args) != 2:
raise ValidationError()
k, v = args
if isinstance(k, str):
k = Symbol.var(k)
elif isinstance(k, Symbol) and k.constant:
k = Symbol.var(k.name)
return [k, v]
@validator.register(Head.binop)
@validator.register(Head.aug)
def _three_args(args):
if len(args) != 3:
raise ValidationError()
return args
@validator.register(Head.function)
@validator.register(Head.for_)
@validator.register(Head.while_)
def _an_arg_and_a_block(args):
if len(args) != 2:
raise ValidationError()
b = args[1]
if getattr(b, "head", None) != Head.block:
raise ValidationError()
return args
@validator.register(Head.if_)
@validator.register(Head.elif_)
def _two_args_and_a_block(args):
if len(args) != 3:
raise ValidationError()
b = args[2]
if getattr(b, "head", None) != Head.block:
raise ValidationError()
return args
| 0 | 1,745 | 0 | 774 | 0 | 0 | 0 | 34 | 297 |
| 2dcc5057b0af83ae887869fbadf0b60476028183 | 7,579 | py | Python | cubi_tk/archive/readme.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | ["MIT"] | null | null | null | cubi_tk/archive/readme.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | ["MIT"] | null | null | null | cubi_tk/archive/readme.py | eudesbarbosa/cubi-tk | 80c3ef9387f2399f796b2cc445b99781d541f222 | ["MIT"] | null | null | null |
"""``cubi-tk archive prepare``: Prepare a project for archival"""
import os
import re
from ..isa_tpl import IsaTabTemplate
from ..isa_tpl import load_variables
_BASE_DIR = os.path.dirname(__file__)
TEMPLATE = IsaTabTemplate(
name="archive",
path=os.path.join(os.path.dirname(_BASE_DIR), "isa_tpl", "archive"),
description="Prepare project for archival",
configuration=load_variables("archive"),
)
DU = re.compile("^ *([0-9]+)[ \t]+[^ \t]+.*$")
DATE = re.compile("^(20[0-9][0-9]-[01][0-9]-[0-3][0-9])[_-].+$")
MAIL = (
"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
'|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]'
'|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")'
"@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
"|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:"
"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
"|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)"
"\\])"
)
PATTERNS = {
"project_name": re.compile("^ *- *Project name: *.+$"),
"date": re.compile("^ *- *Start date: *20[0-9]{2}-[01][0-9]-[0-3][0-9].*$"),
"status": re.compile("^ *- *Current status: *(Active|Inactive|Finished|Archived) *$"),
"PI": re.compile("^ *- P.I.: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"client": re.compile("^ *- *Client contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"archiver": re.compile("^ *- *CUBI contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"CUBI": re.compile("^ *- *CUBI project leader: ([A-z '-]+) *$"),
}
COMMANDS = {
"size": ["du", "--bytes", "--max-depth=0"],
"inodes": ["du", "--inodes", "--max-depth=0"],
"size_follow": ["du", "--dereference", "--bytes", "--max-depth=0"],
"inodes_follow": ["du", "--dereference", "--inodes", "--max-depth=0"],
}
MSG = "**Contents of original `README.md` file**"
| 34.766055 | 98 | 0.577517 |
"""``cubi-tk archive prepare``: Prepare a project for archival"""
import errno
import os
import re
import shutil
import sys
import tempfile
from cookiecutter.main import cookiecutter
from logzero import logger
from ..common import execute_shell_commands
from ..isa_tpl import IsaTabTemplate
from ..isa_tpl import load_variables
_BASE_DIR = os.path.dirname(__file__)
TEMPLATE = IsaTabTemplate(
name="archive",
path=os.path.join(os.path.dirname(_BASE_DIR), "isa_tpl", "archive"),
description="Prepare project for archival",
configuration=load_variables("archive"),
)
DU = re.compile("^ *([0-9]+)[ \t]+[^ \t]+.*$")
DATE = re.compile("^(20[0-9][0-9]-[01][0-9]-[0-3][0-9])[_-].+$")
MAIL = (
"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
'|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]'
'|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")'
"@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
"|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}"
"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[a-z0-9-]*[a-z0-9]:"
"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]"
"|\\\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)"
"\\])"
)
PATTERNS = {
"project_name": re.compile("^ *- *Project name: *.+$"),
"date": re.compile("^ *- *Start date: *20[0-9]{2}-[01][0-9]-[0-3][0-9].*$"),
"status": re.compile("^ *- *Current status: *(Active|Inactive|Finished|Archived) *$"),
"PI": re.compile("^ *- P.I.: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"client": re.compile("^ *- *Client contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"archiver": re.compile("^ *- *CUBI contact: \\[([A-z '-]+)\\]\\(mailto:(" + MAIL + ")\\) *$"),
"CUBI": re.compile("^ *- *CUBI project leader: ([A-z '-]+) *$"),
}
COMMANDS = {
"size": ["du", "--bytes", "--max-depth=0"],
"inodes": ["du", "--inodes", "--max-depth=0"],
"size_follow": ["du", "--dereference", "--bytes", "--max-depth=0"],
"inodes_follow": ["du", "--dereference", "--inodes", "--max-depth=0"],
}
MSG = "**Contents of original `README.md` file**"
def _extra_context_from_config(config=None):
extra_context = {}
if config:
for name in TEMPLATE.configuration:
if getattr(config, "var_%s" % name, None) is not None:
extra_context[name] = getattr(config, "var_%s" % name)
return extra_context
def _get_snakemake_nb(project_dir):
cmds = [
[
"find",
project_dir,
"-type",
"d",
"-name",
".snakemake",
"-exec",
"du",
"--inodes",
"--max-depth=0",
"{}",
";",
],
["cut", "-f", "1"],
["paste", "-sd+"],
["bc"],
]
return execute_shell_commands(cmds, check=False, verbose=False)
def _get_archiver_name():
cmds = [
["pinky", "-l", os.getenv("USER")],
["grep", "In real life:"],
["sed", "-e", "s/.*In real life: *//"],
]
output = execute_shell_commands(cmds, check=False, verbose=False)
return output.rstrip()
def _create_extra_context(project_dir, config=None):
extra_context = _extra_context_from_config(config)
logger.info("Collecting size & inodes numbers")
for (context_name, cmd) in COMMANDS.items():
if context_name not in extra_context.keys():
cmd.append(project_dir)
extra_context[context_name] = DU.match(
execute_shell_commands([cmd], check=False, verbose=False)
).group(1)
if "snakemake_nb" not in extra_context.keys():
extra_context["snakemake_nb"] = _get_snakemake_nb(project_dir)
if "archiver_name" not in extra_context.keys():
extra_context["archiver_name"] = _get_archiver_name()
if "archiver_email" not in extra_context.keys():
extra_context["archiver_email"] = (
"{}@bih-charite.de".format(extra_context["archiver_name"]).lower().replace(" ", ".")
)
if "CUBI_name" not in extra_context.keys():
extra_context["CUBI_name"] = extra_context["archiver_name"]
if "PI_name" in extra_context.keys() and "PI_email" not in extra_context.keys():
extra_context["PI_email"] = (
"{}@charite.de".format(extra_context["PI_name"]).lower().replace(" ", ".")
)
if "client_name" in extra_context.keys() and "client_email" not in extra_context.keys():
extra_context["client_email"] = (
"{}@charite.de".format(extra_context["client_name"]).lower().replace(" ", ".")
)
if "SODAR_UUID" in extra_context.keys() and "SODAR_URL" not in extra_context.keys():
extra_context["SODAR_URL"] = "{}/projects/{}".format(
config.sodar_server_url, extra_context["SODAR_UUID"]
)
if "directory" not in extra_context.keys():
extra_context["directory"] = project_dir
if "project_name" not in extra_context.keys():
extra_context["project_name"] = os.path.basename(project_dir)
if "start_date" not in extra_context.keys() and DATE.match(extra_context["project_name"]):
extra_context["start_date"] = DATE.match(extra_context["project_name"]).group(1)
if "current_status" not in extra_context.keys():
extra_context["current_status"] = "Finished"
return extra_context
def _copy_readme(src, target):
os.makedirs(os.path.realpath(os.path.dirname(target)), mode=488, exist_ok=True)
with open(src, "rt") as f:
lines = [x.rstrip() for x in f.readlines()]
if os.path.exists(target):
lines.extend(["", "", "-" * 80, "", "", MSG, "", "", "-" * 80, "", ""])
with open(target, "rt") as f:
lines.extend([x.rstrip() for x in f.readlines()])
os.remove(target)
with open(os.path.realpath(target), "wt") as f:
f.write("\n".join(lines))
def is_readme_valid(filename=None):
if filename is None:
f = sys.stdin
else:
if not os.path.exists(filename):
return False
f = open(filename, "rt")
matching = set()
for line in f:
line = line.rstrip()
for (name, pattern) in PATTERNS.items():
if pattern.match(line):
matching.add(name)
f.close()
return set(PATTERNS.keys()).issubset(matching)
def create_readme(filename, project_dir, config=None, no_input=False):
# If a valid README.md file already exists in the project, do nothing
if os.path.exists(filename) and is_readme_valid(filename):
logger.info("Using existing file, variables ignored : '{}'".format(filename))
return
# Fill defaults (emails, size, inodes, ...)
extra_context = _create_extra_context(project_dir, config)
try:
tmp = tempfile.mkdtemp()
# Create the readme file in temp directory
cookiecutter(
template=TEMPLATE.path, extra_context=extra_context, output_dir=tmp, no_input=no_input
)
# Copy it back to destination, including contents of former incomplete README.md
_copy_readme(os.path.join(tmp, extra_context["project_name"], "README.md"), filename)
finally:
try:
shutil.rmtree(tmp)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def add_readme_parameters(parser):
for name in TEMPLATE.configuration:
key = name.replace("_", "-")
parser.add_argument(
"--var-%s" % key, help="template variables %s" % repr(name), default=None
)
| 0 | 0 | 0 | 0 | 0 | 5,266 | 0 | 14 | 341 |
| 769fc816a6040cc61dab6376c20fd5c6bf0ebaa0 | 989 | py | Python | sigmod2021-exdra-p523/experiments/archive/submitted_results/code/other/pca.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | ["Apache-2.0"] | 4 | 2021-12-10T17:20:26.000Z | 2021-12-27T14:38:40.000Z | sigmod2021-exdra-p523/experiments/code/other/pca.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | ["Apache-2.0"] | null | null | null | sigmod2021-exdra-p523/experiments/code/other/pca.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | ["Apache-2.0"] | null | null | null |
import numpy as np
import argparse
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
# https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_pipeline.html#sklearn.pipeline.make_pipeline
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA
pca = make_pipeline(StandardScaler(), PCA(n_components=10,svd_solver="full")).fit(X)
np.savetxt(args.outputpath, pca.steps[1][1].components_, delimiter=",")
| 43 | 118 | 0.781598 |
import numpy as np
import argparse
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
# https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_pipeline.html#sklearn.pipeline.make_pipeline
# https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html
# https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA
pca = make_pipeline(StandardScaler(), PCA(n_components=10,svd_solver="full")).fit(X)
np.savetxt(args.outputpath, pca.steps[1][1].components_, delimiter=",")
| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
| ac1fa224c6f4611660c583d6666d2a339221dfa7 | 8,868 | py | Python | prototypes/learn_weigths.py | pantelisantonoudiou/Logic_szrDetect | 3267cabc78905c189a97e06ea2731b6f9e7b2def | ["Apache-2.0"] | 1 | 2020-11-19T19:26:34.000Z | 2020-11-19T19:26:34.000Z | prototypes/learn_weigths.py | pantelisantonoudiou/Logic_szrDetect | 3267cabc78905c189a97e06ea2731b6f9e7b2def | ["Apache-2.0"] | null | null | null | prototypes/learn_weigths.py | pantelisantonoudiou/Logic_szrDetect | 3267cabc78905c189a97e06ea2731b6f9e7b2def | ["Apache-2.0"] | 1 | 2021-04-07T11:41:39.000Z | 2021-04-07T11:41:39.000Z |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:14:32 2020
@author: Pante
"""
import features
import numpy as np
from sklearn.preprocessing import StandardScaler
from array_helper import find_szr_idx, match_szrs, merge_close
from build_feature_data import get_data, get_features_allch
####### consider isolation forest for outlier detection!!!!!!
def user_cost(y_true, y_pred):
"""
user_cost(y_true, y_pred)
Parameters
----------
y_true : 1ndarray bool, ground truth values
y_pred : 1ndarray bool, predicted values
Returns
-------
cost : float
"""
detected = 0 # number of detected seizures
# get bounds of sezures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # total predicted
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # total predicted
bounds_pred = merge_close(bounds_pred, merge_margin = 5) # merge seizures close together
if bounds_pred.shape[0]>0: # find matching seizures
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
# calculate cost
a = 1 - (detected/bounds_true.shape[0]) # get detected ratio
b = (bounds_pred.shape[0] - detected) # get false positives
cost = a + np.log10(b+1) # cost function
return cost
def create_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
a = 100
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = (1 - (detected/bounds_true.shape[0]))*20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
# L = 1 # learning rate
cost = a + np.log10(b+1)
return cost
def szr_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = 1 - (detected/bounds_true.shape[0])
if (a > 0 and a <= 1):
a = 20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
cost = a + np.log10(b+1)
return cost
def get_min_cost(feature, y_true):
"""
get_min_cost(feature, y_true)
Parameters
----------
feature : 1D ndarray, extracted feature
y_true : 1D ndarray, bool grund truth labels
Returns
-------
TYPE: Float, threshold value that gves minimum cost
"""
n_loop = 100 # loop number and separation
thresh_array = np.linspace(1, 20, n_loop) # thresholds to test
cost_array = np.zeros(n_loop)
for i in range(n_loop):
# thresh_array[i] = thresh
y_pred = feature> (np.mean(feature) + thresh_array[i]*np.std(feature))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = szr_cost(bounds_true, bounds_pred) # get cost
# pass to array
cost_array[i] = cost
return thresh_array[np.argmin(cost_array)]
# define parameter list
param_list = (features.autocorr, features.line_length, features.rms, features.mad, features.var, features.std, features.psd, features.energy,
features.get_envelope_max_diff,)
cross_ch_param_list = (features.cross_corr, features.signal_covar, features.signal_abs_covar,)
# get data and true labels
exp_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data\3642_3641_3560_3514'
# 071919_3514 071719_3560
data, y_true = get_data(exp_path, '072519_3642',ch_num = [0,1],
inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# # get file list
# main_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data'
# folder_path = '3514_3553_3639_3640'
# ver_path = os.path.join(main_path,folder_path, 'verified_predictions_pantelis')
# filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path))) # get only files with predictions
# filelist = [os.path.splitext(x)[0] for x in filelist] # remove csv ending
# # data, y_true = get_data(r'W:\Maguire Lab\Trina\2019\07-July\3514_3553_3639_3640, '071819_3553a',ch_num = [0,1],
# # inner_path={'data_path':'reorganized_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# for i in range(1):
# # 071919_3514 071719_3560
# data, y_true = get_data(os.path.join(main_path, folder_path), filelist[i],ch_num = [0,1],
# inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# if sum(y_true) == 0:
# continue
# get features
x_data, labels = get_features_allch(data,param_list,cross_ch_param_list)
# Normalize data
x_data = StandardScaler().fit_transform(x_data)
# get cost plot
cost_array,thresh_array = find_threshold(x_data, y_true)
| 27.974763 | 141 | 0.614005 |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 09:14:32 2020
@author: Pante
"""
import os, features, time
import numpy as np
from sklearn.preprocessing import StandardScaler
from array_helper import find_szr_idx, match_szrs, merge_close
from build_feature_data import get_data, get_features_allch
from sklearn.metrics import log_loss,recall_score
import matplotlib.pyplot as plt
####### consider isolation forest for outlier detection!!!!!!
def user_cost(y_true, y_pred):
"""
user_cost(y_true, y_pred)
Parameters
----------
y_true : 1ndarray bool, ground truth values
y_pred : 1ndarray bool, predicted values
Returns
-------
cost : float
"""
detected = 0 # number of detected seizures
# get bounds of sezures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # total predicted
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # total predicted
bounds_pred = merge_close(bounds_pred, merge_margin = 5) # merge seizures close together
if bounds_pred.shape[0]>0: # find matching seizures
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
# calculate cost
a = 1 - (detected/bounds_true.shape[0]) # get detected ratio
b = (bounds_pred.shape[0] - detected) # get false positives
cost = a + np.log10(b+1) # cost function
return cost
def create_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
a = 100
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = (1 - (detected/bounds_true.shape[0]))*20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
# L = 1 # learning rate
cost = a + np.log10(b+1)
return cost
def szr_cost(bounds_true, bounds_pred):
"""
create_cost(bounds_true, bounds_pred)
Parameters
----------
bounds_true : 2d ndarray (rows = seizrs, columns = start,stop), ground truth
bounds_pred : 2d ndarray (rows = seizrs, columns = start,stop), predicted
Returns
-------
cost : Float,
"""
# find matching seizurs
detected = 0
if bounds_pred.shape[0]>0:
detected = match_szrs(bounds_true, bounds_pred, err_margin = 10)
if bounds_true.shape[0]>0:
# get detected ratio
a = 1 - (detected/bounds_true.shape[0])
if (a > 0 and a <= 1):
a = 20
# get false positives
b = (bounds_pred.shape[0] - detected)
# cost function
cost = a + np.log10(b+1)
return cost
def get_min_cost(feature, y_true):
"""
get_min_cost(feature, y_true)
Parameters
----------
feature : 1D ndarray, extracted feature
y_true : 1D ndarray, bool grund truth labels
Returns
-------
TYPE: Float, threshold value that gves minimum cost
"""
n_loop = 100 # loop number and separation
thresh_array = np.linspace(1, 20, n_loop) # thresholds to test
cost_array = np.zeros(n_loop)
for i in range(n_loop):
# thresh_array[i] = thresh
y_pred = feature> (np.mean(feature) + thresh_array[i]*np.std(feature))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = szr_cost(bounds_true, bounds_pred) # get cost
# pass to array
cost_array[i] = cost
return thresh_array[np.argmin(cost_array)]
def find_threshold(x_data, y_true):
# thresh = 1;
ftr = 8
x = x_data[:,ftr]
# fig = plt.figure()
# ax = fig.add_subplot(111)
# t = np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x))
# line1 = ax.plot(x)
# line2 = ax.plot(t)
n_loop = 100
cost_array = np.zeros(n_loop)
thresh_array = np.zeros(n_loop)
thresh_array = np.linspace(1, 20, n_loop)
for i in range(n_loop):
# thresh_array[i] = thresh
y_pred = x> (np.mean(x) + thresh_array[i]*np.std(x))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,2])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = create_cost(bounds_true, bounds_pred) # get cost
# cost = log_loss(y_true, y_pred ,labels =[True,False])
cost_array[i] = cost
# if cost == 0:
# print('cost has reached zero, stopping')
# return cost_array,thresh_array
# thresh += cost # update cost
# ax.plot(np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x)))
# line2[0].set_ydata(np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x)))
# fig.canvas.draw()
plt.figure()
plt.plot(thresh_array, cost_array)
plt.ylabel('cost')
plt.xlabel('thresh')
print('seizures = ', bounds_true.shape[0])
return cost_array,thresh_array
def find_threshold_all(x_data, y_true):
thresh = 1;
ftr = 1
x = x_data[:,ftr]
fig = plt.figure()
ax = fig.add_subplot(111)
t = np.ones(x.shape[0]) * (np.mean(x) + thresh*np.std(x))
line1 = ax.plot(x)
line2 = ax.plot(t)
n_loop = 100
cost_array = np.zeros(n_loop)
thresh_array = np.zeros(n_loop)
# thresh_array = np.linspace(10, 0, n_loop)
for i in range(n_loop):
thresh_array[i] = thresh
y_pred = x> (np.mean(x) + thresh_array[i]*np.std(x))
# get number of seizures
bounds_true = find_szr_idx(y_true, np.array([0,1])) # true
bounds_pred = find_szr_idx(y_pred, np.array([0,1])) # predicted
# merge seizures close together
if bounds_pred.shape[0]>1:
bounds_pred = merge_close(bounds_pred, merge_margin = 5)
cost = create_cost(bounds_true, bounds_pred) # get cost
# cost = log_loss(y_true, y_pred ,labels =[True,False])
cost_array[i] = cost
if cost == 0:
print('cost has reached zero, stopping')
return cost_array,thresh_array
return cost_array,thresh_array
# define parameter list
param_list = (features.autocorr, features.line_length, features.rms, features.mad, features.var, features.std, features.psd, features.energy,
features.get_envelope_max_diff,)
cross_ch_param_list = (features.cross_corr, features.signal_covar, features.signal_abs_covar,)
# get data and true labels
exp_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data\3642_3641_3560_3514'
# 071919_3514 071719_3560
data, y_true = get_data(exp_path, '072519_3642',ch_num = [0,1],
inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# # get file list
# main_path = r'C:\Users\Pante\Desktop\seizure_data_tb\Train_data'
# folder_path = '3514_3553_3639_3640'
# ver_path = os.path.join(main_path,folder_path, 'verified_predictions_pantelis')
# filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path))) # get only files with predictions
# filelist = [os.path.splitext(x)[0] for x in filelist] # remove csv ending
# # data, y_true = get_data(r'W:\Maguire Lab\Trina\2019\07-July\3514_3553_3639_3640, '071819_3553a',ch_num = [0,1],
# # inner_path={'data_path':'reorganized_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# for i in range(1):
# # 071919_3514 071719_3560
# data, y_true = get_data(os.path.join(main_path, folder_path), filelist[i],ch_num = [0,1],
# inner_path={'data_path':'filt_data', 'pred_path':'verified_predictions_pantelis'} , load_y = True)
# if sum(y_true) == 0:
# continue
# get features
x_data, labels = get_features_allch(data,param_list,cross_ch_param_list)
# Normalize data
x_data = StandardScaler().fit_transform(x_data)
# get cost plot
cost_array,thresh_array = find_threshold(x_data, y_true)
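# Editor's sketch (not part of the original script): get_min_cost can be applied
# per feature column to pick a detection threshold; assumes the x_data and y_true
# computed above are still in scope.
example_thresh = get_min_cost(x_data[:, 0], y_true)
print('per-feature threshold (feature 0) =', example_thresh)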
| 0
| 0
| 0
| 0
| 0
| 2,799
| 0
| 48
| 98
|
276b5d3d63f7139687164c5d10374d92ac764ed2
| 1,016
|
py
|
Python
|
qcloudsdkcmem/DescribeCmemRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkcmem/DescribeCmemRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
qcloudsdkcmem/DescribeCmemRequest.py
|
f3n9/qcloudcli
|
b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
| 25.4
| 73
| 0.643701
|
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class DescribeCmemRequest(Request):
def __init__(self):
super(DescribeCmemRequest, self).__init__(
'cmem', 'qcloudcliV1', 'DescribeCmem', 'cmem.api.qcloud.com')
def get_limit(self):
return self.get_params().get('limit')
def set_limit(self, limit):
self.add_param('limit', limit)
def get_offset(self):
return self.get_params().get('offset')
def set_offset(self, offset):
self.add_param('offset', offset)
def get_sizeInfo(self):
return self.get_params().get('sizeInfo')
def set_sizeInfo(self, sizeInfo):
self.add_param('sizeInfo', sizeInfo)
def get_subnetId(self):
return self.get_params().get('subnetId')
def set_subnetId(self, subnetId):
self.add_param('subnetId', subnetId)
def get_vpcId(self):
return self.get_params().get('vpcId')
def set_vpcId(self, vpcId):
self.add_param('vpcId', vpcId)
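# Usage sketch (editor's addition, not part of the SDK file): build a request and
# inspect its parameters; sending it through a qcloud client is out of scope here.
if __name__ == '__main__':
    req = DescribeCmemRequest()
    req.set_limit(20)
    req.set_offset(0)
    print(req.get_params())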
| 0
| 0
| 0
| 926
| 0
| 0
| 0
| 20
| 46
|
a06cceb6d9e57c9f8d1381b5bcfd1fa628bd0789
| 2,314
|
py
|
Python
|
rssnewsbot/spiders/rssspider.py
|
hijoe320/RSSBot
|
cbc0bc24d980ede3419111d51384abbc2c93f70c
|
[
"MIT"
] | null | null | null |
rssnewsbot/spiders/rssspider.py
|
hijoe320/RSSBot
|
cbc0bc24d980ede3419111d51384abbc2c93f70c
|
[
"MIT"
] | null | null | null |
rssnewsbot/spiders/rssspider.py
|
hijoe320/RSSBot
|
cbc0bc24d980ede3419111d51384abbc2c93f70c
|
[
"MIT"
] | null | null | null |
from time import mktime
import xxhash
def hs(s):
"""
hash function to convert url to fixed length hash code
"""
return xxhash.xxh32(s).hexdigest()
def time2ts(time_struct):
"""
convert time_struct to epoch
"""
return mktime(time_struct)
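if __name__ == '__main__':
    # Editor's sketch (not part of the original spider): quick check of the two
    # helpers above; assumes xxhash is installed.
    from time import gmtime
    print(hs('http://example.com/article'))  # fixed-length hex digest for a url
    print(time2ts(gmtime()))                 # struct_time -> epoch seconds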
| 32.138889
| 103
| 0.617978
|
from time import sleep, gmtime, mktime
from datetime import datetime
import logging
import scrapy
import redis
import msgpack
import xxhash
import pymongo as pm
import feedparser as fp
from colorama import Back, Fore, Style
from ..settings import MONGODB_URI, REDIS_HOST, REDIS_PORT, REDIS_PWD, REDIS_PENDING_QUEUE, REDIS_DUPFLT_DB
def hs(s):
"""
hash function to convert url to fixed length hash code
"""
return xxhash.xxh32(s).hexdigest()
def time2ts(time_struct):
"""
convert time_struct to epoch
"""
return mktime(time_struct)
class RSSSpider(scrapy.Spider):
name = "rssspider"
def __init__(self, *args, **kwargs):
super(RSSSpider, self).__init__(*args, **kwargs)
self.rc = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD)
self.df = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PWD, db=REDIS_DUPFLT_DB)
self.mc = pm.MongoClient(host=MONGODB_URI, connect=False)
def start_requests(self):
with self.mc.rssnews.feed.find() as cursor:
logging.info("number of rss feeds = %d", cursor.count())
for item in cursor:
logging.debug("rss=%(url)s", item)
yield scrapy.Request(url=item["url"], callback=self.parse, meta=item)
def parse(self, res):
logging.debug("%sparsing %s%s", Fore.GREEN, res.url, Style.RESET_ALL)
rss = fp.parse(res.body)
symbol = res.meta["symbol"]
for e in rss.entries:
if self.check_exist(e.link):
continue
if '*' in e.link:
url = "http" + e.link.split("*http")[-1]
self.append_task(e, url)
elif e.link.startswith("http://finance.yahoo.com/r/"):
yield scrapy.Request(url=e.link, callback=self.extract_url, meta=e)
else:
self.append_task(e, e.link)
def extract_url(self, res):
if res.body.startswith("<script src="):
url = res.body.split("URL=\'")[-1].split("\'")[0]
self.append_task(res.meta, url)
else:
pass
def check_exist(self, url):
return self.df.get(url)
    def append_task(self, entry, url):
        self.df.set(url, True, ex=3600)
        # assumption: the pending queue stores msgpack-encoded dicts keyed by url
        task = {'url': url}
        self.rc.append(REDIS_PENDING_QUEUE, msgpack.packb(task))
| 0
| 0
| 0
| 1,740
| 0
| 0
| 0
| 79
| 221
|
0a29357a3fcb65eb38130117fd1af6fb06bc1c40
| 11,992
|
py
|
Python
|
data/transforms/data_preprocessing.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
data/transforms/data_preprocessing.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
data/transforms/data_preprocessing.py
|
zyxwvu321/Classifer_SSL_Longtail
|
e6c09414c49e695b0f4221a3c6245ae3929a1788
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 15:36:15 2020
dataset transform
@author: minjie
"""
# imgs = []
# pts = []
#
# hh,ww,_ = img.shape
# for _ in range(self.n_aug):
# #points = [ww/2.0,hh/2.0,1.0]
# points = [[0.0,0.0,1.0], [0.0,hh,1.0], [ww,0.0,1.0],[ww,hh,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
# imgs.append(augmented['image'])
# pts.append(augmented['keypoints'])
# # NOTE: use bbox will have prob that box is outside crop region.
# bboxes= [[0.45, 0.45, 0.55, 0.55]]
#
# augmented = self.T_aug(image = img,bboxes = bboxes,category_id = ['0'])
# hh,ww,_ = img.shape
# points = [[ww/2.0,hh/2.0,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
#return augmented['image']
| 38.935065
| 162
| 0.488075
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 15:36:15 2020
dataset transform
@author: minjie
"""
import albumentations as A
from albumentations.pytorch import ToTensor as ToTensor_albu
import cv2
import torch
from multiprocessing import Pool
from utils.parse_meta import parse_kpds
import numpy as np
def get_aug(aug, min_area=0., min_visibility=0.):
return A.Compose(aug, bbox_params={'format': 'pascal_voc', 'min_area': min_area, 'min_visibility': min_visibility, 'label_fields': ['category_id']})
class TrainAugmentation_albu:
def __init__(self, sz_hw = (384,384),mean=0, std=1.0, crp_scale=(0.08, 1.0),crp_ratio = (0.75, 1.3333), weak_aug = False,n_aug = 1,out_augpos = False):
"""
Args:
weak_aug, week aug for fixmatch
"""
if isinstance(sz_hw, int):
sz_hw = (sz_hw,sz_hw)
self.mean = mean
self.std = std
self.sz_hw = sz_hw
self.crp_scale = crp_scale
self.crp_ratio = crp_ratio
self.n_aug = n_aug # number of repeated augmentation
self.out_augpos = out_augpos
if self.sz_hw[0] == self.sz_hw[1]:
self.T_aug = A.Compose([A.Rotate(p=0.5),
A.RandomResizedCrop(height = self.sz_hw[0], width = self.sz_hw[1], scale=self.crp_scale, ratio=self.crp_ratio,
interpolation = cv2.INTER_CUBIC,p = 1.0),
A.Flip(p = 0.5),
A.RandomRotate90(p = 0.5)])
else:
self.T_aug = A.Compose([A.Rotate(p=0.5),
A.RandomResizedCrop(height = self.sz_hw[0], width = self.sz_hw[1], scale=self.crp_scale, ratio=self.crp_ratio,
interpolation = cv2.INTER_CUBIC,p = 1.0),
A.Flip(p = 0.5)])
self.I_aug = A.Compose([ A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
A.HueSaturationValue(hue_shift_limit=2, sat_shift_limit=15, val_shift_limit=20,p = 0.5),
A.OneOf([A.Blur(blur_limit=5, p=0.3),
A.GaussNoise(var_limit=(5.0, 10.0), p=0.3),
A.IAASharpen(alpha=(0.1, 0.3), lightness=(0.5, 1.0), p=0.4)],p=0.5)])
self.N_aug = A.Compose([A.Normalize(mean=mean, std=std, p=1.0),
ToTensor_albu()])
if weak_aug is False:
self.augment = A.Compose([ self.T_aug, self.I_aug,self.N_aug])
# self.augment = A.Compose([ self.T_aug, self.I_aug])
# self.augment = A.Compose(self.augment, bbox_params={'format': 'albumentations', 'min_area': 0, 'min_visibility': 0, 'label_fields': ['category_id']})
if self.out_augpos is True:
self.augment = A.Compose(self.augment,\
keypoint_params = A.KeypointParams(format= 'xys', \
remove_invisible=False, angle_in_degrees=True))#label_fields=['category_id'], \
else:
#weak augment
self.T_aug = A.RandomResizedCrop(height = self.sz_hw[0], width = self.sz_hw[1], scale=self.crp_scale, ratio=self.crp_ratio,
interpolation = cv2.INTER_CUBIC,p = 1.0)
self.augment = A.Compose([ self.T_aug, self.N_aug])
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
if self.n_aug==1:
#augmented = self.augment(image = img)
if self.out_augpos is False:
augmented = self.augment(image = img)
return augmented['image']
else:
hh,ww,_ = img.shape
points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]
hw_in = img.shape[:2]
augmented = self.augment(image = img,keypoints=points)
image_aug = augmented['image']
hw_out = image_aug.shape[1:]
feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'],hw_in,hw_out))
return (image_aug,feat_kpds)
else:
# test multi-aug
if self.out_augpos is False:
return torch.stack([self.augment(image = img)['image'] for _ in range(self.n_aug)])
else:
img_out = []
feat_out = []
trans_out = []
hh,ww,_ = img.shape
#points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]
points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0],[ww,0.0, 1.0]] # add one point for cv2.getAffineTransform
hw_in = img.shape[:2]
for _ in range(self.n_aug):
augmented = self.augment(image = img,keypoints=points)
image_aug = augmented['image']
hw_out = image_aug.shape[1:]
#feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'],hw_in,hw_out))
feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'][:2],hw_in,hw_out))
pts2 = augmented['keypoints']
pts1 = np.float32([pt[:2] for pt in points])
pts2 = np.float32([pt[:2] for pt in pts2])
trans = cv2.getAffineTransform(pts2,pts1)
trans_out.append(trans)
img_out.append(image_aug)
feat_out.append(feat_kpds)
return (torch.stack(img_out), {'feat_out':torch.stack(feat_out), 'trans_out': np.stack(trans_out)})
#return torch.stack([self.augment(image = img)['image'] for _ in range(self.n_aug)])
# imgs = []
# pts = []
#
# hh,ww,_ = img.shape
# for _ in range(self.n_aug):
# #points = [ww/2.0,hh/2.0,1.0]
# points = [[0.0,0.0,1.0], [0.0,hh,1.0], [ww,0.0,1.0],[ww,hh,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
# imgs.append(augmented['image'])
# pts.append(augmented['keypoints'])
# # NOTE: use bbox will have prob that box is outside crop region.
# bboxes= [[0.45, 0.45, 0.55, 0.55]]
#
# augmented = self.T_aug(image = img,bboxes = bboxes,category_id = ['0'])
# hh,ww,_ = img.shape
# points = [[ww/2.0,hh/2.0,1.0]]
# augmented = self.augment(image = img,keypoints=points,category_id = ['0'])
#return augmented['image']
class TestAugmentation_albu:
def __init__(self, size, mean=0, std=1.0,out_augpos = False):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
if isinstance(size, int):
size = (size,size)
self.mean = mean
self.size = size
self.out_augpos = out_augpos
self.augment = A.Compose([A.Resize( size[0], size[1], interpolation=cv2.INTER_CUBIC, p=1),
A.Normalize(mean=mean, std=std, p=1.0),
ToTensor_albu()
])
if self.out_augpos is True:
self.augment = A.Compose(self.augment,\
keypoint_params = A.KeypointParams(format= 'xys', \
remove_invisible=False, angle_in_degrees=True))
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
if self.out_augpos is False:
augmented = self.augment(image = img)
return augmented['image']
else:
hh,ww,_ = img.shape
points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]
hw_in = img.shape[:2]
augmented = self.augment(image = img,keypoints=points)
image_aug = augmented['image']
hw_out = image_aug.shape[1:]
feat_kpds = torch.tensor(parse_kpds(augmented['keypoints'],hw_in,hw_out))
return (image_aug,feat_kpds)
class TrainAugmentation_bone:
def __init__(self, sz_in_hw = (512,512), sz_out_hw = (448,448),mean=0, std=1.0, minmax_h = (0,128), w2h_ratio = 1.0):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
if isinstance(sz_in_hw, int):
sz_in_hw = (sz_in_hw,sz_in_hw)
if isinstance(sz_out_hw, int):
sz_out_hw = (sz_out_hw,sz_out_hw)
self.mean = mean
self.sz_in_hw = sz_in_hw
self.sz_out_hw = sz_out_hw
#self.crp_scale = crp_scale
#self.crp_ratio = crp_ratio
self.minmax_h = minmax_h
self.w2h_ratio = w2h_ratio
self.I_aug = A.Compose([A.Resize( sz_in_hw[0], sz_in_hw[1], interpolation=1, p=1),
A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.5),
A.OneOf([A.Blur(blur_limit=5, p=0.3),
A.GaussNoise(var_limit=(5.0, 10.0), p=0.3),
A.IAASharpen(alpha=(0.1, 0.3), lightness=(0.5, 1.0), p=0.4)],p=0.5)])
self.T_aug = A.RandomSizedCrop(min_max_height = (self.minmax_h[0],self.minmax_h[1]),height = self.sz_out_hw[0], width = self.sz_out_hw[1],\
w2h_ratio = self.w2h_ratio,p = 1.0)
self.N_aug = A.Compose([A.Normalize(mean=mean, std=std, p=1.0),
ToTensor_albu()])
self.augment = A.Compose([self.I_aug, self.T_aug,self.N_aug])
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
augmented = self.augment(image = img)
return augmented['image']
class TestAugmentation_bone:
#def __init__(self, size, mean=0, std=1.0, ext_p =(-0.125,0.25)):
def __init__(self, sz_in_hw = (512,512), sz_out_hw = (448,448), mean=0, std=1.0):
"""
Args:
size: the size the of final image.
mean: mean pixel value per channel.
"""
if isinstance(sz_in_hw, int):
sz_in_hw = (sz_in_hw,sz_in_hw)
if isinstance(sz_out_hw, int):
sz_out_hw = (sz_out_hw,sz_out_hw)
self.augment = A.Compose([A.Resize( sz_in_hw[0], sz_in_hw[1], interpolation=1, p=1),
A.CenterCrop(sz_out_hw[0], sz_out_hw[1], p=1.0),
A.Normalize(mean=mean, std=std, max_pixel_value=255.0, p=1.0),
ToTensor_albu()
])
#
def __call__(self, img):
"""
Args:
img: the output of cv.imread in RGB layout.
labels: labels of boxes.
"""
augmented = self.augment(image = img)
return augmented['image']
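if __name__ == '__main__':
    # Editor's sketch (not part of the original module): smoke-test the training
    # transform on a random RGB image; assumes the albumentations version in use
    # still provides IAASharpen and the old ToTensor.
    demo_img = (np.random.rand(512, 512, 3) * 255).astype(np.uint8)
    train_tf = TrainAugmentation_albu(sz_hw=(384, 384),
                                      mean=(0.485, 0.456, 0.406),
                                      std=(0.229, 0.224, 0.225))
    print(train_tf(demo_img).shape)  # expected: torch.Size([3, 384, 384])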
| 0
| 0
| 0
| 10,462
| 0
| 181
| 0
| 50
| 271
|
22a8e0eda2fca9bf48bd5895ab01712afaaf9054
| 265
|
py
|
Python
|
Python/leetcode/HIndexIi.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | 2
|
2015-12-02T06:44:01.000Z
|
2016-05-04T21:40:54.000Z
|
Python/leetcode/HIndexIi.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
Python/leetcode/HIndexIi.py
|
darrencheng0817/AlgorithmLearning
|
aec1ddd0c51b619c1bae1e05f940d9ed587aa82f
|
[
"MIT"
] | null | null | null |
'''
Created on 1.12.2016
@author: Darren
''''''
Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
Expected runtime complexity is in O(log n) and the input is sorted.
"
'''
| 18.928571
| 117
| 0.656604
|
'''
Created on 1.12.2016
@author: Darren
''''''
Follow up for H-Index: What if the citations array is sorted in ascending order? Could you optimize your algorithm?
Expected runtime complexity is in O(log n) and the input is sorted.
"
'''
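# Editor's sketch (the original file contains no solution): one O(log n) approach
# binary-searches for the first index i with citations[i] >= n - i, giving h = n - i.
def h_index_sorted(citations):
    """Return the h-index of an ascending-sorted citation list in O(log n)."""
    n = len(citations)
    lo, hi = 0, n
    while lo < hi:
        mid = (lo + hi) // 2
        if citations[mid] >= n - mid:
            hi = mid
        else:
            lo = mid + 1
    return n - lo
# e.g. h_index_sorted([0, 1, 3, 5, 6]) == 3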
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c6d3edd93fcde8345da8ce9f04c85393e6bb98d8
| 5,516
|
py
|
Python
|
HW1/hw1/code/visual_recog.py
|
jiansfoggy/16-720B
|
6395555449fa297f19efb42970e480f1b382e38a
|
[
"Unlicense"
] | 2
|
2020-03-31T15:54:49.000Z
|
2022-01-07T13:43:46.000Z
|
HW1/hw1/code/visual_recog.py
|
jiansfoggy/16-720B
|
6395555449fa297f19efb42970e480f1b382e38a
|
[
"Unlicense"
] | null | null | null |
HW1/hw1/code/visual_recog.py
|
jiansfoggy/16-720B
|
6395555449fa297f19efb42970e480f1b382e38a
|
[
"Unlicense"
] | 4
|
2019-09-10T00:48:11.000Z
|
2022-01-07T13:43:50.000Z
|
import numpy as np
import imageio
import visual_words
import multiprocessing as mp
def build_recognition_system(num_workers=2):
'''
Creates a trained recognition system by generating training features from all training images.
[input]
* num_workers: number of workers to process in parallel
[saved]
* features: numpy.ndarray of shape (N,M)
* labels: numpy.ndarray of shape (N)
* dictionary: numpy.ndarray of shape (K,3F)
* SPM_layer_num: number of spatial pyramid layers
'''
train_data = np.load("../data/train_data.npz")
dictionary = np.load("../outputs/dictionary.npy")
data = train_data['image_names']
SPM_layer_num = 2
K = 100
size_Feature = int(K*(4**(SPM_layer_num+1) -1)/3)
pool = mp.Pool(num_workers)
results = []
for i in range(0, len(data)):
print (i)
args = [data[i][0], dictionary, SPM_layer_num, K]
results.append(pool.apply_async(get_image_feature, args))
features = []
for result in results:
features.append(result.get())
final_features = np.reshape(features, (len(data), size_Feature))
labels = np.asarray(train_data['labels'])
np.savez('../outputs/trained_system.npz', features = final_features, labels = labels, SPM_layer_num = SPM_layer_num, dictionary = dictionary)
def evaluate_recognition_system(num_workers=2):
'''
Evaluates the recognition system for all test images and returns the confusion matrix.
[input]
* num_workers: number of workers to process in parallel
[output]
* conf: numpy.ndarray of shape (8,8)
* accuracy: accuracy of the evaluated system
'''
test_data = np.load("../data/test_data.npz")
trained_system = np.load("../outputs/trained_system.npz")
features = trained_system['features']
dictionary = trained_system['dictionary']
SPM_layer_num = trained_system['SPM_layer_num']
labels = trained_system['labels']
K = dictionary.shape[0]
data = test_data['image_names']
pool = mp.Pool(num_workers)
features_test = []
for i in range(0, len(data)):
args = [(data[i][0], dictionary, SPM_layer_num, K, features, labels)]
features_test.append(pool.apply_async(test_label, args))
test_labels = []
for feature in features_test:
test_labels.append(feature.get())
testActualLabels = test_data['labels']
size_confusion = len(np.unique(testActualLabels))
C = np.zeros((size_confusion, size_confusion))
for a,p in zip(testActualLabels, test_labels):
C[a][p] += 1
accuracy = np.diag(C).sum()/C.sum()
return C, accuracy
def get_image_feature(file_path,dictionary,layer_num,K):
'''
Extracts the spatial pyramid matching feature.
[input]
* file_path: path of image file to read
* dictionary: numpy.ndarray of shape (K,3F)
* layer_num: number of spatial pyramid layers
* K: number of clusters for the word maps
[output]
* feature: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
image = imageio.imread('../data/' + file_path)
wordmap = visual_words.get_visual_words(image, dictionary)
hist_all = get_feature_from_wordmap_SPM(wordmap, layer_num, K)
return hist_all
def distance_to_set(word_hist,histograms):
'''
Compute similarity between a histogram of visual words with all training image histograms.
[input]
* word_hist: numpy.ndarray of shape (K)
* histograms: numpy.ndarray of shape (N,K)
[output]
* sim: numpy.ndarray of shape (N)
'''
min_compare = np.minimum(histograms, word_hist)
return np.sum(min_compare, axis=1)
def get_feature_from_wordmap(wordmap,dict_size):
'''
Compute histogram of visual words.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* dict_size: dictionary size K
[output]
* hist: numpy.ndarray of shape (K)
'''
flatten_wordmap = wordmap.flatten()
hist = np.histogram(flatten_wordmap, bins = dict_size, range = (0,dict_size))
hist = hist[0]/np.linalg.norm(hist[0], ord = 1)
return np.asarray(hist)
def get_feature_from_wordmap_SPM(wordmap,layer_num,dict_size):
'''
Compute histogram of visual words using spatial pyramid matching.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* layer_num: number of spatial pyramid layers
* dict_size: dictionary size K
[output]
* hist_all: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
i_h, i_w = wordmap.shape
hist_all = []
for layer in range(0, layer_num+1):
D = 2**layer
if layer == 0 or layer == 1:
weight = 1/(2**(layer_num))
else:
weight = 1/(2**(layer_num+1-layer))
height_indices = np.round(np.arange(0, i_h+1, i_h/D)).astype('int')
width_indices = np.round(np.arange(0, i_w+1, i_w/D)).astype('int')
divisions = height_indices.shape[0]-1
for i in range(0, divisions):
for j in range (0, divisions):
s_h, s_w = height_indices[i], width_indices[j]
e_h, e_w = height_indices[i+1], width_indices[j+1]
imageSection = wordmap[s_h:e_h, s_w:e_w]
imageDictionary = get_feature_from_wordmap(imageSection, dict_size)
imageDictionary = imageDictionary*weight
hist_all.append(imageDictionary)
hist_all = np.asarray(hist_all)
hist_all = hist_all.flatten()
hist_all = hist_all/np.linalg.norm(hist_all, ord = 1)
return hist_all
| 28.729167
| 143
| 0.701051
|
import numpy as np
import threading
import queue
import imageio
import os,time
import math
import visual_words
import multiprocessing as mp
def build_recognition_system(num_workers=2):
'''
Creates a trained recognition system by generating training features from all training images.
[input]
* num_workers: number of workers to process in parallel
[saved]
* features: numpy.ndarray of shape (N,M)
* labels: numpy.ndarray of shape (N)
* dictionary: numpy.ndarray of shape (K,3F)
* SPM_layer_num: number of spatial pyramid layers
'''
train_data = np.load("../data/train_data.npz")
dictionary = np.load("../outputs/dictionary.npy")
data = train_data['image_names']
SPM_layer_num = 2
K = 100
size_Feature = int(K*(4**(SPM_layer_num+1) -1)/3)
pool = mp.Pool(num_workers)
results = []
for i in range(0, len(data)):
print (i)
args = [data[i][0], dictionary, SPM_layer_num, K]
results.append(pool.apply_async(get_image_feature, args))
features = []
for result in results:
features.append(result.get())
final_features = np.reshape(features, (len(data), size_Feature))
labels = np.asarray(train_data['labels'])
np.savez('../outputs/trained_system.npz', features = final_features, labels = labels, SPM_layer_num = SPM_layer_num, dictionary = dictionary)
def test_label(args):
file_path,dictionary,layer_num,K, features, labels = args
feature = get_image_feature(file_path, dictionary, layer_num, K)
distance = distance_to_set(feature, features)
i = np.argmax(distance)
label = labels[i]
return label
def evaluate_recognition_system(num_workers=2):
'''
Evaluates the recognition system for all test images and returns the confusion matrix.
[input]
* num_workers: number of workers to process in parallel
[output]
* conf: numpy.ndarray of shape (8,8)
* accuracy: accuracy of the evaluated system
'''
test_data = np.load("../data/test_data.npz")
trained_system = np.load("../outputs/trained_system.npz")
features = trained_system['features']
dictionary = trained_system['dictionary']
SPM_layer_num = trained_system['SPM_layer_num']
labels = trained_system['labels']
K = dictionary.shape[0]
data = test_data['image_names']
pool = mp.Pool(num_workers)
features_test = []
for i in range(0, len(data)):
args = [(data[i][0], dictionary, SPM_layer_num, K, features, labels)]
features_test.append(pool.apply_async(test_label, args))
test_labels = []
for feature in features_test:
test_labels.append(feature.get())
testActualLabels = test_data['labels']
size_confusion = len(np.unique(testActualLabels))
C = np.zeros((size_confusion, size_confusion))
for a,p in zip(testActualLabels, test_labels):
C[a][p] += 1
accuracy = np.diag(C).sum()/C.sum()
return C, accuracy
def get_image_feature(file_path,dictionary,layer_num,K):
'''
Extracts the spatial pyramid matching feature.
[input]
* file_path: path of image file to read
* dictionary: numpy.ndarray of shape (K,3F)
* layer_num: number of spatial pyramid layers
* K: number of clusters for the word maps
[output]
* feature: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
image = imageio.imread('../data/' + file_path)
wordmap = visual_words.get_visual_words(image, dictionary)
hist_all = get_feature_from_wordmap_SPM(wordmap, layer_num, K)
return hist_all
def distance_to_set(word_hist,histograms):
'''
Compute similarity between a histogram of visual words with all training image histograms.
[input]
* word_hist: numpy.ndarray of shape (K)
* histograms: numpy.ndarray of shape (N,K)
[output]
* sim: numpy.ndarray of shape (N)
'''
min_compare = np.minimum(histograms, word_hist)
return np.sum(min_compare, axis=1)
def get_feature_from_wordmap(wordmap,dict_size):
'''
Compute histogram of visual words.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* dict_size: dictionary size K
[output]
* hist: numpy.ndarray of shape (K)
'''
flatten_wordmap = wordmap.flatten()
hist = np.histogram(flatten_wordmap, bins = dict_size, range = (0,dict_size))
hist = hist[0]/np.linalg.norm(hist[0], ord = 1)
return np.asarray(hist)
def get_feature_from_wordmap_SPM(wordmap,layer_num,dict_size):
'''
Compute histogram of visual words using spatial pyramid matching.
[input]
* wordmap: numpy.ndarray of shape (H,W)
* layer_num: number of spatial pyramid layers
* dict_size: dictionary size K
[output]
* hist_all: numpy.ndarray of shape (K*(4^layer_num-1)/3)
'''
i_h, i_w = wordmap.shape
hist_all = []
for layer in range(0, layer_num+1):
D = 2**layer
if layer == 0 or layer == 1:
weight = 1/(2**(layer_num))
else:
weight = 1/(2**(layer_num+1-layer))
height_indices = np.round(np.arange(0, i_h+1, i_h/D)).astype('int')
width_indices = np.round(np.arange(0, i_w+1, i_w/D)).astype('int')
divisions = height_indices.shape[0]-1
for i in range(0, divisions):
for j in range (0, divisions):
s_h, s_w = height_indices[i], width_indices[j]
e_h, e_w = height_indices[i+1], width_indices[j+1]
imageSection = wordmap[s_h:e_h, s_w:e_w]
imageDictionary = get_feature_from_wordmap(imageSection, dict_size)
imageDictionary = imageDictionary*weight
hist_all.append(imageDictionary)
hist_all = np.asarray(hist_all)
hist_all = hist_all.flatten()
hist_all = hist_all/np.linalg.norm(hist_all, ord = 1)
return hist_all
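# Editor's note (sketch): with K = 100 clusters and SPM_layer_num = 2, layers
# 0, 1, 2 contribute 1 + 4 + 16 = 21 spatial cells, so the concatenated SPM
# feature used above has K * (4**(SPM_layer_num + 1) - 1) / 3 = 2100 entries.
assert int(100 * (4 ** (2 + 1) - 1) / 3) == 2100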
| 0
| 0
| 0
| 0
| 0
| 236
| 0
| -31
| 117
|
076663290b2821e6423b989415d2957ab3b21b81
| 441
|
py
|
Python
|
app/__init__.py
|
SalesAppi/JPSSM_topics
|
6be32fca31e5e15f51753101a222a08fd2013f9b
|
[
"MIT"
] | 1
|
2022-03-01T08:15:28.000Z
|
2022-03-01T08:15:28.000Z
|
app/__init__.py
|
SalesAppi/JPSSM_topics
|
6be32fca31e5e15f51753101a222a08fd2013f9b
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
SalesAppi/JPSSM_topics
|
6be32fca31e5e15f51753101a222a08fd2013f9b
|
[
"MIT"
] | 1
|
2020-12-14T05:00:28.000Z
|
2020-12-14T05:00:28.000Z
|
from flask import Flask
#from nltk.tokenize import RegexpTokenizer
#from nltk import stem
#from nltk.stem import WordNetLemmatizer
app = Flask(__name__)
| 19.173913
| 42
| 0.825397
|
from flask import Flask
import gensim
import re
from gensim.models import LdaModel
from gensim.test.utils import datapath
from gensim import corpora, models
from gensim.corpora import Dictionary
from re import sub
import os
import string
import codecs
import nltk
#from nltk.tokenize import RegexpTokenizer
#from nltk import stem
#from nltk.stem import WordNetLemmatizer
app = Flask(__name__)
from app import views
from app import model
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -2
| 290
|
f6c6564e121ddf8a7df728a398f2d1498dea1117
| 7,942
|
py
|
Python
|
tables.py
|
arunext/greffy
|
001a0b94428629b9cdfaa8966f3cf6cd6f349e8a
|
[
"Apache-2.0"
] | null | null | null |
tables.py
|
arunext/greffy
|
001a0b94428629b9cdfaa8966f3cf6cd6f349e8a
|
[
"Apache-2.0"
] | null | null | null |
tables.py
|
arunext/greffy
|
001a0b94428629b9cdfaa8966f3cf6cd6f349e8a
|
[
"Apache-2.0"
] | null | null | null |
import psycopg2
from config import config
import datetime
from textblob import TextBlob
import nltk
from nltk.corpus import stopwords
def create_tables():
""" create tables in the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS POSTS
(POST_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
COMMENTS INT,
COUNT INT);''')
cur.execute('''CREATE TABLE IF NOT EXISTS COMMENTS
(POST_ID INT NOT NULL,
COMMENT_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
UPVOTES INT,
DOWNVOTES INT);''')
conn.commit()
conn.close()
def create_post(postid, text):
""" insert a new post into the vendors table """
print("inside create post")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
comments = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO POSTS (POST_ID, DATA, CREATED, COMMENTS, COUNT) VALUES (%s, %s, %s, %s, %s)",(postid,text,time,comments,count));
conn.commit()
print("Records created successfully")
conn.close()
def create_comment(postid, commentid, text):
""" insert a new comment into the post table """
print("inside create comments")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO COMMENTS (POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES) VALUES (%s, %s, %s, %s, 0, 0)",(postid,commentid,text, time));
# Get Corresponding post
cur.execute("SELECT POST_ID, COMMENTS from POSTS where POST_ID = {0} ORDER BY COUNT DESC".format(postid));
rows = cur.fetchall()
for row in rows:
comments = row[1]
break
comments = comments+1
# Update Comments count of post
cur.execute("UPDATE POSTS set COMMENTS = {0} where POST_ID = {1}".format(comments,postid));
conn.commit()
print("Records created successfully")
conn.close()
def lookup_table(text):
""" insert a new post into the vendors table """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
#initialize id and count to null values
postid = 0
count = 0
#Select post
cur.execute("SELECT POST_ID, DATA, COUNT from POSTS where DATA = '{0}' ORDER BY COUNT DESC".format(text));
rows = cur.fetchall()
for row in rows:
postid = row[0]
count = row[2]
break
    print("Lookup operation done successfully. Id = {0}".format(postid))
conn.close()
return postid, count
def update_table_count(postid, count):
""" update post with count """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("UPDATE POSTS set COUNT = {0} where POST_ID = {1}".format(count,postid));
conn.commit()
    print("Update operation done successfully for POST_ID {0} and count {1}".format(postid,count))
conn.close()
def comment_upvote(comment_id):
""" update post with count """
print("inside upvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, UPVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY UPVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
upvotes = row[1]
break
upvotes = upvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set UPVOTES = {0} where COMMENT_ID = {1}".format(upvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
def comment_downvote(comment_id):
""" update comment with dwnvote """
print("inside downvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, DOWNVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY DOWNVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
downvotes = row[1]
break
downvotes = downvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set DOWNVOTES = {0} where COMMENT_ID = {1}".format(downvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
| 27.013605
| 157
| 0.629816
|
import psycopg2
from config import config
import datetime
from textblob import TextBlob
import nltk
from nltk.corpus import stopwords
def create_tables():
""" create tables in the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute('''CREATE TABLE IF NOT EXISTS POSTS
(POST_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
COMMENTS INT,
COUNT INT);''')
cur.execute('''CREATE TABLE IF NOT EXISTS COMMENTS
(POST_ID INT NOT NULL,
COMMENT_ID INT PRIMARY KEY NOT NULL,
DATA TEXT NOT NULL,
CREATED TIMESTAMP NOT NULL,
UPVOTES INT,
DOWNVOTES INT);''')
conn.commit()
conn.close()
def show_table():
print("creating tables with")
create_tables() #creating table, later check if table exists.
print("Inside show tables")
""" show tables from the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
print ("Opened database successfully")
cur = conn.cursor()
cur.execute("SELECT POST_ID, DATA, COUNT, COMMENTS from POSTS ORDER BY COUNT DESC")
rows = cur.fetchall()
#table_text = ""
#for row in rows:
# table_text += "Post ID = " + str(row[0])
# table_text += "Text = " + row[1]
#table_text += "Count = " + str(row[2]) + "\n"
conn.close()
return rows
def show_post(postid):
print("Inside show post")
""" show tables from the PostgreSQL database"""
params = config()
# connect to the PostgreSQL server
conn = psycopg2.connect(**params)
print ("Opened database successfully")
cur = conn.cursor()
cur.execute("SELECT POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES from COMMENTS where POST_ID = {0} ORDER BY UPVOTES DESC".format(postid));
rows = cur.fetchall()
conn.close()
return rows
def create_post(postid, text):
""" insert a new post into the vendors table """
print("inside create post")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
comments = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO POSTS (POST_ID, DATA, CREATED, COMMENTS, COUNT) VALUES (%s, %s, %s, %s, %s)",(postid,text,time,comments,count));
conn.commit()
print("Records created successfully")
conn.close()
def create_comment(postid, commentid, text):
""" insert a new comment into the post table """
print("inside create comments")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
count = 0
time = datetime.datetime.utcnow();
cur.execute("INSERT INTO COMMENTS (POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES) VALUES (%s, %s, %s, %s, 0, 0)",(postid,commentid,text, time));
# Get Corresponding post
cur.execute("SELECT POST_ID, COMMENTS from POSTS where POST_ID = {0} ORDER BY COUNT DESC".format(postid));
rows = cur.fetchall()
for row in rows:
comments = row[1]
break
comments = comments+1
# Update Comments count of post
cur.execute("UPDATE POSTS set COMMENTS = {0} where POST_ID = {1}".format(comments,postid));
conn.commit()
print("Records created successfully")
conn.close()
def lookup_table(text):
""" insert a new post into the vendors table """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
#initialize id and count to null values
postid = 0
count = 0
#Select post
cur.execute("SELECT POST_ID, DATA, COUNT from POSTS where DATA = '{0}' ORDER BY COUNT DESC".format(text));
rows = cur.fetchall()
for row in rows:
postid = row[0]
count = row[2]
break
    print("Lookup operation done successfully. Id = {0}".format(postid))
conn.close()
return postid, count
def get_post_summary(postid):
    #currently send the top comment, later this is the key logic to send response
print("inside get post summary")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("SELECT POST_ID, COMMENT_ID, DATA, CREATED, UPVOTES, DOWNVOTES from COMMENTS where POST_ID = {0} ORDER BY UPVOTES DESC".format(postid));
rows = cur.fetchall()
count = 0
catcomments = ""
for row in rows:
count = count + 1
if count == 1:
topcomment = row[2]
catcomments = catcomments + row[2]
if count == 0:
#no comments, ask user to comment
topcomment = "Sorry, we don't have any comments, be the first one to comment: http://greffy.herokuapp.com/post/" + str(postid)
polarity = 0
subjectivity = 0
else:
blob = TextBlob(catcomments)
# TODO add overall positive, neutral negative instead of polarity
        sentences = blob.sentences
        words = blob.words
        polarity = round(blob.sentiment.polarity,2)
subjectivity = round(blob.sentiment.subjectivity,2)
print(topcomment,polarity)
return topcomment,polarity
def update_table_count(postid, count):
""" update post with count """
print("inside lookup to tables")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
cur.execute("UPDATE POSTS set COUNT = {0} where POST_ID = {1}".format(count,postid));
conn.commit()
    print("Update operation done successfully for POST_ID {0} and count {1}".format(postid,count))
conn.close()
def comment_upvote(comment_id):
""" update post with count """
print("inside upvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, UPVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY UPVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
upvotes = row[1]
break
upvotes = upvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set UPVOTES = {0} where COMMENT_ID = {1}".format(upvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
def comment_downvote(comment_id):
""" update comment with dwnvote """
print("inside downvote comment")
# read database configuration
params = config()
# connect to the PostgreSQL database
conn = psycopg2.connect(**params)
cur = conn.cursor()
# Get Corresponding comment
cur.execute("SELECT COMMENT_ID, DOWNVOTES, POST_ID from COMMENTS where COMMENT_ID = {0} ORDER BY DOWNVOTES DESC".format(comment_id));
rows = cur.fetchall()
for row in rows:
downvotes = row[1]
break
downvotes = downvotes+1
# Update Comments count of post
cur.execute("UPDATE COMMENTS set DOWNVOTES = {0} where COMMENT_ID = {1}".format(downvotes,comment_id));
conn.commit()
print ("Comment upvote completed")
conn.close()
#return post ID so that redirect can use it
return (row[2])
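# Usage sketch (editor's addition; assumes config() returns valid PostgreSQL
# connection parameters for a reachable database):
# create_tables()
# create_post(1, "what do people think of greffy?")
# create_comment(1, 10, "looks promising")
# print(show_post(1))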
| 0
| 0
| 0
| 0
| 0
| 2,434
| 0
| 0
| 69
|
2f1c4753ac08df358bf6226a60a7c9bda64e76e2
| 950
|
py
|
Python
|
weltgeist/units.py
|
samgeen/Weltgeist
|
c7d52e879bb3473cecbb06651b5e76dac3020da6
|
[
"MIT"
] | null | null | null |
weltgeist/units.py
|
samgeen/Weltgeist
|
c7d52e879bb3473cecbb06651b5e76dac3020da6
|
[
"MIT"
] | null | null | null |
weltgeist/units.py
|
samgeen/Weltgeist
|
c7d52e879bb3473cecbb06651b5e76dac3020da6
|
[
"MIT"
] | null | null | null |
"""
Defined code units and physical quantities
The Python parts of Weltgeist use cgs
VH1 uses units defined below
Sam Geen, February 2018
"""
import numpy as np
# Physical quantities (base units in cgs)
pc = 3.086e+18
mH = 1.66e-24
year = 3.154e+7
Myr = 1e6*year
kB = 1.3806485279e-16 # in cgs
G = 6.67428e-8
X = 0.74
mp = mH / X
c = 2.998e+10
eV = 1.60217662e-12 # in ergs
Msun = 1.9891e33 # g
# Code units
# Used by VH1 - the Python parts of Weltgeist use cgs
distance = pc # in cm
density = mH # 1 g/cm^3
time = 1.0 / np.sqrt(G*density) # sets G=1 in VH1 (not super important here, though)
# Derived units
velocity = distance / time
mass = density*distance**3.0
pressure = density * velocity**2.0
energy = mass*velocity**2.0
# Note: this is acceleration! In the code (e.g. forces.f90), grav = v*v/r
# e.g. 2*GM/r = v_esc^2, so g=GM/r^2=0.5*v_esc^2/r
gravity = G*mass/distance**2 # velocity*velocity/distance
| 26.388889
| 85
| 0.663158
|
"""
Defined code units and physical quantities
The Python parts of Weltgeist use cgs
VH1 uses units defined below
Sam Geen, February 2018
"""
import numpy as np
# Physical quantities (base units in cgs)
pc = 3.086e+18
mH = 1.66e-24
year = 3.154e+7
Myr = 1e6*year
kB = 1.3806485279e-16 # in cgs
G = 6.67428e-8
X = 0.74
mp = mH / X
c = 2.998e+10
eV = 1.60217662e-12 # in ergs
Msun = 1.9891e33 # g
# Code units
# Used by VH1 - the Python parts of Weltgeist use cgs
distance = pc # in cm
density = mH # 1 g/cm^3
time = 1.0 / np.sqrt(G*density) # sets G=1 in VH1 (not super important here, though)
# Derived units
velocity = distance / time
mass = density*distance**3.0
pressure = density * velocity**2.0
energy = mass*velocity**2.0
# Note: this is acceleration! In the code (e.g. forces.f90), grav = v*v/r
# e.g. 2*GM/r = v_esc^2, so g=GM/r^2=0.5*v_esc^2/r
gravity = G*mass/distance**2 # velocity*velocity/distance
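# Editor's example (not part of the original module): quick sanity check of the
# derived code units when the file is run directly.
if __name__ == "__main__":
    print("code time unit in Myr:", time / Myr)
    print("code velocity unit in km/s:", velocity / 1e5)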
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
ca8b1afb26f13038161c24aead09569f01b99768
| 9,456
|
py
|
Python
|
olwidget/widgets.py
|
jj0hns0n/mednet
|
efb6681292e7ac8f870ee5967a5a2b352853ae35
|
[
"BSD-3-Clause"
] | 2
|
2016-02-18T01:06:04.000Z
|
2016-02-18T03:53:37.000Z
|
olwidget/widgets.py
|
jj0hns0n/mednet
|
efb6681292e7ac8f870ee5967a5a2b352853ae35
|
[
"BSD-3-Clause"
] | null | null | null |
olwidget/widgets.py
|
jj0hns0n/mednet
|
efb6681292e7ac8f870ee5967a5a2b352853ae35
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from django.contrib.gis.gdal import OGRGeometry
from django.contrib.gis.geos import GEOSGeometry
from django.conf import settings
# Default settings for paths and API URLs. These can all be overridden by
# specifying a value in settings.py
api_defaults = {
'GOOGLE_API_KEY': "",
'YAHOO_APP_ID': "",
'OLWIDGET_MEDIA_URL': url_join(settings.MEDIA_URL, "olwidget"),
'GOOGLE_API': "http://maps.google.com/maps?file=api&v=2",
'YAHOO_API': "http://api.maps.yahoo.com/ajaxymap?v=3.0",
'OSM_API': "http://openstreetmap.org/openlayers/OpenStreetMap.js",
'OL_API': "http://openlayers.org/api/2.8/OpenLayers.js",
'MS_VE_API' : "http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1",
}
for key, default in api_defaults.iteritems():
if not hasattr(settings, key):
setattr(settings, key, default)
OLWIDGET_JS = url_join(settings.OLWIDGET_MEDIA_URL, "js/olwidget.js")
OLWIDGET_CSS = url_join(settings.OLWIDGET_MEDIA_URL, "css/olwidget.css")
DEFAULT_PROJ = "4326"
ewkt_re = re.compile("^SRID=(?P<srid>\d+);(?P<wkt>.+)$", re.I)
def get_wkt(value, srid=DEFAULT_PROJ):
"""
`value` is either a WKT string or a geometry field. Returns WKT in the
projection for the given SRID.
"""
ogr = None
if value:
if isinstance(value, OGRGeometry):
ogr = value
elif isinstance(value, GEOSGeometry):
ogr = value.ogr
elif isinstance(value, basestring):
match = ewkt_re.match(value)
if match:
ogr = OGRGeometry(match.group('wkt'), match.group('srid'))
else:
ogr = OGRGeometry(value)
wkt = ''
if ogr:
# Workaround for Django bug #12312. GEOSGeometry types don't support 3D wkt;
# OGRGeometry types output 3D for linestrings even if they should do 2D, causing
# IntegrityError's.
if ogr.dimension == 2:
geos = ogr.geos
geos.transform(srid)
wkt = geos.wkt
else:
ogr.transform(srid)
wkt = ogr.wkt
return wkt
def collection_wkt(fields):
""" Returns WKT for the given list of geometry fields. """
if not fields:
return ""
if len(fields) == 1:
return get_wkt(fields[0])
return "GEOMETRYCOLLECTION(%s)" % \
",".join(get_wkt(field) for field in fields)
def add_srid(wkt, srid=DEFAULT_PROJ):
"""
Returns EWKT (WKT with a specified SRID) for the given wkt and SRID
(default 4326).
"""
if wkt:
return "SRID=%s;%s" % (srid, wkt)
return ""
| 32.273038
| 88
| 0.602792
|
import re
from django.contrib.gis.gdal import OGRException, OGRGeometry
from django.contrib.gis.geos import GEOSGeometry
from django.forms.widgets import Textarea
from django.template.loader import render_to_string
from django.utils import simplejson
from django.conf import settings
from django import forms
def reduce_url_parts(a, b):
if a[-1] == "/":
return a + b
return a + "/" + b
def url_join(*args):
return reduce(reduce_url_parts, args)
# Default settings for paths and API URLs. These can all be overridden by
# specifying a value in settings.py
api_defaults = {
'GOOGLE_API_KEY': "",
'YAHOO_APP_ID': "",
'OLWIDGET_MEDIA_URL': url_join(settings.MEDIA_URL, "olwidget"),
'GOOGLE_API': "http://maps.google.com/maps?file=api&v=2",
'YAHOO_API': "http://api.maps.yahoo.com/ajaxymap?v=3.0",
'OSM_API': "http://openstreetmap.org/openlayers/OpenStreetMap.js",
'OL_API': "http://openlayers.org/api/2.8/OpenLayers.js",
'MS_VE_API' : "http://dev.virtualearth.net/mapcontrol/mapcontrol.ashx?v=6.1",
}
for key, default in api_defaults.iteritems():
if not hasattr(settings, key):
setattr(settings, key, default)
OLWIDGET_JS = url_join(settings.OLWIDGET_MEDIA_URL, "js/olwidget.js")
OLWIDGET_CSS = url_join(settings.OLWIDGET_MEDIA_URL, "css/olwidget.css")
DEFAULT_PROJ = "4326"
def separated_lowercase_to_lower_camelcase(input):
return re.sub('_\w', lambda match: match.group(0)[-1].upper(), input)
def translate_options(options):
translated = {}
for key, value in options.iteritems():
new_key = separated_lowercase_to_lower_camelcase(key)
# recurse
if isinstance(value, dict):
translated[new_key] = translate_options(value)
else:
translated[new_key] = value
return translated
class MapMixin(object):
def set_options(self, options, template):
self.options = options or {}
# Though this is the olwidget.js default, it must be explicitly set so
# form.media knows to include osm.
self.options['layers'] = self.options.get('layers',
['osm.mapnik'])
self.template = template or self.default_template
def _media(self):
js = set()
# collect scripts necessary for various layers
for layer in self.options['layers']:
if layer.startswith("osm."):
js.add(settings.OSM_API)
elif layer.startswith("google."):
js.add(settings.GOOGLE_API + "&key=%s" % settings.GOOGLE_API_KEY)
elif layer.startswith("yahoo."):
js.add(settings.YAHOO_API + "&appid=%s" % settings.YAHOO_APP_ID)
elif layer.startswith("ve."):
js.add(settings.MS_VE_API)
js = [settings.OL_API, OLWIDGET_JS] + list(js)
return forms.Media(css={'all': (OLWIDGET_CSS,)}, js=js)
media = property(_media)
class EditableMap(forms.Textarea, MapMixin):
"""
An OpenLayers mapping widget for geographic data.
Example::
from django import forms
from olwidget.widgets import OLWidget
class MyForm(forms.Form):
location = forms.CharField(widget=EditableMap(
options={'geometry': 'point'}))
"""
default_template = 'olwidget/editable_map.html'
def __init__(self, options=None, template=None):
self.set_options(options, template)
super(EditableMap, self).__init__()
def render(self, name, value, attrs=None):
if not attrs:
attrs = {}
# without an id, javascript fails
if attrs.has_key('id'):
element_id = attrs['id']
else:
element_id = "id_%s" % id(self)
# Allow passing of wkt for MapDisplay subclass
if attrs.has_key('wkt'):
wkt = attrs['wkt']
else:
# Use the default SRID's
wkt = add_srid(get_wkt(value))
if name and not self.options.has_key('name'):
self.options['name'] = name
context = {
'id': element_id,
'name': name,
'wkt': wkt,
'map_opts': simplejson.dumps(
translate_options(self.options)
),
}
return render_to_string(self.template, context)
class MapDisplay(EditableMap):
"""
Object for display of geometries on an OpenLayers map. Arguments (all are
optional):
* ``fields`` - a list of geometric fields or WKT strings to display on the
map. If none are given, the map will have no overlay.
* ``name`` - a name to use for display of the field data layer.
* ``options`` - a dict of options for map display. A complete list of
options is in the documentation for olwidget.js.
Example::
from olwidget.widgets import MapDisplay
map = MapDisplay(fields=[my_model.start_point, my_model.destination])
To use in a template, first display the media (URLs for javascript and CSS
needed for map display) and then print the MapDisplay object, as in the
following::
<html>
<head>
{{ map.media }}
</head>
<body>
{{ map }}
</body>
</html>
By default, maps rendered by MapDisplay objects are not editable, but this
can be overriden by setting "options['editable'] = True".
"""
def __init__(self, fields=None, options=None, template=None):
self.fields = fields
options = options or {}
if not options.has_key('editable'):
options['editable'] = False
if (self.fields and len(self.fields) > 1) or \
(fields[0].geom_type.upper() == 'GEOMETRYCOLLECTION'):
options['isCollection'] = True
super(MapDisplay, self).__init__(options, template)
def __unicode__(self):
wkt = add_srid(collection_wkt(self.fields))
name = self.options.get('name', 'data')
return self.render(name, None, attrs={'wkt': wkt})
class InfoMap(forms.Widget, MapMixin):
"""
Widget for displaying maps with pop-up info boxes over geometries.
Arguments:
* ``info``: an array of [geometry, HTML] pairs that specify geometries, and
the popup contents associated with them. Geometries can be expressed as
geometry fields, or as WKT strings. Example::
[
[geomodel1.geofield, "<p>Model One</p>"],
[geomodel2.geofield, "<p>Model Two</p>"],
...
]
* ``options``: an optional dict of options for map display.
In templates, InfoMap.media must be displayed in addition to InfoMap for
the map to function properly.
"""
default_template = 'olwidget/info_map.html'
def __init__(self, info=None, options=None, template=None):
self.info = info
self.set_options(options, template)
super(InfoMap, self).__init__()
def render(self, name, value, attrs=None):
if not self.info:
info_json = '[]'
else:
# convert fields to wkt and translate options if needed
wkt_array = []
for geom, attr in self.info:
wkt = add_srid(get_wkt(geom))
if isinstance(attr, dict):
wkt_array.append([wkt, translate_options(attr)])
else:
wkt_array.append([wkt, attr])
info_json = simplejson.dumps(wkt_array)
# arbitrary unique id
div_id = "id_%s" % id(self)
context = {
'id': div_id,
'info_array': info_json,
'map_opts': simplejson.dumps(
translate_options(self.options)
),
}
return render_to_string(self.template, context)
def __unicode__(self):
return self.render(None, None)
ewkt_re = re.compile("^SRID=(?P<srid>\d+);(?P<wkt>.+)$", re.I)
def get_wkt(value, srid=DEFAULT_PROJ):
"""
`value` is either a WKT string or a geometry field. Returns WKT in the
projection for the given SRID.
"""
ogr = None
if value:
if isinstance(value, OGRGeometry):
ogr = value
elif isinstance(value, GEOSGeometry):
ogr = value.ogr
elif isinstance(value, basestring):
match = ewkt_re.match(value)
if match:
ogr = OGRGeometry(match.group('wkt'), match.group('srid'))
else:
ogr = OGRGeometry(value)
wkt = ''
if ogr:
# Workaround for Django bug #12312. GEOSGeometry types don't support 3D wkt;
# OGRGeometry types output 3D for linestrings even if they should do 2D, causing
# IntegrityError's.
if ogr.dimension == 2:
geos = ogr.geos
geos.transform(srid)
wkt = geos.wkt
else:
ogr.transform(srid)
wkt = ogr.wkt
return wkt
def collection_wkt(fields):
""" Returns WKT for the given list of geometry fields. """
if not fields:
return ""
if len(fields) == 1:
return get_wkt(fields[0])
return "GEOMETRYCOLLECTION(%s)" % \
",".join(get_wkt(field) for field in fields)
def add_srid(wkt, srid=DEFAULT_PROJ):
"""
Returns EWKT (WKT with a specified SRID) for the given wkt and SRID
(default 4326).
"""
if wkt:
return "SRID=%s;%s" % (srid, wkt)
return ""
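# Editor's sketch: add_srid simply wraps WKT in an EWKT prefix, e.g.
#   add_srid("POINT(30 10)")  ->  "SRID=4326;POINT(30 10)"
# (get_wkt/collection_wkt additionally require a configured GeoDjango/GDAL stack.)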
| 0
| 0
| 0
| 5,961
| 0
| 539
| 0
| 81
| 272
|
a777c1f7cbe7e6ff795a3c5c9391e45397c000e0
| 921
|
py
|
Python
|
advent/year2021/day1.py
|
davweb/advent-of-code
|
6d9ac52092f4aad26a84d7cfd2fcd8420f1ea612
|
[
"Unlicense"
] | null | null | null |
advent/year2021/day1.py
|
davweb/advent-of-code
|
6d9ac52092f4aad26a84d7cfd2fcd8420f1ea612
|
[
"Unlicense"
] | null | null | null |
advent/year2021/day1.py
|
davweb/advent-of-code
|
6d9ac52092f4aad26a84d7cfd2fcd8420f1ea612
|
[
"Unlicense"
] | null | null | null |
#!/usr/local/bin/python3
def part1(data):
"""
>>> part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
7
>>> part1(read_input())
1581
"""
previous = data[0]
count = 0
for value in data[1:]:
if value > previous:
count += 1
previous = value
return count
def part2(data):
"""
>>> part2([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
5
>>> part2(read_input())
1618
"""
count = 0
for i in range(1, len(data) - 2):
previous = sum(data[i - 1:i + 2])
value = sum(data[i:i + 3])
if value > previous:
count += 1
return count
if __name__ == "__main__":
main()
| 16.745455
| 65
| 0.512486
|
#!/usr/local/bin/python3
def read_input():
file = open('input/2021/day1-input.txt', 'r')
return [int(line) for line in file.readlines()]
def part1(data):
"""
>>> part1([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
7
>>> part1(read_input())
1581
"""
previous = data[0]
count = 0
for value in data[1:]:
if value > previous:
count += 1
previous = value
return count
def part2(data):
"""
>>> part2([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
5
>>> part2(read_input())
1618
"""
count = 0
for i in range(1, len(data) - 2):
previous = sum(data[i - 1:i + 2])
value = sum(data[i:i + 3])
if value > previous:
count += 1
return count
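def part2_alt(data):
    """
    Editor's sketch (not part of the original solution): consecutive 3-element
    windows share two values, so part2 reduces to comparing data[i + 3] > data[i].
    >>> part2_alt([199, 200, 208, 210, 200, 207, 240, 269, 260, 263])
    5
    """
    return sum(1 for i in range(len(data) - 3) if data[i + 3] > data[i])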
def main():
data = read_input()
print(part1(data))
print(part2(data))
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 158
| 0
| 0
| 46
|
ac3978d7b01ad6d6e0f32900633722a103fd5b2e
| 4,738
|
py
|
Python
|
Example/Gutenkunst2007/Lee_2003/LeeNet.py
|
bcdaniels/SloppyCell
|
17e68127a6aba19056a5067748a2d18241cc4d76
|
[
"BSD-3-Clause"
] | 2
|
2020-05-26T19:29:39.000Z
|
2020-08-26T20:54:52.000Z
|
Example/Gutenkunst2007/Lee_2003/LeeNet.py
|
bcdaniels/SloppyCell
|
17e68127a6aba19056a5067748a2d18241cc4d76
|
[
"BSD-3-Clause"
] | 1
|
2019-04-15T21:08:12.000Z
|
2019-04-15T21:08:12.000Z
|
Example/Gutenkunst2007/Lee_2003/LeeNet.py
|
jurquiza/SloppyCellUrquiza2019
|
a9f64d9d4172c82735813f09e48f36777a714e9c
|
[
"BSD-3-Clause"
] | 3
|
2017-09-12T03:12:01.000Z
|
2018-10-19T11:08:09.000Z
|
# NOTE: assumes SloppyCell's Network class is already in scope
# (e.g. from SloppyCell.ReactionNetworks import Network)
net = Network('Lee2003')
net.add_compartment('extract')
net.add_parameter('Dsh0', 100, name = r'Dsh^0')
net.add_parameter('APC0', 100, name = r'APC^0')
net.add_parameter('TCF0', 15, name = r'TCF^0')
net.add_parameter('GSK0', 50, name = r'GSK^0')
net.add_species('X2', 'extract', 0)#, name=r'Dsh_a')
net.add_species('X3', 'extract', 0)#, name=r'APC^*/axin^*/GSK3')
net.add_species('X4', 'extract', 0)#, name=r'APC/axin/GSK3')
net.add_species('X9', 'extract', 0)#, name=r'\beta-catenin^*/APC^*/axin^*/GSK3')
net.add_species('X10', 'extract', 0)#, name=r'\beta-catenin^*')
net.add_species('X11', 'extract', 0)#, name=r'\beta-catenin')
net.add_species('X12', 'extract', 0)#, name=r'Axin')
#net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X1', 'extract')#, name=r'Dsh_i')
net.add_species('X6', 'extract')#, name=r'APC/axin')
net.add_species('X7', 'extract')#, name=r'APC')
net.add_species('X8', 'extract')#, name=r'\beta-catenin/APC^*/axin^*/GSK3')
net.add_species('X13', 'extract')#, name=r'TCF')
net.add_species('X14', 'extract')#, name=r'\beta-catenin/TCF')
net.add_species('X15', 'extract')#, name=r'\beta-catenin/APC')
net.add_parameter('K7', 50, name = r'K_7')
net.add_parameter('K8', 120, name = r'K_8')
net.add_parameter('K16', 30, name = r'K_16')
net.add_parameter('K17', 1200, name = r'K_17')
net.add_parameter('k1', 0.182, name = r'k_{1}')
net.add_parameter('k2', 1.82e-2, name = r'k_{2}')
net.add_parameter('k3', 5e-2, name = r'k_{3}')
net.add_parameter('k4', 0.267, name = r'k_{4}')
net.add_parameter('k5', 0.133, name = r'k_{5}')
net.add_parameter('k6', 9.09e-2, name = r'k_{6}')
net.add_parameter('km6', 0.909, name = 'k_{-6}')
net.add_parameter('k9', 206, name = r'k_{9}')
net.add_parameter('k10', 206, name = r'k_{10}')
net.add_parameter('k11', 0.417, name = r'k_{11}')
net.add_parameter('k13', 2.57e-4, name = r'k_{13}')
net.add_parameter('k15', 0.167, name = r'k_{15}')
net.add_parameter('v12', 0.423, name = r'v_{12}')
net.add_parameter('v14', 8.22e-5, name = r'v_{14}')
#net.add_parameter('k1', 0.18, name = r'k_{1}')
#net.add_parameter('k2', 1.8e-2, name = r'k_{2}')
#net.add_parameter('k3', 5e-2, name = r'k_{3}')
#net.add_parameter('k4', 0.27, name = r'k_{4}')
#net.add_parameter('k5', 0.13, name = r'k_{5}')
#net.add_parameter('k6', 9.1e-2, name = r'k_{6}')
#net.add_parameter('km6', 0.91, name = 'k_{-6}')
#net.add_parameter('k9', 210, name = r'k_{9}')
#net.add_parameter('k10', 210, name = r'k_{10}')
#net.add_parameter('k11', 0.42, name = r'k_{11}')
#net.add_parameter('k13', 2.6e-4, name = r'k_{13}')
#net.add_parameter('k15', 0.17, name = r'k_{15}')
#
#net.add_parameter('v12', 0.42, name = r'v_{12}')
#net.add_parameter('v14', 8.2e-5, name = r'v_{14}')
net.add_parameter('W', 0, is_optimizable=False)
net.add_rate_rule('X2', 'k1*W*(Dsh0-X2)-k2*X2')
net.add_rate_rule('X9', 'k9 * X8 - k10*X9')
net.add_rate_rule('X10', 'k10*X9-k11*X10')
net.add_rate_rule('X4', '-k3*X2*X4 - k4*X4 + k5*X3 + k6*X5*X6 - km6*X4')
net.add_parameter('a')
net.add_assignment_rule('a', '1+APC0*K17/(K7*(K17+X11))')
net.add_parameter('b')
net.add_assignment_rule('b', 'APC0*K17*X12/(K7*(K17+X11)**2)')
net.add_parameter('c')
net.add_assignment_rule('c', 'k3*X2*X4 - k6 * GSK0*APC0*K17*X12/(K7*(K17+X11)) + km6*X4 + v14 - k15*X12')
net.add_parameter('d')
net.add_assignment_rule('d', '1+X11/K8')
net.add_parameter('e')
net.add_assignment_rule('e', 'X3/K8')
net.add_parameter('f')
net.add_assignment_rule('f', 'k4*X4 - k5*X3 - k9*X3*X11/K8 + k10*X9')
net.add_parameter('g')
net.add_assignment_rule('g', '1+X3/K8+TCF0*K16/(K16+X11)**2 + APC0*K17/(K17+X11)**2')
net.add_parameter('h')
net.add_assignment_rule('h', 'X11/K8')
net.add_parameter('i')
net.add_assignment_rule('i', 'v12 - (k9*X3/K8 + k13)*X11')
net.add_parameter('rhsX11', name = 'rhs_{X11}')
net.add_assignment_rule('rhsX11', '(d*i - f*h)/(d*g - e*h)')
net.add_rate_rule('X11', 'rhsX11')
net.add_rate_rule('X12', '(c + rhsX11*b)/a')
net.add_rate_rule('X3', '(e*i - f*g)/(e*h - d*g)')
net.add_assignment_rule('X1', 'Dsh0 - X2')
net.add_assignment_rule('X7', 'K17*APC0/(K17+X11)')
net.add_assignment_rule('X15', 'X11*APC0/(K17+X11)')
net.add_assignment_rule('X13', 'K16*TCF0/(K16+X11)')
net.add_assignment_rule('X14', 'X11*TCF0/(K16+X11)')
net.add_assignment_rule('X8', 'X3*X11/K8')
net.add_assignment_rule('X6', 'K17*X12*APC0/(K7*(K17+X11))')
# These are just for my own monitoring purposes
net.add_parameter('BCatenin', name = r'\beta-catenin')
net.add_assignment_rule('BCatenin', 'X8+X9+X10+X11+X14+X15')
net.add_parameter('Axin', name = r'Axin')
net.add_assignment_rule('Axin', 'X3+X4+X6+X8+X9+X12')
| 41.561404
| 105
| 0.657239
|
from SloppyCell.ReactionNetworks import *
net = Network('Lee2003')
net.add_compartment('extract')
net.add_parameter('Dsh0', 100, name = r'Dsh^0')
net.add_parameter('APC0', 100, name = r'APC^0')
net.add_parameter('TCF0', 15, name = r'TCF^0')
net.add_parameter('GSK0', 50, name = r'GSK^0')
net.add_species('X2', 'extract', 0)#, name=r'Dsh_a')
net.add_species('X3', 'extract', 0)#, name=r'APC^*/axin^*/GSK3')
net.add_species('X4', 'extract', 0)#, name=r'APC/axin/GSK3')
net.add_species('X9', 'extract', 0)#, name=r'\beta-catenin^*/APC^*/axin^*/GSK3')
net.add_species('X10', 'extract', 0)#, name=r'\beta-catenin^*')
net.add_species('X11', 'extract', 0)#, name=r'\beta-catenin')
net.add_species('X12', 'extract', 0)#, name=r'Axin')
#net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X5', 'extract', 'GSK0', is_constant=True)#, name=r'GSK3')
net.add_species('X1', 'extract')#, name=r'Dsh_i')
net.add_species('X6', 'extract')#, name=r'APC/axin')
net.add_species('X7', 'extract')#, name=r'APC')
net.add_species('X8', 'extract')#, name=r'\beta-catenin/APC^*/axin^*/GSK3')
net.add_species('X13', 'extract')#, name=r'TCF')
net.add_species('X14', 'extract')#, name=r'\beta-catenin/TCF')
net.add_species('X15', 'extract')#, name=r'\beta-catenin/APC')
net.add_parameter('K7', 50, name = r'K_7')
net.add_parameter('K8', 120, name = r'K_8')
net.add_parameter('K16', 30, name = r'K_16')
net.add_parameter('K17', 1200, name = r'K_17')
net.add_parameter('k1', 0.182, name = r'k_{1}')
net.add_parameter('k2', 1.82e-2, name = r'k_{2}')
net.add_parameter('k3', 5e-2, name = r'k_{3}')
net.add_parameter('k4', 0.267, name = r'k_{4}')
net.add_parameter('k5', 0.133, name = r'k_{5}')
net.add_parameter('k6', 9.09e-2, name = r'k_{6}')
net.add_parameter('km6', 0.909, name = 'k_{-6}')
net.add_parameter('k9', 206, name = r'k_{9}')
net.add_parameter('k10', 206, name = r'k_{10}')
net.add_parameter('k11', 0.417, name = r'k_{11}')
net.add_parameter('k13', 2.57e-4, name = r'k_{13}')
net.add_parameter('k15', 0.167, name = r'k_{15}')
net.add_parameter('v12', 0.423, name = r'v_{12}')
net.add_parameter('v14', 8.22e-5, name = r'v_{14}')
#net.add_parameter('k1', 0.18, name = r'k_{1}')
#net.add_parameter('k2', 1.8e-2, name = r'k_{2}')
#net.add_parameter('k3', 5e-2, name = r'k_{3}')
#net.add_parameter('k4', 0.27, name = r'k_{4}')
#net.add_parameter('k5', 0.13, name = r'k_{5}')
#net.add_parameter('k6', 9.1e-2, name = r'k_{6}')
#net.add_parameter('km6', 0.91, name = 'k_{-6}')
#net.add_parameter('k9', 210, name = r'k_{9}')
#net.add_parameter('k10', 210, name = r'k_{10}')
#net.add_parameter('k11', 0.42, name = r'k_{11}')
#net.add_parameter('k13', 2.6e-4, name = r'k_{13}')
#net.add_parameter('k15', 0.17, name = r'k_{15}')
#
#net.add_parameter('v12', 0.42, name = r'v_{12}')
#net.add_parameter('v14', 8.2e-5, name = r'v_{14}')
net.add_parameter('W', 0, is_optimizable=False)
net.add_rate_rule('X2', 'k1*W*(Dsh0-X2)-k2*X2')
net.add_rate_rule('X9', 'k9 * X8 - k10*X9')
net.add_rate_rule('X10', 'k10*X9-k11*X10')
net.add_rate_rule('X4', '-k3*X2*X4 - k4*X4 + k5*X3 + k6*X5*X6 - km6*X4')
net.add_parameter('a')
net.add_assignment_rule('a', '1+APC0*K17/(K7*(K17+X11))')
net.add_parameter('b')
net.add_assignment_rule('b', 'APC0*K17*X12/(K7*(K17+X11)**2)')
net.add_parameter('c')
net.add_assignment_rule('c', 'k3*X2*X4 - k6 * GSK0*APC0*K17*X12/(K7*(K17+X11)) + km6*X4 + v14 - k15*X12')
net.add_parameter('d')
net.add_assignment_rule('d', '1+X11/K8')
net.add_parameter('e')
net.add_assignment_rule('e', 'X3/K8')
net.add_parameter('f')
net.add_assignment_rule('f', 'k4*X4 - k5*X3 - k9*X3*X11/K8 + k10*X9')
net.add_parameter('g')
net.add_assignment_rule('g', '1+X3/K8+TCF0*K16/(K16+X11)**2 + APC0*K17/(K17+X11)**2')
net.add_parameter('h')
net.add_assignment_rule('h', 'X11/K8')
net.add_parameter('i')
net.add_assignment_rule('i', 'v12 - (k9*X3/K8 + k13)*X11')
net.add_parameter('rhsX11', name = 'rhs_{X11}')
net.add_assignment_rule('rhsX11', '(d*i - f*h)/(d*g - e*h)')
net.add_rate_rule('X11', 'rhsX11')
net.add_rate_rule('X12', '(c + rhsX11*b)/a')
net.add_rate_rule('X3', '(e*i - f*g)/(e*h - d*g)')
net.add_assignment_rule('X1', 'Dsh0 - X2')
net.add_assignment_rule('X7', 'K17*APC0/(K17+X11)')
net.add_assignment_rule('X15', 'X11*APC0/(K17+X11)')
net.add_assignment_rule('X13', 'K16*TCF0/(K16+X11)')
net.add_assignment_rule('X14', 'X11*TCF0/(K16+X11)')
net.add_assignment_rule('X8', 'X3*X11/K8')
net.add_assignment_rule('X6', 'K17*X12*APC0/(K7*(K17+X11))')
# These are just for my own monitoring purposes
net.add_parameter('BCatenin', name = r'\beta-catenin')
net.add_assignment_rule('BCatenin', 'X8+X9+X10+X11+X14+X15')
net.add_parameter('Axin', name = r'Axin')
net.add_assignment_rule('Axin', 'X3+X4+X6+X8+X9+X12')
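For orientation, each rate rule above is an ordinary differential equation; the simplest is Dsh activation, 'k1*W*(Dsh0-X2)-k2*X2'. A minimal scipy sketch of just that single equation, reusing the parameter values defined above and assuming, purely for illustration, that the stimulus W is switched on at 1 instead of the default 0:

import numpy as np
from scipy.integrate import odeint

k1, k2, Dsh0, W = 0.182, 1.82e-2, 100.0, 1.0   # W = 1.0 is an assumed stimulus

def dX2_dt(X2, t):
    # Same right-hand side as net.add_rate_rule('X2', 'k1*W*(Dsh0-X2)-k2*X2')
    return k1 * W * (Dsh0 - X2) - k2 * X2

t = np.linspace(0.0, 60.0, 121)        # arbitrary time units for this sketch
X2 = odeint(dX2_dt, 0.0, t)[:, 0]      # X2 starts at 0, as in the network
print("X2 at t=60: %.1f (steady state %.1f)" % (X2[-1], k1 * W * Dsh0 / (k1 * W + k2)))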
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 22
|
07f3349c9a417036cdc8776d53fcfa52d2e1af80
| 5,082
|
py
|
Python
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/Interpolator.py
|
maroozm/AliPhysics
|
22ec256928cfdf8f800e05bfc1a6e124d90b6eaf
|
[
"BSD-3-Clause"
] | 114
|
2017-03-03T09:12:23.000Z
|
2022-03-03T20:29:42.000Z
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/Interpolator.py
|
maroozm/AliPhysics
|
22ec256928cfdf8f800e05bfc1a6e124d90b6eaf
|
[
"BSD-3-Clause"
] | 19,637
|
2017-01-16T12:34:41.000Z
|
2022-03-31T22:02:40.000Z
|
PWGJE/EMCALJetTasks/Tracks/analysis/util/Interpolator.py
|
maroozm/AliPhysics
|
22ec256928cfdf8f800e05bfc1a6e124d90b6eaf
|
[
"BSD-3-Clause"
] | 1,021
|
2016-07-14T22:41:16.000Z
|
2022-03-31T05:15:51.000Z
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Interpolation module
@author: Jacek Otwinowski
@organization: ALICE Collaboration
Translated into PYTHON by Markus Fasel <[email protected]>, Lawrence Berkeley National Laboratory
"""
| 37.925373
| 123
| 0.538764
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
"""
Interpolation module
@author: Jacek Otwinowski
@organization: ALICE Collaboration
Translated into PYTHON by Markus Fasel <[email protected]>, Lawrence Berkeley National Laboratory
"""
import math
class Interpolator(object):
def __init__(self):
"""
Constructor
"""
pass
def Interpolate(self, x, x1, y1, x2, y2, integrate = False, r = 0, method="lin"):
"""
Interpolation handler:
forwards methods to the different interpolation functions
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
@param integrate: if true we evaluate the integral
@param r:
"""
if method == "lin":
return self.__InterpolateLinear(x, x1, y1, x2, y2, integrate, r)
elif method == "pow":
return self.__InterpolatePowerLaw(x, x1, y1, x2, y2, integrate, r)
elif method == "exp":
return self.__InterpolateExponential(x, x1, y1, x2, y2)
elif method == "hag":
return self.__InterpolateSimpleHagedorn(x, x1, y1, x2, y2)
def __InterpolateLinear(self, x, x1, y1, x2, y2, integrate = False, r = 0):
"""
Linear interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
@param integrate: if true we evaluate the integral
@param r:
"""
if x1-x2 == 0:
return 0
if integrate:
return 2*r*(y1+((x-x1)*(y1-y2))/(x1-x2))
else:
return (y1 + (((y2-y1)/(x2-x1))*(x-x1)))
def __InterpolatePowerLaw(self, x, x1, y1, x2, y2, integrate = False, r = 0):
"""
Power law interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
@param integrate: if true we evaluate the integral
@param r:
"""
#assume functional form y=a*x^n
if not self.__AssurePositive(x, x1, x2, y1, y2):
return 0.
n = (math.log(y1)-math.log(y2))/(math.log(x1)-math.log(x2));
a = y1*pow(x1,-n)
print "y: %f" %(a*pow(x,n))
print "n: %f" %(n)
print "a: %f" %(a)
if integrate:
return ((a/(n+1.))*(math.pow(x+r,n+1.)-math.pow(x-r,n+1.))/(2.*r))
else:
return (a*math.pow(x,n))
def __InterpolateExponential(self, x, x1, y1, x2, y2):
"""
Exponential interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
"""
if not self.__AssurePositive(x, x1, x2, y1, y2):
return 0.
return math.exp(self.__InterpolateLinear(x,x1,math.log(y1),x2,math.log(y2)))
def __InterpolateSimpleHagedorn(self, x, x1, y1, x2, y2):
"""
Hagedorn interpolation method
@param x: x at which to evaluate the interpolation
@param x1: lower x step
@param y1: function value at x1
@param x2: upper x step
@param y2: function value at x2
"""
if not self.__AssurePositive(x, x1, x2, y1, y2):
return 0.
return math.exp(self.__InterpolateLinear(math.log(1.+x),math.log(1.+x1),math.log(y1),math.log(1.+x2),math.log(y2)))
def __AssurePositive(self, x, x1, x2, y1, y2):
"""
Check if all values are positive
"""
if x <= 0. or x1 <= 0. or x2 <= 0. or y1 <= 0. or y2 <= 0.:
return False
return True
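As a standalone illustration of the power-law branch above (which assumes y = a*x^n between the two support points), here is the same fit in a few lines of Python 3, independent of the class:

import math

def interpolate_power_law(x, x1, y1, x2, y2):
    # Fit y = a * x^n through (x1, y1) and (x2, y2), then evaluate at x.
    n = (math.log(y1) - math.log(y2)) / (math.log(x1) - math.log(x2))
    a = y1 * x1 ** (-n)
    return a * x ** n

# Exact on a pure power law y = 2 * x^3: (2, 16) and (4, 128) give 54 at x = 3.
assert abs(interpolate_power_law(3.0, 2.0, 16.0, 4.0, 128.0) - 54.0) < 1e-9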
| 0
| 0
| 0
| 3,791
| 0
| 0
| 0
| -10
| 45
|
0b9bc42aab3a61dc776c20fde1b7be088ba0e2b2
| 2,276
|
py
|
Python
|
creator/schema.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 3
|
2019-05-04T02:07:28.000Z
|
2020-10-16T17:47:44.000Z
|
creator/schema.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | 604
|
2019-02-21T18:14:51.000Z
|
2022-02-10T08:13:54.000Z
|
creator/schema.py
|
kids-first/kf-api-study-creator
|
93a79b108b6474f9b4135ace06c89ddcf63dd257
|
[
"Apache-2.0"
] | null | null | null |
"""
This is the root schema definition that combines individual applications'
schemas into one.
Each application that has queries or mutations exports them as either Query
or Mutation from the application's schema module.
No resolvers or type definitions should be included here.
"""
import graphene
import creator.analyses.schema
import creator.buckets.schema
import creator.files.schema
import creator.studies.schema
import creator.projects.schema
import creator.users.schema
import creator.referral_tokens.schema
import creator.status.schema
import creator.jobs.schema
import creator.releases.schema
import creator.data_reviews.schema
import creator.ingest_runs.schema
import creator.organizations.schema
schema = graphene.Schema(query=Query, mutation=Mutation)
| 29.947368
| 75
| 0.773726
|
"""
This is the root schema definition that combines individual applications'
schemas into one.
Each application that has queries or mutations exports them as either Query
or Mutation from the application's schema module.
No resolvers or type definitions should be included here.
"""
import graphene
from django.conf import settings
import creator.analyses.schema
import creator.buckets.schema
import creator.files.schema
import creator.studies.schema
import creator.projects.schema
import creator.users.schema
import creator.referral_tokens.schema
import creator.status.schema
import creator.jobs.schema
import creator.releases.schema
import creator.data_reviews.schema
import creator.ingest_runs.schema
import creator.organizations.schema
import creator.data_templates.schema
class Query(
creator.analyses.schema.Query,
creator.files.schema.Query,
creator.studies.schema.Query,
creator.users.schema.Query,
creator.events.schema.Query,
creator.projects.schema.Query,
creator.buckets.schema.Query,
creator.referral_tokens.schema.Query,
creator.status.schema.Query,
creator.jobs.schema.Query,
creator.releases.schema.Query,
creator.data_reviews.schema.Query,
creator.ingest_runs.schema.Query,
creator.organizations.schema.Query,
creator.data_templates.schema.Query,
graphene.ObjectType,
):
""" Root query schema combining all apps' schemas """
node = graphene.relay.Node.Field()
if settings.DEBUG:
from graphene_django.debug import DjangoDebug
debug = graphene.Field(DjangoDebug, name="_debug")
class Mutation(
creator.analyses.schema.Mutation,
creator.buckets.schema.Mutation,
creator.projects.schema.Mutation,
creator.studies.schema.Mutation,
creator.files.schema.Mutation,
creator.users.schema.Mutation,
creator.referral_tokens.schema.Mutation,
creator.status.schema.Mutation,
creator.releases.schema.Mutation,
creator.data_reviews.schema.Mutation,
creator.ingest_runs.schema.Mutation,
creator.organizations.schema.Mutation,
creator.data_templates.schema.Mutation,
graphene.ObjectType,
):
""" Root mutation schema combining all apps' schemas """
pass
schema = graphene.Schema(query=Query, mutation=Mutation)
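A minimal sketch of the same mixin pattern with two made-up per-app query types (BookQuery and AuthorQuery are illustrative only, not part of this codebase):

import graphene

class BookQuery(graphene.ObjectType):
    book_count = graphene.Int()

    def resolve_book_count(root, info):
        return 3

class AuthorQuery(graphene.ObjectType):
    author_count = graphene.Int()

    def resolve_author_count(root, info):
        return 2

class Query(BookQuery, AuthorQuery, graphene.ObjectType):
    """Root query combining both apps' queries, mirroring the class above."""

schema = graphene.Schema(query=Query)
result = schema.execute("{ bookCount authorCount }")
print(result.data)   # {'bookCount': 3, 'authorCount': 2}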
| 0
| 0
| 0
| 1,390
| 0
| 0
| 0
| 26
| 90
|
eebaa5aaa5d495d9ab50fc4d5c37d590c86b3096
| 9,624
|
py
|
Python
|
simulation/simulation.py
|
bopopescu/sparrow-mod
|
56c601ee3dd852a9f053bffffc2a52ff3da8d2bd
|
[
"Apache-2.0"
] | 200
|
2015-01-05T07:37:20.000Z
|
2022-03-30T03:28:21.000Z
|
simulation/simulation.py
|
bopopescu/sparrow-mod
|
56c601ee3dd852a9f053bffffc2a52ff3da8d2bd
|
[
"Apache-2.0"
] | 1
|
2016-05-13T10:46:32.000Z
|
2016-05-13T10:46:32.000Z
|
simulation/simulation.py
|
bopopescu/sparrow-mod
|
56c601ee3dd852a9f053bffffc2a52ff3da8d2bd
|
[
"Apache-2.0"
] | 73
|
2015-01-06T02:00:17.000Z
|
2021-11-22T10:04:03.000Z
|
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
MEDIAN_TASK_DURATION = 100
NETWORK_DELAY = 0
TASKS_PER_JOB = 500
SLOTS_PER_WORKER = 4
TOTAL_WORKERS = 10000
PROBE_RATIO = 2
if __name__ == "__main__":
main()
| 40.779661
| 115
| 0.641209
|
#
# Copyright 2013 The Regents of The University California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import math
import numpy
import random
from util import Job, TaskDistributions
import Queue
MEDIAN_TASK_DURATION = 100
NETWORK_DELAY = 0
TASKS_PER_JOB = 500
SLOTS_PER_WORKER = 4
TOTAL_WORKERS = 10000
PROBE_RATIO = 2
def get_percentile(N, percent, key=lambda x:x):
if not N:
return 0
k = (len(N) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return key(N[int(k)])
d0 = key(N[int(f)]) * (c-k)
d1 = key(N[int(c)]) * (k-f)
return d0 + d1
def plot_cdf(values, filename):
values.sort()
f = open(filename, "w")
for percent in range(100):
fraction = percent / 100.
f.write("%s\t%s\n" % (fraction, get_percentile(values, fraction)))
f.close()
class Event(object):
""" Abstract class representing events. """
def __init__(self):
raise NotImplementedError("Event is an abstract class and cannot be "
"instantiated directly")
def run(self, current_time):
""" Returns any events that should be added to the queue. """
raise NotImplementedError("The run() method must be implemented by "
"each class subclassing Event")
class JobArrival(Event):
""" Event to signify a job arriving at a scheduler. """
def __init__(self, simulation, interarrival_delay, task_distribution):
self.simulation = simulation
self.interarrival_delay = interarrival_delay
self.task_distribution= task_distribution
def run(self, current_time):
job = Job(TASKS_PER_JOB, current_time, self.task_distribution, MEDIAN_TASK_DURATION)
logging.getLogger("sim").debug("Job %s arrived at %s" % (job.id, current_time))
# Schedule job.
new_events = self.simulation.send_probes(job, current_time)
# Add new Job Arrival event, for the next job to arrive after this one.
arrival_delay = random.expovariate(1.0 / self.interarrival_delay)
new_events.append((current_time + arrival_delay, self))
logging.getLogger("sim").debug("Retuning %s events" % len(new_events))
return new_events
class ProbeEvent(Event):
""" Event to signify a probe arriving at a worker. """
def __init__(self, worker, job_id):
self.worker = worker
self.job_id = job_id
def run(self, current_time):
logging.getLogger("sim").debug("Probe for job %s arrived at worker %s at %s" %
(self.job_id, self.worker.id, current_time))
return self.worker.add_probe(self.job_id, current_time)
class NoopGetTaskResponseEvent(Event):
""" Signifies when a getTask() RPC response arrives at a worker, with a noop response. """
def __init__(self, worker):
self.worker = worker
def run(self, current_time):
logging.getLogger("sim").debug("getTask() request for worker %s returned no task at %s" %
(self.worker.id, current_time))
return self.worker.free_slot(current_time)
class TaskEndEvent():
def __init__(self, worker):
self.worker = worker
def run(self, current_time):
return self.worker.free_slot(current_time)
class Worker(object):
def __init__(self, simulation, num_slots, id):
self.simulation = simulation
self.free_slots = num_slots
# Just a list of job ids!
self.queued_probes = Queue.Queue()
self.id = id
self.probes_replied_to_immediately = 0
def add_probe(self, job_id, current_time):
self.queued_probes.put(job_id)
new_events = self.maybe_get_task(current_time)
self.probes_replied_to_immediately += len(new_events)
logging.getLogger("sim").debug("Worker %s: %s" %
(self.id, self.probes_replied_to_immediately))
return new_events
def free_slot(self, current_time):
""" Frees a slot on the worker and attempts to launch another task in that slot. """
self.free_slots += 1
get_task_events = self.maybe_get_task(current_time)
return get_task_events
def maybe_get_task(self, current_time):
if not self.queued_probes.empty() and self.free_slots > 0:
# Account for "running" task
self.free_slots -= 1
job_id = self.queued_probes.get()
task_duration = self.simulation.get_task(job_id)
probe_response_time = current_time + 2*NETWORK_DELAY
if task_duration > 0:
task_end_time = probe_response_time + task_duration
logging.getLogger("sim").debug(("Task for job %s running on worker %s (get task at: %s, duration: "
"%s, end: %s)") %
(job_id, self.id, current_time, task_duration, task_end_time))
self.simulation.add_task_completion_time(job_id, task_end_time)
new_event = TaskEndEvent(self)
return [(task_end_time, new_event)]
else:
# There was no task left for the job, so send another probe
# after 1RTT.
logging.getLogger("sim").debug("Noop returning on worker %s at %s" %
(self.id, probe_response_time))
return [(probe_response_time, NoopGetTaskResponseEvent(self))]
return []
class Simulation(object):
def __init__(self, num_jobs, file_prefix, load, task_distribution):
avg_used_slots = load * SLOTS_PER_WORKER * TOTAL_WORKERS
self.interarrival_delay = (1.0 * MEDIAN_TASK_DURATION * TASKS_PER_JOB / avg_used_slots)
print ("Interarrival delay: %s (avg slots in use: %s)" %
(self.interarrival_delay, avg_used_slots))
self.jobs = {}
self.remaining_jobs = num_jobs
self.event_queue = Queue.PriorityQueue()
self.workers = []
self.file_prefix = file_prefix
while len(self.workers) < TOTAL_WORKERS:
self.workers.append(Worker(self, SLOTS_PER_WORKER, len(self.workers)))
self.worker_indices = range(TOTAL_WORKERS)
self.task_distribution = task_distribution
def send_probes(self, job, current_time):
""" Send probes to acquire load information, in order to schedule a job. """
self.jobs[job.id] = job
random.shuffle(self.worker_indices)
probe_events = []
num_probes = PROBE_RATIO * len(job.unscheduled_tasks)
for worker_index in self.worker_indices[:num_probes]:
probe_events.append((current_time + NETWORK_DELAY,
ProbeEvent(self.workers[worker_index], job.id)))
return probe_events
def get_task(self, job_id):
job = self.jobs[job_id]
if len(job.unscheduled_tasks) > 0:
task_duration = job.unscheduled_tasks[0]
job.unscheduled_tasks = job.unscheduled_tasks[1:]
return task_duration
return -1
def add_task_completion_time(self, job_id, completion_time):
job_complete = self.jobs[job_id].task_completed(completion_time)
if job_complete:
self.remaining_jobs -= 1
logging.getLogger("sim").debug("Job %s completed in %s" %
(job_id, self.jobs[job_id].end_time - self.jobs[job_id].start_time))
def run(self):
self.event_queue.put((0,
JobArrival(self, self.interarrival_delay, self.task_distribution)))
last_time = 0
while self.remaining_jobs > 0:
current_time, event = self.event_queue.get()
assert current_time >= last_time
last_time = current_time
new_events = event.run(current_time)
for new_event in new_events:
self.event_queue.put(new_event)
print ("Simulation ended after %s milliseconds (%s jobs started)" %
(last_time, len(self.jobs)))
complete_jobs = [j for j in self.jobs.values() if j.completed_tasks_count == j.num_tasks]
print "%s complete jobs" % len(complete_jobs)
response_times = [job.end_time - job.start_time for job in complete_jobs
if job.start_time > 500]
print "Included %s jobs" % len(response_times)
plot_cdf(response_times, "%s_response_times.data" % self.file_prefix)
print "Average response time: ", numpy.mean(response_times)
longest_tasks = [job.longest_task for job in complete_jobs]
plot_cdf(longest_tasks, "%s_ideal_response_time.data" % self.file_prefix)
tasks_replied_to_immediately = sum([w.probes_replied_to_immediately for w in self.workers])
print "Tasks replied to immeiately: ", tasks_replied_to_immediately
return response_times
def main():
random.seed(1)
logging.basicConfig(level=logging.INFO)
sim = Simulation(1000, "sparrow", 0.95, TaskDistributions.CONSTANT)
sim.run()
if __name__ == "__main__":
main()
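The PROBE_RATIO above implements the "power of d choices" idea behind Sparrow: each job probes more workers than it has tasks and the less loaded ones win. A tiny standalone sketch, independent of the event-driven simulator, of why probing two queues beats picking one at random:

import random

def place(num_tasks, num_workers, d):
    # d = 1: pick a worker at random; d >= 2: probe d workers, queue at the shortest.
    queues = [0] * num_workers
    for _ in range(num_tasks):
        probed = random.sample(range(num_workers), d)
        queues[min(probed, key=lambda w: queues[w])] += 1
    return max(queues)

random.seed(1)
print("longest queue, d=1:", place(10000, 1000, 1))
print("longest queue, d=2:", place(10000, 1000, 2))   # typically far shorter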
| 0
| 0
| 0
| 7,905
| 0
| 608
| 0
| -25
| 363
|
eb0cc8b93b8223d65f24aaccba78c888502d04df
| 892
|
py
|
Python
|
2015/MAC0327/Desafios 2/p11.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | 1
|
2018-08-02T14:09:26.000Z
|
2018-08-02T14:09:26.000Z
|
2015/MAC0327/Desafios 2/p11.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | null | null | null |
2015/MAC0327/Desafios 2/p11.py
|
andredalton/bcc
|
188190e436615e2344d87b722856fa02e6eec9cc
|
[
"Apache-2.0"
] | 1
|
2020-07-13T04:27:02.000Z
|
2020-07-13T04:27:02.000Z
|
# coding=utf-8
__author__ = 'André Meneghelli'
"""
/*******************************************************************************
 * Student: André Meneghelli Vale, USP ID: 4898948
 * Program: Bachelor's in Computer Science
 * Lecture 13 - Stone Pile
* MAC0327 -- IME/USP, -- Prof. Cristina Gomes Fernandes
******************************************************************************/
"""
pedras = []
if __name__ == '__main__':
main()
| 22.3
| 80
| 0.48991
|
# coding=utf-8
__author__ = 'André Meneghelli'
"""
/*******************************************************************************
* Aluno: André Meneghelli Vale, Núm. USP: 4898948
* Curso: Bacharelado em Ciências da Computação
* Aula 13 - Stone Pile
* MAC0327 -- IME/USP, -- Prof. Cristina Gomes Fernandes
******************************************************************************/
"""
pedras = []
def procura(s1, s2, index):
global pedras
if index == -1:
return abs(s1-s2)
sa = procura(s1 + pedras[index], s2, index-1)
sb = procura(s1, s2 + pedras[index], index-1)
if sa < sb:
return sa
return sb
def main():
global pedras
s1 = 0
raw_input()
pedras = map(int, raw_input().split())
pedras.sort()
s2 = pedras[len(pedras)-1]
print procura(s1, s2, len(pedras)-2)
if __name__ == '__main__':
main()
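The recursion above tries every split of the stones across the two piles, which is exponential in the number of stones. A Python 3 sketch of the same minimum-difference answer via reachable subset sums (the usual dynamic-programming view of the problem), independent of the Python 2 I/O above:

def min_pile_difference(stones):
    # Collect every achievable subset sum, then pick the one closest to half the total.
    total = sum(stones)
    reachable = {0}
    for s in stones:
        reachable |= {r + s for r in reachable}
    return min(abs(total - 2 * r) for r in reachable)

assert min_pile_difference([5, 8, 13, 27, 14]) == 3    # 27+5 versus 14+13+8
assert min_pile_difference([12, 30, 30]) == 12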
| 12
| 0
| 0
| 0
| 0
| 389
| 0
| 0
| 46
|
cd67696b0ec1ee40fb689af2c3c02ad3ecc6be4e
| 5,014
|
py
|
Python
|
model.py
|
abhitrip/Behavioral-Cloning
|
9930dc7fc2e6623954f84859b7d011905cd48d30
|
[
"MIT"
] | null | null | null |
model.py
|
abhitrip/Behavioral-Cloning
|
9930dc7fc2e6623954f84859b7d011905cd48d30
|
[
"MIT"
] | null | null | null |
model.py
|
abhitrip/Behavioral-Cloning
|
9930dc7fc2e6623954f84859b7d011905cd48d30
|
[
"MIT"
] | null | null | null |
import matplotlib.image as mpimg
"""
To show the preprocessing for final model
"""
batch_size = 128
# define model
"""
model = nvidia_model()
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
# train model
model.fit_generator(read_data_gen(batch_size), samples_per_epoch=8000*2, nb_epoch=5)
model.save('model.h5')
"""
if __name__=="__main__":
train_model()
| 25.451777
| 89
| 0.632429
|
import csv
import matplotlib.image as mpimg
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers.core import Flatten,Lambda,Dense
from keras.layers.convolutional import Cropping2D,Conv2D
from keras import backend as K
from keras.layers.core import Activation
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import cv2
def resize(image):
import tensorflow as tf
resized = tf.image.resize_images(image,(32,32))
return resized
def resize_nvidia(image):
import tensorflow as tf
resized = tf.image.resize_images(image,(66,200))
return resized
"""
To show the preprocessing for final model
"""
def process_image(file_name,nvidia_or_final):
if nvidia_or_final=='nvidia':
crop_top, crop_bot = 70, 25
new_shape = (66,200)
elif nvidia_or_final=='final':
crop_top, crop_bot = 80, 48
new_shape = (32,32)
img = mpimg.imread(file_name)
h = img.shape[0]
cropped_img = img[crop_top:h-crop_bot,:,:]
plt.imshow(cropped_img)
plt.savefig("cropped_img")
resized_image = cv2.resize(cropped_img,new_shape)
plt.imshow(resized_image)
plt.savefig("resized_img")
plt.imshow(np.fliplr(resized_image))
plt.savefig("flipped_img")
def read_data_gen(batch_size):
"""
Generator function to load driving logs and input images.
"""
while 1:
with open('data/driving_log.csv') as driving_log_file:
reader = csv.DictReader(driving_log_file)
count = 0
inputs, targets = [], []
try:
for row in reader:
center_img = mpimg.imread('data/'+ row['center'].strip())
flipped_center_img = np.fliplr(center_img)
center_steering = float(row['steering'])
if count < batch_size//2:
inputs += [center_img, flipped_center_img]
targets += [center_steering, -center_steering]
count += 1
else:
yield np.array(inputs, dtype=center_img.dtype), np.array(targets)
count = 0
inputs, targets= [], []
except StopIteration:
pass
batch_size = 128
# define model
def final_model():
# define model
model = Sequential()
# crop top and bottom parts of the image
model.add(Cropping2D(cropping=((80, 48), (0, 0)), input_shape=(160, 320, 3)))
# resize image to 32x32
model.add(Lambda(resize,output_shape=(32, 32, 3)))
# normalize layer values
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
# Model colour information
model.add(Conv2D(3, 1, 1, border_mode='valid', subsample=(1, 1), activation='elu'))
# Conv filter 1
model.add(Conv2D(3, 3, 3, border_mode='valid', activation='elu'))
# Conv filter 2
model.add(Conv2D(6, 5, 5, border_mode='valid', subsample=(2, 2), activation='elu'))
# conv filter 3
model.add(Conv2D(16, 5, 5, border_mode='valid', subsample=(2, 2), activation='elu'))
# flatten
model.add(Flatten())
# Dense layer 1
model.add(Dense(100, activation='elu'))
# Dense layer 2
model.add(Dense(25, activation='elu'))
# Final Dense for prediction of steering
model.add(Dense(1))
return model
def nvidia_model():
model = Sequential()
# Preprocessing
model.add(Lambda(lambda x: x/127.5 -1.0,input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
#model.add(Lambda(resize_nvidia,output_shape=(32, 32, 3)))
# 1st Conv Layer
model.add(Conv2D(24,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 2nd Conv Layer
model.add(Conv2D(36,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 3rd Conv Layer
model.add(Conv2D(48,5,5,subsample=(2,2)))
model.add(Activation('elu'))
# 4th Conv Layer
model.add(Conv2D(64,3,3))
model.add(Activation('elu'))
# 5th Conv Layer
model.add(Conv2D(64,3,3))
model.add(Activation('elu'))
# Flatten
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('elu'))
model.add(Dense(50))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(1))
return model
"""
model = nvidia_model()
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
# train model
model.fit_generator(read_data_gen(batch_size), samples_per_epoch=8000*2, nb_epoch=5)
model.save('model.h5')
"""
def gen_preprocess_images():
image = 'data/IMG/center_2016_12_01_13_31_13_177.jpg'
process_image(image,'final')
def train_model():
model = final_model()
model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
model.summary()
model.fit_generator(read_data_gen(batch_size), samples_per_epoch=8000*2, nb_epoch=5)
model.save('model.h5')
if __name__=="__main__":
train_model()
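The generator above doubles its training data by mirroring each centre-camera frame and negating the steering angle. A minimal numpy illustration of that augmentation step, with a small dummy array standing in for a real 160x320x3 frame:

import numpy as np

frame = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)   # stand-in for a camera image
steering = 0.25

flipped_frame = np.fliplr(frame)    # mirror left/right, as in read_data_gen
flipped_steering = -steering        # a left turn becomes a right turn

assert np.array_equal(flipped_frame[:, ::-1, :], frame)  # flipping twice restores it
print(steering, flipped_steering)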
| 0
| 0
| 0
| 0
| 1,002
| 3,085
| 0
| 94
| 425
|
43ba3750ab55b89ed9e0505f5404d4b28171dd33
| 1,647
|
py
|
Python
|
src/downward/experiments/issue739/v5-translate.py
|
ScarfZapdos/conan-bge-questgen
|
4d184c5bf0ae4b768b8043cec586395df9ce1451
|
[
"MIT"
] | 1
|
2021-09-09T13:03:02.000Z
|
2021-09-09T13:03:02.000Z
|
src/downward/experiments/issue739/v5-translate.py
|
ScarfZapdos/conan-bge-questgen
|
4d184c5bf0ae4b768b8043cec586395df9ce1451
|
[
"MIT"
] | null | null | null |
src/downward/experiments/issue739/v5-translate.py
|
ScarfZapdos/conan-bge-questgen
|
4d184c5bf0ae4b768b8043cec586395df9ce1451
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v5"]
CONFIGS = [
IssueConfig('translate', [], driver_options=['--translate']),
IssueConfig('translate-with-options', ['--translate-options', '--keep-unreachable-facts', '--keep-unimportant-variables', '--full-encoding'], driver_options=['--translate']),
IssueConfig('translate-time-limit', [], driver_options=['--translate-time-limit', '5s', '--translate']),
IssueConfig('translate-memory-limit', [], driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ['gripper:prob10.pddl','mystery:prob07.pddl']
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
exp.add_parser(exp.LAB_DRIVER_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['translator_*', 'error'])
exp.run_steps()
| 35.042553
| 178
| 0.756527
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
from lab.environments import LocalEnvironment, BaselSlurmEnvironment
import common_setup
from common_setup import IssueConfig, IssueExperiment
from relativescatter import RelativeScatterPlotReport
DIR = os.path.dirname(os.path.abspath(__file__))
BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
REVISIONS = ["issue739-v5"]
CONFIGS = [
IssueConfig('translate', [], driver_options=['--translate']),
IssueConfig('translate-with-options', ['--translate-options', '--keep-unreachable-facts', '--keep-unimportant-variables', '--full-encoding'], driver_options=['--translate']),
IssueConfig('translate-time-limit', [], driver_options=['--translate-time-limit', '5s', '--translate']),
IssueConfig('translate-memory-limit', [], driver_options=['--translate-memory-limit', '100M', '--translate']),
]
SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
ENVIRONMENT = BaselSlurmEnvironment(email="[email protected]", export=["PATH", "DOWNWARD_BENCHMARKS"])
if common_setup.is_test_run():
SUITE = ['gripper:prob10.pddl','mystery:prob07.pddl']
ENVIRONMENT = LocalEnvironment(processes=4)
exp = IssueExperiment(
revisions=REVISIONS,
configs=CONFIGS,
environment=ENVIRONMENT,
)
exp.add_suite(BENCHMARKS_DIR, SUITE)
exp.add_parser(exp.LAB_STATIC_PROPERTIES_PARSER)
exp.add_parser(exp.LAB_DRIVER_PARSER)
exp.add_parser(exp.EXITCODE_PARSER)
exp.add_parser(exp.TRANSLATOR_PARSER)
exp.add_step('build', exp.build)
exp.add_step('start', exp.start_runs)
exp.add_fetcher(name='fetch')
exp.add_absolute_report_step(attributes=['translator_*', 'error'])
exp.run_steps()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 22
|
6ba0b5003d4c97df676dbfc10dff603b15cd48d9
| 506
|
py
|
Python
|
mplscience/__init__.py
|
adamgayoso/mpscience
|
0401ded920a4d09314e9a747cf4da07d17a60a05
|
[
"MIT"
] | 4
|
2021-07-15T16:55:24.000Z
|
2022-03-04T23:10:02.000Z
|
mplscience/__init__.py
|
adamgayoso/mpscience
|
0401ded920a4d09314e9a747cf4da07d17a60a05
|
[
"MIT"
] | null | null | null |
mplscience/__init__.py
|
adamgayoso/mpscience
|
0401ded920a4d09314e9a747cf4da07d17a60a05
|
[
"MIT"
] | null | null | null |
"""Matplotlib science style"""
from .core import available_styles, set_style, style_context
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
# https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
package_name = "mplscience"
__version__ = importlib_metadata.version(package_name)
__all__ = ["available_styles", "set_style", "style_context"]
| 31.625
| 75
| 0.804348
|
"""Matplotlib science style"""
from .core import available_styles, set_style, style_context
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
# https://github.com/python-poetry/poetry/issues/144#issuecomment-623927302
try:
import importlib.metadata as importlib_metadata
except ModuleNotFoundError:
import importlib_metadata
package_name = "mplscience"
__version__ = importlib_metadata.version(package_name)
__all__ = ["available_styles", "set_style", "style_context"]
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
3a06da5ff6c0053e4dc72e9a222d828921a7534c
| 4,153
|
py
|
Python
|
template/templates.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 13
|
2020-01-14T16:23:48.000Z
|
2022-02-16T18:02:08.000Z
|
template/templates.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 24
|
2021-04-21T05:30:42.000Z
|
2022-03-31T20:07:29.000Z
|
template/templates.py
|
dkratzert/FinalCif
|
07ca23dbb4e7439b108a906521a118cdb876d97e
|
[
"Beerware"
] | 1
|
2021-08-09T16:48:33.000Z
|
2021-08-09T16:48:33.000Z
|
from contextlib import suppress
with suppress(ImportError):
| 44.180851
| 120
| 0.666265
|
from contextlib import suppress
from pathlib import Path
from typing import List
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QFileDialog, QListWidgetItem
with suppress(ImportError):
from appwindow import AppWindow
from tools.settings import FinalCifSettings
class ReportTemplates:
"""
Displays the list of report templates in the options menu.
"""
def __init__(self, app: 'AppWindow', settings: FinalCifSettings):
self.app = app
self.settings = settings
self.lw = self.app.ui.TemplatesListWidget
self.load_templates_list()
self.app.ui.AddNewTemplPushButton.clicked.connect(self.add_new_template)
self.app.ui.RemoveTemplPushButton.clicked.connect(self.remove_current_template)
self.app.ui.TemplatesListWidget.currentItemChanged.connect(self.template_changed)
self.app.ui.TemplatesListWidget.itemChanged.connect(self.template_changed)
self.app.ui.TemplatesListWidget.setCurrentItem(
self.app.ui.TemplatesListWidget.item(self.app.options.current_template))
def add_new_template(self, templ_path: str = '') -> None:
if not templ_path:
templ_path, _ = QFileDialog.getOpenFileName(filter="DOCX file (*.docx)", initialFilter="DOCX file (*.docx)",
caption='Open a Report Template File')
itemslist = self.get_templates_list_from_widget()
self.app.status_bar.show_message('')
if templ_path in itemslist:
            self.app.status_bar.show_message('This template is already in the list.', 10)
            print('This template is already in the list.')
return
if not Path(templ_path).exists() or not Path(templ_path).is_file() \
or not Path(templ_path).name.endswith('.docx'):
self.app.status_bar.show_message('This template does not exist or is unreadable.', 10)
print('This template does not exist or is unreadable.', Path(templ_path).resolve())
return
item = QListWidgetItem(templ_path)
item.setCheckState(Qt.Unchecked)
self.app.ui.TemplatesListWidget.addItem(item)
self.settings.save_template_list('report_templates_list', self.get_templates_list_from_widget())
def load_templates_list(self):
templates = self.settings.load_template('report_templates_list')
if not templates:
return
for text in templates:
if text.startswith('Use'):
continue
with suppress(Exception):
if not Path(text).exists():
item = QListWidgetItem(text)
item.setForeground(QColor(220, 12, 34))
else:
item = QListWidgetItem(str(Path(text).resolve(strict=True)))
self.app.ui.TemplatesListWidget.addItem(item)
item.setCheckState(Qt.Unchecked)
def get_templates_list_from_widget(self) -> List:
itemslist = []
for num in range(self.lw.count()):
itemtext = self.lw.item(num).text()
if not itemtext in itemslist:
itemslist.append(itemtext)
return itemslist
def remove_current_template(self) -> None:
if self.lw.currentRow() == 0:
return
self.lw.takeItem(self.lw.row(self.lw.currentItem()))
self.settings.save_template_list('report_templates_list', self.get_templates_list_from_widget())
def template_changed(self, current_item: QListWidgetItem):
        # Blocking signals in order to avoid infinite recursion:
self.app.ui.TemplatesListWidget.blockSignals(True)
options = self.settings.load_options()
options.update({'current_report_template': self.lw.row(current_item)})
self.uncheck_all_templates()
current_item.setCheckState(Qt.Checked)
self.settings.save_options(options)
self.app.ui.TemplatesListWidget.blockSignals(False)
def uncheck_all_templates(self):
for num in range(self.lw.count()):
self.lw.item(num).setCheckState(Qt.Unchecked)
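The class above tracks the active report template through QListWidgetItem check states. A minimal self-contained PyQt5 sketch of that mechanism, outside FinalCif (the .docx paths are made up):

import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QListWidget, QListWidgetItem

app = QApplication(sys.argv)
lw = QListWidget()
for path in ('default.docx', 'templates/custom.docx'):
    item = QListWidgetItem(path)
    item.setCheckState(Qt.Unchecked)    # same call as in load_templates_list()
    lw.addItem(item)

lw.item(1).setCheckState(Qt.Checked)    # mark the second template as active
checked = [lw.item(i).text() for i in range(lw.count())
           if lw.item(i).checkState() == Qt.Checked]
print(checked)                          # ['templates/custom.docx']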
| 0
| 0
| 0
| 3,822
| 0
| 0
| 0
| 87
| 182
|
1815a1cfdf441bab8f5c07943254b362f00a655f
| 163
|
py
|
Python
|
celery/settings.py
|
alculquicondor/AmigoCloud-IGP-Sync
|
56de7e9137340054159289ef9c6534bb1b5872fc
|
[
"MIT"
] | null | null | null |
celery/settings.py
|
alculquicondor/AmigoCloud-IGP-Sync
|
56de7e9137340054159289ef9c6534bb1b5872fc
|
[
"MIT"
] | null | null | null |
celery/settings.py
|
alculquicondor/AmigoCloud-IGP-Sync
|
56de7e9137340054159289ef9c6534bb1b5872fc
|
[
"MIT"
] | null | null | null |
from os import environ
TOKEN = environ.get('AMIGOCLOUD_TOKEN')
BROKER_URL = environ.get('BROKER_URL')
PROJECT_URL = 'users/475/projects/13608'
DATASET_ID = 79746
| 23.285714
| 40
| 0.779141
|
from os import environ
TOKEN = environ.get('AMIGOCLOUD_TOKEN')
BROKER_URL = environ.get('BROKER_URL')
PROJECT_URL = 'users/475/projects/13608'
DATASET_ID = 79746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
855c72651aff3902ac92bec1942941cff9cf4170
| 342
|
py
|
Python
|
scripts/twist_remapper.py
|
tamago117/kcctsim
|
0cd72c79ade6be48ad59fb9cfb202dcbe8de69cf
|
[
"Apache-2.0"
] | 1
|
2021-11-25T07:53:53.000Z
|
2021-11-25T07:53:53.000Z
|
scripts/twist_remapper.py
|
tamago117/kcctsim
|
0cd72c79ade6be48ad59fb9cfb202dcbe8de69cf
|
[
"Apache-2.0"
] | 1
|
2021-09-09T06:34:32.000Z
|
2021-11-02T11:49:00.000Z
|
scripts/twist_remapper.py
|
tamago117/kcctsim
|
0cd72c79ade6be48ad59fb9cfb202dcbe8de69cf
|
[
"Apache-2.0"
] | 2
|
2021-10-01T13:43:58.000Z
|
2021-11-25T07:53:54.000Z
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
pub = rospy.Publisher("/diff_drive_controller/cmd_vel", Twist, queue_size = 10)
if __name__ == '__main__':
rospy.init_node('twist_remapper', anonymous=True)
rospy.Subscriber("/cmd_vel", Twist, callback)
rospy.spin()
| 28.5
| 79
| 0.733918
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
pub = rospy.Publisher("/diff_drive_controller/cmd_vel", Twist, queue_size = 10)
def callback(data):
pub.publish(data)
if __name__ == '__main__':
rospy.init_node('twist_remapper', anonymous=True)
rospy.Subscriber("/cmd_vel", Twist, callback)
rospy.spin()
| 0
| 0
| 0
| 0
| 0
| 20
| 0
| 0
| 22
|
860f3238dfabe5abdc4b560671b0f41979c23fa1
| 48,472
|
py
|
Python
|
qiskit/visualization/matplotlib.py
|
quantumjim/qiskit-terra
|
5292f487eaa980986a1e5affae8c4fc50c743e71
|
[
"Apache-2.0"
] | 1
|
2019-12-09T08:25:14.000Z
|
2019-12-09T08:25:14.000Z
|
qiskit/visualization/matplotlib.py
|
quantumjim/qiskit-terra
|
5292f487eaa980986a1e5affae8c4fc50c743e71
|
[
"Apache-2.0"
] | 1
|
2020-03-29T19:57:14.000Z
|
2020-03-29T21:49:25.000Z
|
qiskit/visualization/matplotlib.py
|
quantumjim/qiskit-terra
|
5292f487eaa980986a1e5affae8c4fc50c743e71
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,missing-docstring,inconsistent-return-statements
"""mpl circuit visualization backend."""
import logging
try:
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
logger = logging.getLogger(__name__)
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
PORDER_GATE = 5
PORDER_LINE = 3
PORDER_REGLINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
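The empty-looking try/except above guards an optional matplotlib dependency (the import statements themselves were stripped from this filtered copy; the original below keeps them). A generic sketch of the pattern, with a made-up plot_something() helper as the consumer:

try:
    from matplotlib import pyplot as plt
    HAS_MATPLOTLIB = True
except ImportError:
    HAS_MATPLOTLIB = False

def plot_something(values):
    # Fail with an actionable message only when plotting is actually requested.
    if not HAS_MATPLOTLIB:
        raise ImportError('This function needs matplotlib. '
                          'To install, run "pip install matplotlib".')
    plt.plot(values)
    plt.show()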
| 41.894555
| 100
| 0.444875
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,missing-docstring,inconsistent-return-statements
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import get_backend
from matplotlib import patches
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.circuit import ControlledGate
from qiskit.visualization import exceptions
from qiskit.visualization.qcstyle import DefaultStyle, BWStyle
from qiskit import user_config
from qiskit.circuit.tools.pi_check import pi_check
logger = logging.getLogger(__name__)
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
PORDER_GATE = 5
PORDER_LINE = 3
PORDER_REGLINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
class Anchor:
def __init__(self, reg_num, yind, fold):
self.__yind = yind
self.__fold = fold
self.__reg_num = reg_num
self.__gate_placed = []
self.gate_anchor = 0
def plot_coord(self, index, gate_width, x_offset):
h_pos = index % self.__fold + 1
# check folding
if self.__fold > 0:
if h_pos + (gate_width - 1) > self.__fold:
index += self.__fold - (h_pos - 1)
x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
else:
x_pos = index + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind
# could have been updated, so need to store
self.gate_anchor = index
return x_pos + x_offset, y_pos
def is_locatable(self, index, gate_width):
hold = [index + i for i in range(gate_width)]
for p in hold:
if p in self.__gate_placed:
return False
return True
def set_index(self, index, gate_width):
h_pos = index % self.__fold + 1
if h_pos + (gate_width - 1) > self.__fold:
_index = index + self.__fold - (h_pos - 1)
else:
_index = index
for ii in range(gate_width):
if _index + ii not in self.__gate_placed:
self.__gate_placed.append(_index + ii)
self.__gate_placed.sort()
def get_index(self):
if self.__gate_placed:
return self.__gate_placed[-1] + 1
return 0
class MatplotlibDrawer:
def __init__(self, qregs, cregs, ops,
scale=1.0, style=None, plot_barriers=True,
reverse_bits=False, layout=None, fold=25, ax=None):
if not HAS_MATPLOTLIB:
raise ImportError('The class MatplotlibDrawer needs matplotlib. '
'To install, run "pip install matplotlib".')
self._ast = None
self._scale = DEFAULT_SCALE * scale
self._creg = []
self._qreg = []
self._registers(cregs, qregs)
self._ops = ops
self._qreg_dict = collections.OrderedDict()
self._creg_dict = collections.OrderedDict()
self._cond = {
'n_lines': 0,
'xmax': 0,
'ymax': 0,
}
config = user_config.get_config()
if config and (style is None):
config_style = config.get('circuit_mpl_style', 'default')
if config_style == 'default':
self._style = DefaultStyle()
elif config_style == 'bw':
self._style = BWStyle()
elif style is False:
self._style = BWStyle()
else:
self._style = DefaultStyle()
self.plot_barriers = plot_barriers
self.reverse_bits = reverse_bits
self.layout = layout
if style:
if isinstance(style, dict):
self._style.set_style(style)
elif isinstance(style, str):
with open(style, 'r') as infile:
dic = json.load(infile)
self._style.set_style(dic)
if ax is None:
self.return_fig = True
self.figure = plt.figure()
self.figure.patch.set_facecolor(color=self._style.bg)
self.ax = self.figure.add_subplot(111)
else:
self.return_fig = False
self.ax = ax
self.figure = ax.get_figure()
self.fold = fold
if self.fold < 2:
self.fold = -1
self.ax.axis('off')
self.ax.set_aspect('equal')
self.ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
self.x_offset = 0
def _registers(self, creg, qreg):
self._creg = []
for r in creg:
self._creg.append(r)
self._qreg = []
for r in qreg:
self._qreg.append(r)
@property
def ast(self):
return self._ast
def _custom_multiqubit_gate(self, xy, cxy=None, fc=None, wide=True, text=None,
subtext=None):
xpos = min([x[0] for x in xy])
ypos = min([y[1] for y in xy])
ypos_max = max([y[1] for y in xy])
if cxy:
ypos = min([y[1] for y in cxy])
if wide:
if subtext:
boxes_length = round(max([len(text), len(subtext)]) / 7) or 1
else:
boxes_length = math.ceil(len(text) / 7) or 1
wid = WID * 2.5 * boxes_length
else:
wid = WID
if fc:
_fc = fc
else:
if self._style.name != 'bw':
if self._style.gc != DefaultStyle().gc:
_fc = self._style.gc
else:
_fc = self._style.dispcol['multi']
_ec = self._style.dispcol['multi']
else:
_fc = self._style.gc
qubit_span = abs(ypos) - abs(ypos_max) + 1
height = HIG + (qubit_span - 1)
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - .5 * HIG),
width=wid, height=height,
fc=_fc,
ec=self._style.dispcol['multi'],
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
# Annotate inputs
for bit, y in enumerate([x[1] for x in xy]):
self.ax.text(xpos - 0.45 * wid, y, str(bit), ha='left', va='center',
fontsize=self._style.fs, color=self._style.gt,
clip_on=True, zorder=PORDER_TEXT)
if text:
disp_text = text
if subtext:
self.ax.text(xpos, ypos + 0.5 * height, disp_text, ha='center',
va='center', fontsize=self._style.fs,
color=self._style.gt, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos + 0.3 * height, subtext, ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.sc, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos + .5 * (qubit_span - 1), disp_text,
ha='center',
va='center',
fontsize=self._style.fs,
color=self._style.gt,
clip_on=True,
zorder=PORDER_TEXT,
wrap=True)
def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):
xpos, ypos = xy
if wide:
if subtext:
subtext_len = len(subtext)
if '$\\pi$' in subtext:
pi_count = subtext.count('pi')
subtext_len = subtext_len - (4 * pi_count)
boxes_wide = round(max(subtext_len, len(text)) / 10, 1) or 1
wid = WID * 1.5 * boxes_wide
else:
boxes_wide = round(len(text) / 10) or 1
wid = WID * 2.2 * boxes_wide
if wid < WID:
wid = WID
else:
wid = WID
if fc:
_fc = fc
elif self._style.gc != DefaultStyle().gc:
_fc = self._style.gc
elif text and text in self._style.dispcol:
_fc = self._style.dispcol[text]
else:
_fc = self._style.gc
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,
fc=_fc, ec=self._style.edge_color, linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
if text:
font_size = self._style.fs
sub_font_size = self._style.sfs
# check if gate is not unitary
if text in ['reset']:
disp_color = self._style.not_gate_lc
sub_color = self._style.not_gate_lc
font_size = self._style.math_fs
else:
disp_color = self._style.gt
sub_color = self._style.sc
if text in self._style.dispcol:
disp_text = "${}$".format(self._style.disptex[text])
else:
disp_text = text
if subtext:
self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',
va='center', fontsize=font_size,
color=disp_color, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
va='center', fontsize=sub_font_size,
color=sub_color, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos, disp_text, ha='center', va='center',
fontsize=font_size,
color=disp_color,
clip_on=True,
zorder=PORDER_TEXT)
def _subtext(self, xy, text):
xpos, ypos = xy
self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _sidetext(self, xy, text):
xpos, ypos = xy
# 0.15 = the initial gap, each char means it needs to move
# another 0.0375 over
xp = xpos + 0.15 + (0.0375 * len(text))
self.ax.text(xp, ypos + HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _line(self, xy0, xy1, lc=None, ls=None, zorder=PORDER_LINE):
x0, y0 = xy0
x1, y1 = xy1
if lc is None:
linecolor = self._style.lc
else:
linecolor = lc
if ls is None:
linestyle = 'solid'
else:
linestyle = ls
if linestyle == 'doublet':
theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
dx = 0.05 * WID * np.cos(theta)
dy = 0.05 * WID * np.sin(theta)
self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
color=linecolor,
linewidth=2,
linestyle='solid',
zorder=zorder)
self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
color=linecolor,
linewidth=2,
linestyle='solid',
zorder=zorder)
else:
self.ax.plot([x0, x1], [y0, y1],
color=linecolor,
linewidth=2,
linestyle=linestyle,
zorder=zorder)
def _measure(self, qxy, cxy, cid, basis='z'):
qx, qy = qxy
cx, cy = cxy
self._gate(qxy, fc=self._style.dispcol['meas'])
# add measure symbol
arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
height=HIG * 0.7, theta1=0, theta2=180, fill=False,
ec=self._style.not_gate_lc, linewidth=2,
zorder=PORDER_GATE)
self.ax.add_patch(arc)
self.ax.plot([qx, qx + 0.35 * WID],
[qy - 0.15 * HIG, qy + 0.20 * HIG],
color=self._style.not_gate_lc, linewidth=2, zorder=PORDER_GATE)
# arrow
self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,
ls=self._style.cline)
arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
(cx + 0.20 * WID, cy + 0.35 * WID),
(cx, cy)),
fc=self._style.cc,
ec=None)
self.ax.add_artist(arrowhead)
# target
if self._style.bundle:
self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
# measurement basis label
if basis != 'z':
self.ax.text(qx - 0.4 * WID, qy + 0.25 * HIG, basis.upper(),
color=self._style.not_gate_lc,
clip_on=True, zorder=PORDER_TEXT, fontsize=0.5 * self._style.fs,
fontweight='bold')
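    # Mark a classical wire for a conditional gate: a dot filled with the line
    # colour when that bit of the condition is set, the default gate colour otherwise.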
def _conds(self, xy, istrue=False):
xpos, ypos = xy
if istrue:
_fc = self._style.lc
else:
_fc = self._style.gc
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _ctrl_qubit(self, xy, fc=None, ec=None):
if self._style.gc != DefaultStyle().gc:
fc = self._style.gc
ec = self._style.gc
if fc is None:
fc = self._style.lc
if ec is None:
ec = self._style.lc
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=fc, ec=ec,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
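    # Draw the control dots of a multi-controlled gate, closed (box colour) or
    # open (background colour) according to the gate's ctrl_state bits.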
def set_multi_ctrl_bits(self, ctrl_state, num_ctrl_qubits, qbit, color_str):
# convert op.ctrl_state to bit string and reverse
cstate = "{0:b}".format(ctrl_state).rjust(num_ctrl_qubits, '0')[::-1]
for i in range(num_ctrl_qubits):
# Make facecolor of ctrl bit the box color if closed and bkgrnd if open
fc_open_close = (self._style.dispcol[color_str] if cstate[i] == '1'
else self._style.bg)
self._ctrl_qubit(qbit[i], fc=fc_open_close, ec=self._style.dispcol[color_str])
def _tgt_qubit(self, xy, fc=None, ec=None, ac=None,
add_width=None):
if self._style.gc != DefaultStyle().gc:
fc = self._style.gc
ec = self._style.gc
if fc is None:
fc = self._style.dispcol['target']
if ec is None:
ec = self._style.lc
if ac is None:
ac = self._style.lc
if add_width is None:
add_width = 0.35
linewidth = 2
if self._style.dispcol['target'] == '#ffffff':
add_width = self._style.colored_add_width
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=fc, ec=ec, linewidth=linewidth,
zorder=PORDER_GATE)
self.ax.add_patch(box)
# add '+' symbol
self.ax.plot([xpos, xpos], [ypos - add_width * HIG,
ypos + add_width * HIG],
color=ac, linewidth=linewidth, zorder=PORDER_GATE + 1)
self.ax.plot([xpos - add_width * HIG, xpos + add_width * HIG],
[ypos, ypos], color=ac, linewidth=linewidth,
zorder=PORDER_GATE + 1)
def _swap(self, xy, color):
xpos, ypos = xy
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=color, linewidth=2, zorder=PORDER_LINE + 1)
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=color, linewidth=2, zorder=PORDER_LINE + 1)
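    # Draw a barrier: a dashed vertical line per qubit with a translucent box
    # behind it.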
def _barrier(self, config):
xys = config['coord']
group = config['group']
y_reg = []
for qreg in self._qreg_dict.values():
if qreg['group'] in group:
y_reg.append(qreg['y'])
for xy in xys:
xpos, ypos = xy
self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],
linewidth=1, linestyle="dashed",
color=self._style.lc,
zorder=PORDER_TEXT)
box = patches.Rectangle(xy=(xpos - (0.3 * WID), ypos - 0.5),
width=0.6 * WID, height=1,
fc=self._style.bc, ec=None, alpha=0.6,
linewidth=1.5, zorder=PORDER_GRAY)
self.ax.add_patch(box)
def _linefeed_mark(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - .1, xpos - .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
self.ax.plot([xpos + .1, xpos + .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
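    # Top-level entry point: lay out registers and operations, size the
    # figure, optionally save it to file, and return it when requested.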
def draw(self, filename=None, verbose=False):
self._draw_regs()
self._draw_ops(verbose)
_xl = - self._style.margin[0]
_xr = self._cond['xmax'] + self._style.margin[1]
_yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5
_yt = self._style.margin[3] + 0.5
self.ax.set_xlim(_xl, _xr)
self.ax.set_ylim(_yb, _yt)
# update figure size
fig_w = _xr - _xl
fig_h = _yt - _yb
if self._style.figwidth < 0.0:
self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID
self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * fig_h / fig_w)
if filename:
self.figure.savefig(filename, dpi=self._style.dpi,
bbox_inches='tight')
if self.return_fig:
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
plt.close(self.figure)
return self.figure
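    # Build the label/y-coordinate bookkeeping for every quantum and classical
    # wire (classical registers collapse onto one line each when bundled).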
def _draw_regs(self):
def _fix_double_script(label):
words = label.split(' ')
words = [word.replace('_', r'\_') if word.count('_') > 1 else word
for word in words]
words = [word.replace('^', r'\^{\ }') if word.count('^') > 1 else word
for word in words]
return ' '.join(words)
len_longest_label = 0
# quantum register
for ii, reg in enumerate(self._qreg):
if len(self._qreg) > 1:
if self.layout is None:
label = '${{{name}}}_{{{index}}}$'.format(name=reg.register.name,
index=reg.index)
else:
label = '${{{name}}}_{{{index}}} \\mapsto {{{physical}}}$'.format(
name=self.layout[reg.index].register.name,
index=self.layout[reg.index].index,
physical=reg.index)
else:
label = '${name}$'.format(name=reg.register.name)
label = _fix_double_script(label)
if len(label) > len_longest_label:
len_longest_label = len(label)
pos = -ii
self._qreg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.register
}
self._cond['n_lines'] += 1
# classical register
if self._creg:
n_creg = self._creg.copy()
n_creg.pop(0)
idx = 0
y_off = -len(self._qreg)
for ii, (reg, nreg) in enumerate(itertools.zip_longest(
self._creg, n_creg)):
pos = y_off - idx
if self._style.bundle:
label = '${}$'.format(reg.register.name)
label = _fix_double_script(label)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.register
}
                    if nreg and reg.register == nreg.register:
continue
else:
label = '${}_{{{}}}$'.format(reg.register.name, reg.index)
label = _fix_double_script(label)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.register
}
if len(label) > len_longest_label:
len_longest_label = len(label)
self._cond['n_lines'] += 1
idx += 1
# 7 is the length of the smallest possible label
self.x_offset = -.5 + 0.18 * (len_longest_label - 7)
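    # Draw the register labels and horizontal wires for one fold (row) of the
    # circuit, adding line-feed marks where the drawing wraps.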
def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):
# quantum register
for qreg in self._qreg_dict.values():
            label = qreg['label']
y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)
self.ax.text(self.x_offset - 0.2, y, label, ha='right', va='center',
fontsize=1.25 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([self.x_offset + 0.2, y], [self._cond['xmax'], y],
zorder=PORDER_REGLINE)
# classical register
this_creg_dict = {}
for creg in self._creg_dict.values():
            label = creg['label']
y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)
if y not in this_creg_dict.keys():
this_creg_dict[y] = {'val': 1, 'label': label}
else:
this_creg_dict[y]['val'] += 1
for y, this_creg in this_creg_dict.items():
# bundle
if this_creg['val'] > 1:
self.ax.plot([self.x_offset + 1.1, self.x_offset + 1.2], [y - .1, y + .1],
color=self._style.cc,
zorder=PORDER_LINE)
self.ax.text(self.x_offset + 1.0, y + .1, str(this_creg['val']), ha='left',
va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(self.x_offset - 0.2, y, this_creg['label'], ha='right', va='center',
fontsize=1.5 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([self.x_offset + 0.2, y], [self._cond['xmax'], y], lc=self._style.cc,
ls=self._style.cline, zorder=PORDER_REGLINE)
# lf line
if feedline_r:
self._linefeed_mark((self.fold + self.x_offset + 1 - 0.1,
- n_fold * (self._cond['n_lines'] + 1)))
if feedline_l:
self._linefeed_mark((self.x_offset + 0.3,
- n_fold * (self._cond['n_lines'] + 1)))
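    # Main drawing loop: for each layer, work out how wide it needs to be,
    # then dispatch every instruction to the matching gate/measure/barrier
    # drawing helper.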
def _draw_ops(self, verbose=False):
_wide_gate = ['u2', 'u3', 'cu3', 'unitary', 'r', 'cu1', 'rzz']
_barriers = {'coord': [], 'group': []}
#
# generate coordinate manager
#
q_anchors = {}
for key, qreg in self._qreg_dict.items():
q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=qreg['y'],
fold=self.fold)
c_anchors = {}
for key, creg in self._creg_dict.items():
c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=creg['y'],
fold=self.fold)
#
# draw gates
#
prev_anc = -1
for layer in self._ops:
layer_width = 1
for op in layer:
# If one of the standard wide gates
if op.name in _wide_gate:
if layer_width < 2:
layer_width = 2
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
if '$\\pi$' in param:
pi_count = param.count('pi')
len_param = len(param) - (4 * pi_count)
else:
len_param = len(param)
if len_param > len(op.name):
box_width = math.floor(len(param) / 10)
if op.name == 'unitary':
box_width = 2
# If more than 4 characters min width is 2
if box_width <= 1:
box_width = 2
if layer_width < box_width:
if box_width > 2:
layer_width = box_width
else:
layer_width = 2
continue
# If custom ControlledGate
elif isinstance(op.op, ControlledGate) and op.name not in [
'ccx', 'cx', 'c3x', 'c4x', 'cy', 'cz', 'ch', 'cu1',
'cu3', 'crz', 'cswap']:
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
if '$\\pi$' in param:
pi_count = param.count('pi')
len_param = len(param) - (4 * pi_count)
else:
len_param = len(param)
if len_param > len(op.name):
box_width = math.floor(len_param / 5.5)
layer_width = box_width
continue
# if custom gate with a longer than standard name determine
# width
elif op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise', 'cswap', 'swap', 'measure',
'measure_x', 'measure_y', 'measure_z'] and len(op.name) >= 4:
box_width = math.ceil(len(op.name) / 6)
# handle params/subtext longer than op names
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
if '$\\pi$' in param:
pi_count = param.count('pi')
len_param = len(param) - (4 * pi_count)
else:
len_param = len(param)
if len_param > len(op.name):
box_width = math.floor(len(param) / 8)
# If more than 4 characters min width is 2
if box_width <= 1:
box_width = 2
if layer_width < box_width:
if box_width > 2:
layer_width = box_width * 2
else:
layer_width = 2
continue
# If more than 4 characters min width is 2
layer_width = math.ceil(box_width * WID * 2.5)
this_anc = prev_anc + 1
for op in layer:
_iswide = op.name in _wide_gate
if op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise', 'cswap', 'swap', 'measure',
'measure_x', 'measure_y', 'measure_z',
'reset'] and len(op.name) >= 4:
_iswide = True
# get qreg index
q_idxs = []
for qarg in op.qargs:
for index, reg in self._qreg_dict.items():
if (reg['group'] == qarg.register and
reg['index'] == qarg.index):
q_idxs.append(index)
break
# get creg index
c_idxs = []
for carg in op.cargs:
for index, reg in self._creg_dict.items():
if (reg['group'] == carg.register and
reg['index'] == carg.index):
c_idxs.append(index)
break
# Only add the gate to the anchors if it is going to be plotted.
# This prevents additional blank wires at the end of the line if
# the last instruction is a barrier type
if self.plot_barriers or \
op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise']:
for ii in q_idxs:
q_anchors[ii].set_index(this_anc, layer_width)
# qreg coordinate
q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset)
for ii in q_idxs]
# creg coordinate
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset)
for ii in c_idxs]
# bottom and top point of qreg
qreg_b = min(q_xy, key=lambda xy: xy[1])
qreg_t = max(q_xy, key=lambda xy: xy[1])
# update index based on the value from plotting
this_anc = q_anchors[q_idxs[0]].gate_anchor
if verbose:
print(op)
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params)
else:
param = None
# conditional gate
if op.condition:
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width, self.x_offset) for
ii in self._creg_dict]
mask = 0
for index, cbit in enumerate(self._creg):
if cbit.register == op.condition[0]:
mask |= (1 << index)
val = op.condition[1]
# cbit list to consider
fmt_c = '{{:0{}b}}'.format(len(c_xy))
cmask = list(fmt_c.format(mask))[::-1]
# value
fmt_v = '{{:0{}b}}'.format(cmask.count('1'))
vlist = list(fmt_v.format(val))[::-1]
# plot conditionals
v_ind = 0
xy_plot = []
for xy, m in zip(c_xy, cmask):
if m == '1':
if xy not in xy_plot:
if vlist[v_ind] == '1' or self._style.bundle:
self._conds(xy, istrue=True)
else:
self._conds(xy, istrue=False)
xy_plot.append(xy)
v_ind += 1
creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]
self._subtext(creg_b, hex(val))
self._line(qreg_t, creg_b, lc=self._style.cc,
ls=self._style.cline)
#
# draw special gates
#
if op.name[:7] == 'measure':
vv = self._creg_dict[c_idxs[0]]['index']
if len(op.name) == 9:
basis = op.name[-1]
else:
basis = 'z'
self._measure(q_xy[0], c_xy[0], vv, basis)
elif op.name in ['barrier', 'snapshot', 'load', 'save',
'noise']:
_barriers = {'coord': [], 'group': []}
for index, qbit in enumerate(q_idxs):
q_group = self._qreg_dict[qbit]['group']
if q_group not in _barriers['group']:
_barriers['group'].append(q_group)
_barriers['coord'].append(q_xy[index])
if self.plot_barriers:
self._barrier(_barriers)
elif op.name == 'initialize':
vec = '[%s]' % param
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.op.label or "|psi>",
subtext=vec)
elif op.name == 'unitary':
# TODO(mtreinish): Look into adding the unitary to the
# subtext
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.op.label or "Unitary")
elif isinstance(op.op, ControlledGate) and op.name not in [
'ccx', 'cx', 'c3x', 'c4x', 'cy', 'cz', 'ch', 'cu1', 'cu3', 'crz',
'cswap']:
disp = op.op.base_gate.name
num_ctrl_qubits = op.op.num_ctrl_qubits
num_qargs = len(q_xy) - num_ctrl_qubits
# set the ctrl qbits to open or closed
self.set_multi_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, 'multi')
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])
if num_qargs == 1:
if param:
self._gate(q_xy[num_ctrl_qubits], wide=_iswide,
text=disp,
fc=self._style.dispcol['multi'],
subtext='{}'.format(param))
else:
fcx = op.name if op.name in self._style.dispcol else 'multi'
self._gate(q_xy[num_ctrl_qubits], wide=_iswide, text=disp,
fc=self._style.dispcol[fcx])
else:
self._custom_multiqubit_gate(
q_xy[num_ctrl_qubits:], wide=_iswide, fc=self._style.dispcol['multi'],
text=disp)
#
# draw single qubit gates
#
elif len(q_xy) == 1:
disp = op.name
if param:
self._gate(q_xy[0], wide=_iswide, text=disp,
subtext=str(param))
else:
self._gate(q_xy[0], wide=_iswide, text=disp)
#
# draw multi-qubit gates (n=2)
#
elif len(q_xy) == 2:
# cx
if op.name == 'cx':
if self._style.dispcol['cx'] != '#ffffff':
add_width = self._style.colored_add_width
else:
add_width = None
num_ctrl_qubits = op.op.num_ctrl_qubits
# set the ctrl qbits to open or closed
self.set_multi_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, 'cx')
if self._style.name != 'bw':
self._tgt_qubit(q_xy[1], fc=self._style.dispcol['cx'],
ec=self._style.dispcol['cx'],
ac=self._style.dispcol['target'],
add_width=add_width)
else:
self._tgt_qubit(q_xy[1], fc=self._style.dispcol['target'],
ec=self._style.dispcol['cx'],
ac=self._style.dispcol['cx'],
add_width=add_width)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['cx'])
# cz for latexmode
elif op.name == 'cz':
disp = op.name.replace('c', '')
if self._style.name != 'bw':
color = self._style.dispcol['cz']
self._ctrl_qubit(q_xy[0],
fc=color,
ec=color)
self._ctrl_qubit(q_xy[1],
fc=color,
ec=color)
else:
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
# add qubit-qubit wiring
if self._style.name != 'bw':
self._line(qreg_b, qreg_t,
lc=color)
else:
self._line(qreg_b, qreg_t, zorder=PORDER_LINE + 1)
# control gate
elif op.name in ['cy', 'ch', 'cu3', 'crz']:
disp = op.name.replace('c', '')
color = None
if self._style.name != 'bw':
if op.name == 'cy':
color = self._style.dispcol['cy']
else:
color = self._style.dispcol['multi']
self._ctrl_qubit(q_xy[0], fc=color, ec=color)
if param:
self._gate(q_xy[1], wide=_iswide,
text=disp,
fc=color,
subtext='{}'.format(param))
else:
self._gate(q_xy[1], wide=_iswide, text=disp,
fc=color)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=color)
# rzz gate
elif op.name == 'rzz':
color = self._style.dispcol['multi']
self._ctrl_qubit(q_xy[0], fc=color, ec=color)
self._ctrl_qubit(q_xy[1], fc=color, ec=color)
self._sidetext(qreg_b, text='zz({})'.format(param))
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=color)
# cu1 gate
elif op.name == 'cu1':
color = self._style.dispcol['multi']
self._ctrl_qubit(q_xy[0], fc=color, ec=color)
self._ctrl_qubit(q_xy[1], fc=color, ec=color)
self._sidetext(qreg_b, text='U1 ({})'.format(param))
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=color)
# swap gate
elif op.name == 'swap':
self._swap(q_xy[0], self._style.dispcol['swap'])
self._swap(q_xy[1], self._style.dispcol['swap'])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['swap'])
# dcx and iswap gate
elif op.name in ['dcx', 'iswap']:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
fc=self._style.dispcol[op.name],
text=op.op.label or op.name)
# Custom gate
else:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
text=op.op.label or op.name)
#
# draw multi-qubit gates (n=3)
#
elif len(q_xy) in range(3, 6):
# cswap gate
if op.name == 'cswap':
self._ctrl_qubit(q_xy[0],
fc=self._style.dispcol['multi'],
ec=self._style.dispcol['multi'])
self._swap(q_xy[1], self._style.dispcol['multi'])
self._swap(q_xy[2], self._style.dispcol['multi'])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])
# ccx gate
elif op.name == 'ccx' or op.name == 'c3x' or op.name == 'c4x':
num_ctrl_qubits = op.op.num_ctrl_qubits
# set the ctrl qbits to open or closed
self.set_multi_ctrl_bits(op.op.ctrl_state, num_ctrl_qubits, q_xy, 'multi')
if self._style.name != 'bw':
self._tgt_qubit(q_xy[num_ctrl_qubits], fc=self._style.dispcol['multi'],
ec=self._style.dispcol['multi'],
ac=self._style.dispcol['target'])
else:
self._tgt_qubit(q_xy[num_ctrl_qubits], fc=self._style.dispcol['target'],
ec=self._style.dispcol['multi'],
ac=self._style.dispcol['multi'])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t, lc=self._style.dispcol['multi'])
# custom gate
else:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
text=getattr(op.op, 'label', None) or op.name)
# draw custom multi-qubit gate
elif len(q_xy) > 5:
self._custom_multiqubit_gate(q_xy, c_xy, wide=_iswide,
text=op.op.label or op.name)
else:
logger.critical('Invalid gate %s', op)
raise exceptions.VisualizationError('invalid gate {}'.format(op))
# adjust the column if there have been barriers encountered, but not plotted
barrier_offset = 0
if not self.plot_barriers:
# only adjust if everything in the layer wasn't plotted
barrier_offset = -1 if all([op.name in
['barrier', 'snapshot', 'load', 'save', 'noise']
for op in layer]) else 0
prev_anc = this_anc + layer_width + barrier_offset - 1
#
# adjust window size and draw horizontal lines
#
anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]
if anchors:
max_anc = max(anchors)
else:
max_anc = 0
n_fold = max(0, max_anc - 1) // self.fold
# window size
if max_anc > self.fold > 0:
self._cond['xmax'] = self.fold + 1 + self.x_offset
self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1
else:
self._cond['xmax'] = max_anc + 1 + self.x_offset
self._cond['ymax'] = self._cond['n_lines']
# add horizontal lines
for ii in range(n_fold + 1):
feedline_r = (n_fold > 0 and n_fold > ii)
feedline_l = (ii > 0)
self._draw_regs_sub(ii, feedline_l, feedline_r)
# draw gate number
if self._style.index:
for ii in range(max_anc):
if self.fold > 0:
x_coord = ii % self.fold + 1
y_coord = - (ii // self.fold) * (self._cond['n_lines'] + 1) + 0.7
else:
x_coord = ii + 1
y_coord = 0.7
self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.tc, clip_on=True,
zorder=PORDER_TEXT)
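    # Format gate parameters for display: pi_check renders multiples of pi
    # symbolically, and leading minus signs are wrapped for mathtext.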
@staticmethod
def param_parse(v):
# create an empty list to store the parameters in
param_parts = [None] * len(v)
for i, e in enumerate(v):
try:
param_parts[i] = pi_check(e, output='mpl', ndigits=3)
except TypeError:
param_parts[i] = str(e)
if param_parts[i].startswith('-'):
param_parts[i] = '$-$' + param_parts[i][1:]
param_parts = ', '.join(param_parts)
return param_parts
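    # Pretty-print a single numeric value: near-integers as integers, moderate
    # magnitudes with two decimals, everything else in scientific notation.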
@staticmethod
def format_numeric(val, tol=1e-5):
if isinstance(val, complex):
return str(val)
elif complex(val).imag != 0:
val = complex(val)
abs_val = abs(val)
if math.isclose(abs_val, 0.0, abs_tol=1e-100):
return '0'
if math.isclose(math.fmod(abs_val, 1.0),
0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:
return str(int(val))
if 0.1 <= abs_val < 100.0:
return '{:.2f}'.format(val)
return '{:.1e}'.format(val)
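    # Search for a small fraction i/j with val ~= (i/j) * base, e.g.
    # fraction(3 * np.pi / 4) returns Fraction(3, 4); None if nothing matches.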
@staticmethod
def fraction(val, base=np.pi, n=100, tol=1e-5):
abs_val = abs(val)
for i in range(1, n):
for j in range(1, n):
if math.isclose(abs_val, i / j * base, rel_tol=tol):
if val < 0:
i *= -1
return fractions.Fraction(i, j)
return None
| 0
| 1,388
| 0
| 45,685
| 0
| 0
| 0
| 122
| 369
|
a7cf222e3f96762239244a7b076603c3ca2e33f3
| 946
|
py
|
Python
|
sjpClass.py
|
alkamid/wiktionary
|
ce242da609a1001ae7462b07da2f6e83f1a7281b
|
[
"MIT"
] | 3
|
2015-01-06T22:00:22.000Z
|
2016-08-14T08:07:32.000Z
|
sjpClass.py
|
alkamid/wiktionary
|
ce242da609a1001ae7462b07da2f6e83f1a7281b
|
[
"MIT"
] | 56
|
2015-07-12T10:21:38.000Z
|
2020-02-23T18:51:01.000Z
|
sjpClass.py
|
alkamid/wiktionary
|
ce242da609a1001ae7462b07da2f6e83f1a7281b
|
[
"MIT"
] | 2
|
2015-01-06T21:25:06.000Z
|
2018-01-17T12:03:17.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
| 27.823529
| 136
| 0.609937
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
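# Holds the configuration for one word category processed by the bot: its wiki
# page with the word list, an output file, and an optional entry limit.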
class kategoriaSlowa():
def __init__(self, name, counter, pages, tabelka, outputFile):
self.name = name
self.counter = counter
self.pages = 'Wikipedysta:AlkamidBot/sjp/' + pages
self.buffer = ''
self.tabelka = tabelka
self.outputFile = 'output/' + outputFile
self.limit = 0
def addLimit(self, limit):
self.limit = limit
def checkHistory(pagename):
    # Returns 1 if one of the maintenance bots (AlkamidBot, Olafbot, PBbot) was the last author,
    # 0 if someone else edited the page last (i.e. it is being verified).
bots = ('AlkamidBot', 'Olafbot', 'PBbot')
site = pywikibot.Site()
page = pywikibot.Page(site, pagename)
try: page.get()
except pywikibot.NoPage:
return 1
else:
history = page.getVersionHistory()
if history[0][2] in bots:
return 1
else:
return 0
| 0
| 0
| 0
| 370
| 0
| 470
| 0
| -5
| 69
|
8b6ebb32e27f26c072b135c85ff8fb1b572ad23d
| 2,446
|
py
|
Python
|
tests/test_filters.py
|
mobius-medical/flask-genshi
|
68cba6c9cb604272a25f5e4c74e5a127e3ac7854
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_filters.py
|
mobius-medical/flask-genshi
|
68cba6c9cb604272a25f5e4c74e5a127e3ac7854
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_filters.py
|
mobius-medical/flask-genshi
|
68cba6c9cb604272a25f5e4c74e5a127e3ac7854
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
from inspect import cleandoc
from flask_genshi import render_template
def test_applies_method_filters(app):
"""Method filters are applied for generated and rendered templates"""
with app.test_request_context():
genshi = app.extensions["genshi"]
rendered = render_template("filter.html")
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Flask-Genshi - Hi!</title></head></html>
"""
)
assert rendered == expected
def test_filters_per_render(app):
"""Filters can be applied per rendering"""
with app.test_request_context():
rendered = render_template("filter.html", filter=prepend_title)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Hi! - Flask-Genshi</title></head></html>
"""
)
assert rendered == expected
def test_works_with_flatland(app):
"""Filters can take the context and support flatland"""
with app.test_request_context():
genshi = app.extensions["genshi"]
context = dict(form=FlatlandForm({"username": "dag"}))
rendered = render_template("flatland.html", context)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<input type="text" name="username" value="dag">
"""
)
assert rendered == expected
| 31.766234
| 102
| 0.629599
|
from __future__ import unicode_literals
from inspect import cleandoc
from genshi.filters import Transformer
from flask_genshi import render_template
from flatland.out.genshi import setup as flatland_setup
from flatland import Form, String
class FlatlandForm(Form):
username = String
def test_applies_method_filters(app):
"""Method filters are applied for generated and rendered templates"""
with app.test_request_context():
genshi = app.extensions["genshi"]
@genshi.filter("html")
def prepend_title(template):
return template | Transformer("head/title").prepend("Flask-Genshi - ")
rendered = render_template("filter.html")
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Flask-Genshi - Hi!</title></head></html>
"""
)
assert rendered == expected
def test_filters_per_render(app):
"""Filters can be applied per rendering"""
with app.test_request_context():
def prepend_title(template):
return template | Transformer("head/title").append(" - Flask-Genshi")
rendered = render_template("filter.html", filter=prepend_title)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head><title>Hi! - Flask-Genshi</title></head></html>
"""
)
assert rendered == expected
def test_works_with_flatland(app):
"""Filters can take the context and support flatland"""
with app.test_request_context():
genshi = app.extensions["genshi"]
@genshi.template_parsed
def callback(template):
flatland_setup(template)
context = dict(form=FlatlandForm({"username": "dag"}))
rendered = render_template("flatland.html", context)
# Remove leading indentation, for cleaner multi-line string
expected = cleandoc(
"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<input type="text" name="username" value="dag">
"""
)
assert rendered == expected
| 0
| 192
| 0
| 27
| 0
| 89
| 0
| 63
| 183
|
978ff0be8e3774dfa21908c9b4b49bc92d1eeb4e
| 3,159
|
py
|
Python
|
forms/models.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 3
|
2018-02-27T13:48:28.000Z
|
2018-03-03T21:57:50.000Z
|
forms/models.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 6
|
2020-02-12T00:07:46.000Z
|
2022-03-11T23:25:59.000Z
|
forms/models.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 1
|
2019-03-26T20:19:57.000Z
|
2019-03-26T20:19:57.000Z
|
from django import forms
FIELD_TYPES = (
('Short_answer', forms.CharField),
('Paragraph', forms.CharField),
('Integer', forms.IntegerField),
('ChoiceField', forms.ChoiceField),
('MultipleChoiceField', forms.MultipleChoiceField),
# ('Date', forms.DateField),
)
QUES_TYPES = (
('Short_answer', 'One Line Answer'),
('Paragraph', 'Multiple Line Answer'),
('Integer', 'Integer Answer'),
('ChoiceField', 'Choice'),
('MultipleChoiceField', 'Multiple-choice'),
# ('Date', 'date'),
)
| 28.981651
| 120
| 0.660336
|
from django.db import models
from django import forms
from django.contrib.auth.models import User
from .form_dynamic import NominationForm
import json
class Questionnaire(models.Model):
name = models.CharField(max_length=100, null=True)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
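    # Build a NominationForm whose extra fields mirror this questionnaire's
    # questions; each entry carries (label, field class, field kwargs, question id).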
def get_form(self, *args, **kwargs):
fields = []
for question in self.question_set.all():
field = question._get_formfield_class()
label = question.question
if question.required:
label = question.question + " *"
field_args = question._get_field_args()
ques_id = question.id
fields.append((label, field, field_args, ques_id))
return NominationForm(*args, extra=fields, **kwargs)
def add_answer(self, applicant, answer_data):
json_data = json.dumps(answer_data)
answerform = FilledForm(questionnaire=self, applicant=applicant,data=json_data)
answerform.save()
return answerform
FIELD_TYPES = (
('Short_answer', forms.CharField),
('Paragraph', forms.CharField),
('Integer', forms.IntegerField),
('ChoiceField', forms.ChoiceField),
('MultipleChoiceField', forms.MultipleChoiceField),
# ('Date', forms.DateField),
)
QUES_TYPES = (
('Short_answer', 'One Line Answer'),
('Paragraph', 'Multiple Line Answer'),
('Integer', 'Integer Answer'),
('ChoiceField', 'Choice'),
('MultipleChoiceField', 'Multiple-choice'),
# ('Date', 'date'),
)
class Question(models.Model):
questionnaire = models.ForeignKey(Questionnaire,on_delete=models.CASCADE, null=True)
question_type = models.CharField(max_length=50, choices=QUES_TYPES, null=True)
question = models.CharField(max_length=1000, null=True)
question_choices = models.TextField(max_length=600, null=True, blank=True, help_text='make new line for new option')
required = models.BooleanField(default=True)
def __unicode__(self):
return self.question
def __str__(self):
return self.question
def _get_formfield_class(self):
for index, field_class in FIELD_TYPES:
if self.question_type == index:
return field_class
def _get_field_args(self):
args = {}
if self.question_type == 'ChoiceField' or self.question_type == 'MultipleChoiceField':
args['choices'] = enumerate(self.question_choices.split('\n'))
if self.question_type == 'MultipleChoiceField':
args['widget']=forms.CheckboxSelectMultiple
if self.question_type == 'Paragraph':
args['widget'] =forms.Textarea
if self.required:
args['label_suffix'] = " *"
args.update({'required': self.required})
return args
class FilledForm(models.Model):
questionnaire = models.ForeignKey(Questionnaire,on_delete=models.CASCADE, null=True)
applicant = models.ForeignKey(User, null=True)
data = models.CharField(max_length=30000, null=True, blank=True)
def __str__(self):
return self.questionnaire.name
| 0
| 0
| 0
| 2,430
| 0
| 0
| 0
| 38
| 157
|
c910c09445a0e65ba2545dbe1c4a46731ae345b6
| 4,099
|
py
|
Python
|
degmo/data/datasets.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | 2
|
2019-11-21T15:50:59.000Z
|
2019-12-17T02:44:19.000Z
|
degmo/data/datasets.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | null | null | null |
degmo/data/datasets.py
|
IcarusWizard/Deep-Generative-Models
|
4117c11ad944bdeff106a80adbb3642a076af64e
|
[
"MIT"
] | 1
|
2021-07-02T05:49:29.000Z
|
2021-07-02T05:49:29.000Z
|
import PIL.Image as Image
from functools import partial
DATADIR = 'dataset/'
load_celeba32 = partial(load_celeba, image_size=32)
load_celeba64 = partial(load_celeba, image_size=64)
load_celeba128 = partial(load_celeba, image_size=128)
| 37.263636
| 112
| 0.68041
|
import numpy as np
import torch, torchvision, math
from torch.functional import F
import os
import PIL.Image as Image
from functools import partial
DATADIR = 'dataset/'
class ImageDataset(torch.utils.data.Dataset):
def __init__(self, root, transform=None):
super().__init__()
self.root = root
self.image_list = [os.path.join(root, filename) for filename in os.listdir(root)]
self.transform = transform
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
img = Image.open(self.image_list[index])
if self.transform:
img = self.transform(img)
return (img, )
def load_mnist(normalize=False):
config = {
"c" : 1,
"h" : 28,
"w" : 28,
}
transform = [torchvision.transforms.ToTensor()]
if normalize:
transform.append(torchvision.transforms.Normalize([0.5], [0.5]))
transform = torchvision.transforms.Compose(transform)
train_dataset = torchvision.datasets.MNIST(DATADIR, train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(DATADIR, train=False, download=True, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, (55000, 5000))
return (train_dataset, val_dataset, test_dataset, config)
def load_bmnist(normalize=False):
config = {
"c" : 1,
"h" : 28,
"w" : 28,
}
assert not normalize, "bmnist do not support normalize operation"
transform = torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: (x > 0).float()),
])
train_dataset = torchvision.datasets.MNIST(DATADIR, train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(DATADIR, train=False, download=True, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, (55000, 5000))
return (train_dataset, val_dataset, test_dataset, config)
def load_svhn(normalize=False):
config = {
"c" : 3,
"h" : 32,
"w" : 32,
}
transform = [torchvision.transforms.ToTensor()]
if normalize:
transform.append(torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = torchvision.transforms.Compose(transform)
train_dataset = torchvision.datasets.SVHN(DATADIR, split='train', download=True, transform=transform)
test_dataset = torchvision.datasets.SVHN(DATADIR, split='test', download=True, transform=transform)
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, (len(train_dataset) - 5000, 5000))
return (train_dataset, val_dataset, test_dataset, config)
def load_cifar(normalize=False):
config = {
"c" : 3,
"h" : 32,
"w" : 32,
}
transform = [torchvision.transforms.ToTensor()]
if normalize:
transform.append(torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = torchvision.transforms.Compose(transform)
dataset = torchvision.datasets.CIFAR10(DATADIR, download=True, transform=transform)
return torch.utils.data.random_split(dataset, (40000, 5000, 5000)) + [config]
def load_celeba(image_size=128, normalize=False):
config = {
"c" : 3,
"h" : image_size,
"w" : image_size,
}
transform = [
torchvision.transforms.Resize(image_size),
torchvision.transforms.CenterCrop(image_size),
torchvision.transforms.ToTensor(),
]
if normalize:
transform.append(torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
transform = torchvision.transforms.Compose(transform)
dataset = torchvision.datasets.CelebA(DATADIR, download=True, transform=transform)
return torch.utils.data.random_split(dataset, (len(dataset) - 2000, 1000, 1000)) + [config]
load_celeba32 = partial(load_celeba, image_size=32)
load_celeba64 = partial(load_celeba, image_size=64)
load_celeba128 = partial(load_celeba, image_size=128)
| 0
| 0
| 0
| 479
| 0
| 3,150
| 0
| 4
| 230
|
1a3578a56a4bccb214d3e2c35a83b6e6b51851e2
| 57,483
|
py
|
Python
|
basistheory/api/tenants_api.py
|
Basis-Theory/basistheory-python
|
5fd0f3d20fd07e8de45d6d5919e092c696049df1
|
[
"Apache-2.0"
] | null | null | null |
basistheory/api/tenants_api.py
|
Basis-Theory/basistheory-python
|
5fd0f3d20fd07e8de45d6d5919e092c696049df1
|
[
"Apache-2.0"
] | null | null | null |
basistheory/api/tenants_api.py
|
Basis-Theory/basistheory-python
|
5fd0f3d20fd07e8de45d6d5919e092c696049df1
|
[
"Apache-2.0"
] | null | null | null |
"""
Basis Theory API
## Getting Started * Sign-in to [Basis Theory](https://basistheory.com) and go to [Applications](https://portal.basistheory.com/applications) * Create a Basis Theory Server to Server Application * All permissions should be selected * Paste the API Key into the `BT-API-KEY` variable # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
| 37.768068
| 300
| 0.517736
|
"""
Basis Theory API
## Getting Started * Sign-in to [Basis Theory](https://basistheory.com) and go to [Applications](https://portal.basistheory.com/applications) * Create a Basis Theory Server to Server Application * All permissions should be selected * Paste the API Key into the `BT-API-KEY` variable # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from basistheory.api_client import ApiClient, Endpoint as _Endpoint
from basistheory.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
set_request_options
)
from basistheory.model.create_tenant_invitation_request import CreateTenantInvitationRequest
from basistheory.model.problem_details import ProblemDetails
from basistheory.model.tenant import Tenant
from basistheory.model.tenant_invitation_response import TenantInvitationResponse
from basistheory.model.tenant_invitation_response_paginated_list import TenantInvitationResponsePaginatedList
from basistheory.model.tenant_invitation_status import TenantInvitationStatus
from basistheory.model.tenant_member_response_paginated_list import TenantMemberResponsePaginatedList
from basistheory.model.tenant_usage_report import TenantUsageReport
from basistheory.model.update_tenant_request import UpdateTenantRequest
from basistheory.model.validation_problem_details import ValidationProblemDetails
class TenantsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.create_invitation_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponse,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations',
'operation_id': 'create_invitation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'create_tenant_invitation_request',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'create_tenant_invitation_request':
(CreateTenantInvitationRequest,),
},
'attribute_map': {
},
'location_map': {
'create_tenant_invitation_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
self.delete_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'delete',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_invitation_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations/{invitationId}',
'operation_id': 'delete_invitation',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'invitation_id',
'request_options'
],
'required': [
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'invitation_id':
(str,),
},
'attribute_map': {
'invitation_id': 'invitationId',
},
'location_map': {
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.delete_member_endpoint = _Endpoint(
settings={
'response_type': None,
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/members/{memberId}',
'operation_id': 'delete_member',
'http_method': 'DELETE',
'servers': None,
},
params_map={
'all': [
'member_id',
'request_options'
],
'required': [
'member_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'member_id':
(str,),
},
'attribute_map': {
'member_id': 'memberId',
},
'location_map': {
'member_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_endpoint = _Endpoint(
settings={
'response_type': (Tenant,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'get',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_invitations_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponsePaginatedList,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations',
'operation_id': 'get_invitations',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'status',
'page',
'size',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'status':
(TenantInvitationStatus,),
'page':
(int,),
'size':
(int,),
},
'attribute_map': {
'status': 'status',
'page': 'page',
'size': 'size',
},
'location_map': {
'status': 'query',
'page': 'query',
'size': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_members_endpoint = _Endpoint(
settings={
'response_type': (TenantMemberResponsePaginatedList,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/members',
'operation_id': 'get_members',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'user_id',
'page',
'size',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'user_id':
([str],),
'page':
(int,),
'size':
(int,),
},
'attribute_map': {
'user_id': 'user_id',
'page': 'page',
'size': 'size',
},
'location_map': {
'user_id': 'query',
'page': 'query',
'size': 'query',
},
'collection_format_map': {
'user_id': 'multi',
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_tenant_operation_report_endpoint = _Endpoint(
settings={
'response_type': (TenantUsageReport,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/reports/operations',
'operation_id': 'get_tenant_operation_report',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.get_tenant_usage_report_endpoint = _Endpoint(
settings={
'response_type': (TenantUsageReport,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/reports/usage',
'operation_id': 'get_tenant_usage_report',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.resend_invitation_endpoint = _Endpoint(
settings={
'response_type': (TenantInvitationResponse,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self/invitations/{invitationId}/resend',
'operation_id': 'resend_invitation',
'http_method': 'POST',
'servers': None,
},
params_map={
'all': [
'invitation_id',
'request_options'
],
'required': [
'invitation_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'invitation_id':
(str,),
},
'attribute_map': {
'invitation_id': 'invitationId',
},
'location_map': {
'invitation_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.update_endpoint = _Endpoint(
settings={
'response_type': (Tenant,),
'auth': [
'apiKey'
],
'endpoint_path': '/tenants/self',
'operation_id': 'update',
'http_method': 'PUT',
'servers': None,
},
params_map={
'all': [
'update_tenant_request',
'request_options'
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'update_tenant_request':
(UpdateTenantRequest,),
},
'attribute_map': {
},
'location_map': {
'update_tenant_request': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'application/json'
]
},
api_client=api_client
)
def create_invitation(
self,
**kwargs
):
"""create_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_invitation(async_req=True)
>>> result = thread.get()
Keyword Args:
create_tenant_invitation_request (CreateTenantInvitationRequest): [optional]
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.create_invitation_endpoint.call_with_http_info(**kwargs)
def delete(
self,
**kwargs
):
"""delete # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.delete_endpoint.call_with_http_info(**kwargs)
def delete_invitation(
self,
invitation_id,
**kwargs
):
"""delete_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.delete_invitation_endpoint.call_with_http_info(**kwargs)
def delete_member(
self,
member_id,
**kwargs
):
"""delete_member # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_member(member_id, async_req=True)
>>> result = thread.get()
Args:
member_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['member_id'] = \
member_id
return self.delete_member_endpoint.call_with_http_info(**kwargs)
def get(
self,
**kwargs
):
"""get # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
            _return_http_data_only (bool): response data without HTTP status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_endpoint.call_with_http_info(**kwargs)
def get_invitations(
self,
**kwargs
):
"""get_invitations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_invitations(async_req=True)
>>> result = thread.get()
Keyword Args:
status (TenantInvitationStatus): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_invitations_endpoint.call_with_http_info(**kwargs)
def get_members(
self,
**kwargs
):
"""get_members # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_members(async_req=True)
>>> result = thread.get()
Keyword Args:
user_id ([str]): [optional]
page (int): [optional]
size (int): [optional]
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantMemberResponsePaginatedList
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_members_endpoint.call_with_http_info(**kwargs)
def get_tenant_operation_report(
self,
**kwargs
):
"""get_tenant_operation_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_operation_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_operation_report_endpoint.call_with_http_info(**kwargs)
def get_tenant_usage_report(
self,
**kwargs
):
"""get_tenant_usage_report # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_tenant_usage_report(async_req=True)
>>> result = thread.get()
Keyword Args:
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantUsageReport
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.get_tenant_usage_report_endpoint.call_with_http_info(**kwargs)
def resend_invitation(
self,
invitation_id,
**kwargs
):
"""resend_invitation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.resend_invitation(invitation_id, async_req=True)
>>> result = thread.get()
Args:
invitation_id (str):
Keyword Args:
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
TenantInvitationResponse
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
kwargs['invitation_id'] = \
invitation_id
return self.resend_invitation_endpoint.call_with_http_info(**kwargs)
def update(
self,
**kwargs
):
"""update # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update(async_req=True)
>>> result = thread.get()
Keyword Args:
update_tenant_request (UpdateTenantRequest): [optional]
request_options(RequestOptions): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
            _check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
                Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
            _request_auths (list): set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
Default is None
async_req (bool): execute request asynchronously
Returns:
Tenant
If the method is called asynchronously, returns the request
thread.
"""
if kwargs.get('request_options'):
set_request_options(kwargs.pop('request_options'), self)
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['_request_auths'] = kwargs.get('_request_auths', None)
return self.update_endpoint.call_with_http_info(**kwargs)
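All of the wrapper methods above share one calling convention, summarised in their docstrings. As a rough, hedged sketch of how such a generated client is typically driven (the class and client names below are assumptions, since the surrounding class definition and package are not part of this excerpt):
# Hypothetical usage sketch only; TenantApi and api_client are assumed names,
# not taken from this excerpt.
api = TenantApi(api_client)
members = api.get_members(page=1, size=50)      # synchronous call: returns the parsed model
thread = api.get_invitations(async_req=True)    # asynchronous call: returns the request thread
invitations = thread.get()                      # block until the HTTP request completes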
| 0
| 0
| 0
| 55,908
| 0
| 0
| 0
| 786
| 361
|
2b5c325a1726de056d5d1198acc3940aef23c363
| 3,454
|
py
|
Python
|
custom_components/nwsradar/config_flow.py
|
MatthewFlamm/ha_nws_radar
|
f039bf1abb94a48232599746f80d4c7e4af35de7
|
[
"MIT"
] | 21
|
2019-07-18T23:38:22.000Z
|
2021-01-08T01:14:44.000Z
|
custom_components/nwsradar/config_flow.py
|
MatthewFlamm/ha_nws_radar
|
f039bf1abb94a48232599746f80d4c7e4af35de7
|
[
"MIT"
] | 7
|
2019-09-06T13:14:49.000Z
|
2020-12-18T17:49:34.000Z
|
custom_components/nwsradar/config_flow.py
|
MatthewFlamm/ha_nws_radar
|
f039bf1abb94a48232599746f80d4c7e4af35de7
|
[
"MIT"
] | 2
|
2019-07-26T21:23:59.000Z
|
2020-01-14T23:03:12.000Z
|
"""Config flow for National Weather Service (NWS) integration."""
import logging
# pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
| 32.895238
| 87
| 0.621888
|
"""Config flow for National Weather Service (NWS) integration."""
import logging
import voluptuous as vol
from nws_radar.nws_radar_mosaic import REGIONS
from homeassistant import config_entries
from . import unique_id
# pylint: disable=unused-import
from .const import (
CONF_LOOP,
CONF_STATION,
CONF_STYLE,
STYLES,
CONF_TYPE,
RADAR_TYPES,
DEFAULT_RADAR_TYPE,
CONF_NAME,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for National Weather Service (NWS)."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
self._config = user_input # pylint: disable=attribute-defined-outside-init
if user_input[CONF_STYLE] in {"Standard", "Enhanced"}:
return await self.async_step_standard_enhanced()
# Mosaic
return await self.async_step_mosaic()
data_schema = vol.Schema(
{
vol.Required(CONF_STYLE): vol.In(STYLES),
}
)
return self.async_show_form(
step_id="user", data_schema=data_schema, errors=errors
)
async def async_step_standard_enhanced(self, user_input=None):
"""Standard or enhanced step."""
errors = {}
if user_input is not None:
self._config.update(user_input)
self._config[CONF_STATION] = self._config[CONF_STATION].upper()
title = unique_id(self._config)
self._config[CONF_NAME] = None
await self.async_set_unique_id(unique_id(self._config))
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=self._config)
data_schema = vol.Schema(
{
vol.Required(CONF_STATION): str,
vol.Required(CONF_LOOP, default=True): bool,
vol.Required(CONF_TYPE, default=DEFAULT_RADAR_TYPE): vol.In(
RADAR_TYPES.keys()
),
}
)
return self.async_show_form(
step_id="standard_enhanced", data_schema=data_schema, errors=errors
)
async def async_step_mosaic(self, user_input=None):
"""Mosaic step."""
errors = {}
if user_input is not None:
self._config.update(user_input)
self._config[CONF_TYPE] = ""
self._config[CONF_NAME] = None
title = unique_id(self._config)
await self.async_set_unique_id(title)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=self._config)
data_schema = vol.Schema(
{
vol.Required(CONF_STATION): vol.In(REGIONS),
vol.Required(CONF_LOOP, default=True): bool,
}
)
return self.async_show_form(
step_id="mosaic", data_schema=data_schema, errors=errors
)
async def async_step_import(self, user_input=None):
"""Import an entry from yaml."""
title = unique_id(user_input)
await self.async_set_unique_id(title)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=title, data=user_input)
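For orientation, the two-step flow above effectively merges dictionaries like the following before creating the entry. The literal keys and values below are assumptions for illustration only; the real keys are the CONF_* constants imported from .const, which are not shown here.
# Illustration only (assumed key/value literals, not taken from .const).
step_user = {"style": "Standard"}                                # collected by async_step_user
step_station = {"station": "klot", "loop": True, "type": "N0R"}  # collected by async_step_standard_enhanced
config = dict(step_user, **step_station)
config["station"] = config["station"].upper()  # mirrors the .upper() call above -> "KLOT"
# unique_id(config) then supplies both the entry title and the unique ID.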
| 0
| 0
| 2,680
| 290
| 0
| 0
| 0
| 194
| 136
|
e288a19792ad0dab33c86bbab35d030926f6a073
| 2,944
|
py
|
Python
|
xinci/model.py
|
Lapis-Hong/xinci
|
9234ef6e426dfa282c334ff79f4f76b475eb10f3
|
[
"MIT"
] | 23
|
2018-06-18T15:35:47.000Z
|
2021-07-28T02:19:16.000Z
|
xinci/model.py
|
Lapis-Hong/xinci
|
9234ef6e426dfa282c334ff79f4f76b475eb10f3
|
[
"MIT"
] | null | null | null |
xinci/model.py
|
Lapis-Hong/xinci
|
9234ef6e426dfa282c334ff79f4f76b475eb10f3
|
[
"MIT"
] | 10
|
2018-06-20T07:01:17.000Z
|
2020-08-31T15:56:24.000Z
|
#!/usr/bin/env python
# coding: utf-8
# @Author: lapis-hong
# @Date : 2018/6/17
"""This module contains the main algorithm for chinese word extraction.
criterion 1:
solid rate
criterion 2:
character entropy
"""
from __future__ import unicode_literals
from __future__ import division
| 37.74359
| 108
| 0.66712
|
#!/usr/bin/env python
# coding: utf-8
# @Author: lapis-hong
# @Date : 2018/6/17
"""This module contains the main algorithm for chinese word extraction.
criterion 1:
solid rate
criterion 2:
character entropy
"""
from __future__ import unicode_literals
from __future__ import division
import math
from indexer import CnTextIndexer
from utils import WordCountDict
class EntropyJudger:
"""Use entropy and solid rate to judge whether a candidate is a chinese word or not."""
def __init__(self, document, least_cnt_threshold=5, solid_rate_threshold=0.018, entropy_threshold=1.92):
"""
Args:
least_cnt_threshold: a word least appeared count, can not pass judge if less than this value.
solid_rate_threshold: p(candidate)/p(candidate[0]) * p(candidate)/p(candidate[1]) * ...
entropy_threshold: min(left_char_entropy, right_char_entropy), The smaller this values is,
more new words you will get, but with less accuracy.
"""
self._least_cnt_threshold = least_cnt_threshold
self._solid_rate_threshold = solid_rate_threshold
self._entropy_threshold = entropy_threshold
self._indexer = CnTextIndexer(document)
def judge(self, candidate):
solid_rate = self._get_solid_rate(candidate)
entropy = self._get_entropy(candidate)
if solid_rate < self._solid_rate_threshold or entropy < self._entropy_threshold:
return False
return True
def _get_solid_rate(self, candidate):
if len(candidate) < 2:
return 1.0
cnt = self._indexer.count(candidate) # candidate count in document
if cnt < self._least_cnt_threshold: # least count to be a word
return 0.0
rate = 1.0
for c in candidate:
rate *= cnt / self._indexer.char_cnt_map[c] # candidate character count in document
return math.pow(rate, 1/float(len(candidate))) * math.sqrt(len(candidate)) # interesting
def _get_entropy(self, candidate):
left_char_dic = WordCountDict()
right_char_dic = WordCountDict()
candidate_pos_generator = self._indexer.find(candidate)
for pos in candidate_pos_generator:
c = self._indexer[pos-1]
left_char_dic.add(c)
c = self._indexer[pos+len(candidate)]
right_char_dic.add(c)
previous_total_char_cnt = left_char_dic.count()
next_total_char_cnt = right_char_dic.count()
previous_entropy = 0.0
next_entropy = 0.0
for char, count in left_char_dic.items(): # efficient
prob = count / previous_total_char_cnt
previous_entropy -= prob * math.log(prob)
for char, count in right_char_dic.items():
prob = count / next_total_char_cnt
next_entropy -= prob * math.log(prob)
        return min(previous_entropy, next_entropy)  # return the smaller of the left/right neighbour entropies
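To make the entropy criterion concrete, here is a small self-contained sketch (toy counts, not taken from any real corpus) of the quantity _get_entropy computes for the neighbouring characters of a candidate:
import math
# Toy counts of the distinct characters seen immediately left of a candidate word.
left_counts = {'a': 6, 'b': 3, 'c': 1}
total = sum(left_counts.values())
# Shannon entropy of the left-neighbour distribution, as in _get_entropy() above.
left_entropy = -sum((c / total) * math.log(c / total) for c in left_counts.values())
print(round(left_entropy, 3))  # ~0.898; judge() requires min(left, right) >= 1.92 by default,
                               # so a candidate with this neighbour profile would be rejected.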
| 39
| 0
| 0
| 2,531
| 0
| 0
| 0
| 12
| 90
|
86c4428f80dd80644e84963f60d1a11c38e4a4c2
| 561
|
py
|
Python
|
Machine_Learning/ZCSNumpy.py
|
ZuoCaiSong/Python
|
137d1c4c79f9594b9bc2c7dc7728246e697f1329
|
[
"MIT"
] | null | null | null |
Machine_Learning/ZCSNumpy.py
|
ZuoCaiSong/Python
|
137d1c4c79f9594b9bc2c7dc7728246e697f1329
|
[
"MIT"
] | null | null | null |
Machine_Learning/ZCSNumpy.py
|
ZuoCaiSong/Python
|
137d1c4c79f9594b9bc2c7dc7728246e697f1329
|
[
"MIT"
] | null | null | null |
#! usr/bin/env python
# -*- coding:utf-8 -*-
"""
Basics
"""
'''
NumPy's main object is the homogeneous multidimensional array:
a table of elements (usually numbers), all of the same type,
indexed by a tuple of positive integers.
'''
'''
In NumPy, dimensions are called axes, and the number of axes is the rank.
'''
#eg:
'''
The coordinates of a point in 3D space, [1,2,3],
form an array of rank 1: it has one axis of length 3.
'''
# Note: written literally like this it is a plain list, not an ndarray; it only illustrates rank
arr2 = [[1,0,0],
        [0,1,0]]
'''
arr2 has rank 2 (it has two dimensions)
'''
'''
NumPy's array class is called ndarray, usually just called "array".
Note that numpy.array is not the same as the standard Python array.array,
which only handles one-dimensional arrays and offers less functionality.
'''
# create a numpy object
a = arange(15).reshape(3, 5)
print a
# number of axes (rank) of the array
print "the rank of a is", a.ndim
help(ndim)
| 13.357143
| 44
| 0.682709
|
#! usr/bin/env python
# -*- coding:utf-8 -*-
"""
基础篇
"""
from numpy import *
'''
NumPy的主要对象是同种元素的多维数组。
这是一个所有的元素都是一种类型、通过一个正整数元组索引的元素表格(通常是元素是数字)
'''
'''
NumPy中维度(dimensions)叫做轴(axes),轴的个数叫做秩(rank)。
'''
#eg:
'''
在3D空间一个点的坐标 [1,2,3]
是一个秩为1的数组,它只有一个轴,轴的长度为3
'''
#注意直接写,他的类型不是一个ndarray,是一个list,此处只是用于举例说明秩
arr2 = [[1,0,0],
[0,1,0]]
'''
arr2 的秩为2 (它有两个维度)
'''
'''
NumPy的数组类被称作 ndarray 。通常被称作数组。
注意numpy.array和标准python库类array.array并不相同,
后者只处理一维数组和提供少量功能
'''
# 创建一个numpy的对象
a = arange(15).reshape(3, 5)
print a
# 数组轴的个数(秩),行数
print "a的秩为", a.ndim
help(ndim)
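A minimal Python 3 restatement of the example above (the original file uses Python 2 print statements and a star import from numpy):
import numpy as np
a = np.arange(15).reshape(3, 5)   # a 3x5 ndarray
print(a.ndim)    # 2 -> number of axes (rank)
print(a.shape)   # (3, 5)
print(type(a))   # <class 'numpy.ndarray'>, unlike the plain nested list arr2 above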
| 627
| 0
| 0
| 0
| 0
| 0
| 0
| -1
| 22
|
077630693a28af4ea5bf434f4de1bcb506757b3e
| 1,696
|
py
|
Python
|
tests/unit/test_fileclient.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:21.000Z
|
2020-01-02T09:03:21.000Z
|
tests/unit/test_fileclient.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_fileclient.py
|
jubrad/salt
|
7960334fb726cfde45e6409da79a65535c626685
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:03:24.000Z
|
2020-01-02T09:03:24.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <[email protected]>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
# Import Salt libs
| 32.615385
| 82
| 0.628538
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email: `Bo Maryniuk <[email protected]>`
'''
# Import Python libs
from __future__ import absolute_import
import errno
# Import Salt Testing libs
from tests.support.mock import patch, Mock
from tests.support.unit import TestCase
# Import Salt libs
from salt.ext.six.moves import range
from salt.fileclient import Client
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
        If the cache already contains the directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EROFS)):
with self.assertRaises(OSError):
with Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
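Both tests above pin down the same pattern: treat EEXIST from os.makedirs as benign (another process created the cache directory first) and re-raise everything else. A stand-alone sketch of that pattern, independent of Salt:
import errno
import os
def ensure_dir(path):
    """Create path, tolerating the race where it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        if exc.errno != errno.EEXIST:  # e.g. EROFS must still propagate
            raise
    # On Python 3, os.makedirs(path, exist_ok=True) expresses the same intent.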
| 0
| 0
| 0
| 1,311
| 0
| 0
| 0
| 58
| 133
|
9e5a009aa9aeb584ea41f3acb660d59e05af5898
| 11,072
|
py
|
Python
|
prepare_datasets.py
|
Jakob-Bach/Meta-Learning-Feature-Importance
|
089e5c7a5be91307f747e00b38b1567386fbee16
|
[
"MIT"
] | null | null | null |
prepare_datasets.py
|
Jakob-Bach/Meta-Learning-Feature-Importance
|
089e5c7a5be91307f747e00b38b1567386fbee16
|
[
"MIT"
] | null | null | null |
prepare_datasets.py
|
Jakob-Bach/Meta-Learning-Feature-Importance
|
089e5c7a5be91307f747e00b38b1567386fbee16
|
[
"MIT"
] | null | null | null |
"""Prepare datasets
Script that:
- downloads, pre-processes, and saves base datasets from OpenML
- computes meta-features
- computes meta-targets (combining feature-importance measures and base models)
- saves the meta-datasets
Usage: python -m prepare_datasets --help
"""
import argparse
import pathlib
# Download one base dataset with the given "data_id" from OpenML and store it in X, y format in
# "base_data_dir", all columns made numeric. Note that the method might throw an exception if
# OpenML is not able to retrieve the dataset.
# Download OpenML datasets and store them in "base_data_dir". Either retrieve base datasets by
# "data_ids" or search according to fixed dataset characteristics. The latter was done for the
# paper, but the datasets matching the characteristics can change in future.
# Compute all meta-features for one base dataset with "base_dataset_name", located in
# "base_data_dir", and store the resulting meta-data in "meta_data_dir"
# For each base dataset from "base_data_dir", compute all meta-features. Save the resulting
# meta-data into "meta_data_dir".
# Compute one meta-target, i.e., apply one importance measure and one base model to one base
# dataset. Return the actual meta-target (numeric feature importances) and some information
# identifying it.
# For each base dataset from "base_data_dir", compute all meta-targets, i.e., all
# feature-importance measures for all base models. Save the resulting meta-data into
# "meta_data_dir".
# Parse command-line arguments and prepare base + meta datasets.
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Retrieves base datasets from OpenML, creates meta-datasets ' +
'and stores all these data.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', '--base_data_dir', type=pathlib.Path, default='data/base_datasets/',
help='Directory to store base datasets. Will be created if necessary.')
parser.add_argument('-i', '--data_ids', type=int, default=[], nargs='*',
help='Ids of OpenML datasets. If none provided, will search for datasets.')
parser.add_argument('-m', '--meta_data_dir', type=pathlib.Path, default='data/meta_datasets/',
help='Directory to store meta-datasets. Will be created if necessary.')
parser.add_argument('-p', '--n_processes', type=int, default=None,
help='Number of processes for multi-processing (default: all cores).')
args = parser.parse_args()
prepare_base_datasets(base_data_dir=args.base_data_dir, data_ids=args.data_ids)
prepare_meta_features(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
prepare_meta_targets(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
| 58.582011
| 110
| 0.694906
|
"""Prepare datasets
Script that:
- downloads, pre-processes, and saves base datasets from OpenML
- computes meta-features
- computes meta-targets (combining feature-importance measures and base models)
- saves the meta-datasets
Usage: python -m prepare_datasets --help
"""
import argparse
import multiprocessing
import pathlib
from typing import Collection, Dict, Optional, Sequence, Union
import warnings
import numpy as np
import openml
import pandas as pd
import sklearn.preprocessing
import tqdm
import data_utility
import meta_features
import meta_targets
# Download one base dataset with the given "data_id" from OpenML and store it in X, y format in
# "base_data_dir", all columns made numeric. Note that the method might throw an exception if
# OpenML is not able to retrieve the dataset.
def download_base_dataset(data_id: int, base_data_dir: pathlib.Path) -> None:
dataset = openml.datasets.get_dataset(dataset_id=data_id, download_data=True)
X, y, _, _ = dataset.get_data(target=dataset.default_target_attribute)
non_numeric_features = [x.name for x in dataset.features.values()
if (x.name in X.columns) and (x.data_type != 'numeric')]
X[non_numeric_features] = sklearn.preprocessing.OrdinalEncoder(dtype=int).fit_transform(
X=X[non_numeric_features])
assert all(np.issubdtype(X[feature].dtype, np.number) for feature in X.columns)
y = pd.Series(sklearn.preprocessing.LabelEncoder().fit_transform(y=y), name=y.name)
data_utility.save_dataset(X=X, y=y, dataset_name=dataset.name, directory=base_data_dir)
# Download OpenML datasets and store them in "base_data_dir". Either retrieve base datasets by
# "data_ids" or search according to fixed dataset characteristics. The latter was done for the
# paper, but the datasets matching the characteristics can change in future.
def prepare_base_datasets(base_data_dir: pathlib.Path, data_ids: Optional[Sequence[int]] = None) -> None:
print('Base dataset preparation started.')
if not base_data_dir.is_dir():
print('Base-dataset directory does not exist. We create it.')
base_data_dir.mkdir(parents=True)
if any(base_data_dir.iterdir()):
print('Base-dataset directory is not empty. Files might be overwritten, but not deleted.')
dataset_overview = openml.datasets.list_datasets(status='active', output_format='dataframe')
if (data_ids is None) or (len(data_ids) == 0):
dataset_overview = dataset_overview[
(dataset_overview['NumberOfClasses'] == 2) & # binary classification
(dataset_overview['NumberOfInstances'] >= 1000) &
(dataset_overview['NumberOfInstances'] <= 10000) &
(dataset_overview['NumberOfMissingValues'] == 0)
]
# Pick latest version of each dataset:
dataset_overview = dataset_overview.sort_values(by='version').groupby('name').last().reset_index()
# Pick the same amount of datasets from different categories regarding number of features:
feature_number_groups = [(6, 11), (12, 26), (27, 51)] # list of (lower, upper); count includes target
num_datasets_per_group = 20
data_ids = []
with tqdm.tqdm(total=(len(feature_number_groups) * num_datasets_per_group),
desc='Downloading datasets') as progress_bar:
for lower, upper in feature_number_groups:
current_datasets = dataset_overview[(dataset_overview['NumberOfFeatures'] >= lower) &
(dataset_overview['NumberOfFeatures'] <= upper)]
successful_downloads = 0
current_position = 0 # ... in the table of datasets
while successful_downloads < num_datasets_per_group:
data_id = int(current_datasets['did'].iloc[current_position])
try:
download_base_dataset(data_id=data_id, base_data_dir=base_data_dir)
data_ids.append(data_id)
successful_downloads += 1
progress_bar.update()
except Exception: # OpenML does not specify exception type for get_dataset()
pass
finally: # in any case, move on to next dataset
current_position += 1
else:
print('Using given dataset ids.')
for data_id in tqdm.tqdm(data_ids, desc='Downloading datasets'):
try:
download_base_dataset(data_id=data_id, base_data_dir=base_data_dir)
except Exception: # OpenML does not specify exception type for get_dataset()
warnings.warn(f'Download of dataset {data_id} failed.')
dataset_overview[dataset_overview['did'].isin(data_ids)].to_csv(
base_data_dir / '_dataset_overview.csv', index=False)
print('Base datasets prepared and saved.')
# Compute all meta-features for one base dataset with "base_dataset_name", located in
# "base_data_dir", and store the resulting meta-data in "meta_data_dir"
def compute_and_save_meta_features(base_data_dir: pathlib.Path, base_dataset_name: str,
meta_data_dir: pathlib.Path) -> None:
X, y = data_utility.load_dataset(dataset_name=base_dataset_name, directory=base_data_dir)
result = meta_features.compute_meta_features(X=X, y=y)
data_utility.save_dataset(dataset_name=base_dataset_name, directory=meta_data_dir, X=result)
# For each base dataset from "base_data_dir", compute all meta-features. Save the resulting
# meta-data into "meta_data_dir".
def prepare_meta_features(base_data_dir: pathlib.Path, meta_data_dir: pathlib.Path,
n_processes: Optional[int] = None) -> None:
print('Meta-feature preparation started.')
base_datasets = data_utility.list_datasets(directory=base_data_dir)
with tqdm.tqdm(total=(len(base_datasets)), desc='Computing meta-features') as progress_bar:
with multiprocessing.Pool(processes=n_processes) as process_pool:
results = [process_pool.apply_async(compute_and_save_meta_features, kwds={
'base_data_dir': base_data_dir, 'base_dataset_name': base_dataset_name,
'meta_data_dir': meta_data_dir}, callback=lambda x: progress_bar.update())
for base_dataset_name in base_datasets]
[x.wait() for x in results] # don't need to return value here, just wait till finished
print('Meta-features prepared and saved.')
# Compute one meta-target, i.e., apply one importance measure and one base model to one base
# dataset. Return the actual meta-target (numeric feature importances) and some information
# identifying it.
def compute_meta_target(base_data_dir: pathlib.Path, base_dataset_name: str, base_model_name: str,
importance_measure_name: str) -> Dict[str, Union[str, Collection[float]]]:
result = {'base_dataset': base_dataset_name, 'base_model': base_model_name,
'importance_measure': importance_measure_name}
X, y = data_utility.load_dataset(dataset_name=base_dataset_name, directory=base_data_dir)
importance_type = meta_targets.IMPORTANCE_MEASURES[importance_measure_name]
base_model_func = meta_targets.BASE_MODELS[base_model_name]['func']
base_model_args = meta_targets.BASE_MODELS[base_model_name]['args']
result['values'] = importance_type.compute_importance(X=X, y=y, model_func=base_model_func,
model_args=base_model_args)
return result
# For each base dataset from "base_data_dir", compute all meta-targets, i.e., all
# feature-importance measures for all base models. Save the resulting meta-data into
# "meta_data_dir".
def prepare_meta_targets(base_data_dir: pathlib.Path, meta_data_dir: pathlib.Path,
n_processes: Optional[int] = None) -> None:
print('Meta-target preparation started.')
base_datasets = data_utility.list_datasets(directory=base_data_dir)
with tqdm.tqdm(total=(len(base_datasets) * len(meta_targets.IMPORTANCE_MEASURES) *
len(meta_targets.BASE_MODELS)), desc='Computing meta-targets') as progress_bar:
with multiprocessing.Pool(processes=n_processes) as process_pool:
results = [process_pool.apply_async(compute_meta_target, kwds={
'base_data_dir': base_data_dir, 'base_dataset_name': base_dataset_name,
'base_model_name': base_model_name, 'importance_measure_name': importance_measure_name
}, callback=lambda x: progress_bar.update())
for base_dataset_name in base_datasets
for base_model_name in meta_targets.BASE_MODELS.keys()
for importance_measure_name in meta_targets.IMPORTANCE_MEASURES.keys()]
results = [x.get() for x in results]
# Combine individual meta-targets to one data frame per base dataset:
meta_target_data = {base_dataset_name: pd.DataFrame() for base_dataset_name in base_datasets}
for result in results:
column_name = data_utility.name_meta_target(
importance_measure_name=result['importance_measure'],
base_model_name=result['base_model'])
meta_target_data[result['base_dataset']][column_name] = result['values']
for base_dataset_name, data_frame in meta_target_data.items():
data_utility.save_dataset(dataset_name=base_dataset_name, directory=meta_data_dir,
y=data_frame)
print('Meta-targets prepared and saved.')
# Parse command-line arguments and prepare base + meta datasets.
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Retrieves base datasets from OpenML, creates meta-datasets ' +
'and stores all these data.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', '--base_data_dir', type=pathlib.Path, default='data/base_datasets/',
help='Directory to store base datasets. Will be created if necessary.')
parser.add_argument('-i', '--data_ids', type=int, default=[], nargs='*',
help='Ids of OpenML datasets. If none provided, will search for datasets.')
parser.add_argument('-m', '--meta_data_dir', type=pathlib.Path, default='data/meta_datasets/',
help='Directory to store meta-datasets. Will be created if necessary.')
parser.add_argument('-p', '--n_processes', type=int, default=None,
help='Number of processes for multi-processing (default: all cores).')
args = parser.parse_args()
prepare_base_datasets(base_data_dir=args.base_data_dir, data_ids=args.data_ids)
prepare_meta_features(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
prepare_meta_targets(base_data_dir=args.base_data_dir, meta_data_dir=args.meta_data_dir,
n_processes=args.n_processes)
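prepare_meta_features and prepare_meta_targets above rely on the same multiprocessing idiom: submit every job with apply_async and advance a tqdm bar from the callback. Stripped of the project-specific helpers, the pattern reduces to roughly this:
import multiprocessing
import tqdm
def work(x):
    return x * x  # placeholder for the real per-dataset computation
if __name__ == '__main__':
    items = list(range(20))
    with tqdm.tqdm(total=len(items)) as progress_bar:
        with multiprocessing.Pool() as pool:
            handles = [pool.apply_async(work, (x,), callback=lambda _: progress_bar.update())
                       for x in items]
            results = [h.get() for h in handles]
    print(results)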
| 0
| 0
| 0
| 0
| 0
| 7,730
| 0
| 15
| 376
|
3f79f678ffc367e4156a50c0372bba0efcd118d0
| 4,749
|
py
|
Python
|
mywebsocket.py
|
malengelajosue/app_flask
|
ea656abb859d8941e9a4761736f2a6ce4b91f7aa
|
[
"MIT"
] | null | null | null |
mywebsocket.py
|
malengelajosue/app_flask
|
ea656abb859d8941e9a4761736f2a6ce4b91f7aa
|
[
"MIT"
] | null | null | null |
mywebsocket.py
|
malengelajosue/app_flask
|
ea656abb859d8941e9a4761736f2a6ce4b91f7aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import signal
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.wsgi as myapp_wsgi
# Javascript Usage:
# var ws = new WebSocket('ws://localhost:8000/ws');
# ws.onopen = function(event){ console.log('socket open'); }
# ws.onclose = function(event){ console.log('socket closed'); }
# ws.onerror = function(error){ console.log('error:', error); }
# ws.onmessage = function(event){ console.log('message:', event.data); }
# # ... wait for connection to open
# ws.send('hello world')
application = tornado.web.Application([
(r'/ws', MyAppWebSocket),
(r'/(.*)', tornado.web.FallbackHandler, dict(
fallback=tornado.wsgi.WSGIContainer(myapp_wsgi)
)),
], debug=True)
if __name__ == '__main__':
application.listen(8001)
instance=tornado.ioloop.IOLoop.instance()
instance.start()
signal.signal(signal.SIGINT, signal_handler)
signal.pause()
| 29.314815
| 169
| 0.620131
|
#!/usr/bin/env python
import signal
import sys
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.wsgi as myapp_wsgi
from datetime import datetime
import time
import ast
import random
from datetime import date
from models.model import Sites
from models.model import Coordonnates
from models.db_connection import Session,engine,Base
from myclasses.gpsaccess import Gpsaccess as Gps
# Javascript Usage:
# var ws = new WebSocket('ws://localhost:8000/ws');
# ws.onopen = function(event){ console.log('socket open'); }
# ws.onclose = function(event){ console.log('socket closed'); }
# ws.onerror = function(error){ console.log('error:', error); }
# ws.onmessage = function(event){ console.log('message:', event.data); }
# # ... wait for connection to open
# ws.send('hello world')
class MyAppWebSocket(tornado.websocket.WebSocketHandler):
# Simple Websocket echo handler. This could be extended to
# use Redis PubSub to broadcast updates to clients.
def getPosition(self):
self.connected = False
if self.connected == False:
self.gpsDevice = Gps()
self.myCoord = ''
self.connected = True
time.sleep(0.5)
coordonnates = self.gpsDevice.readCoordonates()
self.myCoord = coordonnates
if coordonnates != {}:
self.lat = float(coordonnates['latitude'])
self.long = float(coordonnates['longitude'])
self.alt = coordonnates['altitude']
self.speed = coordonnates['speed']
self.course = coordonnates['course']
self.satellite = coordonnates['satellite']
self.moment = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
coordonnates = {'Lat': self.lat, 'Long': self.long, 'Alt': self.alt, 'Moment': self.moment, 'Sat': self.satellite,'Course': self.course, 'Speed': self.speed}
self.write_message(coordonnates)
if self.persit==True:
self.saveCoordonates()
else:
self.write_message({'status':0})
return coordonnates
def open(self):
self.persit=False
self.mysite = ''
def on_message(self, message):
message=ast.literal_eval(message)
print(message)
coordonates={}
if message.get('action')=='get_position':
coordonates=self.getPosition()
elif message.get('action')=='start_persiste':
print("start persisting....")
self.site_name=str(message.get('site_name'))
self.capture_type=str(message.get('type'))
self.description=str(message.get('description'))
_name=self.site_name
_description=self.description
_type=self.capture_type
mySite=Sites(name=_name,description=_description,type_prelevement=_type)
self.mysite=mySite
self.persit = True
elif message.get('action')=='stop_persiste':
self.persit=False
session=Session()
session.add(self.mysite)
session.commit()
session.close()
elif message.get('action')=='gps_test':
self.getPosition()
print('gps test')
elif message.get('action') == 'multiwii_test':
self.getPosition()
print('Multiwii test')
elif message.get('action') == 'arm_test':
self.getPosition()
print('Arm test')
def run(self):
time.sleep(1)
return
def on_close(self):
try:
print
'connection closed'
except tornado.websocket.WebSocketClosedError:
print('connection fermee de maniere inatendu!')
self.close()
def check_origin(self, origin):
return True
def saveCoordonates(self):
_lat=str(self.lat)
_long=str(self.long)
_alt=str(self.alt)
_moment=datetime.now()
_vitesse=str(self.speed)
_course=str(self.course)
_satellite=str(self.satellite)
coord=Coordonnates(lat=_lat,long=_long,alt=_alt,moment=_moment,speed=_vitesse,course=_course,satellite=_satellite)
self.mysite.coordonnates.append(coord)
application = tornado.web.Application([
(r'/ws', MyAppWebSocket),
(r'/(.*)', tornado.web.FallbackHandler, dict(
fallback=tornado.wsgi.WSGIContainer(myapp_wsgi)
)),
], debug=True)
if __name__ == '__main__':
application.listen(8001)
instance=tornado.ioloop.IOLoop.instance()
instance.start()
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
instance.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.pause()
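The JavaScript snippet in the comments can also be reproduced from Python. The URL and the dict-like message format follow the handler above (which parses incoming messages with ast.literal_eval); the rest is a hedged sketch using Tornado's own websocket client:
from tornado import gen, ioloop, websocket
@gen.coroutine
def probe():
    # Connect to the /ws route registered in the application above.
    conn = yield websocket.websocket_connect('ws://localhost:8001/ws')
    # The handler parses messages with ast.literal_eval, so send a dict-like string.
    conn.write_message("{'action': 'get_position'}")
    reply = yield conn.read_message()
    print(reply)
if __name__ == '__main__':
    ioloop.IOLoop.current().run_sync(probe)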
| 0
| 0
| 0
| 3,391
| 0
| 94
| 0
| 58
| 271
|
ef071130d8e688b2bf7d1480cb9a43266fc55e27
| 4,576
|
py
|
Python
|
compile.py
|
KValexander/compile-java-project
|
62aab5ca9ec53705daa25a21875fc5c97e71db97
|
[
"MIT"
] | null | null | null |
compile.py
|
KValexander/compile-java-project
|
62aab5ca9ec53705daa25a21875fc5c97e71db97
|
[
"MIT"
] | null | null | null |
compile.py
|
KValexander/compile-java-project
|
62aab5ca9ec53705daa25a21875fc5c97e71db97
|
[
"MIT"
] | null | null | null |
import os
# Storage
concat = ""
assets = []
startclass = ""
# Static config
config = {
"javapath": "java",
"classpath": "class",
"sourcetxt": "source.txt",
"compilebat": "compile.bat",
"startclass": "Main.class",
"runbat": "run.bat",
"copyassets": "true"
}
# Getting configurations from a file
if os.path.exists("compile_config.txt"):
f = open("compile_config.txt", "r")
for line in f:
line = line.replace(" ", "").split("=");
config[line[0]] = line[1].rstrip();
f.close()
# Entries
entries = {
"javapath": "Java dir: ",
"classpath": "Class dir: ",
"sourcetxt": "Source txt: ",
"compilebat": "Compile bat: ",
"startclass": "Start class: ",
"runbat": "Run bat: ",
"copyassets": "Copy assets: "
}
# Setting configurations
# GUI
# Create field
# Concatenating paths to java files
# Getting the path to the starting class
# Copy assets
# File creation
# Start programm
# Call GUI
tkinter_interface()
| 26
| 138
| 0.674825
|
import shutil
import os
import re
from tkinter import *
# Storage
concat = ""
assets = []
startclass = ""
# Static config
config = {
"javapath": "java",
"classpath": "class",
"sourcetxt": "source.txt",
"compilebat": "compile.bat",
"startclass": "Main.class",
"runbat": "run.bat",
"copyassets": "true"
}
# Getting configurations from a file
if os.path.exists("compile_config.txt"):
f = open("compile_config.txt", "r")
for line in f:
line = line.replace(" ", "").split("=");
config[line[0]] = line[1].rstrip();
f.close()
# Entries
entries = {
"javapath": "Java dir: ",
"classpath": "Class dir: ",
"sourcetxt": "Source txt: ",
"compilebat": "Compile bat: ",
"startclass": "Start class: ",
"runbat": "Run bat: ",
"copyassets": "Copy assets: "
}
# Setting configurations
def setting_configurations():
for key, val in entries.items():
if(entries[key].get() != ""): config[key] = entries[key].get()
# Overwrite config file
f = open("compile_config.txt", "w+")
for key, val in config.items():
f.write(key + " = " + val + "\n")
f.close()
# Call start processing
start_processing()
# GUI
def tkinter_interface():
global entries
# Window
window = Tk()
window.title("Java compilation automator")
window.resizable(width=False, height=False)
window.geometry("400x300")
# Labels and Entries
i = 0
for key, val in entries.items():
entries[key] = create_field(window, val, 30, 0, i)
entries[key].insert(0, config[key])
i += 2
# Button
button = Button(window, text="Run", background="#888", foreground="#eee", padx="20", pady="0", font="20", command=setting_configurations)
button.grid(column=2,row=0, padx=20)
# Mainloop
window.mainloop()
# Create field
def create_field(win, text, width, c, r):
label = Label(win, text=text)
label.grid(column=c, row=r, pady=10, padx=10)
txt = Entry(win, width=width)
txt.grid(column=c+1, row=r)
return txt
# Concatenating paths to java files
def java_dir_processing(path):
global concat, assets
ld = os.listdir(path)
for file in ld:
if re.search(r"\.java", file):
concat += "./" + path + "/" + file + "\n"
elif os.path.isdir(path + "/" + file): java_dir_processing(path + "/" + file)
else: assets.append(path + "/" + file)
# Getting the path to the starting class
def class_dir_processing(path):
global startclass
if(not os.path.exists(path)): return False;
ld = os.listdir(path)
for file in ld:
if re.search(config["startclass"], file):
startclass = path + "/" + re.split(r"\.", file)[0]
startclass = re.sub(r"/", ".", startclass.replace(config["classpath"]+"/", ""))
return;
elif os.path.isdir(path + "/" + file): class_dir_processing(path + "/" + file)
# Copy assets
def assets_processing():
global assets
for asset in assets:
topath = re.sub(r"\/[\w\-]*\.\w*", "/", asset.replace(config["javapath"], config["classpath"], 1))
if not os.path.exists(topath):
shutil.copytree(topath.replace(config["classpath"], config["javapath"]),topath)
for filename in os.listdir(topath):
fullpath = topath + filename
if os.path.isfile(fullpath): os.unlink(fullpath)
elif os.path.isdir(fullpath): shutil.rmtree(fullpath)
shutil.copy(asset, topath)
# File creation
def create_file(name, content):
f = open(name, "w+")
f.write(content)
f.close()
# Start programm
def start_processing():
global concat, assets
# Call jdp
java_dir_processing(config["javapath"])
# Create file with paths
create_file(config["sourcetxt"], concat)
concat = ""
# Delete class folder if it exists
if os.path.exists(config["classpath"]): shutil.rmtree(config["classpath"])
# Create file with compilation command
create_file(config["compilebat"], "javac -d " + config["classpath"] + " @" + config["sourcetxt"] + "\n")
# Compilation activation
os.system(config["compilebat"])
# Removing intermediate files
os.remove(config["compilebat"])
os.remove(config["sourcetxt"])
# Checking for compilation success
# and getting the path to the starting class
if(class_dir_processing(config["classpath"]) == False):
return print("\nJCA message: Compilation error")
if(not startclass):
return print("\nJCA message: Startup error")
else:
print("JCA message: Compilation is successful")
# Call ap
if(config["copyassets"] == "true"): assets_processing()
assets.clear()
# Creating an interpretation file
create_file(config["runbat"], "java -classpath ./" + config["classpath"] + " " + startclass)
# Running the code
os.system(config["runbat"])
# Removing intermediate files
os.remove(config["runbat"])
# Call GUI
tkinter_interface()
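For reference, a compile_config.txt matching the parser above uses one key = value pair per line; the values below are only an illustration of the format (and, because the parser strips all spaces, paths containing spaces are not supported):
javapath = src
classpath = build
startclass = App.class
copyassets = true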
| 0
| 0
| 0
| 0
| 0
| 3,423
| 0
| -20
| 243
|
6d79c61a4cd03cad002390bea3fef1d83f0bef83
| 601
|
py
|
Python
|
multiprocessing_module/multiprocessing_test2.py
|
kenwaldek/python
|
e6aaf5616a456a4fb91889c0617bd6511f1a223e
|
[
"MIT"
] | 1
|
2019-02-24T09:57:16.000Z
|
2019-02-24T09:57:16.000Z
|
multiprocessing_module/multiprocessing_test2.py
|
kenwaldek/python
|
e6aaf5616a456a4fb91889c0617bd6511f1a223e
|
[
"MIT"
] | null | null | null |
multiprocessing_module/multiprocessing_test2.py
|
kenwaldek/python
|
e6aaf5616a456a4fb91889c0617bd6511f1a223e
|
[
"MIT"
] | 4
|
2017-05-21T15:34:53.000Z
|
2018-09-25T06:56:15.000Z
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# kenwaldek MIT-license
#
# Title: multiprocessing 2 Version: 1.0
# Date: 30-12-16 Language: python3
# Description: multiprocessing, i.e. using several cores together
#
###############################################################
from multiprocessing import Pool
if __name__ == '__main__':
p = Pool(processes=20)
data = p.map(job, range(20))
p.close()
print(data)
| 25.041667
| 63
| 0.44426
|
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
###############################################################
# © kenwaldek MIT-license
#
# Title: multiprocessing 2 Version: 1.0
# Date: 30-12-16 Language: python3
# Description: multiprocessing, i.e. using several cores together
#
###############################################################
from multiprocessing import Pool
def job(num):
return num * 2
if __name__ == '__main__':
p = Pool(processes=20)
data = p.map(job, range(20))
p.close()
print(data)
| 2
| 0
| 0
| 0
| 0
| 11
| 0
| 0
| 23
|
d0f18eb34b3ac7f1fbda4ccdac297a8ef889417b
| 1,088
|
py
|
Python
|
leetcode-python/num002.py
|
shuaizi/leetcode
|
c943410575f380a00335bf5ac8d361af53a92d78
|
[
"Apache-2.0"
] | null | null | null |
leetcode-python/num002.py
|
shuaizi/leetcode
|
c943410575f380a00335bf5ac8d361af53a92d78
|
[
"Apache-2.0"
] | null | null | null |
leetcode-python/num002.py
|
shuaizi/leetcode
|
c943410575f380a00335bf5ac8d361af53a92d78
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'shuai'
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
sol = Solution()
sol.addTwoNumbers(l1, l2)
| 23.148936
| 45
| 0.465074
|
__author__ = 'shuai'
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
ret = ListNode(0)
tmp = 0
tmpNode = ret
while l1 or l2:
if not l1:
sum = l2.val
l2 = l2.next
elif not l2:
sum = l1.val
l1 = l1.next
else:
sum = l1.val + l2.val
l1 = l1.next
l2 = l2.next
tmpN = ListNode((sum + tmp) % 10)
tmp = (sum + tmp) / 10
tmpNode.next = tmpN
tmpNode = tmpNode.next
if tmp != 0:
tmpN = ListNode(tmp)
tmpNode.next = tmpN
return ret.next
l1 = ListNode(2)
l1.next = ListNode(4)
l1.next.next = ListNode(3)
l2 = ListNode(5)
l2.next = ListNode(6)
l2.next.next = ListNode(4)
sol = Solution()
sol.addTwoNumbers(l1, l2)
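For context: the lists store digits in reverse order, so l1 = 2 -> 4 -> 3 is 342 and l2 = 5 -> 6 -> 4 is 465, and the expected sum 807 comes back as 7 -> 0 -> 8. A small hypothetical helper (not part of the original file, which targets Python 2 and relies on integer division in tmp = (sum + tmp) / 10) makes the result visible:
def to_digits(node):
    # Collect the digits of a result list in stored (reverse) order.
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out
print(to_digits(sol.addTwoNumbers(l1, l2)))  # [7, 0, 8] -> 807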
| 0
| 0
| 0
| 844
| 0
| 0
| 0
| 0
| 46
|
9d16548fc6a8b1b86bb49107b9c13023f78ef594
| 3,051
|
py
|
Python
|
publish/tests/models.py
|
nacady/django-publish
|
a9b0b0b0ce0a2cd664d256edc4c819180dc882df
|
[
"BSD-3-Clause"
] | null | null | null |
publish/tests/models.py
|
nacady/django-publish
|
a9b0b0b0ce0a2cd664d256edc4c819180dc882df
|
[
"BSD-3-Clause"
] | null | null | null |
publish/tests/models.py
|
nacady/django-publish
|
a9b0b0b0ce0a2cd664d256edc4c819180dc882df
|
[
"BSD-3-Clause"
] | 1
|
2021-06-28T03:59:45.000Z
|
2021-06-28T03:59:45.000Z
|
from datetime import datetime
# publishable model with a reverse relation to
# page (as a child)
# non-publishable reverse relation to page (as a child)
update_pub_date.pub_date = datetime.now()
| 29.621359
| 74
| 0.715831
|
from django.db import models
from datetime import datetime
from publish.models import Publishable
class Site(models.Model):
title = models.CharField(max_length=100)
domain = models.CharField(max_length=100)
class FlatPage(Publishable):
url = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
enable_comments = models.BooleanField()
template_name = models.CharField(max_length=70, blank=True)
registration_required = models.BooleanField()
sites = models.ManyToManyField(Site)
class Meta:
ordering = ['url']
def get_absolute_url(self):
if self.is_public:
return self.url
return '%s*' % self.url
class Author(Publishable):
name = models.CharField(max_length=100)
profile = models.TextField(blank=True)
class PublishMeta(Publishable.PublishMeta):
publish_reverse_fields = ['authorprofile']
class AuthorProfile(Publishable):
author = models.OneToOneField(Author)
extra_profile = models.TextField(blank=True)
class ChangeLog(models.Model):
changed = models.DateTimeField(db_index=True, auto_now_add=True)
message = models.CharField(max_length=200)
class Tag(models.Model):
title = models.CharField(max_length=100, unique=True)
slug = models.CharField(max_length=100)
# publishable model with a reverse relation to
# page (as a child)
class PageBlock(Publishable):
page = models.ForeignKey('Page')
content = models.TextField(blank=True)
# non-publishable reverse relation to page (as a child)
class Comment(models.Model):
page = models.ForeignKey('Page')
comment = models.TextField()
def update_pub_date(page, field_name, value):
# ignore value entirely and replace with now
setattr(page, field_name, update_pub_date.pub_date)
update_pub_date.pub_date = datetime.now()
class Page(Publishable):
slug = models.CharField(max_length=100, db_index=True)
title = models.CharField(max_length=200)
content = models.TextField(blank=True)
pub_date = models.DateTimeField(default=datetime.now)
parent = models.ForeignKey('self', blank=True, null=True)
authors = models.ManyToManyField(Author, blank=True)
log = models.ManyToManyField(ChangeLog, blank=True)
tags = models.ManyToManyField(Tag, through='PageTagOrder', blank=True)
class Meta:
ordering = ['slug']
class PublishMeta(Publishable.PublishMeta):
publish_exclude_fields = ['log']
publish_reverse_fields = ['pageblock_set']
publish_functions = {'pub_date': update_pub_date}
def get_absolute_url(self):
if not self.parent:
return u'/%s/' % self.slug
return '%s%s/' % (self.parent.get_absolute_url(), self.slug)
class PageTagOrder(Publishable):
# note these are named in non-standard way to
# ensure we are getting correct names
tagged_page = models.ForeignKey(Page)
page_tag = models.ForeignKey(Tag)
tag_order = models.IntegerField()
| 0
| 0
| 0
| 2,395
| 0
| 129
| 0
| 24
| 295
|
c4bcfd12173f327f06cebc80aa483d7df62edc93
| 3,151
|
py
|
Python
|
tests/test_ddg_global_var_dependencies.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 6,132
|
2015-08-06T23:24:47.000Z
|
2022-03-31T21:49:34.000Z
|
tests/test_ddg_global_var_dependencies.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 2,272
|
2015-08-10T08:40:07.000Z
|
2022-03-31T23:46:44.000Z
|
tests/test_ddg_global_var_dependencies.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 1,155
|
2015-08-06T23:37:39.000Z
|
2022-03-31T05:54:11.000Z
|
import os
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
arches = {'x86_64'}
if __name__ == "__main__":
main()
| 43.164384
| 171
| 0.720406
|
import os
import angr
import nose
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
arches = {'x86_64'}
def main():
test_ddg_global_var_dependencies()
def test_ddg_global_var_dependencies():
for arch in arches:
run_ddg_global_var_dependencies(arch)
def run_ddg_global_var_dependencies(arch):
test_file = os.path.join(test_location, arch, 'ddg_global_var_dependencies')
proj = angr.Project(test_file, auto_load_libs=False)
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=2, keep_state=True, state_add_options=angr.sim_options.refs)
ddg = proj.analyses.DDG(cfg)
main_func = cfg.functions.function(name='main')
target_block_addr = main_func.ret_sites[0].addr
target_block = proj.factory.block(addr=target_block_addr)
tgt_stmt_idx, tgt_stmt = get_target_stmt(proj, target_block)
assert tgt_stmt_idx is not None
buf_addr = tgt_stmt.data.addr.con.value
tgt_ddg_node = get_ddg_node(ddg, target_block_addr, tgt_stmt_idx)
assert tgt_ddg_node is not None
# Whether the target depends on the statement assigning 'b' to the global variable
has_correct_dependency = False
for pred in ddg.get_predecessors(tgt_ddg_node):
pred_block = proj.factory.block(addr=pred.block_addr)
stmt = pred_block.vex.statements[pred.stmt_idx]
has_correct_dependency |= check_dependency(stmt, buf_addr, ord('b'))
# If the target depends on the statement assigning 'a' to the global variable, it is underconstrained (this assignment should be overwritten by the 'b' assignment)
nose.tools.assert_false(check_dependency(stmt, buf_addr, ord('a')), msg="Target statement has incorrect dependency (DDG is underconstrained)")
nose.tools.assert_true(has_correct_dependency, msg='Target statement does not have correct dependency (DDG is overconstrained)')
def check_dependency(stmt, addr, const):
# Check if we are storing a constant to a variable with constant address
if stmt.tag == 'Ist_Store' and stmt.addr.tag == 'Iex_Const' and stmt.data.tag == 'Iex_Const':
# Check if we are storing the specified constant to the specified variable address
if stmt.addr.con.value == addr and stmt.data.con.value == const:
return True
return False
def get_ddg_node(ddg, block_addr, stmt_idx):
for node in ddg.graph.nodes:
if node.block_addr == block_addr and node.stmt_idx == stmt_idx:
return node
return None
def get_target_stmt(proj, block):
for i, stmt in enumerate(block.vex.statements):
# We're looking for the instruction that loads a constant memory address into a temporary variable
if stmt.tag == 'Ist_WrTmp' and stmt.data.tag == 'Iex_Load' and stmt.data.addr.tag == 'Iex_Const':
addr = stmt.data.addr.con.value
section = proj.loader.main_object.find_section_containing(addr)
# Confirm the memory address is in the uninitialized data section
if section.name == '.bss':
return i, stmt
return None, None
if __name__ == "__main__":
main()
| 0
| 0
| 0
| 0
| 0
| 2,812
| 0
| -20
| 182
|
83b0710d125addf1a454b4ea6976092a23001346
| 930
|
py
|
Python
|
src/IO.py
|
Rahoo11/Jarvis
|
6fac03e6f7bb963d0632ec781323210b3379603b
|
[
"MIT"
] | null | null | null |
src/IO.py
|
Rahoo11/Jarvis
|
6fac03e6f7bb963d0632ec781323210b3379603b
|
[
"MIT"
] | null | null | null |
src/IO.py
|
Rahoo11/Jarvis
|
6fac03e6f7bb963d0632ec781323210b3379603b
|
[
"MIT"
] | null | null | null |
import logging
# LOGGING SETTINGS
# Save detailed information to log file
handler_file = logging.FileHandler("jarvis.log")
handler_file.setFormatter(logging.Formatter(
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s",
"%Y-%m-%d %H:%M:%S"
))
# Output simple information to stderr
handler_stderr = logging.StreamHandler()
handler_stderr.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
# Log everything of level INFO or higher (everything apart from DEBUG)
logging.basicConfig(
level=logging.INFO,
handlers=[
handler_file,
handler_stderr
]
)
# END LOGGING SETTINGS
def stdin() -> str:
"""
Use this to input commands for Jarvis if the desired way fails
"""
return input("Command: ")
def stdout(response: str):
"""
Use this to output Jarvis's response if the desired way fails
"""
print(response)
| 22.682927
| 76
| 0.691398
|
from datetime import datetime
import logging
# LOGGING SETTINGS
# Save detailed information to log file
handler_file = logging.FileHandler("jarvis.log")
handler_file.setFormatter(logging.Formatter(
"%(asctime)s %(levelname)s %(filename)s:%(lineno)d - %(message)s",
"%Y-%m-%d %H:%M:%S"
))
# Output simple information to stderr
handler_stderr = logging.StreamHandler()
handler_stderr.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
# Log everything of level INFO or higher (everything apart from DEBUG)
logging.basicConfig(
level=logging.INFO,
handlers=[
handler_file,
handler_stderr
]
)
# END LOGGING SETTINGS
def stdin() -> str:
"""
Use this to input commands for Jarvis if the desired way fails
"""
return input("Command: ")
def stdout(response: str):
"""
Use this to output Jarvis's response if the desired way fails
"""
print(response)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 22
|
6bf254e4d47110abc5fa56df01806709a669c1dd
| 8,744
|
py
|
Python
|
sfo.py
|
ayassinsayed/py.dataformat.sfo
|
99b2ad11b162318f7e5251a760bd5b53e1cf826d
|
[
"MIT"
] | 1
|
2021-09-06T04:27:13.000Z
|
2021-09-06T04:27:13.000Z
|
sfo.py
|
Jasily/py.dataformat.sfo
|
99b2ad11b162318f7e5251a760bd5b53e1cf826d
|
[
"MIT"
] | null | null | null |
sfo.py
|
Jasily/py.dataformat.sfo
|
99b2ad11b162318f7e5251a760bd5b53e1cf826d
|
[
"MIT"
] | 4
|
2017-10-28T18:31:00.000Z
|
2021-01-26T00:24:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - cologler <[email protected]>
# ----------
#
# ----------
__all__ = [
'FormatError',
'SfoFile',
'PSVGameSfo',
'PSPGameSfo',
]
_BYTE_ORDER = 'little'
if __name__ == '__main__':
for i in range(0, 1):
test(r'test_res\param_%s.sfo' % str(i).rjust(2, '0'))
| 28.763158
| 98
| 0.589776
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - cologler <[email protected]>
# ----------
#
# ----------
import io
__all__ = [
'FormatError',
'SfoFile',
'PSVGameSfo',
'PSPGameSfo',
]
class FormatError(Exception):
pass
_BYTE_ORDER = 'little'
class Header:
def __init__(self):
# uint32_t magic; Always PSF
# uint32_t version; Usually 1.1
# uint32_t key_table_start; Start offset of key_table
# uint32_t data_table_start; Start offset of data_table
# uint32_t tables_entries; Number of entries in all tables
self._magic = None
self._version = None
self._key_table_start = None
self._data_table_start = None
self._tables_entries = None
@property
def key_table_start(self):
return self._key_table_start
@property
def data_table_start(self):
return self._data_table_start
@property
def tables_entries(self):
return self._tables_entries
def fix_data(self, sfo):
self._tables_entries = len(sfo)
raise NotImplementedError
def from_reader(self, reader):
self._magic = reader.read(4)
self._version = reader.read(4)
self._key_table_start = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._data_table_start = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._tables_entries = int.from_bytes(reader.read(4), _BYTE_ORDER)
if self._magic != b'\x00PSF':
raise FormatError
return self
class IndexTableEntry:
FORMAT_UTF8S = b'\x04\x00'
'''utf8 character string, NULL terminated'''
FORMAT_UTF8 = b'\x04\x02'
'''
Always has a length of 4 bytes in len and max_len
(even in the case some bytes are not used, all them are marked as used)
'''
FORMAT_INT32 = b'\x04\x04'
def __init__(self):
# uint16_t key_offset; param_key offset (relative to start offset of key_table) */
# uint16_t data_fmt; param_data data type */
# uint32_t data_len; param_data used bytes */
# uint32_t data_max_len; param_data total bytes */
# uint32_t data_offset; param_data offset (relative to start offset of data_table) */
self._key_offset = None
self._data_fmt = None
self._data_len = None
self._data_max_len = None
self._data_offset = None
@property
def key_offset(self):
return self._key_offset
@property
def data_fmt(self):
return self._data_fmt
@property
def data_len(self):
return self._data_len
@property
def data_offset(self):
return self._data_offset
@property
def data_max_len(self):
return self._data_max_len
def fix_data(self, data):
raise NotImplementedError
def from_reader(self, reader):
self._key_offset = int.from_bytes(reader.read(2), _BYTE_ORDER)
self._data_fmt = reader.read(2)
self._data_len = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._data_max_len = int.from_bytes(reader.read(4), _BYTE_ORDER)
self._data_offset = int.from_bytes(reader.read(4), _BYTE_ORDER)
if self._data_fmt != self.FORMAT_UTF8 and\
self._data_fmt != self.FORMAT_INT32 and\
self._data_fmt != self.FORMAT_UTF8S:
print(self._data_fmt)
raise FormatError
class Data:
def __init__(self):
self._index_table_entry = IndexTableEntry()
self._key = None
self._value = None
@property
def index_table_entry(self):
return self._index_table_entry
@property
def key(self):
return self._key
@property
def value(self):
return self._value
def fix_data(self):
self._index_table_entry.fix_data(self)
raise NotImplementedError
def __seek(self, reader, offset):
pos = reader.tell()
if pos != offset:
reader.seek(offset)
def key_from_reader(self, reader, header):
offset = header.key_table_start + self._index_table_entry.key_offset
self.__seek(reader, offset)
buffer = b''
while True:
b = reader.read(1)
if b == b'\x00':
break
buffer += b
self._key = buffer.decode('utf8')
def value_from_reader(self, reader, header):
offset = header.data_table_start + self._index_table_entry.data_offset
self.__seek(reader, offset)
buffer = reader.read(self._index_table_entry.data_max_len)
if self._index_table_entry.data_fmt == IndexTableEntry.FORMAT_UTF8:
i = buffer.find(b'\x00')
assert i >= 0
buffer = buffer[:i]
self._value = buffer.decode('utf8')
elif self._index_table_entry.data_fmt == IndexTableEntry.FORMAT_INT32:
assert len(buffer) == 4
self._value = int.from_bytes(buffer, _BYTE_ORDER)
else:
raise NotImplementedError
class SfoFile:
def __init__(self, header, data):
assert isinstance(header, Header)
self._header = header
self._data = {}
for d in data:
self._data[d.key] = d
def __contains__(self, key):
return key in self._data
def __getitem__(self, key):
return self._data[key].value
def __setitem__(self, key, value):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __len__(self):
return len(self._data)
def keys(self):
return self._data.keys()
def values(self):
return self._data.values()
def get_or_None(self, key):
r = self._data.get(key, None)
return None if r == None else r.value
def _fix_data(self):
for v in self.values():
v.fix_data()
self._header.fix_data(self)
raise NotImplementedError
@staticmethod
def from_reader(reader):
header = Header().from_reader(reader)
datas = [Data() for _ in range(0, header.tables_entries)]
for d in datas:
d.index_table_entry.from_reader(reader)
for d in datas:
d.key_from_reader(reader, header)
for d in datas:
d.value_from_reader(reader, header)
sfo = SfoFile(header, datas)
return sfo
@staticmethod
def from_bytes(buffer):
return SfoFile.from_reader(io.BytesIO(buffer))
class _Loader:
def __init__(self, sfo: SfoFile, key):
self._sfo = sfo
self._key = key
self._value = None
self._is_loaded = False
def refresh(self):
self._is_loaded = False
@property
def value(self):
if not self._is_loaded:
self._value = self._sfo.get_or_None(self._key)
self._is_loaded = True
return self._value
class SfoInfoWrapper:
def __init__(self, sfo):
self._sfo = sfo
self._cache = {}
@classmethod
def from_bytes(cls, buffer):
return cls(SfoFile.from_reader(io.BytesIO(buffer)))
def refresh(self):
for value in self._cache.values():
value.refresh()
def _get_value(self, key):
loader = self._cache.get(key)
if loader == None:
loader = _Loader(self._sfo, key)
self._cache[key] = loader
return loader.value
@property
def app_ver(self): return self._get_value('APP_VER')
@property
def category(self): return self._get_value('CATEGORY')
@property
def title(self): return self._get_value('TITLE')
class PSVGameSfo(SfoInfoWrapper):
@property
def content_id(self): return self._get_value('CONTENT_ID')
@property
def title_id(self): return self._get_value('TITLE_ID')
class PSPGameSfo(SfoInfoWrapper):
@property
def disc_id(self): return self._get_value('DISC_ID')
@property
def category(self): return self._get_value('CATEGORY')
def test(path):
with open(path, mode='rb') as reader:
sfo = SfoFile.from_reader(reader)
for k in sfo._data:
v = sfo._data[k]
print('%s: "%s"' % (v._key, v._value))
if __name__ == '__main__':
for i in range(0, 1):
test(r'test_res\param_%s.sfo' % str(i).rjust(2, '0'))
| 0
| 1,655
| 0
| 6,146
| 0
| 191
| 0
| -12
| 384
|
2f90e72ab2ad376594d32a0c909e3065372a297e
| 1,066
|
py
|
Python
|
motelsAPI/settings/dev.py
|
amartinez1/5letrasAPI
|
670b638a8254a0809c9f953350cd1a3264b61bf7
|
[
"MIT"
] | 2
|
2015-05-02T12:30:22.000Z
|
2015-05-08T18:13:43.000Z
|
motelsAPI/settings/dev.py
|
amartinez1/5letrasAPI
|
670b638a8254a0809c9f953350cd1a3264b61bf7
|
[
"MIT"
] | null | null | null |
motelsAPI/settings/dev.py
|
amartinez1/5letrasAPI
|
670b638a8254a0809c9f953350cd1a3264b61bf7
|
[
"MIT"
] | null | null | null |
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'motels_db',
}
}
ALLOWED_HOSTS = []
CORS_ORIGIN_ALLOW_ALL = True
DEBUG = True
SECRET_KEY = 'test'
INSTALLED_APPS += (
'autofixture',
'debug_toolbar',
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
}
| 23.688889
| 80
| 0.661351
|
from .base import *
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'motels_db',
}
}
ALLOWED_HOSTS = []
CORS_ORIGIN_ALLOW_ALL = True
DEBUG = True
SECRET_KEY = 'test'
INSTALLED_APPS += (
'autofixture',
'debug_toolbar',
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10,
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -2
| 23
|
a1adb53a7219e0575c94c4f8e32bc32af0a24a42
| 955
|
py
|
Python
|
snooper.py
|
boztalay/SuperconCubeCmd
|
9cbd685a75dbf9fdf7a04e7a240b07117b1fbe82
|
[
"MIT"
] | null | null | null |
snooper.py
|
boztalay/SuperconCubeCmd
|
9cbd685a75dbf9fdf7a04e7a240b07117b1fbe82
|
[
"MIT"
] | null | null | null |
snooper.py
|
boztalay/SuperconCubeCmd
|
9cbd685a75dbf9fdf7a04e7a240b07117b1fbe82
|
[
"MIT"
] | null | null | null |
import sys
import cubey
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Gimme a serial port!"
sys.exit(1)
serialPort = sys.argv[1]
main(serialPort)
| 23.292683
| 93
| 0.536126
|
import sys
import cubey
def main(serialPort):
cube = cubey.Cube(serialPort)
print "Listening, Ctrl-C to stop..."
try:
while True:
rawMessage = cube.sendCommand("m n u")
printMessage(rawMessage)
except KeyboardInterrupt:
print
cube.breakOut()
print "Done!"
def printMessage(rawMessage):
print
print "Got a message!"
print "=============="
print
contents = map(int, rawMessage.split())
rowFormat = "% 4X |" + (" %02X" * 16)
print " 0 1 2 3 4 5 6 7 8 9 A B C D E F"
print " ------------------------------------------------"
for rowStartIndex in range(0, 512, 16):
print rowFormat % tuple([rowStartIndex] + contents[rowStartIndex:rowStartIndex + 16])
if __name__ == "__main__":
if len(sys.argv) != 2:
print "Gimme a serial port!"
sys.exit(1)
serialPort = sys.argv[1]
main(serialPort)
| 0
| 0
| 0
| 0
| 0
| 721
| 0
| 0
| 46
|
9e5764903cdf85638ab62747d681b0695238c4e3
| 1,411
|
py
|
Python
|
day-9&10/main.py
|
a18antsv/Python-Two-Week-Challenge
|
cfdefe5e2643d1c1ee66d08a16a7ffc175ba1a3a
|
[
"MIT"
] | null | null | null |
day-9&10/main.py
|
a18antsv/Python-Two-Week-Challenge
|
cfdefe5e2643d1c1ee66d08a16a7ffc175ba1a3a
|
[
"MIT"
] | null | null | null |
day-9&10/main.py
|
a18antsv/Python-Two-Week-Challenge
|
cfdefe5e2643d1c1ee66d08a16a7ffc175ba1a3a
|
[
"MIT"
] | null | null | null |
from flask import Flask
base_url = "http://hn.algolia.com/api/v1"
# This URL gets the newest stories.
new = f"{base_url}/search_by_date?tags=story"
# This URL gets the most popular stories
popular = f"{base_url}/search?tags=story"
# This function makes the URL to get the detail of a story by id.
# Here's the documentation: https://hn.algolia.com/api
db = {}
app = Flask("DayNine")
app.run(host="0.0.0.0")
| 24.754386
| 70
| 0.690291
|
import requests
from flask import Flask, render_template, request, redirect
base_url = "http://hn.algolia.com/api/v1"
# This URL gets the newest stories.
new = f"{base_url}/search_by_date?tags=story"
# This URL gets the most popular stories
popular = f"{base_url}/search?tags=story"
# This function makes the URL to get the detail of a story by id.
# Here's the documentation: https://hn.algolia.com/api
def make_detail_url(id):
return f"{base_url}/items/{id}"
db = {}
app = Flask("DayNine")
@app.route("/")
def index():
allowed_orders = ("popular", "new")
order_by = request.args.get("order_by")
if order_by:
order_by = order_by.lower()
if order_by not in allowed_orders:
order_by = allowed_orders[0]
posts_from_db = db.get(order_by)
if posts_from_db:
posts = posts_from_db
else:
posts = requests.get(globals()[order_by]).json()["hits"]
db[order_by] = posts
return render_template("index.html", order_by=order_by, posts=posts)
@app.route("/<id>")
def detail(id):
try:
request = requests.get(make_detail_url(id))
request.raise_for_status()
except requests.exceptions.HTTPError:
return redirect("/")
post = request.json()
return render_template(
"detail.html",
title=post.get("title"),
url=post.get("url"),
points=post.get("points"),
author=post.get("author"),
comments=post.get("children")
)
app.run(host="0.0.0.0")
| 0
| 841
| 0
| 0
| 0
| 37
| 0
| 30
| 90
|
d32135b6fdf1615d5e0b4352267bf443c9e38704
| 2,651
|
py
|
Python
|
feewaiver/urls.py
|
dbca-wa/feewaiver
|
7938a0e9d18924c12b27c0a411b6d7eccb40166b
|
[
"Apache-2.0"
] | null | null | null |
feewaiver/urls.py
|
dbca-wa/feewaiver
|
7938a0e9d18924c12b27c0a411b6d7eccb40166b
|
[
"Apache-2.0"
] | 12
|
2021-02-24T02:33:01.000Z
|
2022-01-25T02:37:39.000Z
|
feewaiver/urls.py
|
mintcoding/feewaiver
|
47d69db91386f760dd36d87cbb565a9bb72a27d5
|
[
"Apache-2.0"
] | 1
|
2021-01-08T02:15:27.000Z
|
2021-01-08T02:15:27.000Z
|
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
from rest_framework import routers
#from feewaiver import views, users_api, api
from feewaiver import views, api
from ledger.urls import urlpatterns as ledger_patterns
# API patterns
router = routers.DefaultRouter()
router.register(r'feewaivers',api.FeeWaiverViewSet)
router.register(r'feewaivers_paginated',api.FeeWaiverPaginatedViewSet)
router.register(r'participants',api.ParticipantsViewSet)
router.register(r'parks',api.ParkViewSet)
router.register(r'campgrounds',api.CampGroundViewSet)
router.register(r'temporary_document', api.TemporaryDocumentCollectionViewSet)
api_patterns = [
#url(r'^api/profile$', users_api.GetProfile.as_view(), name='get-profile'),
#url(r'^api/department_users$', users_api.DepartmentUserList.as_view(), name='department-users-list'),
#url(r'^api/filtered_users$', users_api.UserListFilterView.as_view(), name='filtered_users'),
url(r'^api/',include(router.urls)),
]
# URL Patterns
urlpatterns = [
url(r'^ledger/admin/', admin.site.urls, name='ledger_admin'),
url(r'', include(api_patterns)),
url(r'^$', views.FeeWaiverRoutingView.as_view(), name='ds_home'),
url(r'^contact/', views.FeeWaiverContactView.as_view(), name='ds_contact'),
url(r'^admin_data/', views.FeeWaiverAdminDataView.as_view(), name='admin_data'),
url(r'^further_info/', views.FeeWaiverFurtherInformationView.as_view(), name='ds_further_info'),
url(r'^internal/', views.InternalView.as_view(), name='internal'),
url(r'^external/', views.ExternalView.as_view(), name='external'),
url(r'^account/$', views.ExternalView.as_view(), name='manage-account'),
url(r'^profiles/', views.ExternalView.as_view(), name='manage-profiles'),
url(r'^help/(?P<application_type>[^/]+)/(?P<help_type>[^/]+)/$', views.HelpView.as_view(), name='help'),
url(r'^mgt-commands/$', views.ManagementCommandsView.as_view(), name='mgt-commands'),
url(r'^internal/fee_waiver/(?P<feewaiver_pk>\d+)/$', views.InternalFeeWaiverView.as_view(), name='internal-feewaiver-detail'),
url(r'^history/fee_waiver/(?P<pk>\d+)/$', views.FeeWaiverHistoryCompareView.as_view(), name='feewaiver_history'),
] + ledger_patterns
if settings.DEBUG: # Serve media locally in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.SHOW_DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns = [
url('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 48.2
| 130
| 0.744247
|
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url, include
from django.conf.urls.static import static
from rest_framework import routers
#from feewaiver import views, users_api, api
from feewaiver import views, api
from ledger.urls import urlpatterns as ledger_patterns
from feewaiver.utils import are_migrations_running
# API patterns
router = routers.DefaultRouter()
router.register(r'feewaivers',api.FeeWaiverViewSet)
router.register(r'feewaivers_paginated',api.FeeWaiverPaginatedViewSet)
router.register(r'participants',api.ParticipantsViewSet)
router.register(r'parks',api.ParkViewSet)
router.register(r'campgrounds',api.CampGroundViewSet)
router.register(r'temporary_document', api.TemporaryDocumentCollectionViewSet)
api_patterns = [
#url(r'^api/profile$', users_api.GetProfile.as_view(), name='get-profile'),
#url(r'^api/department_users$', users_api.DepartmentUserList.as_view(), name='department-users-list'),
#url(r'^api/filtered_users$', users_api.UserListFilterView.as_view(), name='filtered_users'),
url(r'^api/',include(router.urls)),
]
# URL Patterns
urlpatterns = [
url(r'^ledger/admin/', admin.site.urls, name='ledger_admin'),
url(r'', include(api_patterns)),
url(r'^$', views.FeeWaiverRoutingView.as_view(), name='ds_home'),
url(r'^contact/', views.FeeWaiverContactView.as_view(), name='ds_contact'),
url(r'^admin_data/', views.FeeWaiverAdminDataView.as_view(), name='admin_data'),
url(r'^further_info/', views.FeeWaiverFurtherInformationView.as_view(), name='ds_further_info'),
url(r'^internal/', views.InternalView.as_view(), name='internal'),
url(r'^external/', views.ExternalView.as_view(), name='external'),
url(r'^account/$', views.ExternalView.as_view(), name='manage-account'),
url(r'^profiles/', views.ExternalView.as_view(), name='manage-profiles'),
url(r'^help/(?P<application_type>[^/]+)/(?P<help_type>[^/]+)/$', views.HelpView.as_view(), name='help'),
url(r'^mgt-commands/$', views.ManagementCommandsView.as_view(), name='mgt-commands'),
url(r'^internal/fee_waiver/(?P<feewaiver_pk>\d+)/$', views.InternalFeeWaiverView.as_view(), name='internal-feewaiver-detail'),
url(r'^history/fee_waiver/(?P<pk>\d+)/$', views.FeeWaiverHistoryCompareView.as_view(), name='feewaiver_history'),
] + ledger_patterns
if settings.DEBUG: # Serve media locally in development.
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.SHOW_DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns = [
url('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 22
|
54f82229c0438a79d9123d69c7d0467d0c47c179
| 1,758
|
py
|
Python
|
ros/src/twist_controller/twist_controller.py
|
Acharya-Kiran/CarND-Capstone
|
bc5f59ea20271e2e46e156fff86cd2482b52c5f2
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/twist_controller.py
|
Acharya-Kiran/CarND-Capstone
|
bc5f59ea20271e2e46e156fff86cd2482b52c5f2
|
[
"MIT"
] | null | null | null |
ros/src/twist_controller/twist_controller.py
|
Acharya-Kiran/CarND-Capstone
|
bc5f59ea20271e2e46e156fff86cd2482b52c5f2
|
[
"MIT"
] | null | null | null |
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
| 27.904762
| 101
| 0.755973
|
from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self,vehicle_mass,fuel_capacity,brake_deadband,decel_limit,
accel_limit,wheel_radius,wheel_base,steer_ratio,max_lat_accel,max_steer_angle):
# TODO: Implement
self.yaw_controller = YawController(wheel_base,steer_ratio,0.1,max_lat_accel,max_steer_angle)
kp=0.3
ki=0.1
kd=0.
mn=0.
mx=0.2
self.throttle_controller=PID(kp,ki,kd,mn,mx)
tau=0.5
ts=.02
self.vel_lpf = LowPassFilter(tau,ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity=fuel_capacity
self.brake_deadband=brake_deadband
self.decel_limit=decel_limit
self.accel_limit=accel_limit
self.wheel_radius=wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel,dbw_enabled,linear_vel,angular_vel):
# TODO: Change the arg, kwarg list to suit your needs
# Return throttle, brake, steer
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
steering = self.yaw_controller.get_steering(linear_vel,angular_vel,current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error,sample_time)
brake = 0
if linear_vel==0 and current_vel<0.1:
throttle=0
brake=400
elif throttle<.1 and vel_error<0:
throttle=0
decel = max(vel_error,self.decel_limit)
brake = abs(decel)*self.vehicle_mass*self.wheel_radius
return throttle,brake,steering
| 0
| 0
| 0
| 1,587
| 0
| 0
| 0
| 20
| 111
|
9ecb3b223a203a77d74b6711d0796c6b4e890962
| 27,213
|
py
|
Python
|
others/Pytorch/utilis_rnn.py
|
jhuebotter/CartpoleSNNdemo
|
d18a85cbc45bff48295c46c9cd8c9fc00192318c
|
[
"MIT"
] | null | null | null |
others/Pytorch/utilis_rnn.py
|
jhuebotter/CartpoleSNNdemo
|
d18a85cbc45bff48295c46c9cd8c9fc00192318c
|
[
"MIT"
] | null | null | null |
others/Pytorch/utilis_rnn.py
|
jhuebotter/CartpoleSNNdemo
|
d18a85cbc45bff48295c46c9cd8c9fc00192318c
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from datetime import datetime
import collections
import os
import copy
from SI_Toolkit.load_and_normalize import load_normalization_info, load_data, normalize_df, denormalize_df
def get_device():
"""
Small function to correctly send data to GPU or CPU depending what is available
"""
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
return device
# Set seeds everywhere required to make results reproducible
# Print parameter count
# https://stackoverflow.com/questions/49201236/check-the-total-number-of-parameters-in-a-pytorch-model
def load_pretrained_rnn(net, pt_path, device):
"""
A function loading parameters (weights and biases) from a previous training to a net RNN instance
:param net: An instance of RNN
:param pt_path: path to .pt file storing weights and biases
:return: No return. Modifies net in place.
"""
pre_trained_model = torch.load(pt_path, map_location=device)
print("Loading Model: ", pt_path)
print('')
pre_trained_model = list(pre_trained_model.items())
new_state_dict = collections.OrderedDict()
count = 0
num_param_key = len(pre_trained_model)
for key, value in net.state_dict().items():
if count >= num_param_key:
break
layer_name, weights = pre_trained_model[count]
new_state_dict[key] = weights
# print("Pre-trained Layer: %s - Loaded into new layer: %s" % (layer_name, key))
count += 1
print('')
net.load_state_dict(new_state_dict)
# Initialize weights and biases - should be only applied if no pretrained net loaded
# FIXME: To tailor this sequence class according to the commands and state_variables of cartpole
import pandas as pd
#
# def load_data(a, filepath=None, columns_list=None, norm_inf=False, rnn_full_name=None, downsample=1):
# if filepath is None:
# filepath = a.val_file_name
#
# if columns_list is None:
# columns_list = list(set(a.inputs_list).union(set(a.outputs_list)))
#
# if type(filepath) == list:
# filepaths = filepath
# else:
# filepaths = [filepath]
#
# all_dfs = [] # saved separately to get normalization
# all_time_axes = []
#
# for one_filepath in filepaths:
# # Load dataframe
# print('loading data from ' + str(one_filepath))
# print('')
# df = pd.read_csv(one_filepath, comment='#')
# df=df.iloc[::downsample].reset_index()
#
# # You can shift dt by one time step to know "now" the timestep till the next row
# if a.cheat_dt:
# if 'dt' in df:
# df['dt'] = df['dt'].shift(-1)
# df = df[:-1]
#
# # FIXME: Make calculation of dt compatible with downsampling
# # Get time axis as separate Dataframe
# if 'time' in df.columns:
# t = df['time']
# elif 'dt' in df.columns:
# dt = df['dt']
# t = dt.cumsum()
# t.rename('time', inplace=True)
# else:
# t = pd.Series([])
# t.rename('time', inplace=True)
#
# time_axis = t
# all_time_axes.append(time_axis)
#
# # Get only relevant subset of columns
# if columns_list == 'all':
# pass
# else:
# df = df[columns_list]
#
# all_dfs.append(df)
#
#
# return all_dfs, all_time_axes
#
# # This way of doing normalization is fine for long data sets and (relatively) short sequence lengths
# # The points from the edges of the datasets count too little
# def calculate_normalization_info(df, PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# if type(df) is list:
# df_total = pd.concat(df)
# else:
# df_total = df
#
# if 'time' in df_total.columns:
# df_total.drop('time',
# axis='columns', inplace=True)
#
# df_mean = df_total.mean(axis=0)
# df_std = df_total.std(axis=0)
# df_max = df_total.max(axis=0)
# df_min = df_total.min(axis=0)
# frame = {'mean': df_mean, 'std': df_std, 'max': df_max, 'min': df_min}
# df_norm_info = pd.DataFrame(frame).transpose()
#
# df_norm_info.to_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv')
#
# # Plot histograms to make the first check about gaussian assumption
# # for feature in df_total.columns:
# # plt.hist(df_total[feature].to_numpy(), 50, density=True, facecolor='g', alpha=0.75)
# # plt.title(feature)
# # plt.show()
#
# return df_norm_info
#
#
# def load_normalization_info(PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# return pd.read_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv', index_col=0)
#
#
# def normalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def normalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return (col - col_mean) / col_std
# elif normalization_type == 'minmax_pos':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return (col - col_min) / (col_max - col_min)
# elif normalization_type == 'minmax_sym':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return -1.0 + 2.0 * (col - col_min) / (col_max - col_min)
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(normalize_feature, axis=0)
# else:
# dfs = dfs.apply(normalize_feature, axis=0)
#
# return dfs
#
#
# def denormalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def denormalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return col * col_std + col_mean
# elif normalization_type == 'minmax_pos':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return col * (col_max - col_min) + col_min
# elif normalization_type == 'minmax_sym':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return ((col + 1.0) / 2.0) * (col_max - col_min) + col_min
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(denormalize_feature, axis=0)
# else:
# dfs = dfs.apply(denormalize_feature, axis=0)
#
# return dfs
def plot_results(net,
args,
dataset=None,
normalization_info = None,
time_axes=None,
filepath=None,
inputs_list=None,
outputs_list=None,
closed_loop_list=None,
seq_len=None,
warm_up_len=None,
closed_loop_enabled=False,
comment='',
rnn_full_name=None,
save=False,
close_loop_idx=512):
"""
This function accepts RNN instance, arguments and CartPole instance.
It runs one random experiment with CartPole,
inputs the data into RNN and check how well RNN predicts CartPole state one time step ahead of time
"""
rnn_full_name = net.rnn_full_name
if filepath is None:
filepath = args.val_file_name
if type(filepath) == list:
filepath = filepath[0]
if warm_up_len is None:
warm_up_len = args.warm_up_len
if seq_len is None:
seq_len = args.seq_len
if inputs_list is None:
inputs_list = args.inputs_list
if inputs_list is None:
raise ValueError('RNN inputs not provided!')
if outputs_list is None:
outputs_list = args.outputs_list
if outputs_list is None:
raise ValueError('RNN outputs not provided!')
if closed_loop_enabled and (closed_loop_list is None):
closed_loop_list = args.close_loop_for
if closed_loop_list is None:
raise ValueError('RNN closed-loop-inputs not provided!')
net.reset()
net.eval()
device = get_device()
if normalization_info is None:
normalization_info = load_normalization_info(args.PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name)
if dataset is None or time_axes is None:
test_dfs, time_axes = load_data(args, filepath)
test_dfs_norm = normalize_df(test_dfs, normalization_info)
test_set = Dataset(test_dfs_norm, args, time_axes=time_axes, seq_len=seq_len)
del test_dfs
else:
test_set = copy.deepcopy(dataset)
test_set.reset_seq_len(seq_len=seq_len)
# Format the experiment data
features, targets, time_axis = test_set.get_experiment(1) # Put number in brackets to get the same idx at every run
features_pd = pd.DataFrame(data=features, columns=inputs_list)
targets_pd = pd.DataFrame(data=targets, columns=outputs_list)
rnn_outputs = pd.DataFrame(columns=outputs_list)
warm_up_idx = 0
rnn_input_0 = copy.deepcopy(features_pd.iloc[0])
# Does not bring anything. Why? 0-state shouldn't have zero internal state due to biases...
while warm_up_idx < warm_up_len:
rnn_input = rnn_input_0
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
net(rnn_input=rnn_input)
warm_up_idx += 1
net.outputs = []
net.sample_counter = 0
idx_cl = 0
close_the_loop = False
for index, row in features_pd.iterrows():
rnn_input = pd.DataFrame(copy.deepcopy(row)).transpose().reset_index(drop=True)
if idx_cl == close_loop_idx:
close_the_loop = True
if closed_loop_enabled and close_the_loop and (normalized_rnn_output is not None):
rnn_input[closed_loop_list] = normalized_rnn_output[closed_loop_list]
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
normalized_rnn_output = net(rnn_input=rnn_input)
normalized_rnn_output = np.squeeze(normalized_rnn_output.detach().cpu().numpy()).tolist()
normalized_rnn_output = copy.deepcopy(pd.DataFrame(data=[normalized_rnn_output], columns=outputs_list))
rnn_outputs = rnn_outputs.append(copy.deepcopy(normalized_rnn_output), ignore_index=True)
idx_cl += 1
targets_pd_denorm = denormalize_df(targets_pd, normalization_info)
rnn_outputs_denorm = denormalize_df(rnn_outputs, normalization_info)
fig, axs = plot_results_specific(targets_pd_denorm, rnn_outputs_denorm, time_axis, comment, closed_loop_enabled, close_loop_idx)
plt.show()
if save:
# Make folders if not yet exist
try:
os.makedirs('save_plots')
except FileExistsError:
pass
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("-%d%b%Y_%H%M%S")
if rnn_full_name is not None:
fig.savefig('./save_plots/' + rnn_full_name + timestampStr + '.png')
else:
fig.savefig('./save_plots/' + timestampStr + '.png')
| 38.545326
| 132
| 0.614449
|
import torch
import torch.nn as nn
from torch.utils import data
from datetime import datetime
import collections
import os
import random as rnd
import copy
from Modeling.Pytorch.utilis_rnn_specific import *
from SI_Toolkit.load_and_normalize import load_normalization_info, load_data, normalize_df, denormalize_df
def get_device():
"""
Small function to correctly send data to GPU or CPU depending what is available
"""
if torch.cuda.is_available():
device = torch.device('cuda:0')
else:
device = torch.device('cpu')
return device
# Set seeds everywhere required to make results reproducible
def set_seed(args):
seed = args.seed
rnd.seed(seed)
np.random.seed(seed)
# Print parameter count
# https://stackoverflow.com/questions/49201236/check-the-total-number-of-parameters-in-a-pytorch-model
def print_parameter_count(net):
pytorch_total_params = sum(p.numel() for p in net.parameters())
pytorch_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print('::: # network all parameters: ' + str(pytorch_total_params))
print('::: # network trainable parameters: ' + str(pytorch_trainable_params))
print('')
def load_pretrained_rnn(net, pt_path, device):
"""
A function loading parameters (weights and biases) from a previous training to a net RNN instance
:param net: An instance of RNN
:param pt_path: path to .pt file storing weights and biases
:return: No return. Modifies net in place.
"""
pre_trained_model = torch.load(pt_path, map_location=device)
print("Loading Model: ", pt_path)
print('')
pre_trained_model = list(pre_trained_model.items())
new_state_dict = collections.OrderedDict()
count = 0
num_param_key = len(pre_trained_model)
for key, value in net.state_dict().items():
if count >= num_param_key:
break
layer_name, weights = pre_trained_model[count]
new_state_dict[key] = weights
# print("Pre-trained Layer: %s - Loaded into new layer: %s" % (layer_name, key))
count += 1
print('')
net.load_state_dict(new_state_dict)
# Initialize weights and biases - should be only applied if no pretrained net loaded
def initialize_weights_and_biases(net):
print('Initialize weights and biases')
for name, param in net.named_parameters():
print('Initialize {}'.format(name))
if 'gru' in name:
if 'weight' in name:
nn.init.orthogonal_(param)
if 'linear' in name:
if 'weight' in name:
nn.init.orthogonal_(param)
# nn.init.xavier_uniform_(param)
if 'bias' in name: # all biases
nn.init.constant_(param, 0)
print('')
def create_rnn_instance(rnn_name=None, inputs_list=None, outputs_list=None, load_rnn=None, path_save=None, device=None):
if load_rnn is not None and load_rnn != 'last':
# 1) Find csv with this name if exists load name, inputs and outputs list
# if it does not exist raise error
# 2) Create corresponding net
# 3) Load parameters from corresponding pt file
filename = load_rnn
print('Loading a pretrained RNN with the full name: {}'.format(filename))
print('')
txt_filename = filename + '.txt'
pt_filename = filename + '.pt'
txt_path = path_save + txt_filename
pt_path = path_save + pt_filename
if not os.path.isfile(txt_path):
raise ValueError(
'The corresponding .txt file is missing (information about inputs and outputs) at the location {}'.format(
txt_path))
if not os.path.isfile(pt_path):
raise ValueError(
'The corresponding .pt file is missing (information about weights and biases) at the location {}'.format(
pt_path))
f = open(txt_path, 'r')
lines = f.readlines()
rnn_name = lines[1].rstrip("\n")
inputs_list = lines[7].rstrip("\n").split(sep=', ')
outputs_list = lines[10].rstrip("\n").split(sep=', ')
f.close()
print('Inputs to the loaded RNN: {}'.format(', '.join(map(str, inputs_list))))
print('Outputs from the loaded RNN: {}'.format(', '.join(map(str, outputs_list))))
print('')
# Construct the requested RNN
net = Sequence(rnn_name=rnn_name, inputs_list=inputs_list, outputs_list=outputs_list)
net.rnn_full_name = load_rnn
# Load the parameters
load_pretrained_rnn(net, pt_path, device)
elif load_rnn == 'last':
files_found = False
while (not files_found):
try:
import glob
list_of_files = glob.glob(path_save + '/*.txt')
txt_path = max(list_of_files, key=os.path.getctime)
except FileNotFoundError:
raise ValueError('No information about any pretrained network found at {}'.format(path_save))
f = open(txt_path, 'r')
lines = f.readlines()
rnn_name = lines[1].rstrip("\n")
pre_rnn_full_name = lines[4].rstrip("\n")
inputs_list = lines[7].rstrip("\n").split(sep=', ')
outputs_list = lines[10].rstrip("\n").split(sep=', ')
f.close()
pt_path = path_save + pre_rnn_full_name + '.pt'
if not os.path.isfile(pt_path):
print('The .pt file is missing (information about weights and biases) at the location {}'.format(
pt_path))
print('I delete the corresponding .txt file and try to search again')
print('')
os.remove(txt_path)
else:
files_found = True
print('Full name of the loaded RNN is {}'.format(pre_rnn_full_name))
print('Inputs to the loaded RNN: {}'.format(', '.join(map(str, inputs_list))))
print('Outputs from the loaded RNN: {}'.format(', '.join(map(str, outputs_list))))
print('')
# Construct the requested RNN
net = Sequence(rnn_name=rnn_name, inputs_list=inputs_list, outputs_list=outputs_list)
net.rnn_full_name = pre_rnn_full_name
# Load the parameters
load_pretrained_rnn(net, pt_path, device)
else: # a.load_rnn is None
print('No pretrained network specified. I will train a network from scratch.')
print('')
# Construct the requested RNN
net = Sequence(rnn_name=rnn_name, inputs_list=inputs_list, outputs_list=outputs_list)
initialize_weights_and_biases(net)
return net, rnn_name, inputs_list, outputs_list
def create_log_file(rnn_name, inputs_list, outputs_list, path_save):
rnn_full_name = rnn_name[:4] + str(len(inputs_list)) + 'IN-' + rnn_name[4:] + '-' + str(len(outputs_list)) + 'OUT'
net_index = 0
while True:
txt_path = path_save + rnn_full_name + '-' + str(net_index) + '.txt'
if os.path.isfile(txt_path):
pass
else:
rnn_full_name += '-' + str(net_index)
f = open(txt_path, 'w')
f.write('RNN NAME: \n' + rnn_name + '\n\n')
f.write('RNN FULL NAME: \n' + rnn_full_name + '\n\n')
f.write('INPUTS: \n' + ', '.join(map(str, inputs_list)) + '\n\n')
f.write('OUTPUTS: \n' + ', '.join(map(str, outputs_list)) + '\n\n')
f.close()
break
net_index += 1
print('Full name given to the currently trained network is {}.'.format(rnn_full_name))
print('')
return rnn_full_name
# FIXME: To tailor this sequence class according to the commands and state_variables of cartpole
class Sequence(nn.Module):
""""
Our RNN class.
"""
def __init__(self, rnn_name, inputs_list, outputs_list):
super(Sequence, self).__init__()
"""Initialization of an RNN instance
We assume that inputs may be both commands and state variables, whereas outputs are always state variables
"""
# Check if GPU is available. If yes device='cuda:0' if not device='cpu'
self.device = get_device()
self.rnn_name = rnn_name
self.rnn_full_name = None
# Get the information about network architecture from the network name
# Split the names into "LSTM/GRU", "128H1", "64H2" etc.
names = rnn_name.split('-')
layers = ['H1', 'H2', 'H3', 'H4', 'H5']
self.h_size = [] # Hidden layers sizes
for name in names:
for index, layer in enumerate(layers):
if layer in name:
# assign the variable with name obtained from list layers.
self.h_size.append(int(name[:-2]))
if not self.h_size:
raise ValueError('You have to provide the size of at least one hidden layer in rnn name')
if 'GRU' in names:
self.rnn_type = 'GRU'
elif 'LSTM' in names:
self.rnn_type = 'LSTM'
else:
self.rnn_type = 'RNN-Basic'
# Construct network
if self.rnn_type == 'GRU':
self.rnn_cell = [nn.GRUCell(len(inputs_list), self.h_size[0]).to(get_device())]
for i in range(len(self.h_size) - 1):
self.rnn_cell.append(nn.GRUCell(self.h_size[i], self.h_size[i + 1]).to(get_device()))
elif self.rnn_type == 'LSTM':
self.rnn_cell = [nn.LSTMCell(len(inputs_list), self.h_size[0]).to(get_device())]
for i in range(len(self.h_size) - 1):
self.rnn_cell.append(nn.LSTMCell(self.h_size[i], self.h_size[i + 1]).to(get_device()))
else:
self.rnn_cell = [nn.RNNCell(len(inputs_list), self.h_size[0]).to(get_device())]
for i in range(len(self.h_size) - 1):
self.rnn_cell.append(nn.RNNCell(self.h_size[i], self.h_size[i + 1]).to(get_device()))
self.linear = nn.Linear(self.h_size[-1], len(outputs_list)) # RNN out
self.layers = nn.ModuleList([])
for cell in self.rnn_cell:
self.layers.append(cell)
self.layers.append(self.linear)
# Count data samples (=time steps)
self.sample_counter = 0
# Declaration of the variables keeping internal state of GRU hidden layers
self.h = [None] * len(self.h_size)
self.c = [None] * len(self.h_size) # Internal state cell - only matters for LSTM
# Variable keeping the most recent output of RNN
self.output = None
# List storing the history of RNN outputs
self.outputs = []
# Send the whole RNN to GPU if available, otherwise send it to CPU
self.to(self.device)
print('Constructed a neural network of type {}, with {} hidden layers with sizes {} respectively.'
.format(self.rnn_type, len(self.h_size), ', '.join(map(str, self.h_size))))
print('The inputs are (in this order): {}'.format(', '.join(map(str, inputs_list))))
print('The outputs are (in this order): {}'.format(', '.join(map(str, outputs_list))))
def reset(self):
"""
Reset the network (not the weights!)
"""
self.sample_counter = 0
self.h = [None] * len(self.h_size)
self.c = [None] * len(self.h_size)
self.output = None
self.outputs = []
def forward(self, rnn_input):
"""
Predicts future CartPole states IN "OPEN LOOP"
(at every time step prediction for the next time step is done based on the true CartPole state)
"""
# Initialize hidden layers - this change at every call as the batch size may vary
for i in range(len(self.h_size)):
self.h[i] = torch.zeros(rnn_input.size(1), self.h_size[i], dtype=torch.float).to(self.device)
self.c[i] = torch.zeros(rnn_input.size(1), self.h_size[i], dtype=torch.float).to(self.device)
# The for loop takes the consecutive time steps from input plugs them into RNN and save the outputs into a list
# THE NETWORK GETS ALWAYS THE GROUND TRUTH, THE REAL STATE OF THE CARTPOLE, AS ITS INPUT
# IT PREDICTS THE STATE OF THE CARTPOLE ONE TIME STEP AHEAD BASED ON TRUE STATE NOW
for iteration, input_t in enumerate(rnn_input.chunk(rnn_input.size(0), dim=0)):
# Propagate input through RNN layers
if self.rnn_type == 'LSTM':
self.h[0], self.c[0] = self.layers[0](input_t.squeeze(0), (self.h[0], self.c[0]))
for i in range(len(self.h_size) - 1):
self.h[i + 1], self.c[i + 1] = self.layers[i + 1](self.h[i], (self.h[i + 1], self.c[i + 1]))
else:
self.h[0] = self.layers[0](input_t.squeeze(0), self.h[0])
for i in range(len(self.h_size) - 1):
self.h[i + 1] = self.layers[i + 1](self.h[i], self.h[i + 1])
self.output = self.layers[-1](self.h[-1])
self.outputs += [self.output]
self.sample_counter = self.sample_counter + 1
# In the train mode we want to continue appending the outputs by calling forward function
# The outputs will be saved internally in the network instance as a list
# Otherwise we want to transform outputs list to a tensor and return it
return self.output
def return_outputs_history(self):
return torch.stack(self.outputs, 1)
import pandas as pd
#
# def load_data(a, filepath=None, columns_list=None, norm_inf=False, rnn_full_name=None, downsample=1):
# if filepath is None:
# filepath = a.val_file_name
#
# if columns_list is None:
# columns_list = list(set(a.inputs_list).union(set(a.outputs_list)))
#
# if type(filepath) == list:
# filepaths = filepath
# else:
# filepaths = [filepath]
#
# all_dfs = [] # saved separately to get normalization
# all_time_axes = []
#
# for one_filepath in filepaths:
# # Load dataframe
# print('loading data from ' + str(one_filepath))
# print('')
# df = pd.read_csv(one_filepath, comment='#')
# df=df.iloc[::downsample].reset_index()
#
# # You can shift dt by one time step to know "now" the timestep till the next row
# if a.cheat_dt:
# if 'dt' in df:
# df['dt'] = df['dt'].shift(-1)
# df = df[:-1]
#
# # FIXME: Make calculation of dt compatible with downsampling
# # Get time axis as separate Dataframe
# if 'time' in df.columns:
# t = df['time']
# elif 'dt' in df.columns:
# dt = df['dt']
# t = dt.cumsum()
# t.rename('time', inplace=True)
# else:
# t = pd.Series([])
# t.rename('time', inplace=True)
#
# time_axis = t
# all_time_axes.append(time_axis)
#
# # Get only relevant subset of columns
# if columns_list == 'all':
# pass
# else:
# df = df[columns_list]
#
# all_dfs.append(df)
#
#
# return all_dfs, all_time_axes
#
# # This way of doing normalization is fine for long data sets and (relatively) short sequence lengths
# # The points from the edges of the datasets count too little
# def calculate_normalization_info(df, PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# if type(df) is list:
# df_total = pd.concat(df)
# else:
# df_total = df
#
# if 'time' in df_total.columns:
# df_total.drop('time',
# axis='columns', inplace=True)
#
# df_mean = df_total.mean(axis=0)
# df_std = df_total.std(axis=0)
# df_max = df_total.max(axis=0)
# df_min = df_total.min(axis=0)
# frame = {'mean': df_mean, 'std': df_std, 'max': df_max, 'min': df_min}
# df_norm_info = pd.DataFrame(frame).transpose()
#
# df_norm_info.to_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv')
#
# # Plot histograms to make the first check about gaussian assumption
# # for feature in df_total.columns:
# # plt.hist(df_total[feature].to_numpy(), 50, density=True, facecolor='g', alpha=0.75)
# # plt.title(feature)
# # plt.show()
#
# return df_norm_info
#
#
# def load_normalization_info(PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name):
# return pd.read_csv(PATH_TO_EXPERIMENT_RECORDINGS + rnn_full_name + '-norm' + '.csv', index_col=0)
#
#
# def normalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def normalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return (col - col_mean) / col_std
# elif normalization_type == 'minmax_pos':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return (col - col_min) / (col_max - col_min)
# elif normalization_type == 'minmax_sym':
# def normalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return -1.0 + 2.0 * (col - col_min) / (col_max - col_min)
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(normalize_feature, axis=0)
# else:
# dfs = dfs.apply(normalize_feature, axis=0)
#
# return dfs
#
#
# def denormalize_df(dfs, normalization_info, normalization_type='minmax_sym'):
# if normalization_type == 'gaussian':
# def denormalize_feature(col):
# col_mean = normalization_info.loc['mean', col.name]
# col_std = normalization_info.loc['std', col.name]
# return col * col_std + col_mean
# elif normalization_type == 'minmax_pos':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return col * (col_max - col_min) + col_min
# elif normalization_type == 'minmax_sym':
# def denormalize_feature(col):
# col_min = normalization_info.loc['min', col.name]
# col_max = normalization_info.loc['max', col.name]
# return ((col + 1.0) / 2.0) * (col_max - col_min) + col_min
#
# if type(dfs) is list:
# for i in range(len(dfs)):
# dfs[i] = dfs[i].apply(denormalize_feature, axis=0)
# else:
# dfs = dfs.apply(denormalize_feature, axis=0)
#
# return dfs
class Dataset(data.Dataset):
def __init__(self, dfs, args, time_axes=None, seq_len=None):
'Initialization - divide data in features and labels'
self.data = []
self.labels = []
for df in dfs:
# Get Raw Data
features = copy.deepcopy(df)
targets = copy.deepcopy(df)
features.drop(features.tail(1).index, inplace=True) # Drop last row
targets.drop(targets.head(1).index, inplace=True)
features.reset_index(inplace=True) # Reset index
targets.reset_index(inplace=True)
features = features[args.inputs_list]
targets = targets[args.outputs_list]
self.data.append(features)
self.labels.append(targets)
self.args = args
self.seq_len = None
self.df_lengths = []
self.df_lengths_cs = []
self.number_of_samples = 0
self.time_axes = time_axes
self.reset_seq_len(seq_len=seq_len)
def reset_seq_len(self, seq_len=None):
"""
This method should be used if the user wants to change the seq_len without creating new Dataset
Please remember that one can reset it again to come back to old configuration
:param seq_len: Gives new user defined seq_len. Call empty to come back to default.
"""
if seq_len is None:
self.seq_len = self.args.seq_len # Sequence length
else:
self.seq_len = seq_len
self.df_lengths = []
self.df_lengths_cs = []
if type(self.data) == list:
for data_set in self.data:
self.df_lengths.append(data_set.shape[0] - self.seq_len)
if not self.df_lengths_cs:
self.df_lengths_cs.append(self.df_lengths[0])
else:
self.df_lengths_cs.append(self.df_lengths_cs[-1] + self.df_lengths[-1])
self.number_of_samples = self.df_lengths_cs[-1]
else:
self.number_of_samples = self.data.shape[0] - self.seq_len
def __len__(self):
'Total number of samples'
return self.number_of_samples
def __getitem__(self, idx, get_time_axis=False):
"""
Requires the self.data to be a list of pandas dataframes
"""
# Find index of the dataset in self.data and index of the starting point in this dataset
idx_data_set = next(i for i, v in enumerate(self.df_lengths_cs) if v > idx)
if idx_data_set == 0:
pass
else:
idx -= self.df_lengths_cs[idx_data_set - 1]
# Get data
features = self.data[idx_data_set].to_numpy()[idx:idx + self.seq_len, :]
# Every point in features has its target value corresponding to the next time step:
targets = self.labels[idx_data_set].to_numpy()[idx:idx + self.seq_len]
# After feeding the whole sequence we just compare the final output of the RNN with the state following afterwards
# targets = self.labels[idx_data_set].to_numpy()[idx + self.seq_len-1]
# If get_time_axis try to obtain a vector of time data for the chosen sample
if get_time_axis:
try:
time_axis = self.time_axes[idx_data_set].to_numpy()[idx:idx + self.seq_len + 1]
except IndexError:
time_axis = []
# Return results
if get_time_axis:
return features, targets, time_axis
else:
return features, targets
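    # Shape note: for a single idx, features has shape (seq_len, len(args.inputs_list))
    # and targets has shape (seq_len, len(args.outputs_list)); targets[t] holds the state
    # one time step after features[t], so each pair trains one-step-ahead prediction.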
def get_experiment(self, idx=None):
if self.time_axes is None:
raise Exception('No time information available!')
if idx is None:
idx = np.random.randint(0, self.number_of_samples)
return self.__getitem__(idx, get_time_axis=True)
def plot_results(net,
args,
dataset=None,
normalization_info = None,
time_axes=None,
filepath=None,
inputs_list=None,
outputs_list=None,
closed_loop_list=None,
seq_len=None,
warm_up_len=None,
closed_loop_enabled=False,
comment='',
rnn_full_name=None,
save=False,
close_loop_idx=512):
"""
This function accepts RNN instance, arguments and CartPole instance.
It runs one random experiment with CartPole,
inputs the data into RNN and check how well RNN predicts CartPole state one time step ahead of time
"""
rnn_full_name = net.rnn_full_name
if filepath is None:
filepath = args.val_file_name
if type(filepath) == list:
filepath = filepath[0]
if warm_up_len is None:
warm_up_len = args.warm_up_len
if seq_len is None:
seq_len = args.seq_len
if inputs_list is None:
inputs_list = args.inputs_list
if inputs_list is None:
raise ValueError('RNN inputs not provided!')
if outputs_list is None:
outputs_list = args.outputs_list
if outputs_list is None:
raise ValueError('RNN outputs not provided!')
if closed_loop_enabled and (closed_loop_list is None):
closed_loop_list = args.close_loop_for
if closed_loop_list is None:
raise ValueError('RNN closed-loop-inputs not provided!')
net.reset()
net.eval()
device = get_device()
if normalization_info is None:
normalization_info = load_normalization_info(args.PATH_TO_EXPERIMENT_RECORDINGS, rnn_full_name)
if dataset is None or time_axes is None:
test_dfs, time_axes = load_data(args, filepath)
test_dfs_norm = normalize_df(test_dfs, normalization_info)
test_set = Dataset(test_dfs_norm, args, time_axes=time_axes, seq_len=seq_len)
del test_dfs
else:
test_set = copy.deepcopy(dataset)
test_set.reset_seq_len(seq_len=seq_len)
# Format the experiment data
features, targets, time_axis = test_set.get_experiment(1) # Put number in brackets to get the same idx at every run
features_pd = pd.DataFrame(data=features, columns=inputs_list)
targets_pd = pd.DataFrame(data=targets, columns=outputs_list)
rnn_outputs = pd.DataFrame(columns=outputs_list)
warm_up_idx = 0
rnn_input_0 = copy.deepcopy(features_pd.iloc[0])
    # The warm-up does not seem to bring any improvement. Why? Even with a zero input the
    # internal state shouldn't stay at zero, because of the biases...
while warm_up_idx < warm_up_len:
rnn_input = rnn_input_0
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
net(rnn_input=rnn_input)
warm_up_idx += 1
net.outputs = []
net.sample_counter = 0
idx_cl = 0
    close_the_loop = False
    normalized_rnn_output = None  # set after the first forward pass; checked before closing the loop
for index, row in features_pd.iterrows():
rnn_input = pd.DataFrame(copy.deepcopy(row)).transpose().reset_index(drop=True)
if idx_cl == close_loop_idx:
close_the_loop = True
if closed_loop_enabled and close_the_loop and (normalized_rnn_output is not None):
rnn_input[closed_loop_list] = normalized_rnn_output[closed_loop_list]
rnn_input = np.squeeze(rnn_input.to_numpy())
rnn_input = torch.from_numpy(rnn_input).float().unsqueeze(0).unsqueeze(0).to(device)
normalized_rnn_output = net(rnn_input=rnn_input)
normalized_rnn_output = np.squeeze(normalized_rnn_output.detach().cpu().numpy()).tolist()
normalized_rnn_output = copy.deepcopy(pd.DataFrame(data=[normalized_rnn_output], columns=outputs_list))
rnn_outputs = rnn_outputs.append(copy.deepcopy(normalized_rnn_output), ignore_index=True)
idx_cl += 1
targets_pd_denorm = denormalize_df(targets_pd, normalization_info)
rnn_outputs_denorm = denormalize_df(rnn_outputs, normalization_info)
fig, axs = plot_results_specific(targets_pd_denorm, rnn_outputs_denorm, time_axis, comment, closed_loop_enabled, close_loop_idx)
plt.show()
if save:
# Make folders if not yet exist
try:
os.makedirs('save_plots')
except FileExistsError:
pass
dateTimeObj = datetime.now()
timestampStr = dateTimeObj.strftime("-%d%b%Y_%H%M%S")
if rnn_full_name is not None:
fig.savefig('./save_plots/' + rnn_full_name + timestampStr + '.png')
else:
fig.savefig('./save_plots/' + timestampStr + '.png')
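# Illustrative call sketch (commented out; 'net' and 'args' are assumed to come from
# the surrounding training script, which is not shown here):
#   plot_results(net, args,
#                closed_loop_enabled=True,
#                close_loop_idx=512,
#                comment='validation run',
#                save=True)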
| 0
| 0
| 0
| 9,472
| 0
| 5,711
| 0
| 35
| 225
|
a14001fe338c11a2de9e1cb5a8130727cb1dcd35
| 7,654
|
py
|
Python
|
resto_client/cli/parser/parser_configure_server.py
|
CNES/resto_client
|
7048bd79c739e33882ebd664790dcf0528e81aa4
|
[
"Apache-2.0"
] | 6
|
2019-12-20T09:12:30.000Z
|
2021-07-08T11:44:55.000Z
|
resto_client/cli/parser/parser_configure_server.py
|
CNES/resto_client
|
7048bd79c739e33882ebd664790dcf0528e81aa4
|
[
"Apache-2.0"
] | null | null | null |
resto_client/cli/parser/parser_configure_server.py
|
CNES/resto_client
|
7048bd79c739e33882ebd664790dcf0528e81aa4
|
[
"Apache-2.0"
] | 1
|
2019-12-17T20:16:39.000Z
|
2019-12-17T20:16:39.000Z
|
# -*- coding: utf-8 -*-
"""
.. admonition:: License
Copyright 2019 CNES
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from resto_client.base_exceptions import RestoClientDesignError
from resto_client.services.service_access import (AuthenticationServiceAccess, RestoServiceAccess)
from resto_client.settings.resto_client_config import resto_client_print
from resto_client.settings.servers_database import DB_SERVERS
from .parser_common import CliFunctionReturnType
from .parser_settings import (SERVER_ARGNAME, RESTO_URL_ARGNAME, RESTO_PROTOCOL_ARGNAME,
                              AUTH_URL_ARGNAME, AUTH_PROTOCOL_ARGNAME)
def cli_create_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to create a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
    # TODO: Modify the ServiceAccess classes so that lower-casing is handled inside them
resto_access = RestoServiceAccess(getattr(args, RESTO_URL_ARGNAME),
getattr(args, RESTO_PROTOCOL_ARGNAME).lower())
auth_access = AuthenticationServiceAccess(getattr(args, AUTH_URL_ARGNAME),
getattr(args, AUTH_PROTOCOL_ARGNAME).lower())
DB_SERVERS.create_server(getattr(args, SERVER_ARGNAME), resto_access, auth_access)
return None, None
def cli_delete_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to delete a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
DB_SERVERS.delete(getattr(args, SERVER_ARGNAME))
return None, None
def cli_edit_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to edit the server characteristics
:param args: arguments parsed by the CLI parser
:raises RestoClientDesignError: unconditionally, as this function is not implemented yet
"""
raise RestoClientDesignError('Edit server unimplemented')
def cli_show_servers(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to show the servers database
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
_ = args # to avoid pylint warning
resto_client_print(DB_SERVERS)
return None, None
# We need to specify argparse._SubParsersAction for mypy to run. Thus pylint squeals.
# pylint: disable=protected-access
def add_configure_server_subparser(sub_parsers: argparse._SubParsersAction) -> None:
"""
Add the 'configure_server' subparser
:param sub_parsers: argparse object used to add a parser for that subcommand.
"""
parser_configure_server = sub_parsers.add_parser(
'configure_server', help='configure servers known by resto_client.',
description='Allows to create, modify or delete servers characteristics: url, type, etc.',
epilog='Servers definition is stored in a configuration file and can be edited using this'
' command.')
help_msg = 'For more help: {} <parameter> -h'.format(parser_configure_server.prog)
sub_parsers_configure_server = parser_configure_server.add_subparsers(description=help_msg)
add_config_server_create_parser(sub_parsers_configure_server)
add_config_server_delete_parser(sub_parsers_configure_server)
add_config_server_edit_parser(sub_parsers_configure_server)
add_config_server_show_parser(sub_parsers_configure_server)
def add_config_server_create_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server create'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'create', help='create a new server',
description='Create a new server in the servers configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_create_server)
def add_config_server_delete_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server delete'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'delete', help='delete an existing server',
description='Delete a server from the configuration database.')
subparser.add_argument(SERVER_ARGNAME, help='name of the server to delete')
subparser.set_defaults(func=cli_delete_server)
def add_config_server_edit_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server edit'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'edit', help='edit server characteristics',
description='Edit the characteristics of a server existing in the configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_edit_server)
def add_config_server_show_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server show'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'show', help='show servers database',
description='Show all the servers defined in the database with their configuration.')
subparser.set_defaults(func=cli_show_servers)
def _add_positional_args_parser(subparser: argparse.ArgumentParser) -> None:
"""
Add the positional arguments parsing rules for configure_server subcommands
:param subparser: parser to be supplemented with positional arguments.
"""
subparser.add_argument(SERVER_ARGNAME, help='name of the server')
group_resto = subparser.add_argument_group('resto service')
group_resto.add_argument(RESTO_URL_ARGNAME, help='URL of the resto server')
group_resto.add_argument(RESTO_PROTOCOL_ARGNAME,
choices=RestoServiceAccess.supported_protocols(),
help='Protocol of the resto server')
group_auth = subparser.add_argument_group('authentication service')
group_auth.add_argument(AUTH_URL_ARGNAME, nargs='?', help='URL of the authentication server')
group_auth.add_argument(AUTH_PROTOCOL_ARGNAME,
choices=AuthenticationServiceAccess.supported_protocols(),
help='Protocol of the authentication server')
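# Illustrative wiring sketch (commented out; 'build_parser' is a hypothetical helper,
# not part of this module):
#   def build_parser() -> argparse.ArgumentParser:
#       parser = argparse.ArgumentParser(prog='resto_client')
#       sub_parsers = parser.add_subparsers()
#       add_configure_server_subparser(sub_parsers)
#       return parser
# A command line accepted by the rules above, where the protocol values must belong to
# the corresponding supported_protocols() lists:
#   resto_client configure_server create <server_name> <resto_url> <resto_protocol> ...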
| 44.5
| 100
| 0.74902
|
# -*- coding: utf-8 -*-
"""
.. admonition:: License
Copyright 2019 CNES
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from resto_client.base_exceptions import RestoClientDesignError
from resto_client.services.service_access import (AuthenticationServiceAccess, RestoServiceAccess)
from resto_client.settings.resto_client_config import resto_client_print
from resto_client.settings.servers_database import DB_SERVERS
from .parser_common import CliFunctionReturnType
from .parser_settings import (SERVER_ARGNAME, RESTO_URL_ARGNAME, RESTO_PROTOCOL_ARGNAME,
AUTH_URL_ARGNAME, AUTH_PROTOCOL_ARGNAME)
def cli_create_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to create a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
    # TODO: Modify the ServiceAccess classes so that lower-casing is handled inside them
resto_access = RestoServiceAccess(getattr(args, RESTO_URL_ARGNAME),
getattr(args, RESTO_PROTOCOL_ARGNAME).lower())
auth_access = AuthenticationServiceAccess(getattr(args, AUTH_URL_ARGNAME),
getattr(args, AUTH_PROTOCOL_ARGNAME).lower())
DB_SERVERS.create_server(getattr(args, SERVER_ARGNAME), resto_access, auth_access)
return None, None
def cli_delete_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to delete a server definition
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
DB_SERVERS.delete(getattr(args, SERVER_ARGNAME))
return None, None
def cli_edit_server(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to edit the server characteristics
:param args: arguments parsed by the CLI parser
:raises RestoClientDesignError: unconditionally, as this function is not implemented yet
"""
raise RestoClientDesignError('Edit server unimplemented')
def cli_show_servers(args: argparse.Namespace) -> CliFunctionReturnType:
"""
CLI adapter to show the servers database
:param args: arguments parsed by the CLI parser
:returns: the resto client parameters and the resto server possibly built by this command.
"""
_ = args # to avoid pylint warning
resto_client_print(DB_SERVERS)
return None, None
# We need to specify argparse._SubParsersAction for mypy to run. Thus pylint squeals.
# pylint: disable=protected-access
def add_configure_server_subparser(sub_parsers: argparse._SubParsersAction) -> None:
"""
Add the 'configure_server' subparser
:param sub_parsers: argparse object used to add a parser for that subcommand.
"""
parser_configure_server = sub_parsers.add_parser(
'configure_server', help='configure servers known by resto_client.',
description='Allows to create, modify or delete servers characteristics: url, type, etc.',
epilog='Servers definition is stored in a configuration file and can be edited using this'
' command.')
help_msg = 'For more help: {} <parameter> -h'.format(parser_configure_server.prog)
sub_parsers_configure_server = parser_configure_server.add_subparsers(description=help_msg)
add_config_server_create_parser(sub_parsers_configure_server)
add_config_server_delete_parser(sub_parsers_configure_server)
add_config_server_edit_parser(sub_parsers_configure_server)
add_config_server_show_parser(sub_parsers_configure_server)
def add_config_server_create_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server create'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'create', help='create a new server',
description='Create a new server in the servers configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_create_server)
def add_config_server_delete_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server delete'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'delete', help='delete an existing server',
description='Delete a server from the configuration database.')
subparser.add_argument(SERVER_ARGNAME, help='name of the server to delete')
subparser.set_defaults(func=cli_delete_server)
def add_config_server_edit_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server edit'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'edit', help='edit server characteristics',
description='Edit the characteristics of a server existing in the configuration database.')
_add_positional_args_parser(subparser)
subparser.set_defaults(func=cli_edit_server)
def add_config_server_show_parser(
sub_parsers_configure_server: argparse._SubParsersAction) -> None:
"""
Update the 'configure_server' command subparser with options for 'configure_server show'
:param sub_parsers_configure_server: argparse object used to add a parser for that subcommand.
"""
subparser = sub_parsers_configure_server.add_parser(
'show', help='show servers database',
description='Show all the servers defined in the database with their configuration.')
subparser.set_defaults(func=cli_show_servers)
def _add_positional_args_parser(subparser: argparse.ArgumentParser) -> None:
"""
Add the positional arguments parsing rules for configure_server subcommands
:param subparser: parser to be supplemented with positional arguments.
"""
subparser.add_argument(SERVER_ARGNAME, help='name of the server')
group_resto = subparser.add_argument_group('resto service')
group_resto.add_argument(RESTO_URL_ARGNAME, help='URL of the resto server')
group_resto.add_argument(RESTO_PROTOCOL_ARGNAME,
choices=RestoServiceAccess.supported_protocols(),
help='Protocol of the resto server')
group_auth = subparser.add_argument_group('authentication service')
group_auth.add_argument(AUTH_URL_ARGNAME, nargs='?', help='URL of the authentication server')
group_auth.add_argument(AUTH_PROTOCOL_ARGNAME,
choices=AuthenticationServiceAccess.supported_protocols(),
help='Protocol of the authentication server')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0
|
4498832be13a9415d6ca76fd5ad2398b9e886b1d
| 1,059
|
py
|
Python
|
src/push_button.py
|
albang/arisa
|
9b7ea5e7befc92d1febb038476d03e858a622153
|
[
"MIT"
] | null | null | null |
src/push_button.py
|
albang/arisa
|
9b7ea5e7befc92d1febb038476d03e858a622153
|
[
"MIT"
] | null | null | null |
src/push_button.py
|
albang/arisa
|
9b7ea5e7befc92d1febb038476d03e858a622153
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import os, time
# Button callbacks used by GPIO.add_event_detect below
def button_callback(channel):
    print("Button was pushed!")
    os.system('mpg123 /home/pi/minute_courte.mp3 &')
def button_callback2(channel):
    print("Button was pushed!")
    os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(10,GPIO.RISING,callback=button_callback,bouncetime=4000) # Setup event on pin 10 rising edge
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(13,GPIO.RISING,callback=button_callback2,bouncetime=4000) # Setup event on pin 10 rising edge
while True:
time.sleep(100000)
GPIO.cleanup() # Clean up
| 40.730769
| 128
| 0.756374
|
#!/usr/bin/env python3
import RPi.GPIO as GPIO # Import Raspberry Pi GPIO library
import os, time
def button_callback(channel):
print("Button was pushed!")
os.system('mpg123 /home/pi/minute_courte.mp3 &')
def button_callback2(channel):
print("Button was pushed!")
os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
os.system('mpg123 -g100 /home/pi/paw_patrol_courte.mp3 &')
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BOARD) # Use physical pin numbering
GPIO.setup(10, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(10,GPIO.RISING,callback=button_callback,bouncetime=4000) # Setup event on pin 10 rising edge
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN) # Set pin 10 to be an input pin and set initial value to be pulled low (off)
GPIO.add_event_detect(13,GPIO.RISING,callback=button_callback2,bouncetime=4000) # Setup event on pin 10 rising edge
while True:
time.sleep(100000)
GPIO.cleanup() # Clean up
| 0
| 0
| 0
| 0
| 0
| 198
| 0
| 0
| 46
|
4da98b7e4cedd701321a8df23f73f41ffd79cf6e
| 1,054
|
py
|
Python
|
src/utils.py
|
michaellas/streaming-vid-to-gifs
|
ee5df22c820d4d631f0437c98a53989ecb76dca3
|
[
"MIT"
] | null | null | null |
src/utils.py
|
michaellas/streaming-vid-to-gifs
|
ee5df22c820d4d631f0437c98a53989ecb76dca3
|
[
"MIT"
] | 1
|
2015-04-07T12:24:26.000Z
|
2015-04-07T12:28:30.000Z
|
src/utils.py
|
michaellas/streaming-vid-to-gifs
|
ee5df22c820d4d631f0437c98a53989ecb76dca3
|
[
"MIT"
] | null | null | null |
import time
import sys
def print_progress( percent=None, x=0, max=100):
    if not percent:
        percent = x*100.0/max
    sys.stdout.write('\r')
    bars = int(percent / 5)
    sys.stdout.write("[%-20s] %d%% " % ('='*bars, int(percent)))
    sys.stdout.flush()
if __name__ == '__main__':
'''
@log_called_times_decorator
def ff():
print 'f'
while True:
ff()
time.sleep(1)
'''
print_progress(45)
print ''
print_progress(x=20,max=200)
| 26.35
| 107
| 0.578748
|
import time
import sys
def log_called_times_decorator(func):
def wrapper(*args):
wrapper.count += 1
# print "The function I modify has been called {0} times(s).".format(wrapper.count)
now = time.time()
if now - wrapper.last_log > wrapper.dt:
print '[DEBUG] In last %ds %s() was called %d times' % (wrapper.dt,func.__name__,wrapper.count)
wrapper.count = 0
wrapper.last_log = now
return func(*args)
wrapper.count = 0
wrapper.last_log = time.time()
wrapper.dt = 5
return wrapper
def print_progress( percent=None, x=0, max=100):
if not percent:
percent = x*100.0/max
sys.stdout.write('\r')
bars = int(percent / 5)
sys.stdout.write("[%-20s] %d%% " % ('='*bars, int(percent)))
sys.stdout.flush()
if __name__ == '__main__':
'''
@log_called_times_decorator
def ff():
print 'f'
while True:
ff()
time.sleep(1)
'''
print_progress(45)
print ''
print_progress(x=20,max=200)
| 0
| 0
| 0
| 0
| 0
| 748
| 0
| 0
| 46
|
4495fdf8627af041231ecfd1e216c9c24557ea8c
| 847
|
py
|
Python
|
monte_carlo.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | 2
|
2016-05-28T15:59:47.000Z
|
2018-07-30T21:05:18.000Z
|
monte_carlo.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | null | null | null |
monte_carlo.py
|
yandexdataschool/pyretina
|
300d3cd460ded071d75d3729e9b5dc1489d86d73
|
[
"Apache-2.0"
] | null | null | null |
number_of_events = 10
if __name__ == "__main__":
main("config/mc.json")
| 21.175
| 82
| 0.641086
|
from pyretina.mc import monte_carlo
import numpy as np
import json
import os
import os.path as osp
import shutil
number_of_events = 10
def main(conf):
with open(conf, 'r') as f:
config = json.load(f)
for N in np.arange(20, 520, 20):
config['scattering']['number_of_particles'] = {
'type' : 'randint',
'low' : N,
'high' : N + 1
}
plot_dir = osp.join('./events_img', '%d_particles' % N)
try:
shutil.rmtree(plot_dir)
except:
pass
os.mkdir(plot_dir)
events = monte_carlo(number_of_events, config, plot_dir=plot_dir, plot_each=2)
import cPickle as pickle
with open('data/mini_velo_sim_%d.pickled' % N, 'w') as f:
pickle.dump(events, f)
print 'Generated %d events with %d particles' % (number_of_events, N)
if __name__ == "__main__":
main("config/mc.json")
| 0
| 0
| 0
| 0
| 0
| 634
| 0
| -19
| 157
|
18ed809f9eec9232085b1804143efe6ca93e3a6e
| 5,950
|
py
|
Python
|
miner.py
|
OwlEyes33/crypto-alpha
|
dc3b39ecf38f3f445ecd94057775220b651633fc
|
[
"Apache-2.0"
] | null | null | null |
miner.py
|
OwlEyes33/crypto-alpha
|
dc3b39ecf38f3f445ecd94057775220b651633fc
|
[
"Apache-2.0"
] | null | null | null |
miner.py
|
OwlEyes33/crypto-alpha
|
dc3b39ecf38f3f445ecd94057775220b651633fc
|
[
"Apache-2.0"
] | null | null | null |
import logging
logging.basicConfig(level=logging.DEBUG)
if __name__ == "__main__":
miner = Miner()
miner.routine()
| 37.421384
| 86
| 0.557479
|
import logging
import os
import time
from math import inf
from os import environ
from threading import Thread
import requests
from redis import Redis
from block import Block
from blockchain import Blockchain
from peer2peer import PeerToPeerMessage
from transaction import Transaction
logging.basicConfig(level=logging.DEBUG)
class Miner(object):
def __init__(self, *args, **kwargs):
self.transactions = kwargs.get('transactions', {})
self.block_size = 64
self.miner = list()
self.peers = environ.get('PEERS', 'http://localhost:8000').split(',')
assert len(self.peers)
self.cached_p2p_messages = dict()
self.blockchain = Blockchain()
self.redis_cli = Redis(host='redis')
self.sync_to_redis()
def get_peers_blockchain(self):
try:
blockchains = dict()
_max = -inf
best_peer = None
with open("blockchain.dat", "rb") as f:
blockchain_size = len(f.read())
for peer in self.peers:
r = requests.get("http://{}/api/blockchain".format(peer))
if r.json().get('size'):
size = int(r.json().get('size'))
if size > _max:
_max = size
best_peer = peer
blockchains[peer] = r.json().get('size')
if _max > blockchain_size:
logging.debug("Downloading new blockchain from: {}".format(best_peer))
os.rename('blockchain.dat', 'blockchain.backup')
r = requests.get("http://{}/api/sync".format(best_peer))
with open('blockchain.dat', 'wb') as f:
f.write(r.content)
if self.blockchain.verify_blockchain():
os.remove('blockchain.backup')
else:
os.remove('blockchain.dat')
os.rename('blockchain.backup', 'blockchain.dat')
except requests.exceptions.ConnectionError:
pass
def sync_to_redis(self):
for _, key in enumerate(self.transactions):
self.redis_cli[key] = str(self.transactions[key])
self.transactions = {}
def broadcast_new_block(self, block):
p2p = PeerToPeerMessage(block=block)
for peer in self.peers:
r = requests.post("http://{}/api/block".format(peer), data=p2p.to_json())
assert r.status_code <= 299
@staticmethod
def ping_peer_transactions(peer, p2p_message):
logging.debug("Forwarding transactions to nearest peer {}".format(peer))
payload = p2p_message.to_json()
try:
requests.post("http://{}/api/transactions".format(peer), data=payload)
except requests.exceptions.ConnectionError as e:
logging.warning("Connection error {}".format(str(e)))
@staticmethod
def ping_peer_block(peer, p2p_message):
logging.debug("Forwarding block to nearest peer {}".format(peer))
payload = p2p_message.to_json()
try:
requests.post("http://{}/api/block".format(peer), data=payload)
except requests.exceptions.ConnectionError as e:
logging.warning("Connection error {}".format(str(e)))
def forward(self, p2p, target):
for peer in self.peers:
t = Thread(target=target, args=(peer, p2p))
t.start()
# Todo: Transactions should be sorted by timestamp
def compile_block(self):
data = str()
i = 0
for transaction_id in self.redis_cli.keys():
if i < 64:
try:
transaction = self.redis_cli[transaction_id]
t = Transaction()
transaction = t.from_string(transaction.decode('utf-8'))
if not transaction.verify_signature():
logging.warning("Transaction signature not valid")
continue
data = data + str(transaction) + '\n'
self.redis_cli.delete(transaction.id)
i = i + 1
except IndexError:
return False
block = Block(data=data)
return block
def do_proof_of_work(self, block, first=False):
if block:
magic_number = 0
while True:
block.magic_number = magic_number
if not first:
block.blockchain_snapshot = self.blockchain.get_sha512hash()
else:
block.blockchain_snapshot = 'None'
sha512hash = block.generate_hash()
block.sha512hash = sha512hash
if block.check_proof_of_work():
block.magic_number = magic_number
block.sha512hash = sha512hash
return block
magic_number = magic_number + 1
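    # Note: Block.check_proof_of_work() (project-specific, defined elsewhere) is the real
    # acceptance test. A generic hash-difficulty check of the same flavour, shown here only
    # as an illustration, could look like:
    #   import hashlib
    #   def meets_difficulty(payload: str, difficulty: int = 4) -> bool:
    #       digest = hashlib.sha512(payload.encode('utf-8')).hexdigest()
    #       return digest.startswith('0' * difficulty)
    # do_proof_of_work() above keeps incrementing magic_number until such a test passes.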
def routine(self):
# Check if there is a new blockchain version
while True:
logging.debug("Requesting new blockchain info from P2P network")
self.get_peers_blockchain()
time.sleep(1)
# Check if we have transactions
if len(list(self.redis_cli.keys())):
# Compile a block
logging.debug("Building a new block")
block = self.compile_block()
# Do proof of work
logging.debug("Doing proof of work on block")
block = self.do_proof_of_work(block)
# Verify a block
logging.debug("Verifying the block")
if self.blockchain.verify_blockchain(new_block=block):
# Write the block
logging.debug("Writing a new block")
self.blockchain.write_new_block(block)
if __name__ == "__main__":
miner = Miner()
miner.routine()
| 0
| 745
| 0
| 4,784
| 0
| 0
| 0
| 27
| 267
|
1486c16002e2c1f7f36eced992718519ad8c6db1
| 959
|
py
|
Python
|
web2py-appliances-master/MyForum/models/db.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
web2py-appliances-master/MyForum/models/db.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
web2py-appliances-master/MyForum/models/db.py
|
wantsomechocolate/WantsomeBeanstalk
|
8c8a0a80490d04ea52661a3114fd3db8de65a01e
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
DEBUG = True
db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])
response.generic_patterns = ['*'] if request.is_local else []
from gluon.tools import Auth, Service
auth = Auth(db)
auth.define_tables(username=False, signature=False)
service = Service()
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = '[email protected]'
mail.settings.login = 'username:password'
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
from gluon.contrib.login_methods.rpx_account import use_janrain
use_janrain(auth, filename='private/janrain.key')
| 33.068966
| 78
| 0.777894
|
# -*- coding: utf-8 -*-
DEBUG = True
db = DAL('sqlite://storage.sqlite',pool_size=1,check_reserved=['all'])
response.generic_patterns = ['*'] if request.is_local else []
from gluon.tools import Auth, Service, prettydate
auth = Auth(db)
auth.define_tables(username=False, signature=False)
service = Service()
## configure email
mail = auth.settings.mailer
mail.settings.server = 'logging' or 'smtp.gmail.com:587'
mail.settings.sender = '[email protected]'
mail.settings.login = 'username:password'
## configure auth policy
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
## if you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc.
## register with janrain.com, write your domain:api_key in private/janrain.key
from gluon.contrib.login_methods.rpx_account import use_janrain
use_janrain(auth, filename='private/janrain.key')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0
|
9ded2fcc8e677e149baf4d0a230b66939619b9e9
| 8,353
|
py
|
Python
|
conceptnet5/vectors/retrofit.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | 1
|
2018-11-27T17:00:57.000Z
|
2018-11-27T17:00:57.000Z
|
conceptnet5/vectors/retrofit.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
conceptnet5/vectors/retrofit.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
def retrofit(row_labels, dense_frame, sparse_csr,
iterations=5, verbosity=0, max_cleanup_iters=20,
orig_vec_weight=0.15):
"""
Retrofitting is a process of combining information from a machine-learned
space of term vectors with further structured information about those
terms. It was originally presented in this 2015 NAACL paper by Manaal
Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, and Noah
Smith, "Retrofitting Word Vectors to Semantic Lexicons":
https://www.cs.cmu.edu/~hovy/papers/15HLT-retrofitting-word-vectors.pdf
This function implements a variant that I've been calling "wide
retrofitting", which extends the process to learn vectors for terms that
were outside the original space.
`row_labels` is the list of terms that we want to have vectors for.
`dense_frame` is a DataFrame assigning vectors to some of these terms.
`sparse_csr` is a SciPy sparse square matrix, whose rows and columns are
implicitly labeled with `row_labels`. The entries of this matrix are
positive for terms that we know are related from our structured data.
(This is an awkward form of input, but unfortunately there is no good
way to represent sparse labeled data in Pandas.)
`sharded_retrofit` is responsible for building `row_labels` and `sparse_csr`
appropriately.
"""
# Initialize a DataFrame with rows that we know
retroframe = pd.DataFrame(
index=row_labels, columns=dense_frame.columns, dtype='f'
)
retroframe.update(dense_frame)
# orig_weights = 1 for known vectors, 0 for unknown vectors
orig_weights = 1 - retroframe.iloc[:, 0].isnull()
orig_vec_indicators = (orig_weights.values != 0)
orig_vecs = retroframe.fillna(0).values
# Subtract the mean so that vectors don't just clump around common
# hypernyms
orig_vecs[orig_vec_indicators] -= orig_vecs[orig_vec_indicators].mean(0)
# Delete the frame we built, we won't need its indices again until the end
del retroframe
vecs = orig_vecs
for iteration in range(iterations):
if verbosity >= 1:
print('Retrofitting: Iteration %s of %s' % (iteration+1, iterations))
# Since the sparse weight matrix is row-stochastic and has self-loops,
# pre-multiplication by it replaces each vector by a weighted average
# of itself and its neighbors. We really want to take the average
# of (itself and) the nonzero neighbors, which we can do by dividing
# the average with all the neighbors by the total of the weights of the
# nonzero neighbors. This avoids unduly shrinking vectors assigned to
# terms with lots of zero neighbors.
# Find, for every term, the total weight of its nonzero neighbors.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr.dot(nonzero_indicators)
# Now average with all the neighbors.
vecs = sparse_csr.dot(vecs)
# Now divide each vector (row) by the associated total weight.
# Some of the total weights could be zero, but only for rows that,
# before averaging, were zero and had all neighbors zero, whence
# after averaging will be zero. So only do the division for rows
# that are nonzero now, after averaging. Also, we reshape the total
# weights into a column vector so that numpy will broadcast the
# division by weights across the columns of the embedding matrix.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = total_neighbor_weights[nonzero_indicators]
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[nonzero_indicators] /= total_neighbor_weights
# Re-center the (new) non-zero vectors.
vecs[nonzero_indicators] -= vecs[nonzero_indicators].mean(0)
# Average known rows with original vectors
vecs[orig_vec_indicators, :] = \
(1.0 - orig_vec_weight) * vecs[orig_vec_indicators, :] + orig_vec_weight * orig_vecs[orig_vec_indicators, :]
# Clean up as many all-zero vectors as possible. Zero vectors
# can either come from components of the conceptnet graph that
# don't contain any terms from the embedding we are currently
# retrofitting (and there is nothing we can do about those here,
# but when retrofitting is done on that embedding they should be
# taken care of then) or from terms whose distance in the graph is
# larger than the number of retrofitting iterations used above; we
# propagate non-zero values to those terms by averaging over their
# non-zero neighbors. Note that this propagation can never reach
# the first class of terms, so we can't necessarily expect the
# number of zero vectors to go to zero at any one invocation of
# this code.
n_zero_indicators_old = -1
for iteration in range(max_cleanup_iters):
zero_indicators = (np.abs(vecs).sum(1) == 0)
n_zero_indicators = np.sum(zero_indicators)
if n_zero_indicators == 0 or n_zero_indicators == n_zero_indicators_old:
break
n_zero_indicators_old = n_zero_indicators
# First replace each zero vector (row) by the weighted average of all its
# neighbors.
vecs[zero_indicators, :] = sparse_csr[zero_indicators, :].dot(vecs)
# Now divide each newly nonzero vector (row) by the total weight of its
# old nonzero neighbors.
new_nonzero_indicators = np.logical_and(zero_indicators, np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr[new_nonzero_indicators, :].dot(np.logical_not(zero_indicators))
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[new_nonzero_indicators, :] /= total_neighbor_weights
else:
print('Warning: cleanup iteration limit exceeded.')
retroframe = pd.DataFrame(data=vecs, index=row_labels, columns=dense_frame.columns)
return retroframe
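# Illustrative usage sketch (commented out; sharded_retrofit() normally prepares these
# inputs, and the tiny values below are assumptions used only to show the expected types).
# The sparse matrix must be row-stochastic with self-loops, as the averaging step assumes:
#   from scipy.sparse import csr_matrix
#   row_labels = ['/c/en/cat', '/c/en/dog', '/c/en/pet']
#   dense = pd.DataFrame([[1.0, 0.0], [0.0, 1.0]], index=['/c/en/cat', '/c/en/dog'])
#   weights = csr_matrix(np.array([[0.5, 0.0, 0.5],
#                                  [0.0, 0.5, 0.5],
#                                  [0.25, 0.25, 0.5]]))
#   wide_frame = retrofit(row_labels, dense, weights, iterations=5)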
| 48.005747
| 130
| 0.704058
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import normalize
from .sparse_matrix_builder import build_from_conceptnet_table
from .formats import load_hdf, save_hdf
def sharded_retrofit(dense_hdf_filename, conceptnet_filename, output_filename,
iterations=5, nshards=6, verbosity=0,
max_cleanup_iters=20, orig_vec_weight=0.15):
# frame_box is basically a reference to a single large DataFrame. The
# DataFrame will at times be present or absent. When it's present, the list
# contains one item, which is the DataFrame. When it's absent, the list
# is empty.
frame_box = [load_hdf(dense_hdf_filename)]
sparse_csr, combined_index = build_from_conceptnet_table(conceptnet_filename, orig_index=frame_box[0].index)
shard_width = frame_box[0].shape[1] // nshards
for i in range(nshards):
temp_filename = output_filename + '.shard%d' % i
shard_from = shard_width * i
shard_to = shard_from + shard_width
if len(frame_box) == 0:
frame_box.append(load_hdf(dense_hdf_filename))
dense_frame = pd.DataFrame(frame_box[0].iloc[:, shard_from:shard_to])
# Delete full_dense_frame while running retrofitting, because it takes
# up a lot of memory and we can reload it from disk later.
frame_box.clear()
retrofitted = retrofit(combined_index, dense_frame, sparse_csr, iterations, verbosity, max_cleanup_iters, orig_vec_weight)
save_hdf(retrofitted, temp_filename)
del retrofitted
def join_shards(output_filename, nshards=6, sort=False):
joined_matrix = None
joined_labels = None
for i in range(nshards):
shard = load_hdf(output_filename + '.shard%d' % i)
nrows, ncols = shard.shape
if joined_matrix is None:
joined_matrix = np.zeros((nrows, ncols * nshards), dtype='f')
joined_labels = shard.index
joined_matrix[:, (ncols * i):(ncols * (i + 1))] = shard.values
del shard
normalize(joined_matrix, axis=1, norm='l2', copy=False)
dframe = pd.DataFrame(joined_matrix, index=joined_labels)
if sort:
dframe.sort_index(inplace=True)
save_hdf(dframe, output_filename)
def retrofit(row_labels, dense_frame, sparse_csr,
iterations=5, verbosity=0, max_cleanup_iters=20,
orig_vec_weight=0.15):
"""
Retrofitting is a process of combining information from a machine-learned
space of term vectors with further structured information about those
terms. It was originally presented in this 2015 NAACL paper by Manaal
Faruqui, Jesse Dodge, Sujay Jauhar, Chris Dyer, Eduard Hovy, and Noah
Smith, "Retrofitting Word Vectors to Semantic Lexicons":
https://www.cs.cmu.edu/~hovy/papers/15HLT-retrofitting-word-vectors.pdf
This function implements a variant that I've been calling "wide
retrofitting", which extends the process to learn vectors for terms that
were outside the original space.
`row_labels` is the list of terms that we want to have vectors for.
`dense_frame` is a DataFrame assigning vectors to some of these terms.
`sparse_csr` is a SciPy sparse square matrix, whose rows and columns are
implicitly labeled with `row_labels`. The entries of this matrix are
positive for terms that we know are related from our structured data.
(This is an awkward form of input, but unfortunately there is no good
way to represent sparse labeled data in Pandas.)
`sharded_retrofit` is responsible for building `row_labels` and `sparse_csr`
appropriately.
"""
# Initialize a DataFrame with rows that we know
retroframe = pd.DataFrame(
index=row_labels, columns=dense_frame.columns, dtype='f'
)
retroframe.update(dense_frame)
# orig_weights = 1 for known vectors, 0 for unknown vectors
orig_weights = 1 - retroframe.iloc[:, 0].isnull()
orig_vec_indicators = (orig_weights.values != 0)
orig_vecs = retroframe.fillna(0).values
# Subtract the mean so that vectors don't just clump around common
# hypernyms
orig_vecs[orig_vec_indicators] -= orig_vecs[orig_vec_indicators].mean(0)
# Delete the frame we built, we won't need its indices again until the end
del retroframe
vecs = orig_vecs
for iteration in range(iterations):
if verbosity >= 1:
print('Retrofitting: Iteration %s of %s' % (iteration+1, iterations))
# Since the sparse weight matrix is row-stochastic and has self-loops,
# pre-multiplication by it replaces each vector by a weighted average
# of itself and its neighbors. We really want to take the average
# of (itself and) the nonzero neighbors, which we can do by dividing
# the average with all the neighbors by the total of the weights of the
# nonzero neighbors. This avoids unduly shrinking vectors assigned to
# terms with lots of zero neighbors.
# Find, for every term, the total weight of its nonzero neighbors.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr.dot(nonzero_indicators)
# Now average with all the neighbors.
vecs = sparse_csr.dot(vecs)
# Now divide each vector (row) by the associated total weight.
# Some of the total weights could be zero, but only for rows that,
# before averaging, were zero and had all neighbors zero, whence
# after averaging will be zero. So only do the division for rows
# that are nonzero now, after averaging. Also, we reshape the total
# weights into a column vector so that numpy will broadcast the
# division by weights across the columns of the embedding matrix.
nonzero_indicators = (np.abs(vecs).sum(1) != 0)
total_neighbor_weights = total_neighbor_weights[nonzero_indicators]
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[nonzero_indicators] /= total_neighbor_weights
# Re-center the (new) non-zero vectors.
vecs[nonzero_indicators] -= vecs[nonzero_indicators].mean(0)
# Average known rows with original vectors
vecs[orig_vec_indicators, :] = \
(1.0 - orig_vec_weight) * vecs[orig_vec_indicators, :] + orig_vec_weight * orig_vecs[orig_vec_indicators, :]
# Clean up as many all-zero vectors as possible. Zero vectors
# can either come from components of the conceptnet graph that
# don't contain any terms from the embedding we are currently
# retrofitting (and there is nothing we can do about those here,
# but when retrofitting is done on that embedding they should be
# taken care of then) or from terms whose distance in the graph is
# larger than the number of retrofitting iterations used above; we
# propagate non-zero values to those terms by averaging over their
# non-zero neighbors. Note that this propagation can never reach
# the first class of terms, so we can't necessarily expect the
# number of zero vectors to go to zero at any one invocation of
# this code.
n_zero_indicators_old = -1
for iteration in range(max_cleanup_iters):
zero_indicators = (np.abs(vecs).sum(1) == 0)
n_zero_indicators = np.sum(zero_indicators)
if n_zero_indicators == 0 or n_zero_indicators == n_zero_indicators_old:
break
n_zero_indicators_old = n_zero_indicators
# First replace each zero vector (row) by the weighted average of all its
# neighbors.
vecs[zero_indicators, :] = sparse_csr[zero_indicators, :].dot(vecs)
# Now divide each newly nonzero vector (row) by the total weight of its
# old nonzero neighbors.
new_nonzero_indicators = np.logical_and(zero_indicators, np.abs(vecs).sum(1) != 0)
total_neighbor_weights = sparse_csr[new_nonzero_indicators, :].dot(np.logical_not(zero_indicators))
total_neighbor_weights = total_neighbor_weights.reshape((len(total_neighbor_weights), 1))
vecs[new_nonzero_indicators, :] /= total_neighbor_weights
else:
print('Warning: cleanup iteration limit exceeded.')
retroframe = pd.DataFrame(data=vecs, index=row_labels, columns=dense_frame.columns)
return retroframe
| 0
| 0
| 0
| 0
| 0
| 2,009
| 0
| 81
| 112
|
fcd076838a13b16b0181931dfa476968f0b03f64
| 11,297
|
py
|
Python
|
Stock_Analysis/auto_value_stock.py
|
parmarsuraj99/Finance
|
d9f012e33a99b959fdde575feedeb5922b379fe2
|
[
"MIT"
] | 1
|
2022-02-25T01:25:21.000Z
|
2022-02-25T01:25:21.000Z
|
Stock_Analysis/auto_value_stock.py
|
StockScripts/Finance
|
330bb46ea8e4c7ad5f3150cfa6d25e356178b189
|
[
"MIT"
] | null | null | null |
Stock_Analysis/auto_value_stock.py
|
StockScripts/Finance
|
330bb46ea8e4c7ad5f3150cfa6d25e356178b189
|
[
"MIT"
] | 2
|
2021-01-28T21:52:30.000Z
|
2021-02-16T13:26:35.000Z
|
# Code from https://medium.com/datadriveninvestor/use-python-to-value-a-stock-automatically-3b520422ab6 by Bohmian
# Importing required modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import time
from config import financial_model_prep
pd.set_option('display.max_columns', None)
# Settings to produce nice plots in a Jupyter notebook
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = [15, 10]
# To extract and parse fundamental data from finviz website
import warnings
warnings.filterwarnings('ignore')
# For parsing financial statements data from financialmodelingprep api
# inputs
base_url = "https://financialmodelingprep.com/api/v3/"
tickers = ['AAL']
apiKey = financial_model_prep()
cash_flows = []
total_debts = []
cash_and_ST_investments_list = []
betas = []
discount_rates = []
EPS_growth_5Ys = []
EPS_growth_6Y_to_10Ys = []
EPS_growth_11Y_to_20Ys = []
shares_outstandings = []
intrinsic_values = []
current_prices = []
margins_safety = []
valid_tickers = []
for ticker in tickers:
try:
q_cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_cash_flow_statement = q_cash_flow_statement.set_index('date').iloc[:4] # extract for last 4 quarters
q_cash_flow_statement = q_cash_flow_statement.apply(pd.to_numeric, errors='coerce')
cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?apikey=' + apiKey))
cash_flow_statement = cash_flow_statement.set_index('date')
cash_flow_statement = cash_flow_statement.apply(pd.to_numeric, errors='coerce')
ttm_cash_flow_statement = q_cash_flow_statement.sum() # sum up last 4 quarters to get TTM cash flow
cash_flow_statement = cash_flow_statement[::-1].append(ttm_cash_flow_statement.rename('TTM')).drop(['netIncome'], axis=1)
final_cash_flow_statement = cash_flow_statement[::-1] # reverse list to show most recent ones first
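        # Worked example with illustrative numbers: if the last four quarterly
        # freeCashFlow values were 1.0, 1.2, 0.8 and 1.5 (in billions), the 'TTM'
        # row appended above would hold 1.0 + 1.2 + 0.8 + 1.5 = 4.5 billion.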
# final_cash_flow_statement[['freeCashFlow']].iloc[::-1].iloc[-15:].plot(kind='bar', title=ticker + ' Cash Flows')
# plt.show()
q_balance_statement = pd.DataFrame(get_jsonparsed_data(base_url+'balance-sheet-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_balance_statement = q_balance_statement.set_index('date')
q_balance_statement = q_balance_statement.apply(pd.to_numeric, errors='coerce')
cash_flow = final_cash_flow_statement.iloc[0]['freeCashFlow']
total_debt = q_balance_statement.iloc[0]['totalDebt']
cash_and_ST_investments = q_balance_statement.iloc[0]['cashAndShortTermInvestments']
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# List of data we want to extract from Finviz Table
metric = ['Price', 'EPS next 5Y', 'Beta', 'Shs Outstand']
finviz_data = get_finviz_data(ticker)
# print('\nFinViz Data:\n' + str(finviz_data))
Beta = finviz_data['Beta']
discount_rate = 7
if(Beta<0.80):
discount_rate = 5
elif(Beta>=0.80 and Beta<1):
discount_rate = 6
elif(Beta>=1 and Beta<1.1):
discount_rate = 6.5
elif(Beta>=1.1 and Beta<1.2):
discount_rate = 7
elif(Beta>=1.2 and Beta<1.3):
discount_rate =7.5
elif(Beta>=1.3 and Beta<1.4):
discount_rate = 8
elif(Beta>=1.4 and Beta<1.6):
discount_rate = 8.5
elif(Beta>=1.61):
discount_rate = 9
# print("\nDiscount Rate: ", discount_rate)
EPS_growth_5Y = finviz_data['EPS next 5Y']
EPS_growth_6Y_to_10Y = EPS_growth_5Y/2 # Half the previous growth rate, conservative estimate
EPS_growth_11Y_to_20Y = np.minimum(EPS_growth_6Y_to_10Y, 4) # Slightly higher than long term inflation rate, conservative estimate
shares_outstanding = round(finviz_data['Shs Outstand'])
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# print("EPS Growth 5Y: ", EPS_growth_5Y)
# print("EPS Growth 6Y to 10Y: ", EPS_growth_6Y_to_10Y)
# print("EPS Growth 11Y to 20Y: ", EPS_growth_11Y_to_20Y)
# print("Discount Rate: ", discount_rate)
# print("Shares Outstanding: ", shares_outstanding)
intrinsic_value = round(calculate_intrinsic_value(cash_flow, total_debt, cash_and_ST_investments,
EPS_growth_5Y, EPS_growth_6Y_to_10Y, EPS_growth_11Y_to_20Y,
shares_outstanding, discount_rate), 2)
# print("\nIntrinsic Value: ", intrinsic_value)
current_price = finviz_data['Price']
# print("Current Price: ", current_price)
change = round(((intrinsic_value-current_price)/current_price)*100, 2)
# print("Margin of Safety: ", margin_safety)
cash_flows.append(cash_flow)
total_debts.append(total_debt)
cash_and_ST_investments_list.append(cash_and_ST_investments)
betas.append(Beta)
discount_rates.append(discount_rate)
EPS_growth_5Ys.append(EPS_growth_5Y)
EPS_growth_6Y_to_10Ys.append(EPS_growth_6Y_to_10Y)
EPS_growth_11Y_to_20Ys.append(EPS_growth_11Y_to_20Y)
shares_outstandings.append(shares_outstanding)
intrinsic_values.append(intrinsic_value)
current_prices.append(current_price)
margins_safety.append(change)
valid_tickers.append(ticker)
except:
pass
df = pd.DataFrame(np.column_stack([valid_tickers, cash_flows, total_debts, cash_and_ST_investments_list, betas, discount_rates, EPS_growth_5Ys, EPS_growth_6Y_to_10Ys, EPS_growth_11Y_to_20Ys, shares_outstandings, intrinsic_values, current_prices, margins_safety]),
columns=['Ticker', 'Cash Flow', 'Total Debt', 'Cash and ST investment', 'Beta', 'Discount Rate', 'EPS Growth 5 Y', 'EPS Growth 6-10 Y', 'EPS Growth 11-20 Y', 'Shares Outstanding', 'Intrinsic Value', 'Current Price', 'Margin Safety']).set_index('Ticker')
df = df.sort_values(['Margin Safety'], ascending=True)
df.to_csv(f'{time.time()}.csv')
print (df)
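# Note (illustrative): the intrinsic value printed above follows a standard
# discounted-cash-flow estimate,
#   intrinsic value per share =
#       (sum over t = 1..20 of CF_t / (1 + r)**t - total_debt + cash_and_ST_investments)
#       / shares_outstanding
# where CF_t grows at the 5Y rate for years 1-5, half that rate for years 6-10, the
# capped long-term rate for years 11-20, and r is the Beta-based discount rate above.
# For example, a cash flow of 100 due in year 2 at r = 7% contributes
# 100 / 1.07**2 = 87.34 (rounded) to the sum.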
| 46.681818
| 284
| 0.615208
|
# Code from https://medium.com/datadriveninvestor/use-python-to-value-a-stock-automatically-3b520422ab6 by Bohmian
# Importing required modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import time
from config import financial_model_prep
pd.set_option('display.max_columns', None)
# Settings to produce nice plots in a Jupyter notebook
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = [15, 10]
import seaborn as sns
# To extract and parse fundamental data from finviz website
import requests
from bs4 import BeautifulSoup as bs
import warnings
warnings.filterwarnings('ignore')
# For parsing financial statements data from financialmodelingprep api
from urllib.request import urlopen
import json
def get_jsonparsed_data(url):
response = urlopen(url)
data = response.read().decode("utf-8")
return json.loads(data)
# inputs
base_url = "https://financialmodelingprep.com/api/v3/"
tickers = ['AAL']
apiKey = financial_model_prep()
cash_flows = []
total_debts = []
cash_and_ST_investments_list = []
betas = []
discount_rates = []
EPS_growth_5Ys = []
EPS_growth_6Y_to_10Ys = []
EPS_growth_11Y_to_20Ys = []
shares_outstandings = []
intrinsic_values = []
current_prices = []
margins_safety = []
valid_tickers = []
for ticker in tickers:
try:
q_cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_cash_flow_statement = q_cash_flow_statement.set_index('date').iloc[:4] # extract for last 4 quarters
q_cash_flow_statement = q_cash_flow_statement.apply(pd.to_numeric, errors='coerce')
cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?apikey=' + apiKey))
cash_flow_statement = cash_flow_statement.set_index('date')
cash_flow_statement = cash_flow_statement.apply(pd.to_numeric, errors='coerce')
ttm_cash_flow_statement = q_cash_flow_statement.sum() # sum up last 4 quarters to get TTM cash flow
cash_flow_statement = cash_flow_statement[::-1].append(ttm_cash_flow_statement.rename('TTM')).drop(['netIncome'], axis=1)
final_cash_flow_statement = cash_flow_statement[::-1] # reverse list to show most recent ones first
# final_cash_flow_statement[['freeCashFlow']].iloc[::-1].iloc[-15:].plot(kind='bar', title=ticker + ' Cash Flows')
# plt.show()
q_balance_statement = pd.DataFrame(get_jsonparsed_data(base_url+'balance-sheet-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
q_balance_statement = q_balance_statement.set_index('date')
q_balance_statement = q_balance_statement.apply(pd.to_numeric, errors='coerce')
cash_flow = final_cash_flow_statement.iloc[0]['freeCashFlow']
total_debt = q_balance_statement.iloc[0]['totalDebt']
cash_and_ST_investments = q_balance_statement.iloc[0]['cashAndShortTermInvestments']
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# List of data we want to extract from Finviz Table
metric = ['Price', 'EPS next 5Y', 'Beta', 'Shs Outstand']
def fundamental_metric(soup, metric):
# the table which stores the data in Finviz has html table attribute class of 'snapshot-td2'
return soup.find(text = metric).find_next(class_='snapshot-td2').text
def get_finviz_data(ticker):
try:
url = ("http://finviz.com/quote.ashx?t=" + ticker.lower())
soup = bs(requests.get(url,headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0'}).content)
dict_finviz = {}
for m in metric:
dict_finviz[m] = fundamental_metric(soup,m)
for key, value in dict_finviz.items():
# replace percentages
if (value[-1]=='%'):
dict_finviz[key] = value[:-1]
dict_finviz[key] = float(dict_finviz[key])
# billion
if (value[-1]=='B'):
dict_finviz[key] = value[:-1]
dict_finviz[key] = float(dict_finviz[key])*1000000000
# million
if (value[-1]=='M'):
dict_finviz[key] = value[:-1]
dict_finviz[key] = float(dict_finviz[key])*1000000
try:
dict_finviz[key] = float(dict_finviz[key])
except:
pass
except Exception as e:
print (e)
print ('Not successful parsing ' + ticker + ' data.')
return dict_finviz
finviz_data = get_finviz_data(ticker)
# print('\nFinViz Data:\n' + str(finviz_data))
Beta = finviz_data['Beta']
discount_rate = 7
if(Beta<0.80):
discount_rate = 5
elif(Beta>=0.80 and Beta<1):
discount_rate = 6
elif(Beta>=1 and Beta<1.1):
discount_rate = 6.5
elif(Beta>=1.1 and Beta<1.2):
discount_rate = 7
elif(Beta>=1.2 and Beta<1.3):
discount_rate =7.5
elif(Beta>=1.3 and Beta<1.4):
discount_rate = 8
elif(Beta>=1.4 and Beta<1.6):
discount_rate = 8.5
elif(Beta>=1.61):
discount_rate = 9
# print("\nDiscount Rate: ", discount_rate)
EPS_growth_5Y = finviz_data['EPS next 5Y']
EPS_growth_6Y_to_10Y = EPS_growth_5Y/2 # Half the previous growth rate, conservative estimate
EPS_growth_11Y_to_20Y = np.minimum(EPS_growth_6Y_to_10Y, 4) # Slightly higher than long term inflation rate, conservative estimate
shares_outstanding = round(finviz_data['Shs Outstand'])
# print("Free Cash Flow: ", cash_flow)
# print("Total Debt: ", total_debt)
# print("Cash and ST Investments: ", cash_and_ST_investments)
# print("EPS Growth 5Y: ", EPS_growth_5Y)
# print("EPS Growth 6Y to 10Y: ", EPS_growth_6Y_to_10Y)
# print("EPS Growth 11Y to 20Y: ", EPS_growth_11Y_to_20Y)
# print("Discount Rate: ", discount_rate)
# print("Shares Outstanding: ", shares_outstanding)
def calculate_intrinsic_value(cash_flow, total_debt, cash_and_ST_investments,
EPS_growth_5Y, EPS_growth_6Y_to_10Y, EPS_growth_11Y_to_20Y,
shares_outstanding, discount_rate):
# Convert all percentages to decimals
EPS_growth_5Y_d = EPS_growth_5Y/100
EPS_growth_6Y_to_10Y_d = EPS_growth_6Y_to_10Y/100
EPS_growth_11Y_to_20Y_d = EPS_growth_11Y_to_20Y/100
discount_rate_d = discount_rate/100
# print("\nDiscounted Cash Flows")
# Lists of projected cash flows from year 1 to year 20
cash_flow_list = []
cash_flow_discounted_list = []
year_list = []
# Years 1 to 5
for year in range(1, 6):
year_list.append(year)
cash_flow*=(1 + EPS_growth_5Y_d)
cash_flow_list.append(cash_flow)
cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
cash_flow_discounted_list.append(cash_flow_discounted)
# print("Year " + str(year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
# Years 6 to 10
for year in range(6, 11):
year_list.append(year)
cash_flow*=(1 + EPS_growth_6Y_to_10Y_d)
cash_flow_list.append(cash_flow)
cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
cash_flow_discounted_list.append(cash_flow_discounted)
# print("Year " + str(year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
# Years 11 to 20
for year in range(11, 21):
year_list.append(year)
cash_flow*=(1 + EPS_growth_11Y_to_20Y_d)
cash_flow_list.append(cash_flow)
cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
cash_flow_discounted_list.append(cash_flow_discounted)
# print("Year " + str(year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
intrinsic_value = (sum(cash_flow_discounted_list) - total_debt + cash_and_ST_investments)/shares_outstanding
df = pd.DataFrame.from_dict({'Year': year_list, 'Cash Flow': cash_flow_list, 'Discounted Cash Flow': cash_flow_discounted_list})
df.index = df.Year
# df.plot(kind='bar', title = 'Projected Cash Flows of ' + ticker)
# plt.show()
return intrinsic_value
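# In summary, the value returned above is a 20-year DCF per share:
#   intrinsic_value = (sum_{t=1..20} FCF_t / (1 + r)^t - total_debt + cash_and_ST_investments) / shares_outstanding
# where FCF_t grows at EPS_growth_5Y for years 1-5, at half that rate for years 6-10,
# at min(that, 4%) for years 11-20, and r is the beta-based discount rate chosen earlier.
# Quick sanity check with round numbers (illustrative only): a constant $1B cash flow
# discounted at 7% for 20 years sums to roughly $10.6B before the debt and cash adjustments.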
intrinsic_value = round(calculate_intrinsic_value(cash_flow, total_debt, cash_and_ST_investments,
EPS_growth_5Y, EPS_growth_6Y_to_10Y, EPS_growth_11Y_to_20Y,
shares_outstanding, discount_rate), 2)
# print("\nIntrinsic Value: ", intrinsic_value)
current_price = finviz_data['Price']
# print("Current Price: ", current_price)
change = round(((intrinsic_value-current_price)/current_price)*100, 2)
# print("Margin of Safety: ", margin_safety)
cash_flows.append(cash_flow)
total_debts.append(total_debt)
cash_and_ST_investments_list.append(cash_and_ST_investments)
betas.append(Beta)
discount_rates.append(discount_rate)
EPS_growth_5Ys.append(EPS_growth_5Y)
EPS_growth_6Y_to_10Ys.append(EPS_growth_6Y_to_10Y)
EPS_growth_11Y_to_20Ys.append(EPS_growth_11Y_to_20Y)
shares_outstandings.append(shares_outstanding)
intrinsic_values.append(intrinsic_value)
current_prices.append(current_price)
margins_safety.append(change)
valid_tickers.append(ticker)
except:
pass
df = pd.DataFrame(np.column_stack([valid_tickers, cash_flows, total_debts, cash_and_ST_investments_list, betas, discount_rates, EPS_growth_5Ys, EPS_growth_6Y_to_10Ys, EPS_growth_11Y_to_20Ys, shares_outstandings, intrinsic_values, current_prices, margins_safety]),
columns=['Ticker', 'Cash Flow', 'Total Debt', 'Cash and ST investment', 'Beta', 'Discount Rate', 'EPS Growth 5 Y', 'EPS Growth 6-10 Y', 'EPS Growth 11-20 Y', 'Shares Outstanding', 'Intrinsic Value', 'Current Price', 'Margin Safety']).set_index('Ticker')
df = df.sort_values(['Margin Safety'], ascending=True)
df.to_csv(f'{time.time()}.csv')
print (df)
| 0
| 0
| 0
| 0
| 0
| 4,418
| 0
| 11
| 253
|
901b7a71198943a53f223f18bbc124edf656a124
| 2,580
|
py
|
Python
|
src/100_simple_aggregation.py
|
j20232/kaggle_earthquake
|
47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b
|
[
"MIT"
] | null | null | null |
src/100_simple_aggregation.py
|
j20232/kaggle_earthquake
|
47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b
|
[
"MIT"
] | null | null | null |
src/100_simple_aggregation.py
|
j20232/kaggle_earthquake
|
47fac5f2e8d2ad4fab82426a0b6af18b71e4b57b
|
[
"MIT"
] | null | null | null |
"""Extract simple aggregation features
Reference: https://www.kaggle.com/gpreda/lanl-earthquake-eda-and-prediction
"""
import sys
import competition as cc
TRAIN_CSV_DIRECTORY_PATH = cc.INPUT_PATH / sys.argv[1]
TRAIN_CSV_LIST = list(TRAIN_CSV_DIRECTORY_PATH.glob('**/*.csv'))
if __name__ == "__main__":
train_csv_path = cc.FEATURE_PATH / "{}".format(sys.argv[1])
train_csv_l = [str(item) for item in TRAIN_CSV_LIST]
extract_features(train_csv_l, train_csv_path)
test_csv_path = cc.FEATURE_PATH / "test"
test_csv_l = [str(item) for item in cc.TEST_CSV_LIST]
extract_features(test_csv_l, test_csv_path)
| 38.507463
| 91
| 0.622481
|
"""Extract simple aggregation features
Reference: https://www.kaggle.com/gpreda/lanl-earthquake-eda-and-prediction
"""
import sys
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import competition as cc
from common import stop_watch
TRAIN_CSV_DIRECTORY_PATH = cc.INPUT_PATH / sys.argv[1]
TRAIN_CSV_LIST = list(TRAIN_CSV_DIRECTORY_PATH.glob('**/*.csv'))
@stop_watch
def extract_features(csv_list, feature_dir_path):
df = pd.DataFrame()
Path.mkdir(feature_dir_path, exist_ok=True, parents=True)
for index, each_csv in enumerate(tqdm(sorted(csv_list))):
seg = pd.read_csv(each_csv, dtype=cc.DTYPES)
seg_id = each_csv.split("/")[-1].split(".")[0]
df.loc[index, "seg_id"] = seg_id
xc = pd.Series(seg['acoustic_data'].values)
# basic aggregation
df.loc[index, "mean"] = xc.mean()
df.loc[index, "std"] = xc.std()
df.loc[index, "max"] = xc.max()
df.loc[index, "min"] = xc.min()
df.loc[index, 'sum'] = xc.sum()
df.loc[index, 'mad'] = xc.mad()
df.loc[index, 'kurtosis'] = xc.kurtosis()
df.loc[index, 'skew'] = xc.skew()
df.loc[index, 'median'] = xc.median()
df.loc[index, 'mean_change_rate'] = np.mean(np.nonzero((np.diff(xc) / xc[:-1]))[0])
# abs aggregation
df.loc[index, 'abs_mean'] = np.abs(xc).mean()
df.loc[index, 'abs_std'] = np.abs(xc).std()
df.loc[index, 'abs_max'] = np.abs(xc).max()
df.loc[index, 'abs_min'] = np.abs(xc).min()
df.loc[index, 'abs_sum'] = np.abs(xc).sum()
df.loc[index, 'abs_mad'] = np.abs(xc).mad()
df.loc[index, 'abs_kurtosis'] = np.abs(xc).kurtosis()
df.loc[index, 'abs_skew'] = np.abs(xc).skew()
df.loc[index, 'abs_median'] = np.abs(xc).median()
df.loc[index, 'mean_change_abs'] = np.mean(np.diff(xc))
df.loc[index, 'max_to_min'] = xc.max() / np.abs(xc.min())
df.loc[index, 'max_to_min_diff'] = xc.max() - np.abs(xc.min())
df.loc[index, 'count_big'] = len(xc[np.abs(xc) > 500])
print("Aggregation output is belows:")
print(df.head(3))
df.to_csv(feature_dir_path / "{}.csv".format(cc.PREF), index=False)
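# The resulting CSV has one row per acoustic segment: 'seg_id' plus the plain and
# absolute-value aggregates computed above (mean, std, max, min, sum, mad, kurtosis,
# skew, median, mean_change_rate, max_to_min, count_big, ...).
# Illustrative check of one of those aggregates on a toy series:
# >>> import pandas as pd
# >>> pd.Series([1, -2, 3]).mad()  # mean absolute deviation around the mean, here ~1.78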
if __name__ == "__main__":
train_csv_path = cc.FEATURE_PATH / "{}".format(sys.argv[1])
train_csv_l = [str(item) for item in TRAIN_CSV_LIST]
extract_features(train_csv_l, train_csv_path)
test_csv_path = cc.FEATURE_PATH / "test"
test_csv_l = [str(item) for item in cc.TEST_CSV_LIST]
extract_features(test_csv_l, test_csv_path)
| 0
| 1,807
| 0
| 0
| 0
| 0
| 0
| 6
| 133
|
0b3eba4af37debbbb40bec37c6e9b379c1156729
| 8,817
|
py
|
Python
|
segment.py
|
neelsj/syndata-generation
|
df73cc9a146c34870c3d80acce0ca04b314ec1b0
|
[
"MIT"
] | null | null | null |
segment.py
|
neelsj/syndata-generation
|
df73cc9a146c34870c3d80acce0ca04b314ec1b0
|
[
"MIT"
] | null | null | null |
segment.py
|
neelsj/syndata-generation
|
df73cc9a146c34870c3d80acce0ca04b314ec1b0
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import torch
#model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)
model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet101', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_mobilenet_v3_large', pretrained=True)
model.eval()
COCO_INFO = {
"description": "",
"url": "",
"version": "1",
"year": 2022,
"contributor": "MSR CV Group",
"date_created": datetime.now().strftime("%m/%d/%Y")
}
COCO_LICENSES = [{
"url": "",
"id": 0,
"name": "License"
}]
if __name__ == "__main__":
data_dir = "E:/Research/Images/FineGrained/StanfordCars/train_bing/"
| 31.830325
| 132
| 0.565612
|
import os
from datetime import datetime
import json
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch
import numpy as np
from skimage import measure
from shapely.geometry import Polygon, MultiPolygon
from PIL import Image
import cv2
#model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet50', pretrained=True)
model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_resnet101', pretrained=True)
# model = torch.hub.load('pytorch/vision:v0.10.0', 'deeplabv3_mobilenet_v3_large', pretrained=True)
model.eval()
from torchvision import transforms
COCO_INFO = {
"description": "",
"url": "",
"version": "1",
"year": 2022,
"contributor": "MSR CV Group",
"date_created": datetime.now().strftime("%m/%d/%Y")
}
COCO_LICENSES = [{
"url": "",
"id": 0,
"name": "License"
}]
def create_mask(input_image):
input_image = input_image.convert("RGB")
preprocess = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0) # create a mini-batch as expected by the model
# move the input and model to GPU for speed if available
if torch.cuda.is_available():
input_batch = input_batch.to('cuda')
model.to('cuda')
with torch.no_grad():
output = model(input_batch)['out'][0]
output_predictions = output.argmax(0)
# plot the semantic segmentation predictions of 21 classes in each color
mask = np.uint8(255*(output_predictions.cpu().numpy() > 0))
#mask = output_predictions.byte().cpu().numpy()
return mask
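# create_mask returns a uint8 array the size of the input image: 255 wherever the
# DeepLabV3 argmax is any non-background class (prediction > 0) and 0 elsewhere,
# i.e. a single binary foreground mask rather than a per-class label map.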
def create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, is_crowd, bbox=None):
# Find contours (boundary lines) around each sub-mask
# Note: there could be multiple contours if the object
# is partially occluded. (E.g. an elephant behind a tree)
#contours = measure.find_contours(sub_mask, 0.5, positive_orientation='low')
padded_binary_mask = np.pad(sub_mask, pad_width=1, mode='constant', constant_values=0)
contours = measure.find_contours(padded_binary_mask, 0.5, positive_orientation='low')
segmentations = []
polygons = []
for contour in contours:
# Flip from (row, col) representation to (x, y)
# and subtract the padding pixel
for i in range(len(contour)):
row, col = contour[i]
contour[i] = (col - 1, row - 1)
# Make a polygon and simplify it
poly = Polygon(contour)
poly = poly.simplify(1.0, preserve_topology=False)
polygons.append(poly)
segmentation = np.array(poly.exterior.coords).ravel().tolist()
segmentations.append(segmentation)
# Combine the polygons to calculate the bounding box and area
multi_poly = MultiPolygon(polygons)
x, y, max_x, max_y = multi_poly.bounds
width = max_x - x
height = max_y - y
bbox = bbox if (bbox) else (x, y, width, height)
area = multi_poly.area
annotation = {
'segmentation': segmentations,
'iscrowd': is_crowd,
'image_id': image_id,
'category_id': category_id,
'id': annotation_id,
'bbox': bbox,
'area': area
}
return annotation
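# The dict above follows the COCO instance-annotation layout: 'segmentation' is a list
# of flattened [x1, y1, x2, y2, ...] polygons, 'bbox' is (x, y, width, height), and
# 'area' comes from the combined MultiPolygon. If a pre-computed bbox is passed in
# (as generate_masks does with the connected-component stats) it is used as-is.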
def generate_masks(data_dir, background=False):
dirs = os.listdir(data_dir)
    # create a color palette, selecting a color for each class
palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])
colors = torch.as_tensor([i for i in range(21)])[:, None] * palette
colors = (colors % 255).numpy().astype("uint8")
prcThresh = 3
images = []
annotations = []
image_id = 1
category_id = 1
annotation_id = 1
categories = []
for dir in tqdm(dirs):
files_dir = os.path.join(data_dir, dir)
if (not os.path.isdir(files_dir)):
continue
files = os.listdir(files_dir)
files = [file for file in files if "_mask" not in file]
category = {"supercategory": "object", "id": category_id, "name": dir}
categories.append(category)
for file in tqdm(files):
filename = os.path.join(data_dir, dir, file)
#print(filename)
image = Image.open(filename)
new_img={}
new_img["license"] = 0
new_img["file_name"] = os.path.join(dir, file)
new_img["width"] = int(image.size[0])
new_img["height"] = int(image.size[1])
new_img["id"] = image_id
images.append(new_img)
mask = create_mask(image)
if (background):
maskname = os.path.splitext(filename)[0] + "_mask.jpg"
maskObj = np.uint8(255*(mask==0))
Image.fromarray(maskObj).save(maskname)
#plt.imshow(np.array(image)[:,:,0]*mask)
#plt.show()
else:
nb_components, output, boxes, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
box_sizes = [box[4] for box in boxes[1:]]
for id in range(1, nb_components):
box = [int(b) for b in boxes[id][0:4]]
sub_mask = np.reshape(output==id, mask.shape).astype(np.double)
#plt.imshow(sub_mask)
#plt.show()
prc = 100*box_sizes[id-1]/(mask.shape[0]*mask.shape[1])
if (prc >= prcThresh):
try:
annotation = create_sub_mask_annotation(sub_mask, image_id, category_id, annotation_id, False, bbox=box)
annotations.append(annotation)
annotation_id += 1
except Exception as e:
print(e)
pass
#print(nb_components)
#print(output)
#print(stats)
#print(centroids)
# save mask for dominant big object
if (box_sizes):
max_ind = np.argmax(box_sizes)
#print(max_ind)
prc = 100*box_sizes[max_ind]/(mask.shape[0]*mask.shape[1])
#print(prc)
if (prc >= prcThresh):
maskname = os.path.splitext(filename)[0] + "_mask.jpg"
#print(maskname)
maskObj = np.uint8(255*np.reshape(1-(output==max_ind+1), mask.shape))
#maskObjN = 255-maskObj
#edgeSum = np.sum(maskObjN[:,0]) + np.sum(maskObjN[:,-1]) + np.sum(maskObjN[0,:]) + np.sum(maskObjN[-1,:])
#if (edgeSum == 0):
Image.fromarray(maskObj).save(maskname)
##mask.putpalette(colors)
#plt.subplot(121)
#plt.imshow(image)
#plt.subplot(122)
#plt.imshow(maskObj)
#plt.show()
image_id += 1
#if (image_id > 3):
# break
category_id += 1
#if (category_id > 3):
# break
print("saving annotations to coco as json ")
### create COCO JSON annotations
coco = {}
coco["info"] = COCO_INFO
coco["licenses"] = COCO_LICENSES
coco["images"] = images
coco["categories"] = categories
coco["annotations"] = annotations
    # TODO: specify coco file location
output_file_path = os.path.join(data_dir,"../", "coco_instances.json")
with open(output_file_path, 'w+') as json_file:
json_file.write(json.dumps(coco))
print(">> complete. find coco json here: ", output_file_path)
print("last annotation id: ", annotation_id)
print("last image_id: ", image_id)
#from pycocotools.coco import COCO
## Initialize the COCO api for instance annotations
#coco = COCO(output_file_path)
## Load the categories in a variable
#imgIds = coco.getImgIds()
#print("Number of images:", len(imgIds))
## load and display a random image
#for i in range(len(imgIds)):
# img = coco.loadImgs(imgIds[i])[0]
# I = Image.open(data_dir + "/" + img['file_name'])
# plt.clf()
# plt.imshow(I)
# plt.axis('off')
# annIds = coco.getAnnIds(imgIds=img['id'])
# anns = coco.loadAnns(annIds)
# coco.showAnns(anns, True)
# plt.waitforbuttonpress()
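# Typical invocation (hypothetical layout: one sub-folder of images per category under data_dir):
# generate_masks(data_dir, background=False)  # writes per-image *_mask.jpg files and a coco_instances.json next to data_dir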
if __name__ == "__main__":
data_dir = "E:/Research/Images/FineGrained/StanfordCars/train_bing/"
| 0
| 0
| 0
| 0
| 0
| 7,808
| 0
| 22
| 293
|
1abc147f5b65fc34db7ff312e43a5af4e6f6fb0a
| 21,660
|
py
|
Python
|
analysis/graveyard/study_definition.py
|
opensafely/antibody-and-antiviral-deployment
|
27cd171870fdd161468d1cabd1eaee76f1943593
|
[
"MIT"
] | null | null | null |
analysis/graveyard/study_definition.py
|
opensafely/antibody-and-antiviral-deployment
|
27cd171870fdd161468d1cabd1eaee76f1943593
|
[
"MIT"
] | 1
|
2022-03-18T16:20:19.000Z
|
2022-03-18T16:20:19.000Z
|
analysis/graveyard/study_definition.py
|
opensafely/antibody-and-antiviral-deployment
|
27cd171870fdd161468d1cabd1eaee76f1943593
|
[
"MIT"
] | null | null | null |
################################################################################
#
# Description: This script provides the formal specification of the study data
# that will be extracted from the OpenSAFELY database.
#
# Output: output/data/input_*.csv.gz
#
# Author(s): M Green (edited by H Curtis)
# Date last updated: 03/02/2022
#
################################################################################
# IMPORT STATEMENTS ----
## Import code building blocks from cohort extractor package
from cohortextractor import (StudyDefinition, patients, combine_codelists)
## Import codelists from codelist.py (which pulls them from the codelist folder)
# DEFINE STUDY POPULATION ----
## Define study time variables
from datetime import date
campaign_start = "2021-12-16"
end_date = date.today().isoformat()
## Define study population and variables
study = StudyDefinition(
# PRELIMINARIES ----
## Configure the expectations framework
default_expectations = {
"date": {"earliest": "2021-11-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.4,
},
## Define index date
index_date = campaign_start,
# POPULATION ----
population = patients.satisfying(
"""
(registered_eligible OR registered_treated)
AND
NOT has_died
AND
(sotrovimab_covid_therapeutics
OR molnupiravir_covid_therapeutics
OR casirivimab_covid_therapeutics
OR covid_test_positive
)
""",
has_died = patients.died_from_any_cause(
on_or_before = "index_date - 1 day",
returning = "binary_flag",
),
),
# TREATMENT - NEUTRALISING MONOCLONAL ANTIBODIES OR ANTIVIRALS ----
## Sotrovimab
sotrovimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Sotrovimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Molnupiravir
molnupiravir_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Molnupiravir",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Casirivimab and imdevimab
casirivimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Casirivimab and imdevimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
date_treated = patients.minimum_of(
"sotrovimab_covid_therapeutics",
"molnupiravir_covid_therapeutics",
"casirivimab_covid_therapeutics",
),
# ELIGIBILITY CRITERIA VARIABLES ----
## Inclusion criteria variables
### SARS-CoV-2 test
# Note patients are eligible for treatment if diagnosed <=5d ago
# in the latest 5 days there may be patients identified as eligible who have not yet been treated
covid_test_positive = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.2
},
),
covid_test_date = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
returning = "date",
date_format = "YYYY-MM-DD",
on_or_after = "index_date - 5 days",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.9
},
),
covid_positive_test_type = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "case_category",
on_or_after = "index_date - 5 days",
restrict_to_earliest_specimen_date = True,
return_expectations = {
"category": {"ratios": {"LFT_Only": 0.4, "PCR_Only": 0.4, "LFT_WithPCR": 0.2}},
"incidence": 0.2,
},
),
covid_positive_previous_30_days = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
between = ["covid_test_date - 31 days", "covid_test_date - 1 day"],
find_last_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.05
},
),
### Onset of symptoms of COVID-19
symptomatic_covid_test = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "any",
returning = "symptomatic",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations={
"incidence": 0.1,
"category": {
"ratios": {
"": 0.2,
"N": 0.2,
"Y": 0.6,
}
},
},
),
covid_symptoms_snomed = patients.with_these_clinical_events(
covid_symptoms_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
on_or_after = "index_date - 5 days",
),
# CENSORING ----
registered_eligible = patients.registered_as_of("covid_test_date"),
registered_treated = patients.registered_as_of("date_treated"),
## Death of any cause
death_date = patients.died_from_any_cause(
returning = "date_of_death",
date_format = "YYYY-MM-DD",
on_or_after = "covid_test_date",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
## De-registration
dereg_date = patients.date_deregistered_from_all_supported_practices(
on_or_after = "covid_test_date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
### Blueteq high risk cohort
high_risk_cohort_covid_therapeutics = patients.with_covid_therapeutics(
with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir","Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "risk_group",
date_format = "YYYY-MM-DD",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"Down's syndrome": 0.1,
"Sickle cell disease": 0.1,
"solid cancer": 0.1,
"haematological diseases, stem cell transplant recipients": 0.1,
"renal disease": 0.1,
"liver disease": 0.1,
"immune-mediated inflammatory disorders (IMID)": 0.2,
"Primary immune deficiencies": 0.1,
"HIV/AIDS": 0.1,},},
},
),
### NHSD high risk cohort (codelist to be defined if/when data available)
# high_risk_cohort_nhsd = patients.with_these_clinical_events(
# high_risk_cohort_nhsd_codes,
# between = [campaign_start, index_date],
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# ),
## Exclusion criteria
### Pattern of clinical presentation indicates that there is recovery rather than risk of deterioration from infection
# (not currently possible to define/code)
### Require hospitalisation for COVID-19
## NB this data lags behind the therapeutics/testing data so may be missing
covid_hospital_admission_date = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = covid_icd10_codes,
on_or_after = "index_date - 5 days",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "index_date - 5 days", "latest": "index_date"},
"rate": "uniform",
"incidence": 0.05
},
),
### New supplemental oxygen requirement specifically for the management of COVID-19 symptoms
# (not currently possible to define/code)
### Children weighing less than 40kg
# (not currently possible to define/code)
### Children aged under 12 years
age = patients.age_as_of(
"index_date",
return_expectations = {
"rate": "universal",
"int": {"distribution": "population_ages"},
"incidence" : 0.9
},
),
### Known hypersensitivity reaction to the active substances or to any of the excipients of sotrovimab
# (not currently possible to define/code)
# HIGH RISK GROUPS ----
## Down's syndrome
downs_syndrome_nhsd_snomed = patients.with_these_clinical_events(
downs_syndrome_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
downs_syndrome_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = downs_syndrome_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
downs_syndrome_nhsd = patients.minimum_of("downs_syndrome_nhsd_snomed", "downs_syndrome_nhsd_icd10"),
## Sickle cell disease
sickle_cell_disease_nhsd_snomed = patients.with_these_clinical_events(
sickle_cell_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
sickle_cell_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = sickle_cell_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
sickle_cell_disease_nhsd = patients.minimum_of("sickle_cell_disease_nhsd_snomed", "sickle_cell_disease_nhsd_icd10"),
## Solid cancer
cancer_opensafely_snomed = patients.with_these_clinical_events(
combine_codelists(
non_haematological_cancer_opensafely_snomed_codes,
lung_cancer_opensafely_snomed_codes,
chemotherapy_radiotherapy_opensafely_snomed_codes
),
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
## Haematological diseases
haematopoietic_stem_cell_transplant_nhsd_snomed = patients.with_these_clinical_events(
haematopoietic_stem_cell_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
haematopoietic_stem_cell_transplant_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematopoietic_stem_cell_transplant_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematopoietic_stem_cell_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = haematopoietic_stem_cell_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
# haematological_malignancies_nhsd_snomed = patients.with_these_clinical_events(
# haematological_malignancies_nhsd_snomed_codes,
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# #on_or_before = "end_date",
# ),
haematological_malignancies_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematological_malignancies_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematological_disease_nhsd = patients.minimum_of("haematopoietic_stem_cell_transplant_nhsd_snomed",
"haematopoietic_stem_cell_transplant_nhsd_icd10",
"haematopoietic_stem_cell_transplant_nhsd_opcs4",
#"haematological_malignancies_nhsd_snomed",
"haematological_malignancies_nhsd_icd10"),
## Renal disease
ckd_stage_5_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
ckd_stage_5_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
ckd_stage_5_nhsd = patients.minimum_of("ckd_stage_5_nhsd_snomed", "ckd_stage_5_nhsd_icd10"),
## Liver disease
liver_disease_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
liver_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
liver_disease_nhsd = patients.minimum_of("liver_disease_nhsd_snomed", "liver_disease_nhsd_icd10"),
## Immune-mediated inflammatory disorders (IMID)
imid_nhsd = patients.with_these_clinical_events(
codelist = combine_codelists(immunosuppresant_drugs_dmd_codes, immunosuppresant_drugs_snomed_codes,
oral_steroid_drugs_dmd_codes,
oral_steroid_drugs_snomed_codes),
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## Primary immune deficiencies
immunosupression_nhsd = patients.with_these_clinical_events(
immunosupression_nhsd_codes,
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## HIV/AIDs
hiv_aids_nhsd_snomed = patients.with_these_clinical_events(
hiv_aids_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
hiv_aids_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = hiv_aids_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
hiv_aids_nhsd = patients.minimum_of("hiv_aids_nhsd_snomed", "hiv_aids_nhsd_icd10"),
## Solid organ transplant
solid_organ_transplant_nhsd_snomed = patients.with_these_clinical_events(
solid_organ_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
solid_organ_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = solid_organ_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
solid_organ_transplant_nhsd = patients.minimum_of("solid_organ_transplant_nhsd_snomed", "solid_organ_transplant_nhsd_opcs4"),
## Rare neurological conditions
### Multiple sclerosis
multiple_sclerosis_nhsd_snomed = patients.with_these_clinical_events(
multiple_sclerosis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
multiple_sclerosis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = multiple_sclerosis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
multiple_sclerosis_nhsd = patients.minimum_of("multiple_sclerosis_nhsd_snomed", "multiple_sclerosis_nhsd_icd10"),
### Motor neurone disease
motor_neurone_disease_nhsd_snomed = patients.with_these_clinical_events(
motor_neurone_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
motor_neurone_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = motor_neurone_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
motor_neurone_disease_nhsd = patients.minimum_of("motor_neurone_disease_nhsd_snomed", "motor_neurone_disease_nhsd_icd10"),
### Myasthenia gravis
myasthenia_gravis_nhsd_snomed = patients.with_these_clinical_events(
myasthenia_gravis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
myasthenia_gravis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = myasthenia_gravis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
myasthenia_gravis_nhsd = patients.minimum_of("myasthenia_gravis_nhsd_snomed", "myasthenia_gravis_nhsd_icd10"),
### Huntington's disease
huntingtons_disease_nhsd_snomed = patients.with_these_clinical_events(
huntingtons_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
huntingtons_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = huntingtons_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
huntingtons_disease_nhsd = patients.minimum_of("huntingtons_disease_nhsd_snomed", "huntingtons_disease_nhsd_icd10"),
# CLINICAL/DEMOGRAPHIC COVARIATES ----
## Sex
sex = patients.sex(
return_expectations = {
"rate": "universal",
"category": {"ratios": {"M": 0.49, "F": 0.51}},
}
),
## Ethnicity
ethnicity_primis = patients.with_these_clinical_events(
ethnicity_primis_codes,
returning = "category",
find_last_match_in_period = True,
include_date_of_match = False,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.75,
},
),
ethnicity_sus = patients.with_ethnicity_from_sus(
returning = "group_6",
use_most_frequent_code = True,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.8,
},
),
## Index of multiple deprivation
imd = patients.categorised_as(
{"0": "DEFAULT",
"1": """index_of_multiple_deprivation >=1 AND index_of_multiple_deprivation < 32844*1/5""",
"2": """index_of_multiple_deprivation >= 32844*1/5 AND index_of_multiple_deprivation < 32844*2/5""",
"3": """index_of_multiple_deprivation >= 32844*2/5 AND index_of_multiple_deprivation < 32844*3/5""",
"4": """index_of_multiple_deprivation >= 32844*3/5 AND index_of_multiple_deprivation < 32844*4/5""",
"5": """index_of_multiple_deprivation >= 32844*4/5 """,
},
index_of_multiple_deprivation = patients.address_as_of(
"index_date",
returning = "index_of_multiple_deprivation",
round_to_nearest = 100,
),
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"0": 0.01,
"1": 0.20,
"2": 0.20,
"3": 0.20,
"4": 0.20,
"5": 0.19,
}},
},
),
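  # The IMD rank runs from 1 (most deprived) to 32844, one rank per English LSOA in
  # IMD 2019, so the 32844*k/5 cut-offs above simply split patients into deprivation
  # quintiles; the "0" category is kept as a fallback for missing address data.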
## Region - NHS England 9 regions
region_nhs = patients.registered_practice_as_of(
"index_date",
returning = "nuts1_region_name",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
region_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir", "Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "region",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
## CMDUs/ICS
)
| 31.255411
| 128
| 0.673084
|
################################################################################
#
# Description: This script provides the formal specification of the study data
# that will be extracted from the OpenSAFELY database.
#
# Output: output/data/input_*.csv.gz
#
# Author(s): M Green (edited by H Curtis)
# Date last updated: 03/02/2022
#
################################################################################
# IMPORT STATEMENTS ----
## Import code building blocks from cohort extractor package
from cohortextractor import (
StudyDefinition,
patients,
codelist_from_csv,
codelist,
filter_codes_by_category,
combine_codelists,
Measure
)
## Import codelists from codelist.py (which pulls them from the codelist folder)
from codelists import *
# DEFINE STUDY POPULATION ----
## Define study time variables
from datetime import date
campaign_start = "2021-12-16"
end_date = date.today().isoformat()
## Define study population and variables
study = StudyDefinition(
# PRELIMINARIES ----
## Configure the expectations framework
default_expectations = {
"date": {"earliest": "2021-11-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.4,
},
## Define index date
index_date = campaign_start,
# POPULATION ----
population = patients.satisfying(
"""
(registered_eligible OR registered_treated)
AND
NOT has_died
AND
(sotrovimab_covid_therapeutics
OR molnupiravir_covid_therapeutics
OR casirivimab_covid_therapeutics
OR covid_test_positive
)
""",
has_died = patients.died_from_any_cause(
on_or_before = "index_date - 1 day",
returning = "binary_flag",
),
),
# TREATMENT - NEUTRALISING MONOCLONAL ANTIBODIES OR ANTIVIRALS ----
## Sotrovimab
sotrovimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Sotrovimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Molnupiravir
molnupiravir_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Molnupiravir",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
### Casirivimab and imdevimab
casirivimab_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = "Casirivimab and imdevimab",
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20"},
"incidence": 0.4
},
),
date_treated = patients.minimum_of(
"sotrovimab_covid_therapeutics",
"molnupiravir_covid_therapeutics",
"casirivimab_covid_therapeutics",
),
# ELIGIBILITY CRITERIA VARIABLES ----
## Inclusion criteria variables
### SARS-CoV-2 test
# Note patients are eligible for treatment if diagnosed <=5d ago
# in the latest 5 days there may be patients identified as eligible who have not yet been treated
covid_test_positive = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.2
},
),
covid_test_date = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
returning = "date",
date_format = "YYYY-MM-DD",
on_or_after = "index_date - 5 days",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.9
},
),
covid_positive_test_type = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "case_category",
on_or_after = "index_date - 5 days",
restrict_to_earliest_specimen_date = True,
return_expectations = {
"category": {"ratios": {"LFT_Only": 0.4, "PCR_Only": 0.4, "LFT_WithPCR": 0.2}},
"incidence": 0.2,
},
),
covid_positive_previous_30_days = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "positive",
returning = "binary_flag",
between = ["covid_test_date - 31 days", "covid_test_date - 1 day"],
find_last_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations = {
"incidence": 0.05
},
),
### Onset of symptoms of COVID-19
symptomatic_covid_test = patients.with_test_result_in_sgss(
pathogen = "SARS-CoV-2",
test_result = "any",
returning = "symptomatic",
on_or_after = "index_date - 5 days",
find_first_match_in_period = True,
restrict_to_earliest_specimen_date = False,
return_expectations={
"incidence": 0.1,
"category": {
"ratios": {
"": 0.2,
"N": 0.2,
"Y": 0.6,
}
},
},
),
covid_symptoms_snomed = patients.with_these_clinical_events(
covid_symptoms_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
on_or_after = "index_date - 5 days",
),
# CENSORING ----
registered_eligible = patients.registered_as_of("covid_test_date"),
registered_treated = patients.registered_as_of("date_treated"),
## Death of any cause
death_date = patients.died_from_any_cause(
returning = "date_of_death",
date_format = "YYYY-MM-DD",
on_or_after = "covid_test_date",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
## De-registration
dereg_date = patients.date_deregistered_from_all_supported_practices(
on_or_after = "covid_test_date",
date_format = "YYYY-MM-DD",
return_expectations = {
"date": {"earliest": "2021-12-20", "latest": "index_date"},
"incidence": 0.1
},
),
### Blueteq ‘high risk’ cohort
high_risk_cohort_covid_therapeutics = patients.with_covid_therapeutics(
with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir","Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "risk_group",
date_format = "YYYY-MM-DD",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"Down's syndrome": 0.1,
"Sickle cell disease": 0.1,
"solid cancer": 0.1,
"haematological diseases, stem cell transplant recipients": 0.1,
"renal disease": 0.1,
"liver disease": 0.1,
"immune-mediated inflammatory disorders (IMID)": 0.2,
"Primary immune deficiencies": 0.1,
"HIV/AIDS": 0.1,},},
},
),
### NHSD ‘high risk’ cohort (codelist to be defined if/when data available)
# high_risk_cohort_nhsd = patients.with_these_clinical_events(
# high_risk_cohort_nhsd_codes,
# between = [campaign_start, index_date],
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# ),
## Exclusion criteria
### Pattern of clinical presentation indicates that there is recovery rather than risk of deterioration from infection
# (not currently possible to define/code)
### Require hospitalisation for COVID-19
## NB this data lags behind the therapeutics/testing data so may be missing
covid_hospital_admission_date = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = covid_icd10_codes,
on_or_after = "index_date - 5 days",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "index_date - 5 days", "latest": "index_date"},
"rate": "uniform",
"incidence": 0.05
},
),
### New supplemental oxygen requirement specifically for the management of COVID-19 symptoms
# (not currently possible to define/code)
### Children weighing less than 40kg
# (not currently possible to define/code)
### Children aged under 12 years
age = patients.age_as_of(
"index_date",
return_expectations = {
"rate": "universal",
"int": {"distribution": "population_ages"},
"incidence" : 0.9
},
),
### Known hypersensitivity reaction to the active substances or to any of the excipients of sotrovimab
# (not currently possible to define/code)
# HIGH RISK GROUPS ----
## Down's syndrome
downs_syndrome_nhsd_snomed = patients.with_these_clinical_events(
downs_syndrome_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
downs_syndrome_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = downs_syndrome_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
downs_syndrome_nhsd = patients.minimum_of("downs_syndrome_nhsd_snomed", "downs_syndrome_nhsd_icd10"),
## Sickle cell disease
sickle_cell_disease_nhsd_snomed = patients.with_these_clinical_events(
sickle_cell_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
sickle_cell_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = sickle_cell_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
sickle_cell_disease_nhsd = patients.minimum_of("sickle_cell_disease_nhsd_snomed", "sickle_cell_disease_nhsd_icd10"),
## Solid cancer
cancer_opensafely_snomed = patients.with_these_clinical_events(
combine_codelists(
non_haematological_cancer_opensafely_snomed_codes,
lung_cancer_opensafely_snomed_codes,
chemotherapy_radiotherapy_opensafely_snomed_codes
),
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
## Haematological diseases
haematopoietic_stem_cell_transplant_nhsd_snomed = patients.with_these_clinical_events(
haematopoietic_stem_cell_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
haematopoietic_stem_cell_transplant_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematopoietic_stem_cell_transplant_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematopoietic_stem_cell_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = haematopoietic_stem_cell_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
# haematological_malignancies_nhsd_snomed = patients.with_these_clinical_events(
# haematological_malignancies_nhsd_snomed_codes,
# returning = "date",
# date_format = "YYYY-MM-DD",
# find_first_match_in_period = True,
# #on_or_before = "end_date",
# ),
haematological_malignancies_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = haematological_malignancies_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
haematological_disease_nhsd = patients.minimum_of("haematopoietic_stem_cell_transplant_nhsd_snomed",
"haematopoietic_stem_cell_transplant_nhsd_icd10",
"haematopoietic_stem_cell_transplant_nhsd_opcs4",
#"haematological_malignancies_nhsd_snomed",
"haematological_malignancies_nhsd_icd10"),
## Renal disease
ckd_stage_5_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
ckd_stage_5_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
ckd_stage_5_nhsd = patients.minimum_of("ckd_stage_5_nhsd_snomed", "ckd_stage_5_nhsd_icd10"),
## Liver disease
liver_disease_nhsd_snomed = patients.with_these_clinical_events(
ckd_stage_5_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
liver_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = ckd_stage_5_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
liver_disease_nhsd = patients.minimum_of("liver_disease_nhsd_snomed", "liver_disease_nhsd_icd10"),
## Immune-mediated inflammatory disorders (IMID)
imid_nhsd = patients.with_these_clinical_events(
codelist = combine_codelists(immunosuppresant_drugs_dmd_codes, immunosuppresant_drugs_snomed_codes,
oral_steroid_drugs_dmd_codes,
oral_steroid_drugs_snomed_codes),
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## Primary immune deficiencies
immunosupression_nhsd = patients.with_these_clinical_events(
immunosupression_nhsd_codes,
returning = "date",
find_last_match_in_period = True,
date_format = "YYYY-MM-DD",
),
## HIV/AIDs
hiv_aids_nhsd_snomed = patients.with_these_clinical_events(
hiv_aids_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
hiv_aids_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = hiv_aids_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
hiv_aids_nhsd = patients.minimum_of("hiv_aids_nhsd_snomed", "hiv_aids_nhsd_icd10"),
## Solid organ transplant
solid_organ_transplant_nhsd_snomed = patients.with_these_clinical_events(
solid_organ_transplant_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
solid_organ_transplant_nhsd_opcs4 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_procedures = solid_organ_transplant_nhsd_opcs4_codes,
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
return_expectations = {
"date": {"earliest": "2020-02-01"},
"rate": "exponential_increase",
"incidence": 0.01,
},
),
solid_organ_transplant_nhsd = patients.minimum_of("solid_organ_transplant_nhsd_snomed", "solid_organ_transplant_nhsd_opcs4"),
## Rare neurological conditions
### Multiple sclerosis
multiple_sclerosis_nhsd_snomed = patients.with_these_clinical_events(
multiple_sclerosis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
multiple_sclerosis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = multiple_sclerosis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
multiple_sclerosis_nhsd = patients.minimum_of("multiple_sclerosis_nhsd_snomed", "multiple_sclerosis_nhsd_icd10"),
### Motor neurone disease
motor_neurone_disease_nhsd_snomed = patients.with_these_clinical_events(
motor_neurone_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
motor_neurone_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = motor_neurone_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
motor_neurone_disease_nhsd = patients.minimum_of("motor_neurone_disease_nhsd_snomed", "motor_neurone_disease_nhsd_icd10"),
### Myasthenia gravis
myasthenia_gravis_nhsd_snomed = patients.with_these_clinical_events(
myasthenia_gravis_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
myasthenia_gravis_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = myasthenia_gravis_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
myasthenia_gravis_nhsd = patients.minimum_of("myasthenia_gravis_nhsd_snomed", "myasthenia_gravis_nhsd_icd10"),
### Huntington’s disease
huntingtons_disease_nhsd_snomed = patients.with_these_clinical_events(
huntingtons_disease_nhsd_snomed_codes,
returning = "date",
date_format = "YYYY-MM-DD",
find_first_match_in_period = True,
),
huntingtons_disease_nhsd_icd10 = patients.admitted_to_hospital(
returning = "date_admitted",
with_these_diagnoses = huntingtons_disease_nhsd_icd10_codes,
find_first_match_in_period = True,
date_format = "YYYY-MM-DD",
),
huntingtons_disease_nhsd = patients.minimum_of("huntingtons_disease_nhsd_snomed", "huntingtons_disease_nhsd_icd10"),
# CLINICAL/DEMOGRAPHIC COVARIATES ----
## Sex
sex = patients.sex(
return_expectations = {
"rate": "universal",
"category": {"ratios": {"M": 0.49, "F": 0.51}},
}
),
## Ethnicity
ethnicity_primis = patients.with_these_clinical_events(
ethnicity_primis_codes,
returning = "category",
find_last_match_in_period = True,
include_date_of_match = False,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.75,
},
),
ethnicity_sus = patients.with_ethnicity_from_sus(
returning = "group_6",
use_most_frequent_code = True,
return_expectations = {
"category": {"ratios": {"1": 0.2, "2": 0.2, "3": 0.2, "4": 0.2, "5": 0.2}},
"incidence": 0.8,
},
),
## Index of multiple deprivation
imd = patients.categorised_as(
{"0": "DEFAULT",
"1": """index_of_multiple_deprivation >=1 AND index_of_multiple_deprivation < 32844*1/5""",
"2": """index_of_multiple_deprivation >= 32844*1/5 AND index_of_multiple_deprivation < 32844*2/5""",
"3": """index_of_multiple_deprivation >= 32844*2/5 AND index_of_multiple_deprivation < 32844*3/5""",
"4": """index_of_multiple_deprivation >= 32844*3/5 AND index_of_multiple_deprivation < 32844*4/5""",
"5": """index_of_multiple_deprivation >= 32844*4/5 """,
},
index_of_multiple_deprivation = patients.address_as_of(
"index_date",
returning = "index_of_multiple_deprivation",
round_to_nearest = 100,
),
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"0": 0.01,
"1": 0.20,
"2": 0.20,
"3": 0.20,
"4": 0.20,
"5": 0.19,
}},
},
),
## Region - NHS England 9 regions
region_nhs = patients.registered_practice_as_of(
"index_date",
returning = "nuts1_region_name",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
region_covid_therapeutics = patients.with_covid_therapeutics(
#with_these_statuses = ["Approved", "Treatment Complete"],
with_these_therapeutics = ["Sotrovimab", "Molnupiravir", "Casirivimab and imdevimab"],
with_these_indications = "non_hospitalised",
on_or_after = "index_date",
find_first_match_in_period = True,
returning = "region",
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South West": 0.1,
"South East": 0.1,},},
},
),
## CMDUs/ICS
)
| 15
| 0
| 0
| 0
| 0
| 0
| 0
| 82
| 22
|
9c633934769dee6380c21948f3259c49e26608fa
| 5,146
|
py
|
Python
|
records_mover/db/bigquery/unloader.py
|
cwegrzyn/records-mover
|
e3b71d6c09d99d0bcd6a956b9d09d20f8abe98d2
|
[
"Apache-2.0"
] | 36
|
2020-03-17T11:56:51.000Z
|
2022-01-19T16:03:32.000Z
|
records_mover/db/bigquery/unloader.py
|
cwegrzyn/records-mover
|
e3b71d6c09d99d0bcd6a956b9d09d20f8abe98d2
|
[
"Apache-2.0"
] | 60
|
2020-03-02T23:13:29.000Z
|
2021-05-19T15:05:42.000Z
|
records_mover/db/bigquery/unloader.py
|
cwegrzyn/records-mover
|
e3b71d6c09d99d0bcd6a956b9d09d20f8abe98d2
|
[
"Apache-2.0"
] | 4
|
2020-08-11T13:17:37.000Z
|
2021-11-05T21:11:52.000Z
|
import logging
logger = logging.getLogger(__name__)
| 45.539823
| 115
| 0.666148
|
import sqlalchemy
from contextlib import contextmanager
from typing import List, Iterator, Optional, Union, Tuple
import logging
from google.cloud.bigquery.dbapi.connection import Connection
from google.cloud.bigquery.client import Client
from google.cloud.bigquery.job import ExtractJobConfig
from records_mover.db.unloader import Unloader
from records_mover.records.records_format import BaseRecordsFormat, AvroRecordsFormat
from records_mover.url.base import BaseDirectoryUrl
from records_mover.url.resolver import UrlResolver
from records_mover.records.unload_plan import RecordsUnloadPlan
from records_mover.records.records_directory import RecordsDirectory
from records_mover.db.errors import NoTemporaryBucketConfiguration
logger = logging.getLogger(__name__)
class BigQueryUnloader(Unloader):
def __init__(self,
db: Union[sqlalchemy.engine.Connection, sqlalchemy.engine.Engine],
url_resolver: UrlResolver,
gcs_temp_base_loc: Optional[BaseDirectoryUrl])\
-> None:
self.db = db
self.url_resolver = url_resolver
self.gcs_temp_base_loc = gcs_temp_base_loc
super().__init__(db=db)
def can_unload_format(self, target_records_format: BaseRecordsFormat) -> bool:
if isinstance(target_records_format, AvroRecordsFormat):
return True
return False
def can_unload_to_scheme(self, scheme: str) -> bool:
if scheme == 'gs':
return True
# Otherwise we'll need a temporary bucket configured for
# BigQuery to unload into
return self.gcs_temp_base_loc is not None
def known_supported_records_formats_for_unload(self) -> List[BaseRecordsFormat]:
return [AvroRecordsFormat()]
@contextmanager
def temporary_unloadable_directory_loc(self) -> Iterator[BaseDirectoryUrl]:
if self.gcs_temp_base_loc is None:
raise NoTemporaryBucketConfiguration('Please provide a scratch GCS URL in your config '
'(e.g., set SCRATCH_GCS_URL to a gs:// URL)')
else:
with self.gcs_temp_base_loc.temporary_directory() as temp_loc:
yield temp_loc
def _parse_bigquery_schema_name(self, schema: str) -> Tuple[Optional[str], str]:
# https://github.com/mxmzdlv/pybigquery/blob/master/pybigquery/sqlalchemy_bigquery.py#L320
dataset = None
project = None
schema_split = schema.split('.')
if len(schema_split) == 1:
dataset, = schema_split
elif len(schema_split) == 2:
project, dataset = schema_split
else:
raise ValueError(f"Could not understand schema name {schema}")
return (project, dataset)
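    # Illustrative behaviour of the parser above:
    #   'my_dataset'              -> (None, 'my_dataset')
    #   'my-project.my_dataset'   -> ('my-project', 'my_dataset')
    #   anything with more than one dot raises ValueError.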
def _extract_job_config(self, unload_plan: RecordsUnloadPlan) -> ExtractJobConfig:
config = ExtractJobConfig()
if isinstance(unload_plan.records_format, AvroRecordsFormat):
config.destination_format = 'AVRO'
# https://cloud.google.com/bigquery/docs/loading-data-cloud-storage-avro#logical_types
config.use_avro_logical_types = True
else:
raise NotImplementedError(f'Please add support for {unload_plan.records_format}')
return config
def unload(self,
schema: str,
table: str,
unload_plan: RecordsUnloadPlan,
directory: RecordsDirectory) -> Optional[int]:
if directory.scheme != 'gs':
with self.temporary_unloadable_directory_loc() as temp_gcs_loc:
temp_directory = RecordsDirectory(temp_gcs_loc)
out = self.unload(schema=schema,
table=table,
unload_plan=unload_plan,
directory=temp_directory)
temp_directory.copy_to(directory.loc)
return out
logger.info("Loading from records directory into BigQuery")
# https://googleapis.github.io/google-cloud-python/latest/bigquery/usage/tables.html#creating-a-table
connection: Connection =\
self.db.engine.raw_connection().connection
# https://google-cloud.readthedocs.io/en/latest/bigquery/generated/google.cloud.bigquery.client.Client.html
client: Client = connection._client
project_id, dataset_id = self._parse_bigquery_schema_name(schema)
job_config = self._extract_job_config(unload_plan)
records_format = unload_plan.records_format
filename = records_format.generate_filename('output')
destination_uri = directory.loc.file_in_this_directory(filename)
job = client.extract_table(f"{schema}.{table}",
destination_uri.url,
# Must match the destination dataset location.
job_config=job_config)
job.result() # Waits for table load to complete.
logger.info(f"Unloaded from {dataset_id}:{table} into {filename}")
directory.save_preliminary_manifest()
return None
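The schema-name handling in _parse_bigquery_schema_name above accepts either "dataset" or "project.dataset". A minimal standalone sketch of that parsing logic, lifted out of the class so it can be exercised without a BigQuery connection (the function name here is illustrative, not part of records-mover):

from typing import Optional, Tuple
def parse_bigquery_schema_name(schema: str) -> Tuple[Optional[str], str]:
    # Mirrors BigQueryUnloader._parse_bigquery_schema_name above.
    parts = schema.split('.')
    if len(parts) == 1:
        return (None, parts[0])
    if len(parts) == 2:
        return (parts[0], parts[1])
    raise ValueError(f"Could not understand schema name {schema}")
print(parse_bigquery_schema_name("my_dataset"))             # (None, 'my_dataset')
print(parse_bigquery_schema_name("my_project.my_dataset"))  # ('my_project', 'my_dataset')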
| 0
| 432
| 0
| 3,922
| 0
| 0
| 0
| 429
| 309
|
82eca7e21b92148d602ade08730e4aef0f573478
| 1,219
|
py
|
Python
|
depth_completion/config/resnet18_Baseline_config.py
|
tsunghan-mama/Depth-Completion
|
d73328d1d704470a6fd3859e2e1810bc311b1dc3
|
[
"MIT"
] | 67
|
2020-07-11T09:44:10.000Z
|
2022-03-30T07:38:46.000Z
|
depth_completion/config/resnet18_Baseline_config.py
|
tsunghan-mama/Depth-Completion
|
d73328d1d704470a6fd3859e2e1810bc311b1dc3
|
[
"MIT"
] | 8
|
2020-07-14T05:50:03.000Z
|
2022-01-19T09:07:46.000Z
|
depth_completion/config/resnet18_Baseline_config.py
|
patrickwu2/Depth-Completion
|
e9c52e2cb2dce558d6787e246bbc51c1670c16ca
|
[
"MIT"
] | 9
|
2019-10-12T01:09:51.000Z
|
2020-05-26T21:35:28.000Z
|
common_config = {
}
train_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0],
"seed": 7122,
"num_workers": 8,
"mode": "train",
"train_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 8,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.)},
"load_model_path": None,
"param_only": False,
"validation": True,
"valid_path": "/tmp2/tsunghan/new_matterport/v1",
"epoches": 100,
"save_prefix": "",
}
test_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0, 1, 2, 3],
"seed": 7122,
"num_workers": 8,
"mode": "test",
"test_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 1,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.), ('img_grad', 'img_grad_loss', 1e-3)},
"load_model_path": "/tmp2/tsunghan/twcc_data/twcc_experience_resnet/matterport_ResNet18SkipConnection_b10_lr0.0001_/epoch_13.pt",
"param_only": True,
"epoches": 100,
"save_prefix": "resnet",
"output":"/tmp2/tsunghan/experiment_result/mat_npy/r18sc_epo13",
}
| 27.088889
| 133
| 0.61854
|
common_config = {
}
train_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0],
"seed": 7122,
"num_workers": 8,
"mode": "train",
"train_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 8,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.)},
"load_model_path": None,
"param_only": False,
"validation": True,
"valid_path": "/tmp2/tsunghan/new_matterport/v1",
"epoches": 100,
"save_prefix": "",
}
test_config = {
"dataset_name": "matterport",
"model_name": "ResNet18SkipConnection",
"in_channel": 9,
"device_ids": [0, 1, 2, 3],
"seed": 7122,
"num_workers": 8,
"mode": "test",
"test_path": "/tmp2/tsunghan/new_matterport/v1",
"lr": 1e-4,
"batch_size": 1,
"loss_func": {('depth(L2)', 'depth_L2_loss', 1.), ('img_grad', 'img_grad_loss', 1e-3)},
"load_model_path": "/tmp2/tsunghan/twcc_data/twcc_experience_resnet/matterport_ResNet18SkipConnection_b10_lr0.0001_/epoch_13.pt",
"param_only": True,
"epoches": 100,
"save_prefix": "resnet",
"output":"/tmp2/tsunghan/experiment_result/mat_npy/r18sc_epo13",
}
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
38b4f6b2219146f62a43cb5525a1f50ceb4102df
| 660
|
py
|
Python
|
scheduler_task/study_apscheduler/examples/demo.py
|
2581676612/python
|
b309564a05838b23044bb8112fd4ef71307266b6
|
[
"MIT"
] | 112
|
2017-09-19T17:38:38.000Z
|
2020-05-27T18:00:27.000Z
|
scheduler_task/study_apscheduler/examples/demo.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | null | null | null |
scheduler_task/study_apscheduler/examples/demo.py
|
tomoncle/Python-notes
|
ce675486290c3d1c7c2e4890b57e3d0c8a1228cc
|
[
"MIT"
] | 56
|
2017-09-20T01:24:12.000Z
|
2020-04-16T06:19:31.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-13 11:33
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : demo.py
# @Product : PyCharm
# @Docs :
# @Source :
import os
from apscheduler.schedulers.blocking import BlockingScheduler
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job('sys:stdout.write', 'interval', seconds=3, args=['tick ...\n'])
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
| 26.4
| 85
| 0.587879
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-8-13 11:33 AM
# @Author : Tom.Lee
# @CopyRight : 2016-2017 OpenBridge by yihecloud
# @File : demo.py
# @Product : PyCharm
# @Docs :
# @Source :
import os
from apscheduler.schedulers.blocking import BlockingScheduler
if __name__ == '__main__':
scheduler = BlockingScheduler()
scheduler.add_job('sys:stdout.write', 'interval', seconds=3, args=['tick ...\n'])
print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
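The job above is registered by textual reference ('sys:stdout.write'); apscheduler also accepts a plain callable. A hedged variant of the same demo using a local function (same interval trigger, 3-second period):

from apscheduler.schedulers.blocking import BlockingScheduler
def tick():
    # Runs every 3 seconds until the scheduler is interrupted.
    print('tick ...')
scheduler = BlockingScheduler()
scheduler.add_job(tick, 'interval', seconds=3)
try:
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    pass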
| 6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0fc246feb45369af60c1a8007ad889850bd24825
| 4,829
|
py
|
Python
|
clearblade/ClearBladeCore.py
|
sraman0302/ClearBlade-Python-SDK
|
bde192ef86969c8d1c592f7697ca104bc2362408
|
[
"Apache-2.0"
] | 2
|
2018-05-10T18:38:04.000Z
|
2020-12-19T08:14:21.000Z
|
clearblade/ClearBladeCore.py
|
sraman0302/ClearBlade-Python-SDK
|
bde192ef86969c8d1c592f7697ca104bc2362408
|
[
"Apache-2.0"
] | 6
|
2018-01-13T17:05:51.000Z
|
2021-09-01T18:25:41.000Z
|
clearblade/ClearBladeCore.py
|
sraman0302/ClearBlade-Python-SDK
|
bde192ef86969c8d1c592f7697ca104bc2362408
|
[
"Apache-2.0"
] | 4
|
2018-11-08T21:18:08.000Z
|
2021-05-10T01:07:14.000Z
|
from __future__ import absolute_import
| 31.154839
| 168
| 0.600745
|
from __future__ import absolute_import
import atexit
from . import Users
from . import Devices
from . import Collections
from . import Messaging
from . import Code
from .Developers import * # allows you to import Developer from ClearBladeCore
from . import cbLogs
class System:
def __exitcode(self):
# forces all users to log out on system close.
# I did this to prevent possible token reuse
# after client code exits, even if they don't
# log their users out themselves.
while self.users:
self.users.pop(0).logout()
def __init__(self, systemKey, systemSecret, url="https://platform.clearblade.com", safe=True, sslVerify=True):
self.systemKey = systemKey
self.systemSecret = systemSecret
self.url = url
self.users = []
self.collections = []
self.messagingClients = []
self.devices = []
self.sslVerify = sslVerify
if not sslVerify:
cbLogs.warn("You have disabled SSL verification, this should only be done if your ClearBlade Platform instance is leveraging self signed SSL certificates.")
if safe:
atexit.register(self.__exitcode)
#############
# USERS #
#############
def User(self, email, password="", authToken=""):
user = Users.User(self, email, password=password, authToken=authToken)
if authToken == "":
user.authenticate()
return user
elif user.checkAuth():
return user
else:
cbLogs.error("Invalid User authToken")
exit(-1)
def AnonUser(self):
anon = Users.AnonUser(self)
anon.authenticate()
return anon
def registerUser(self, authenticatedUser, email, password):
n00b = Users.registerUser(self, authenticatedUser, email, password)
self.users.append(n00b)
return n00b
def ServiceUser(self, email, token):
user = Users.ServiceUser(self, email, token)
if user.checkAuth():
return user
else:
cbLogs.error("Service User ", email, "failed to Auth")
exit(-1)
###############
# DEVICES #
###############
def getDevices(self, authenticatedUser, query=None):
self.devices = Devices.getDevices(self, authenticatedUser, query)
return self.devices
def getDevice(self, authenticatedUser, name):
dev = Devices.getDevice(self, authenticatedUser, name)
return dev
def Device(self, name, key="", authToken=""):
dev = Devices.Device(system=self, name=name, key=key, authToken=authToken)
# check if dev in self.devices?
return dev
############
# DATA #
############
def Collection(self, authenticatedUser, collectionID="", collectionName=""):
if not collectionID and not collectionName:
cbLogs.error("beep")
exit(-1)
col = Collections.Collection(self, authenticatedUser, collectionID, collectionName)
self.collections.append(col)
return col
############
# MQTT #
############
def Messaging(self, user, port=1883, keepalive=30, url="", client_id="", use_tls=False):
msg = Messaging.Messaging(user, port, keepalive, url, client_id=client_id, use_tls=use_tls)
self.messagingClients.append(msg)
return msg
############
# CODE #
############
def Service(self, name):
return Code.Service(self, name)
class Query:
def __init__(self):
self.sorting = [] # only used in fetches. also, not implemented yet. TODO
self.filters = []
def Or(self, query):
# NOTE: you can't add filters after
# you Or two queries together.
# This function has to be the last step.
q = Query()
for filter in self.filters:
q.filters.append(filter)
for filter in query.filters:
q.filters.append(filter)
return q
def __addFilter(self, column, value, operator):
if len(self.filters) == 0:
self.filters.append([])
self.filters[0].append({operator: [{column: value}]})
def equalTo(self, column, value):
self.__addFilter(column, value, "EQ")
def greaterThan(self, column, value):
self.__addFilter(column, value, "GT")
def lessThan(self, column, value):
self.__addFilter(column, value, "LT")
def greaterThanEqualTo(self, column, value):
self.__addFilter(column, value, "GTE")
def lessThanEqualTo(self, column, value):
self.__addFilter(column, value, "LTE")
def notEqualTo(self, column, value):
self.__addFilter(column, value, "NEQ")
def matches(self, column, value):
self.__addFilter(column, value, "RE")
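A short usage sketch for the Query filter builder defined above; it only needs the Query class from this file in scope, and the column names are made up for illustration:

q1 = Query()
q1.equalTo('state', 'TX')
q1.greaterThan('temperature', 75)
q2 = Query()
q2.equalTo('state', 'CA')
# Or() copies the filter lists of both queries into a new Query,
# so the combined filter matches either branch.
combined = q1.Or(q2)
print(combined.filters)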
| 0
| 0
| 0
| 4,516
| 0
| 0
| 0
| -4
| 276
|
78df92a0ac52515a71841949cff2f4cccb3a01f0
| 698
|
py
|
Python
|
GoogleCodeJam2017/Round0/TidyNumbers/TidyNumbers.py
|
Jspsun/CompetitiveCoding
|
a815bbcdab1fb30bd83730a7abd3505bff8bfb78
|
[
"MIT"
] | null | null | null |
GoogleCodeJam2017/Round0/TidyNumbers/TidyNumbers.py
|
Jspsun/CompetitiveCoding
|
a815bbcdab1fb30bd83730a7abd3505bff8bfb78
|
[
"MIT"
] | null | null | null |
GoogleCodeJam2017/Round0/TidyNumbers/TidyNumbers.py
|
Jspsun/CompetitiveCoding
|
a815bbcdab1fb30bd83730a7abd3505bff8bfb78
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
__main__()
| 21.151515
| 71
| 0.465616
|
def __main__():
f = open("in.txt", 'r')
o = open("out.txt", 'w')
noOfCases = int(f.readline())
for testNo in range(noOfCases):
counter = 0
data = f.readline()
output = solver(data[:-1])
output = int(output)
o.write("Case #" + str(testNo + 1) + ": " + str(output) + "\n")
def solver(n):
n = list(n)
dex = inOrder(n)
while dex != -1:
n[dex] = str(int(n[dex]) - 1)
n = n[:dex + 1] + ['9'] * (len(n) - dex - 1)
dex = inOrder(n)
return ''.join(n)
def inOrder(n):
for i in range(len(n) - 1):
if n[i] > n[i + 1]:
return i
return -1
if __name__ == '__main__':
__main__()
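As a quick sanity check of solver() above (it returns the largest "tidy" number, i.e. one whose digits never decrease, that does not exceed the input), these calls can be run in the same module:

print(int(solver("332")))   # 299
print(int(solver("1000")))  # 999
print(int(solver("7")))     # 7
print(int(solver("1234")))  # 1234, already tidy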
| 0
| 0
| 0
| 0
| 0
| 585
| 0
| 0
| 68
|
9d9072a0352d441e7a4e2e3e0c976746c5e8f9af
| 986
|
py
|
Python
|
project_dashboard/projects/crud.py
|
KruizerChick/project-dashboard
|
aa1d3fa713e49049ac7184dbe44a1f915ff56906
|
[
"MIT"
] | null | null | null |
project_dashboard/projects/crud.py
|
KruizerChick/project-dashboard
|
aa1d3fa713e49049ac7184dbe44a1f915ff56906
|
[
"MIT"
] | null | null | null |
project_dashboard/projects/crud.py
|
KruizerChick/project-dashboard
|
aa1d3fa713e49049ac7184dbe44a1f915ff56906
|
[
"MIT"
] | null | null | null |
""" CRUD class for Projects app """
| 29
| 62
| 0.703854
|
""" CRUD class for Projects app """
from crudbuilder.abstract import BaseCrudBuilder
from .models.project import Project
from .models.stakeholder import Stakeholder
class ProjectCrud(BaseCrudBuilder):
""" CRUD class for Project model """
model = Project
search_fields = ["id", "name", "description"]
tables2_fields = ("name", "description", 'is_closed')
tables2_css_class = "table table-bordered table-condensed"
login_required = True
permission_required = True
# tables2_pagination = 20 # default is 10
modelform_excludes = ['created']
# permissions = {}
# custom_templates = {}
class StakeholderCrud(BaseCrudBuilder):
""" CRUD class for Stakeholder model """
model = Stakeholder
search_fields = ["full_name", ]
tables2_fields = ("full_name", "organization")
tables2_css_class = "table table-bordered table-condensed"
login_required = True
permission_required = True
modelform_excludes = ['created']
| 0
| 0
| 0
| 771
| 0
| 0
| 0
| 63
| 113
|
db476ed9048fe8a87e8164fd5dd10cfe61c7b0bf
| 486
|
py
|
Python
|
L1Trigger/L1TMuonOverlap/python/fakeOmtfFwVersion_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 2
|
2020-10-26T18:40:32.000Z
|
2021-04-10T16:33:25.000Z
|
L1Trigger/L1TMuonOverlap/python/fakeOmtfFwVersion_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 30
|
2015-11-04T11:42:27.000Z
|
2021-12-01T07:56:34.000Z
|
L1Trigger/L1TMuonOverlap/python/fakeOmtfFwVersion_cff.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 8
|
2016-03-25T07:17:43.000Z
|
2021-07-08T17:11:21.000Z
|
import FWCore.ParameterSet.Config as cms
omtfFwVersionSource = cms.ESSource(
"EmptyESSource",
recordName = cms.string('L1TMuonOverlapFwVersionRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
###OMTF FW ESProducer.
omtfFwVersion = cms.ESProducer(
"L1TMuonOverlapFwVersionESProducer",
algoVersion = cms.uint32(0x110),
layersVersion = cms.uint32(6),
patternsVersion = cms.uint32(3),
synthDate = cms.string("2001-01-01 00:00")
)
| 25.578947
| 58
| 0.716049
|
import FWCore.ParameterSet.Config as cms
omtfFwVersionSource = cms.ESSource(
"EmptyESSource",
recordName = cms.string('L1TMuonOverlapFwVersionRcd'),
iovIsRunNotTime = cms.bool(True),
firstValid = cms.vuint32(1)
)
###OMTF FW ESProducer.
omtfFwVersion = cms.ESProducer(
"L1TMuonOverlapFwVersionESProducer",
algoVersion = cms.uint32(0x110),
layersVersion = cms.uint32(6),
patternsVersion = cms.uint32(3),
synthDate = cms.string("2001-01-01 00:00")
)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
bccbd46e4500f876a02aadf6e0c1065d389cdf38
| 4,603
|
py
|
Python
|
planning/planning/page/check_in_out/check_in_out.py
|
nishta/planning
|
5be1574111b9b94ec75c74960ace4314985b0014
|
[
"MIT"
] | null | null | null |
planning/planning/page/check_in_out/check_in_out.py
|
nishta/planning
|
5be1574111b9b94ec75c74960ace4314985b0014
|
[
"MIT"
] | null | null | null |
planning/planning/page/check_in_out/check_in_out.py
|
nishta/planning
|
5be1574111b9b94ec75c74960ace4314985b0014
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
| 39.681034
| 291
| 0.74169
|
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
import datetime
from planning.planning.myfunction import mail_format_pms,actual_date_update,close_task_update
@frappe.whitelist()
def checking_checkout(task=None,check_status=None,name=None):
cur_date_time=frappe.utils.data.now ()
user_name=frappe.session.user
if(task):
if(check_status=="0"):
doctype="NNTask";
#select parent,members,employee_name,parenttype from `tabNNAssign` where parenttype=%s and employee_name=%s",(doctype,user_name)
count=frappe.db.sql("select task from `tabNNTask Check In Out` where status=1 and emp_name=%s",user_name);
if(count):
task=count[0][0]
frappe.msgprint("Please Checkout <b>"+ task+"</b> Task")
return "Not Valid"
else:
frappe.get_doc({
"doctype":"NNTask Check In Out",
"task":task,
"check_in":cur_date_time,
"status":1,
"emp_name":user_name
}).insert(ignore_permissions=True)
actual_date_update(task)
else:
hourly_rate=frappe.db.sql("""select hourly_rate from tabEmployee where employee_name=%s""",(user_name))
if(hourly_rate):
hourly_cost=hourly_rate[0][0]
else:
hourly_cost=0;
checkin_time=frappe.db.sql("""select check_in from `tabNNTask Check In Out` where name=%s""",name)
if(checkin_time):
checked_intime=checkin_time[0][0];
else:
checked_intime=0
time_diff_in_seconds=frappe.utils.data.time_diff_in_seconds(cur_date_time,checked_intime);
#frappe.msgprint(time_diff_in_seconds);
cost_for_seound=float(hourly_cost)/float(3600);
rate=(time_diff_in_seconds)*(cost_for_seound)
#frappe.msgprint(str(rate),raise_exception=1)
frappe.db.sql("""update `tabNNTask Check In Out` set check_out=%s,status=2,hourly_cost=%s,rate=%s where name=%s""",(cur_date_time,hourly_rate,rate,name))
else:
return "not"
@frappe.whitelist()
def getTask(doctype):
data=[]
user_name=frappe.session.user
select_task=frappe.db.sql("select name,parent,members,employee_name,parenttype from `tabNNAssign` where close_status=0 and parenttype=%s and employee_name=%s",(doctype,user_name))
if(select_task):
i=1;
values="";
for select_task_list in select_task:
sno=i;
assign_name=select_task_list[0];
task_name=select_task_list[1];
employee_id=select_task_list[2];
employee_name=select_task_list[3];
select_task_list=frappe.db.sql("""select task_list.project as project ,task_list.milestone as milestone,task_list.tasklist as task_list_name,task.duration as duration from `tabNNTasklist` task_list ,`tabNNTask` task where task.name=%s and task_list.tasklist=task.tasklist""",(task_name))
if(select_task_list):
project_name=select_task_list[0][0];
milestone=select_task_list[0][1];
task_list_name=select_task_list[0][2];
duration=select_task_list[0][3];
else:
project_name="";
milestone="";
status="Status";
close="Status";
status_che=1
checkin_status=frappe.db.sql("""select * from `tabNNTask Check In Out` where status=%s and task=%s and emp_name=%s order by creation desc""",(status_che,task_name,user_name))
if(checkin_status):
check_status=1;
check_status_name=checkin_status[0][0]
else:
check_status=0;
check_status_name="";
#worked_cocuation:
total_seconds=0;
working_hours=frappe.db.sql("""select check_in,check_out from `tabNNTask Check In Out` where status=2 and task=%s and emp_name=%s order by creation desc""",(task_name,user_name))
for working_hours_list in working_hours:
checkin_times=working_hours_list[0];
checkout_times=working_hours_list[1];
seconds=frappe.utils.data.time_diff_in_seconds(checkout_times,checkin_times);
#frappe.msgprint(seconds);
total_seconds=int(seconds)+int(total_seconds);
#frappe.msgprint(total_seconds);
worked_time=str(datetime.timedelta(seconds=total_seconds))
rows=[project_name]+[milestone]+[task_list_name]+[task_name]+[employee_name]+[check_status]+[check_status_name]+[duration]+[worked_time]+[assign_name]
data.append(rows)
i=i+1;
return data
@frappe.whitelist()
def close_task(assign_name=None,):
frappe.db.sql("""Update `tabNNAssign` set close_status=1 where name=%s""",(assign_name))
task=frappe.db.sql("""select parent from tabNNAssign where name=%s""",(assign_name))
mode=1;
task_name=task
if task:
doctype="NNTask";
count=frappe.db.sql("""select *from tabNNAssign where close_status=0 and parent=%s and parenttype=%s""",(task_name,doctype))
if not count:
close_task_update(task)
mail_format_pms(task_name,mode)
| 0
| 4,303
| 0
| 0
| 0
| 0
| 0
| 96
| 158
|
b88cc6b6407fec4332c3df0cdd6f4c0dc8c904b3
| 4,290
|
py
|
Python
|
packages/girder/plugins/oauth/girder_oauth/providers/google.py
|
ShenQianwithC/HistomicsTK
|
4ad7e72a7ebdabbdfc879254fad04ce7ca47e320
|
[
"Apache-2.0"
] | 1
|
2019-11-14T18:13:26.000Z
|
2019-11-14T18:13:26.000Z
|
packages/girder/plugins/oauth/girder_oauth/providers/google.py
|
ShenQianwithC/HistomicsTK
|
4ad7e72a7ebdabbdfc879254fad04ce7ca47e320
|
[
"Apache-2.0"
] | 3
|
2018-11-15T19:52:40.000Z
|
2022-02-14T21:56:22.000Z
|
packages/girder/plugins/oauth/girder_oauth/providers/google.py
|
ShenQianwithC/HistomicsTK
|
4ad7e72a7ebdabbdfc879254fad04ce7ca47e320
|
[
"Apache-2.0"
] | 3
|
2018-05-21T19:45:19.000Z
|
2019-04-08T19:53:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from .. import constants
| 35.75
| 79
| 0.571329
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from six.moves import urllib
from girder.api.rest import getApiUrl
from girder.exceptions import RestException
from girder.models.setting import Setting
from .base import ProviderBase
from .. import constants
class Google(ProviderBase):
_AUTH_URL = 'https://accounts.google.com/o/oauth2/auth'
_AUTH_SCOPES = ['profile', 'email']
_TOKEN_URL = 'https://accounts.google.com/o/oauth2/token'
_API_USER_URL = 'https://www.googleapis.com/plus/v1/people/me'
_API_USER_FIELDS = ('id', 'emails', 'name')
def getClientIdSetting(self):
return Setting().get(constants.PluginSettings.GOOGLE_CLIENT_ID)
def getClientSecretSetting(self):
return Setting().get(constants.PluginSettings.GOOGLE_CLIENT_SECRET)
@classmethod
def getUrl(cls, state):
clientId = Setting().get(constants.PluginSettings.GOOGLE_CLIENT_ID)
if clientId is None:
raise Exception('No Google client ID setting is present.')
callbackUrl = '/'.join((getApiUrl(), 'oauth', 'google', 'callback'))
query = urllib.parse.urlencode({
'response_type': 'code',
'access_type': 'online',
'client_id': clientId,
'redirect_uri': callbackUrl,
'state': state,
'scope': ' '.join(cls._AUTH_SCOPES)
})
return '%s?%s' % (cls._AUTH_URL, query)
def getToken(self, code):
params = {
'grant_type': 'authorization_code',
'code': code,
'client_id': self.clientId,
'client_secret': self.clientSecret,
'redirect_uri': self.redirectUri
}
resp = self._getJson(method='POST', url=self._TOKEN_URL,
data=params)
return resp
def getUser(self, token):
headers = {
'Authorization': ' '.join((
token['token_type'], token['access_token']))
}
# For privacy and efficiency, fetch only the specific needed fields
# https://developers.google.com/+/web/api/rest/#partial-response
query = urllib.parse.urlencode({
'fields': ','.join(self._API_USER_FIELDS)
})
resp = self._getJson(method='GET',
url='%s?%s' % (self._API_USER_URL, query),
headers=headers)
# Get user's OAuth2 ID
oauthId = resp.get('id')
if not oauthId:
raise RestException(
'Google Plus did not return a user ID.', code=502)
# Get user's email address
# Prefer email address with 'account' type
emails = [
email.get('value')
for email in resp.get('emails', [])
if email.get('type') == 'account'
]
if not emails:
# If an 'account' email can't be found, consider them all
emails = [
email.get('value')
for email in resp.get('emails', [])
]
if emails:
# Even if there are multiple emails, just use the first one
email = emails[0]
else:
raise RestException(
'This Google Plus user has no available email address.',
code=502)
# Get user's name
firstName = resp.get('name', {}).get('givenName', '')
lastName = resp.get('name', {}).get('familyName', '')
user = self._createOrReuseUser(oauthId, email, firstName, lastName)
return user
| 0
| 601
| 0
| 2,666
| 0
| 0
| 0
| 74
| 135
|
f00f0283a00861b00d8ace96a341aa1af6392dc8
| 177
|
py
|
Python
|
todoapp/todos/urls.py
|
dhavall13/REST-API-TodoCRUD
|
5d7179d12c4436e38658d9a7483497c8db99f4be
|
[
"MIT"
] | null | null | null |
todoapp/todos/urls.py
|
dhavall13/REST-API-TodoCRUD
|
5d7179d12c4436e38658d9a7483497c8db99f4be
|
[
"MIT"
] | null | null | null |
todoapp/todos/urls.py
|
dhavall13/REST-API-TodoCRUD
|
5d7179d12c4436e38658d9a7483497c8db99f4be
|
[
"MIT"
] | null | null | null |
from rest_framework import routers
from .api import TodoViewSet
router = routers.DefaultRouter()
router.register('api/todos', TodoViewSet, 'todos')
urlpatterns = router.urls
| 19.666667
| 50
| 0.79096
|
from rest_framework import routers
from .api import TodoViewSet
router = routers.DefaultRouter()
router.register('api/todos', TodoViewSet, 'todos')
urlpatterns = router.urls
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
8e57bc0091c782bab46c7958d378a4ddf117035a
| 378
|
py
|
Python
|
test.py
|
xiaoweiChen/OpenVINO_Model_Convert_Website
|
ce8b0d225d1e0228aace772e3017ad3154543688
|
[
"Apache-2.0"
] | 1
|
2019-11-12T07:11:39.000Z
|
2019-11-12T07:11:39.000Z
|
test.py
|
xiaoweiChen/OpenVINO_Model_Convert_Website
|
ce8b0d225d1e0228aace772e3017ad3154543688
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
xiaoweiChen/OpenVINO_Model_Convert_Website
|
ce8b0d225d1e0228aace772e3017ad3154543688
|
[
"Apache-2.0"
] | null | null | null |
import sys
from converter import processPreTrainModels
if __name__ == '__main__':
if len(sys.argv) < 4:
print("usage: {} proto caffemodel output_dir".format(sys.argv[0]))
exit(0)
proto = sys.argv[1]
model = sys.argv[2]
output = sys.argv[3]
file_path = processPreTrainModels(
proto,
model,
output)
print("file_path is", file_path)
| 19.894737
| 70
| 0.648148
|
import sys
from converter import processPreTrainModels
if __name__ == '__main__':
if len(sys.argv) < 4:
print("usage: {} proto caffemodel output_dir".format(sys.argv[0]))
exit(0)
proto = sys.argv[1]
model = sys.argv[2]
output = sys.argv[3]
file_path = processPreTrainModels(
proto,
model,
output)
print("file_path is", file_path)
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e837781e421b78fc059079fdefb0bdc32efc4414
| 3,229
|
py
|
Python
|
scripts/eval.py
|
zsinsense/demosaicnet
|
bbe8151cab86dbe46b76806cf9ec353994b389ff
|
[
"MIT"
] | null | null | null |
scripts/eval.py
|
zsinsense/demosaicnet
|
bbe8151cab86dbe46b76806cf9ec353994b389ff
|
[
"MIT"
] | null | null | null |
scripts/eval.py
|
zsinsense/demosaicnet
|
bbe8151cab86dbe46b76806cf9ec353994b389ff
|
[
"MIT"
] | null | null | null |
#!/bin/env python
"""Evaluate a demosaicking model."""
import argparse
import torch as th
from torch.utils.data import DataLoader
import ttools
from ttools.modules.image_operators import crop_like
import demosaicnet
LOG = ttools.get_logger(__name__)
def main(args):
"""Entrypoint to the training."""
# Load model parameters from checkpoint, if any
# meta = ttools.Checkpointer.load_meta(args.checkpoint_dir)
# if meta is None:
# LOG.warning("No checkpoint found at %s, aborting.", args.checkpoint_dir)
# return
meta = {
'mode': 'bayer',
'depth': 15,
'width': 64
}
data = demosaicnet.Dataset(args.data, download=False,
mode=meta["mode"],
subset=demosaicnet.TEST_SUBSET)
dataloader = DataLoader(
data, batch_size=1, num_workers=4, pin_memory=True, shuffle=False)
if meta["mode"] == demosaicnet.BAYER_MODE:
model = demosaicnet.BayerDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
elif meta["mode"] == demosaicnet.XTRANS_MODE:
model = demosaicnet.XTransDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
# checkpointer = ttools.Checkpointer(args.checkpoint_dir, model, meta=meta)
# checkpointer.load_latest() # Resume from checkpoint, if any.
state_dict = th.load(args.checkpoint_dir)
model.load_state_dict(state_dict)
# No need for gradients
for p in model.parameters():
p.requires_grad = False
mse_fn = th.nn.MSELoss()
psnr_fn = PSNR()
device = "cpu"
if th.cuda.is_available():
device = "cuda"
LOG.info("Using CUDA")
count = 0
mse = 0.0
psnr = 0.0
for idx, batch in enumerate(dataloader):
mosaic = batch[0].to(device)
target = batch[1].to(device)
output = model(mosaic)
target = crop_like(target, output)
output = th.clamp(output, 0, 1)
psnr_ = psnr_fn(output, target).item()
mse_ = mse_fn(output, target).item()
psnr += psnr_
mse += mse_
count += 1
LOG.info("Image %04d, PSNR = %.1f dB, MSE = %.5f", idx, psnr_, mse_)
mse /= count
psnr /= count
LOG.info("-----------------------------------")
LOG.info("Average, PSNR = %.1f dB, MSE = %.5f", psnr, mse)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data", help="root directory for the demosaicnet dataset.")
parser.add_argument("checkpoint_dir", help="directory with the model checkpoints.")
args = parser.parse_args()
ttools.set_logger(False)
main(args)
| 29.354545
| 87
| 0.569836
|
#!/bin/env python
"""Evaluate a demosaicking model."""
import argparse
import os
import time
import torch as th
from torch.utils.data import DataLoader
import numpy as np
import ttools
from ttools.modules.image_operators import crop_like
import demosaicnet
LOG = ttools.get_logger(__name__)
class PSNR(th.nn.Module):
def __init__(self):
super(PSNR, self).__init__()
self.mse = th.nn.MSELoss()
def forward(self, out, ref):
mse = self.mse(out, ref)
return -10*th.log10(mse+1e-12)
def main(args):
"""Entrypoint to the training."""
# Load model parameters from checkpoint, if any
# meta = ttools.Checkpointer.load_meta(args.checkpoint_dir)
# if meta is None:
# LOG.warning("No checkpoint found at %s, aborting.", args.checkpoint_dir)
# return
meta = {
'mode': 'bayer',
'depth': 15,
'width': 64
}
data = demosaicnet.Dataset(args.data, download=False,
mode=meta["mode"],
subset=demosaicnet.TEST_SUBSET)
dataloader = DataLoader(
data, batch_size=1, num_workers=4, pin_memory=True, shuffle=False)
if meta["mode"] == demosaicnet.BAYER_MODE:
model = demosaicnet.BayerDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
elif meta["mode"] == demosaicnet.XTRANS_MODE:
model = demosaicnet.XTransDemosaick(depth=meta["depth"],
width=meta["width"],
pretrained=True,
pad=False)
# checkpointer = ttools.Checkpointer(args.checkpoint_dir, model, meta=meta)
# checkpointer.load_latest() # Resume from checkpoint, if any.
state_dict = th.load(args.checkpoint_dir)
model.load_state_dict(state_dict)
# No need for gradients
for p in model.parameters():
p.requires_grad = False
mse_fn = th.nn.MSELoss()
psnr_fn = PSNR()
device = "cpu"
if th.cuda.is_available():
device = "cuda"
LOG.info("Using CUDA")
count = 0
mse = 0.0
psnr = 0.0
for idx, batch in enumerate(dataloader):
mosaic = batch[0].to(device)
target = batch[1].to(device)
output = model(mosaic)
target = crop_like(target, output)
output = th.clamp(output, 0, 1)
psnr_ = psnr_fn(output, target).item()
mse_ = mse_fn(output, target).item()
psnr += psnr_
mse += mse_
count += 1
LOG.info("Image %04d, PSNR = %.1f dB, MSE = %.5f", idx, psnr_, mse_)
mse /= count
psnr /= count
LOG.info("-----------------------------------")
LOG.info("Average, PSNR = %.1f dB, MSE = %.5f", psnr, mse)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data", help="root directory for the demosaicnet dataset.")
parser.add_argument("checkpoint_dir", help="directory with the model checkpoints.")
args = parser.parse_args()
ttools.set_logger(False)
main(args)
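The PSNR module above is just -10*log10(MSE + 1e-12); a small standalone check of that formula, assuming only torch is installed:

import torch as th
mse_fn = th.nn.MSELoss()
out = th.zeros(1, 3, 4, 4)
ref = th.full((1, 3, 4, 4), 0.1)
mse = mse_fn(out, ref)              # MSE of 0.01
psnr = -10 * th.log10(mse + 1e-12)  # about 20 dB
print(round(psnr.item(), 2))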
| 0
| 0
| 0
| 205
| 0
| 0
| 0
| -25
| 89
|
ace7c9af9eb249c27faf798e56fca31751c8a6ad
| 1,030
|
py
|
Python
|
lrp_toolbox/training_test.py
|
KushDen/deepimportance_code_release
|
5d16f1f95568dc402be6dfed4ad993ec0dbaa356
|
[
"MIT"
] | 18
|
2020-07-11T01:58:02.000Z
|
2021-09-17T07:08:34.000Z
|
lrp_toolbox/training_test.py
|
KushDen/deepimportance_code_release
|
5d16f1f95568dc402be6dfed4ad993ec0dbaa356
|
[
"MIT"
] | 13
|
2021-01-13T14:41:26.000Z
|
2021-12-29T02:15:10.000Z
|
lrp_toolbox/training_test.py
|
KushDen/deepimportance_code_release
|
5d16f1f95568dc402be6dfed4ad993ec0dbaa356
|
[
"MIT"
] | 8
|
2020-02-19T21:30:30.000Z
|
2022-03-11T01:34:33.000Z
|
'''
@author: Sebastian Lapuschkin
@maintainer: Sebastian Lapuschkin
@contact: [email protected], [email protected]
@date: 30.09.2015
@version: 1.0
@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek
@license : BSD-2-Clause
'''
import modules
import model_io
import numpy as np ; na = np.newaxis
D,N = 2,200000
#this is the XOR problem.
X = np.random.rand(N,D) #we want [NxD] data
X = (X > 0.5)*1.0
Y = X[:,0] == X[:,1]
Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels
X += np.random.randn(N,D)*0.1 # add some noise to the data.
#build a network
nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
#train the network.
nn.train(X,Y,Xval=X,Yval=Y, batchsize = 5)
#save the network
model_io.write(nn, '../xor_net_small_1000.txt')
| 28.611111
| 216
| 0.703883
|
'''
@author: Sebastian Lapuschkin
@maintainer: Sebastian Lapuschkin
@contact: [email protected], [email protected]
@date: 30.09.2015
@version: 1.0
@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek
@license : BSD-2-Clause
'''
import modules
import model_io
import numpy as np ; na = np.newaxis
D,N = 2,200000
#this is the XOR problem.
X = np.random.rand(N,D) #we want [NxD] data
X = (X > 0.5)*1.0
Y = X[:,0] == X[:,1]
Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels
X += np.random.randn(N,D)*0.1 # add some noise to the data.
#build a network
nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
#train the network.
nn.train(X,Y,Xval=X,Yval=Y, batchsize = 5)
#save the network
model_io.write(nn, '../xor_net_small_1000.txt')
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
c16cdfe67a57a720e41f4d1f6a82111d663200a5
| 149
|
py
|
Python
|
tests/iac_integration/cdk/testdata/cdk_v2/python/app.py
|
zhuhaow/aws-sam-cli
|
59d82ec6848b5a0cdd544d8ada838d4d34052971
|
[
"Apache-2.0"
] | 2,959
|
2018-05-08T21:48:56.000Z
|
2020-08-24T14:35:39.000Z
|
tests/iac_integration/cdk/testdata/cdk_v2/python/app.py
|
zhuhaow/aws-sam-cli
|
59d82ec6848b5a0cdd544d8ada838d4d34052971
|
[
"Apache-2.0"
] | 1,469
|
2018-05-08T22:44:28.000Z
|
2020-08-24T20:19:24.000Z
|
tests/iac_integration/cdk/testdata/cdk_v2/python/app.py
|
zhuhaow/aws-sam-cli
|
59d82ec6848b5a0cdd544d8ada838d4d34052971
|
[
"Apache-2.0"
] | 642
|
2018-05-08T22:09:19.000Z
|
2020-08-17T09:04:37.000Z
|
#!/usr/bin/env python3
from aws_cdk import App
from python.python_stack import PythonStack
app = App()
PythonStack(app, "TestStack")
app.synth()
| 13.545455
| 43
| 0.751678
|
#!/usr/bin/env python3
from aws_cdk import App
from python.python_stack import PythonStack
app = App()
PythonStack(app, "TestStack")
app.synth()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
9eeb1c341a09b93233cbe624f89cddfd33fcd2f2
| 940
|
py
|
Python
|
part4c.py
|
ddlatumalea/signal_analysis
|
9e62e553f56e4c60c7e0963187e01c262d8d820e
|
[
"MIT"
] | null | null | null |
part4c.py
|
ddlatumalea/signal_analysis
|
9e62e553f56e4c60c7e0963187e01c262d8d820e
|
[
"MIT"
] | null | null | null |
part4c.py
|
ddlatumalea/signal_analysis
|
9e62e553f56e4c60c7e0963187e01c262d8d820e
|
[
"MIT"
] | 1
|
2022-03-03T13:31:23.000Z
|
2022-03-03T13:31:23.000Z
|
import numpy as np
def fourier_transform(yi):
"""a, b = fourier_transform(yi).
Real-valued Fourier transform that determines the
coefficients of the Fourier series for a given
signal y. The coefficients of the cosine terms are
returned in the array a; those of the sine terms
in the array b. Frequencies start at zero and do
not exceed the Nyquist frequency.
    yi = {y1,y2,...,yn}
"""
xi = np.arange(yi.size)
length = yi.size // 2 + 1
a, b = np.empty(length), np.empty(length)
# Compute zero and Nyquist frequency cases
a[0] = np.mean(yi)
a[-1] = yi @ np.cos(np.pi * xi) / yi.size
b[0] = 0.0
b[-1] = 0.0
# Compute ordinary cases (overwrite Nyquist if odd length)
for index in range(1, length + yi.size % 2 - 1):
arg = 2.0 * np.pi * xi * index / yi.size
a[index] = 2.0 / yi.size * yi @ np.cos(arg)
b[index] = 2.0 / yi.size * yi @ np.sin(arg)
return a, b
| 39.166667
| 62
| 0.601064
|
import numpy as np
def fourier_transform(yi):
"""a, b = fourier_transform(yi).
Real-valued Fourier transform that determines the
coefficients of the Fourier series for a given
signal y. The coefficients of the cosine terms are
returned in the array a; those of the sine terms
in the array b. Frequencies start at zero and do
not exceed the Nyquist frequency.
    yi = {y1,y2,...,yn}
"""
xi = np.arange(yi.size)
length = yi.size // 2 + 1
a, b = np.empty(length), np.empty(length)
# Compute zero and Nyquist frequency cases
a[0] = np.mean(yi)
a[-1] = yi @ np.cos(np.pi * xi) / yi.size
b[0] = 0.0
b[-1] = 0.0
# Compute ordinary cases (overwrite Nyquist if odd length)
for index in range(1, length + yi.size % 2 - 1):
arg = 2.0 * np.pi * xi * index / yi.size
a[index] = 2.0 / yi.size * yi @ np.cos(arg)
b[index] = 2.0 / yi.size * yi @ np.sin(arg)
return a, b
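A small check of fourier_transform() above: a constant plus a single cosine should put all of its energy in a[0] and a[1]. This assumes the function is in scope and numpy is available:

import numpy as np
xi = np.arange(8)
yi = 3.0 + 2.0 * np.cos(2.0 * np.pi * xi / 8)
a, b = fourier_transform(yi)
print(np.round(a, 6))  # ~[3, 2, 0, 0, 0]: DC term 3, one cosine of amplitude 2
print(np.round(b, 6))  # ~[0, 0, 0, 0, 0]: no sine content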
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
686add8ace25e333d96d69d7abbb938d46abc531
| 1,453
|
py
|
Python
|
distance-betweeen-obj/main.py
|
CrispenGari/opencv-python
|
cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e
|
[
"MIT"
] | 1
|
2021-11-08T07:37:05.000Z
|
2021-11-08T07:37:05.000Z
|
distance-betweeen-obj/main.py
|
CrispenGari/opencv-python
|
cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e
|
[
"MIT"
] | null | null | null |
distance-betweeen-obj/main.py
|
CrispenGari/opencv-python
|
cfa862fbf3b8b2c8899b76cee2774d6fb72ba00e
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
points = []
letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
image = np.zeros((512, 512, 3), np.uint8)
while True:
cv2.putText(image, f'TO CLEAR THE POINTS PRESS (c)', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.imshow("DISTANCE BETWEEN TWO POINTS", image)
cv2.setMouseCallback("DISTANCE BETWEEN TWO POINTS", mouseEvent, None)
key = cv2.waitKey(1)
if key & 0xFF == 27:
cv2.destroyAllWindows()
break
elif key & 0xFF == ord('c'):
image = np.zeros((512, 512, 3), np.uint8)
points = []
# cm = pixels / 96 * 2.54
| 37.25641
| 126
| 0.604267
|
import cv2
import numpy as np
from math import pow, sqrt
points = []
letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
image = np.zeros((512, 512, 3), np.uint8)
def mouseEvent(event, x, y, params, flags):
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(image, (x, y), 5, (0, 0, 255), -1)
cv2.putText(image, letters[len(points) if len(points) < 26 else 0], (x, y), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
points.append((x, y))
if len(points) > 1:
last_two_points = points[-2:]
d, midpoint = findDistance(last_two_points)
cv2.putText(image, f'{round(d)} (px)', midpoint, cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.line(image, tuple(last_two_points[0]), tuple(last_two_points[1]),(0, 255, 0), 2)
return
def findDistance(points):
x1, y1 = points[0]
x2, y2 = points[1]
d = sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))
midpoint = tuple(([(x1 + x2)//2, (y1 + y2)//2]))
return d, midpoint
while True:
cv2.putText(image, f'TO CLEAR THE POINTS PRESS (c)', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
cv2.imshow("DISTANCE BETWEEN TWO POINTS", image)
cv2.setMouseCallback("DISTANCE BETWEEN TWO POINTS", mouseEvent, None)
key = cv2.waitKey(1)
if key & 0xFF == 27:
cv2.destroyAllWindows()
break
elif key & 0xFF == ord('c'):
image = np.zeros((512, 512, 3), np.uint8)
points = []
# cm = pixels / 96 * 2.54
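The trailing comment hints at converting the pixel distance to centimetres at 96 DPI; a small helper along those lines (the 96 DPI figure is an assumption carried over from that comment, and real displays vary):

def pixels_to_cm(pixels, dpi=96):
    # 1 inch = 2.54 cm, so cm = pixels / dpi * 2.54, matching the comment above.
    return pixels / dpi * 2.54
print(round(pixels_to_cm(192), 2))  # 5.08 cm at 96 DPI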
| 0
| 0
| 0
| 0
| 0
| 772
| 0
| 5
| 67
|
8e8c991f6293082c8cec862c8abc181e7ff19a46
| 1,948
|
py
|
Python
|
Learning/python_data_analysis8.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
Learning/python_data_analysis8.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
Learning/python_data_analysis8.py
|
VictoriaGuXY/MCO-Menu-Checker-Online
|
706e2e1bf7395cc344f382ea2ac53d964d459f86
|
[
"MIT"
] | null | null | null |
import pandas as pd
"""
output
"""
# Note: some output is shortened to save spaces.
# This file introduces methods to group data.
# Data from https://github.com/mwaskom/seaborn-data
df = pd.read_csv('E:\\tips.csv')
"""
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
5 25.29 4.71 Male No Sun Dinner 4
.. ... ... ... ... ... ... ...
240 27.18 2.00 Female Yes Sat Dinner 2
241 22.67 2.00 Male Yes Sat Dinner 2
242 17.82 1.75 Male No Sat Dinner 2
243 18.78 3.00 Female No Thur Dinner 2
[244 rows x 7 columns]
"""
# ------------------------------------------------------------------------------
# if we want to form group based on 'day' column
group = df.groupby('day')
# print out the first value (first line) in each group
print (group.first())
"""
total_bill tip sex smoker time size
day
Fri 28.97 3.00 Male Yes Dinner 2
Sat 20.65 3.35 Male No Dinner 3
Sun 16.99 1.01 Female No Dinner 2
Thur 27.20 4.00 Male No Lunch 4
"""
# print out the last value (last line) in each group
print (group.last())
"""
total_bill tip sex smoker time size
day
Fri 10.09 2.00 Female Yes Lunch 2
Sat 17.82 1.75 Male No Dinner 2
Sun 15.69 1.50 Male Yes Dinner 2
Thur 18.78 3.00 Female No Dinner 2
"""
| 32.466667
| 80
| 0.479466
|
import json
import pandas as pd
import numpy as np
from pandas import DataFrame
"""
output
"""
# Note: some output is shortened to save spaces.
# This file introduces methods to group data.
# Data from https://github.com/mwaskom/seaborn-data
df = pd.read_csv('E:\\tips.csv')
"""
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
5 25.29 4.71 Male No Sun Dinner 4
.. ... ... ... ... ... ... ...
240 27.18 2.00 Female Yes Sat Dinner 2
241 22.67 2.00 Male Yes Sat Dinner 2
242 17.82 1.75 Male No Sat Dinner 2
243 18.78 3.00 Female No Thur Dinner 2
[244 rows x 7 columns]
"""
# ------------------------------------------------------------------------------
# if we want to form group based on 'day' column
group = df.groupby('day')
# print out the first value (first line) in each group
print (group.first())
"""
total_bill tip sex smoker time size
day
Fri 28.97 3.00 Male Yes Dinner 2
Sat 20.65 3.35 Male No Dinner 3
Sun 16.99 1.01 Female No Dinner 2
Thur 27.20 4.00 Male No Lunch 4
"""
# print out the last value (last line) in each group
print (group.last())
"""
total_bill tip sex smoker time size
day
Fri 10.09 2.00 Female Yes Lunch 2
Sat 17.82 1.75 Male No Dinner 2
Sun 15.69 1.50 Male Yes Dinner 2
Thur 18.78 3.00 Female No Dinner 2
"""
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| -6
| 66
|
948080e247360f7be9e2aa7cdc3fd4bb0c67bdac
| 438
|
py
|
Python
|
functions/reportIssue.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
functions/reportIssue.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
functions/reportIssue.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
def reportIssue():
""" help()
Open a webbrowser with the visvis website at the issue list.
"""
import webbrowser
webbrowser.open("http://code.google.com/p/visvis/issues/list")
if __name__ == '__main__':
reportIssue()
| 23.052632
| 66
| 0.639269
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
def reportIssue():
""" help()
Open a webbrowser with the visvis website at the issue list.
"""
import webbrowser
webbrowser.open("http://code.google.com/p/visvis/issues/list")
if __name__ == '__main__':
reportIssue()
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
405b1e05e30665caf1b56d799edb993551a9f5b1
| 217
|
py
|
Python
|
thirdfile.py
|
1frenchfrog1/testgithub
|
7191e44d75ba50438d9c2fe8f0fcf9fcf3a2a991
|
[
"MIT"
] | null | null | null |
thirdfile.py
|
1frenchfrog1/testgithub
|
7191e44d75ba50438d9c2fe8f0fcf9fcf3a2a991
|
[
"MIT"
] | null | null | null |
thirdfile.py
|
1frenchfrog1/testgithub
|
7191e44d75ba50438d9c2fe8f0fcf9fcf3a2a991
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
def printme3( str ):
"This prints a passed string into this function"
print(str)
return
def printme3too( str ):
"This prints a passed string into this function"
print(str)
return
| 18.083333
| 51
| 0.686636
|
#!/usr/bin/python
def printme3( str ):
"This prints a passed string into this function"
print(str)
return
def printme3too( str ):
"This prints a passed string into this function"
print(str)
return
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
52c36ddcbbbc1ea0125baf76215d709418864b64
| 642
|
py
|
Python
|
lec7.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
lec7.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
lec7.py
|
uni-student234/ISAT252
|
4c0942919c432456fe26900c23f076161b4cc266
|
[
"MIT"
] | null | null | null |
"""
Week 2, day 7, lec 7
"""
# i = 5
# while i >= 0:
# i = i - 1
# if i == 3:
# # break #breaks the smallest loop
# # continue #skips the current iteration and moves on
# # pass #does nothing, but is placehold if you need something for syntax
# print(i)
# for word in 'hello world'.split():
# print(word)
# for str_item in word:
# if str_item == '1':
# break
# print(str_item)
# try:
# print(1/0)
# except ZeroDivisionError:
# print('error')
i = 5
while i >= 0:
try:
print(1/(i-3))
except:
pass
i = i - 1
| 20.0625
| 90
| 0.489097
|
"""
Week 2, day 7, lec 7
"""
# i = 5
# while i >= 0:
# i = i - 1
# if i == 3:
# # break #breaks the smallest loop
# # continue #skips the current iteration and moves on
# # pass #does nothing, but is placehold if you need something for syntax
# print(i)
# for word in 'hello world'.split():
# print(word)
# for str_item in word:
# if str_item == '1':
# break
# print(str_item)
# try:
# print(1/0)
# except ZeroDivisionError:
# print('error')
i = 5
while i >= 0:
try:
print(1/(i-3))
except:
pass
i = i - 1
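For the loop above, the bare except silently skips i == 3 (where 1/0 raises), so it prints 0.5, 1.0, -1.0, -0.5 and -0.3333333333333333. A sketch of the same loop with a narrower handler, which makes that intent explicit:

i = 5
while i >= 0:
    try:
        print(1/(i-3))
    except ZeroDivisionError:
        pass  # only swallow the division-by-zero case at i == 3
    i = i - 1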
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6446ebc359e3c3467ceb30fabeaa007c3100a7f7
| 11,447
|
py
|
Python
|
scripts/survivor_analysis/utils/annotate.py
|
a-paxton/oss-community-health
|
93ff4d266b5390b53d8ed59f71616de68bcfdda7
|
[
"MIT"
] | null | null | null |
scripts/survivor_analysis/utils/annotate.py
|
a-paxton/oss-community-health
|
93ff4d266b5390b53d8ed59f71616de68bcfdda7
|
[
"MIT"
] | 1
|
2022-03-22T19:32:27.000Z
|
2022-03-23T12:43:08.000Z
|
scripts/survivor_analysis/utils/annotate.py
|
a-paxton/oss-community-health
|
93ff4d266b5390b53d8ed59f71616de68bcfdda7
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from collections import Counter
from datetime import datetime
from nltk.tokenize import RegexpTokenizer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import re
def annotate_logs(comments, tickets):
"""
Annotates comments and tickets with additional information:
1. whether the body was updated (Boolean)
2. the number of PRs and issues opened by the comment author at the time
of the comment posting
3. comment order (comment dataframe only)
4. identify whether ticket is closed (Boolean; ticket dataframe only)
5. identify whether a comment is associated to an issue or a PR
Requires: pandas
Parameters
----------
comments : pd.DataFrame
tickets : pd.DataFrame
Returns
-------
The same dataframe, but with additional columns
Examples
--------
>> import pandas as pd
>> import utils
>> tickets = pd.read_csv("data/numpy/issues.tsv", sep="\t")
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate_logs(comments, tickets)
"""
# identify whether the body of comments or tickets were updated
comments["was_updated"] = comments["created_at"] != comments["updated_at"]
tickets["was_updated"] = tickets["created_at"] != tickets["updated_at"]
# comments df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_PR_created"] = num_PR_per_pers
# issues df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_PR_created"] = num_PR_per_pers
# comments df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_issue_created"] = num_issue_per_pers
# tickets df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_issue_created"] = num_issue_per_pers
# track the comment order
comments['comment_order'] = comments.sort_values(by=['created_at']) \
.groupby(by=['ticket_id']) \
.cumcount()
# identify whether the PR is closed
tickets['is_closed'] = pd.notnull(tickets['closed_at'])
mask = tickets["closed_at"].isnull()
tickets.loc[mask, "closed_at"] = pd.to_datetime(datetime.now())
open_duration = (
pd.to_datetime(tickets["closed_at"]) -
pd.to_datetime(tickets["created_at"]))
tickets["open_duration"] = open_duration.apply(
lambda x: x.total_seconds())
# Now we want to remove this estimate for anything created before 1970
m = [True if c.startswith("1970") else False
for c in tickets["created_at"]]
tickets.loc[m, "open_duration"] = np.nan
# For each comment, get the information on when the corresponding ticket
# has been opened when it is available (comments can also be added to
# commits)
tickets.set_index("ticket_id", inplace=True, drop=False)
    # We're using the reindex function to tackle the case where we don't have
# the ticket associated to a particular comment.
comments["ticket_created_at"] = tickets.reindex(
comments["ticket_id"])["created_at"].values
comments["type"] = tickets.reindex(
comments["ticket_id"])["type"].values
# Reset the old index
tickets.set_index("id", inplace=True, drop=False)
# return the dataframes
return comments, tickets
def body_cleanup(comments, grateful_list, bot_list):
"""
Prepare comment or issue dataframe for text analysis:
1. Count number of times gratitude words appear in HTML comments
(i.e., auto-generated templates for PRs and issues provided
by projects)
2. Remove HTML comments
3. Remove quoted text
4. Strip newlines
5. Count and remove code blocks
6. Identify other users referenced in body
7. Flag whether the author was a bot
Requires: pandas , nltk , collections , re
Parameters
----------
comments : pd.DataFrame, ideally annotated with `annotate_logs()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
bot_list : list or pd.Series of bot usernames to be ignored
Returns
-------
The same dataframe, but with cleaned body text and new columns
(code_blocks , referenced_users , bot_flag)
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, bot_list_df)
"""
# replace all NaN with empty strings
comments['body'] = comments['body'].replace(np.nan, '', regex=True)
# count thanks in HTML comments
comments['html_comments'] = comments['body'].str.findall('(\<\!--.*?--\>)').apply(' '.join)
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['html_tokenized'] = comments['html_comments'].apply(str.lower).apply(tokenizer.tokenize)
comments['html_word_count'] = comments['html_tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['automatic_grateful_count'] = (
comments['html_word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['automatic_grateful_list'] = (
comments['html_word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['html_tokenized',
'html_word_count'])
# remove the HTML comments from the body
comments['body'] = (comments['body'].str.replace(
"(<!--.*?-->)", " ",
regex=True,
flags=re.DOTALL))
# remove text quotes
comments['body'] = (comments['body'].replace(
"(^|\n|\r)+\>.*(?=\n|$)", " ",
regex=True))
# remove newlines
comments['body'] = (comments['body'].replace(
"[\n\r]+", " ", regex=True))
# count and then remove code blocks
comments['code_blocks'] = comments['body'].str.count("\`{3}")/2
comments['body'] = (comments['body'].replace(
"\`{3}.*\`{3}", " ", regex=True))
# identify other humans
comments['referenced_users'] = comments['body'].str.findall('@\w{1,}')
# identify bots
comments['bot_flag'] = comments['author_name'].isin(bot_list)
# return our dataframe
return comments
def add_sentiment(comments):
"""
Add sentiment analysis scores to comments dataframe:
* negative emotion
* positive emotion
* neutral emotion
* compound emotion
Requires: pandas , vaderSentiment
For more on vaderSentiment, see https://github.com/cjhutto/vaderSentiment
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
Returns
-------
The same dataframe but with new sentiment columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
>> comments = utils.annotate.body_cleanup(comments, bot_list_df)
>> comments = utils.annotate.add_sentiment(comments)
"""
# initialize sentiment analyzer
analyser = SentimentIntensityAnalyzer()
# remove NaNs
comments['body'] = comments['body'].replace(np.nan, ' ', regex=True)
# run sentiment analyzer over each comment body
sentiment_df = (
comments['body']
.apply(analyser.polarity_scores)
.astype(str)
.str.strip('{}')
.str.split(', ', expand=True))
# split the emotion output dictionary into new columns
# (thanks to https://stackoverflow.com/a/13053267 for partial solution)
comments['negative_emotion'] = sentiment_df[0].str.split(
': ').str[-1].astype(float)
comments['neutral_emotion'] = sentiment_df[1].str.split(
': ').str[-1].astype(float)
comments['positive_emotion'] = sentiment_df[2].str.split(
': ').str[-1].astype(float)
comments['compound_emotion'] = sentiment_df[3].str.split(
': ').str[-1].astype(float)
# return our dataframe
return comments
def add_gratitude(comments, grateful_list):
"""
Track expressions of gratitude:
* overall counts
* specific words
Thanks to https://stackoverflow.com/a/47686394
Requires: pandas , nltk , collections
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
Returns
-------
The same dataframe but with new gratitude columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
    >> comments = utils.annotate.body_cleanup(comments, grateful_list, bot_list_df)
    >> comments = utils.annotate.add_gratitude(comments, grateful_list)
"""
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['tokenized'] = comments['body'].apply(
str.lower).apply(tokenizer.tokenize)
comments['word_count'] = comments['tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['grateful_count'] = (
comments['word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['grateful_list'] = (
comments['word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['tokenized', 'word_count'])
# spit back our dataframe now
return comments
| 34.478916
| 101
| 0.638857
|
import pandas as pd
import numpy as np
from collections import Counter
from datetime import datetime
from nltk.tokenize import RegexpTokenizer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import re
def annotate_logs(comments, tickets):
"""
Annotates comments and tickets with additional information:
1. whether the body was updated (Boolean)
2. the number of PRs and issues opened by the comment author at the time
of the comment posting
3. comment order (comment dataframe only)
4. identify whether ticket is closed (Boolean; ticket dataframe only)
5. identify whether a comment is associated to an issue or a PR
Requires: pandas
Parameters
----------
comments : pd.DataFrame
tickets : pd.DataFrame
Returns
-------
The same dataframe, but with additional columns
Examples
--------
>> import pandas as pd
>> import utils
>> tickets = pd.read_csv("data/numpy/issues.tsv", sep="\t")
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate_logs(comments, tickets)
"""
# identify whether the body of comments or tickets were updated
comments["was_updated"] = comments["created_at"] != comments["updated_at"]
tickets["was_updated"] = tickets["created_at"] != tickets["updated_at"]
# comments df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_PR_created"] = num_PR_per_pers
# issues df: add number of PRs created by author to date
num_PR_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "pull_request") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_PR_created"] = num_PR_per_pers
# comments df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(comments["created_at"], comments["author_id"])]
comments["num_issue_created"] = num_issue_per_pers
# tickets df: add number of issues created by author to date
num_issue_per_pers = [
sum((tickets["created_at"] < created_at) &
(tickets["type"] == "issue") &
(tickets["author_id"] == author_id))
for created_at, author_id
in zip(tickets["created_at"], tickets["author_id"])]
tickets["num_issue_created"] = num_issue_per_pers
# track the comment order
comments['comment_order'] = comments.sort_values(by=['created_at']) \
.groupby(by=['ticket_id']) \
.cumcount()
# identify whether the PR is closed
tickets['is_closed'] = pd.notnull(tickets['closed_at'])
mask = tickets["closed_at"].isnull()
tickets.loc[mask, "closed_at"] = pd.to_datetime(datetime.now())
open_duration = (
pd.to_datetime(tickets["closed_at"]) -
pd.to_datetime(tickets["created_at"]))
tickets["open_duration"] = open_duration.apply(
lambda x: x.total_seconds())
# Now we want to remove this estimate for anything created before 1970
    m = [c.startswith("1970") for c in tickets["created_at"]]
tickets.loc[m, "open_duration"] = np.nan
# For each comment, get the information on when the corresponding ticket
# has been opened when it is available (comments can also be added to
# commits)
tickets.set_index("ticket_id", inplace=True, drop=False)
    # We're using the reindex function to tackle the case where we don't have
    # the ticket associated with a particular comment.
comments["ticket_created_at"] = tickets.reindex(
comments["ticket_id"])["created_at"].values
comments["type"] = tickets.reindex(
comments["ticket_id"])["type"].values
# Reset the old index
tickets.set_index("id", inplace=True, drop=False)
# return the dataframes
return comments, tickets
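# Illustrative sketch (not part of the original module): the comment_order logic
# above on a tiny hypothetical frame, showing the 0-based position of each
# comment within its ticket, ordered by creation time.
#
#   >> toy = pd.DataFrame({'ticket_id': [1, 1, 2],
#   ..                     'created_at': ['2020-01-02', '2020-01-01', '2020-01-03']})
#   >> toy.sort_values(by=['created_at']).groupby(by=['ticket_id']).cumcount()
#   1    0
#   0    1
#   2    0
#   dtype: int64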
def body_cleanup(comments, grateful_list, bot_list):
"""
Prepare comment or issue dataframe for text analysis:
1. Count number of times gratitude words appear in HTML comments
(i.e., auto-generated templates for PRs and issues provided
by projects)
2. Remove HTML comments
3. Remove quoted text
4. Strip newlines
5. Count and remove code blocks
6. Identify other users referenced in body
7. Flag whether the author was a bot
Requires: pandas , nltk , collections , re
Parameters
----------
comments : pd.DataFrame, ideally annotated with `annotate_logs()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
bot_list : list or pd.Series of bot usernames to be ignored
Returns
-------
The same dataframe, but with cleaned body text and new columns
(code_blocks , referenced_users , bot_flag)
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
    >> comments = utils.annotate.body_cleanup(comments, grateful_list, bot_list_df)
"""
# replace all NaN with empty strings
comments['body'] = comments['body'].replace(np.nan, '', regex=True)
# count thanks in HTML comments
comments['html_comments'] = comments['body'].str.findall('(\<\!--.*?--\>)').apply(' '.join)
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['html_tokenized'] = comments['html_comments'].apply(str.lower).apply(tokenizer.tokenize)
comments['html_word_count'] = comments['html_tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['automatic_grateful_count'] = (
comments['html_word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['automatic_grateful_list'] = (
comments['html_word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['html_tokenized',
'html_word_count'])
# remove the HTML comments from the body
comments['body'] = (comments['body'].str.replace(
"(<!--.*?-->)", " ",
regex=True,
flags=re.DOTALL))
# remove text quotes
comments['body'] = (comments['body'].replace(
"(^|\n|\r)+\>.*(?=\n|$)", " ",
regex=True))
# remove newlines
comments['body'] = (comments['body'].replace(
"[\n\r]+", " ", regex=True))
# count and then remove code blocks
comments['code_blocks'] = comments['body'].str.count("\`{3}")/2
comments['body'] = (comments['body'].replace(
"\`{3}.*\`{3}", " ", regex=True))
# identify other humans
comments['referenced_users'] = comments['body'].str.findall('@\w{1,}')
# identify bots
comments['bot_flag'] = comments['author_name'].isin(bot_list)
# return our dataframe
return comments
def add_sentiment(comments):
"""
Add sentiment analysis scores to comments dataframe:
* negative emotion
* positive emotion
* neutral emotion
* compound emotion
Requires: pandas , vaderSentiment
For more on vaderSentiment, see https://github.com/cjhutto/vaderSentiment
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
Returns
-------
The same dataframe but with new sentiment columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
    >> comments = utils.annotate.body_cleanup(comments, grateful_list, bot_list_df)
>> comments = utils.annotate.add_sentiment(comments)
"""
# initialize sentiment analyzer
analyser = SentimentIntensityAnalyzer()
# remove NaNs
comments['body'] = comments['body'].replace(np.nan, ' ', regex=True)
# run sentiment analyzer over each comment body
sentiment_df = (
comments['body']
.apply(analyser.polarity_scores)
.astype(str)
.str.strip('{}')
.str.split(', ', expand=True))
# split the emotion output dictionary into new columns
# (thanks to https://stackoverflow.com/a/13053267 for partial solution)
comments['negative_emotion'] = sentiment_df[0].str.split(
': ').str[-1].astype(float)
comments['neutral_emotion'] = sentiment_df[1].str.split(
': ').str[-1].astype(float)
comments['positive_emotion'] = sentiment_df[2].str.split(
': ').str[-1].astype(float)
comments['compound_emotion'] = sentiment_df[3].str.split(
': ').str[-1].astype(float)
# return our dataframe
return comments
def add_gratitude(comments, grateful_list):
"""
Track expressions of gratitude:
* overall counts
* specific words
Thanks to https://stackoverflow.com/a/47686394
Requires: pandas , nltk , collections
Parameters
----------
comments : pd.DataFrame
ideally after `annotate_logs()` and `body_cleanup()`;
can be run with either comments df or issues/tickets df
grateful_list : list or pd.Series of gratitude words to identify;
currently works only with grateful unigrams
Returns
-------
The same dataframe but with new gratitude columns
Examples
--------
>> import pandas as pd
>> import utils
>> comments = pd.read_csv("data/numpy/comments.tsv", sep="\t")
>> comments, tickets = utils.annotate.annotate_logs(comments, tickets)
    >> comments = utils.annotate.body_cleanup(comments, grateful_list, bot_list_df)
    >> comments = utils.annotate.add_gratitude(comments, grateful_list)
"""
# tokenize and count words
tokenizer = RegexpTokenizer(r'\w+')
comments['tokenized'] = comments['body'].apply(
str.lower).apply(tokenizer.tokenize)
comments['word_count'] = comments['tokenized'].apply(lambda x: Counter(x))
# count words if they're in our grateful list
comments['grateful_count'] = (
comments['word_count'].apply(
lambda x: np.sum([v for k, v in x.items()
if k in grateful_list])))
# let us know which ones were used
comments['grateful_list'] = (
comments['word_count'].apply(
lambda x: [k for k in x if k in grateful_list]))
# remove the columns we don't need anymore
comments = comments.drop(columns=['tokenized', 'word_count'])
# spit back our dataframe now
return comments
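# Illustrative sketch (not part of the original module): a minimal call of
# add_gratitude() on a hypothetical two-row frame. The first row contains one
# word from the gratitude list, the second none, so grateful_count comes out as
# 1 and 0 respectively.
#
#   >> toy = pd.DataFrame({'body': ['Thanks a lot!', 'needs work']})
#   >> out = add_gratitude(toy, grateful_list=['thanks', 'thank'])
#   >> list(out['grateful_list'])
#   [['thanks'], []]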
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
46a90fe428c07ac7366934d1e4ee7724a8b4f434
| 352
|
py
|
Python
|
packages/Python/lldbsuite/test/python_api/sbtype_typeclass/TestSBTypeTypeClass.py
|
nathawes/swift-lldb
|
3cbf7470e0f9191ec1fc1c69ce8048c1dc64ec77
|
[
"Apache-2.0"
] | 427
|
2018-05-29T14:21:02.000Z
|
2022-03-16T03:17:54.000Z
|
packages/Python/lldbsuite/test/python_api/sbtype_typeclass/TestSBTypeTypeClass.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | 25
|
2018-07-23T08:34:15.000Z
|
2021-11-05T07:13:36.000Z
|
packages/Python/lldbsuite/test/python_api/sbtype_typeclass/TestSBTypeTypeClass.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | 52
|
2018-07-19T19:57:32.000Z
|
2022-03-11T16:05:38.000Z
|
from lldbsuite.test import decorators
from lldbsuite.test import lldbinline
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.skipIfFreeBSD, decorators.skipIfLinux,
decorators.skipIfWindows,
decorators.expectedFailureAll(
oslist=['macosx'], archs=['i386'],
bugnumber='rdar://28656677')])
| 32
| 57
| 0.6875
|
from lldbsuite.test import decorators
from lldbsuite.test import lldbinline
lldbinline.MakeInlineTest(
__file__, globals(), [
decorators.skipIfFreeBSD, decorators.skipIfLinux,
decorators.skipIfWindows,
decorators.expectedFailureAll(
oslist=['macosx'], archs=['i386'],
bugnumber='rdar://28656677')])
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
af4dceb229fa3c43802c126ad350cbf15950b67e
| 1,585
|
bzl
|
Python
|
js/extensions.bzl
|
stoiky/rules_js
|
e61b61b98c2f5c733bf804f78db9f55b1fb2d599
|
[
"Apache-2.0"
] | null | null | null |
js/extensions.bzl
|
stoiky/rules_js
|
e61b61b98c2f5c733bf804f78db9f55b1fb2d599
|
[
"Apache-2.0"
] | null | null | null |
js/extensions.bzl
|
stoiky/rules_js
|
e61b61b98c2f5c733bf804f78db9f55b1fb2d599
|
[
"Apache-2.0"
] | null | null | null |
"""Adapt repository rules in npm_import.bzl to be called from MODULE.bazel
See https://bazel.build/docs/bzlmod#extension-definition
"""
load("//js/private:pnpm_utils.bzl", "pnpm_utils")
load("//js/private:translate_pnpm_lock.bzl", translate_pnpm_lock_lib = "translate_pnpm_lock")
load("//js:npm_import.bzl", "npm_import", "translate_pnpm_lock")
load("//js/private:transitive_closure.bzl", "translate_to_transitive_closure")
npm = module_extension(
implementation = _extension_impl,
tag_classes = {
"translate_pnpm_lock": tag_class(attrs = dict({"name": attr.string()}, **translate_pnpm_lock_lib.attrs)),
# todo: support individual packages as well
# "package": tag_class(attrs = dict({"name": attr.string()}, **_npm_import.attrs)),
},
)
| 42.837838
| 113
| 0.637855
|
"""Adapt repository rules in npm_import.bzl to be called from MODULE.bazel
See https://bazel.build/docs/bzlmod#extension-definition
"""
load("//js/private:pnpm_utils.bzl", "pnpm_utils")
load("//js/private:translate_pnpm_lock.bzl", translate_pnpm_lock_lib = "translate_pnpm_lock")
load("//js:npm_import.bzl", "npm_import", "translate_pnpm_lock")
load("//js/private:transitive_closure.bzl", "translate_to_transitive_closure")
def _extension_impl(module_ctx):
for mod in module_ctx.modules:
for attr in mod.tags.translate_pnpm_lock:
lockfile = pnpm_utils.parse_pnpm_lock(module_ctx.read(attr.pnpm_lock))
trans = translate_to_transitive_closure(lockfile, attr.prod, attr.dev, attr.no_optional)
imports = translate_pnpm_lock_lib.gen_npm_imports(trans, attr)
for i in imports:
# fixme: pass the rest of the kwargs from i
npm_import(
name = i.name,
package = i.package,
version = i.pnpm_version,
link_packages = i.link_packages,
)
translate_pnpm_lock(
name = "npm",
pnpm_lock = attr.pnpm_lock,
)
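# Illustrative sketch (not part of the original file): roughly how a consumer's
# MODULE.bazel might invoke this extension. The "@rules_js" repository label and
# lockfile path below are hypothetical placeholders.
#
#   npm = use_extension("@rules_js//js:extensions.bzl", "npm")
#   npm.translate_pnpm_lock(
#       name = "npm",
#       pnpm_lock = "//:pnpm-lock.yaml",
#   )
#   use_repo(npm, "npm")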
npm = module_extension(
implementation = _extension_impl,
tag_classes = {
"translate_pnpm_lock": tag_class(attrs = dict({"name": attr.string()}, **translate_pnpm_lock_lib.attrs)),
# todo: support individual packages as well
# "package": tag_class(attrs = dict({"name": attr.string()}, **_npm_import.attrs)),
},
)
| 0
| 0
| 0
| 0
| 0
| 787
| 0
| 0
| 23
|
c7b09eb689ac8f721c4645e55ec33f8b5d1f82bf
| 32,780
|
py
|
Python
|
paasta_tools/tron_tools.py
|
zhaoyanh1202/paasta
|
b0c148786f44476fe351fe410f0b81f0c941f3b6
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/tron_tools.py
|
zhaoyanh1202/paasta
|
b0c148786f44476fe351fe410f0b81f0c941f3b6
|
[
"Apache-2.0"
] | null | null | null |
paasta_tools/tron_tools.py
|
zhaoyanh1202/paasta
|
b0c148786f44476fe351fe410f0b81f0c941f3b6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2018 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import glob
import json
import logging
import os
import pkgutil
import re
from typing import List
from typing import Tuple
import yaml
from service_configuration_lib import read_extra_service_information
try:
from yaml.cyaml import CSafeDumper as Dumper
except ImportError: # pragma: no cover (no libyaml-dev / pypy)
Dumper = yaml.SafeDumper # type: ignore
from paasta_tools.clusterman import get_clusterman_metrics
from paasta_tools.tron import tron_command_context
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import filter_templates_from_config
log = logging.getLogger(__name__)
logging.getLogger("tron").setLevel(logging.WARNING)
MASTER_NAMESPACE = "MASTER"
SPACER = "."
VALID_MONITORING_KEYS = set(
json.loads(
pkgutil.get_data("paasta_tools.cli", "schemas/tron_schema.json").decode()
)["definitions"]["job"]["properties"]["monitoring"]["properties"].keys()
)
MESOS_EXECUTOR_NAMES = ("paasta", "spark")
DEFAULT_AWS_REGION = "us-west-2"
clusterman_metrics, _ = get_clusterman_metrics()
def decompose_instance(instance):
"""Get (job_name, action_name) from an instance."""
decomposed = instance.split(SPACER)
if len(decomposed) != 2:
raise InvalidInstanceConfig("Invalid instance name: %s" % instance)
return (decomposed[0], decomposed[1])
def decompose_executor_id(executor_id) -> Tuple[str, str, int, str]:
"""(service, job, run_number, action)"""
service, job, str_run_number, action, _ = executor_id.split(SPACER)
return (service, job, int(str_run_number), action)
def parse_time_variables(command: str, parse_time: datetime.datetime = None) -> str:
"""Parses an input string and uses the Tron-style dateparsing
to replace time variables. Currently supports only the date/time
variables listed in the tron documentation:
http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc
:param input_string: input string to be parsed
:param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
:returns: A string with the date and time variables replaced
"""
if parse_time is None:
parse_time = datetime.datetime.now()
# We build up a tron context object that has the right
# methods to parse tron-style time syntax
job_context = tron_command_context.JobRunContext(
tron_command_context.CommandContext()
)
# The tron context object needs the run_time attribute set so it knows
# how to interpret the date strings
job_context.job_run.run_time = parse_time
return StringFormatter(job_context).format(command)
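# Illustrative sketch (not part of the original module): how parse_time_variables
# is meant to be used. "{shortdate}" is one of Tron's documented built-in
# command-context variables (see the URL in the docstring); the exact rendered
# value depends on Tron's date-parsing rules, so none is asserted here.
#
#   >> parse_time_variables("run_batch --date {shortdate}",
#   ..                      parse_time=datetime.datetime(2019, 3, 1))
#   # -> "run_batch --date <date derived from 2019-03-01>"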
def format_tron_action_dict(action_config):
"""Generate a dict of tronfig for an action, from the TronActionConfig.
:param job_config: TronActionConfig
"""
executor = action_config.get_executor()
result = {
"command": action_config.get_cmd(),
"executor": executor,
"requires": action_config.get_requires(),
"node": action_config.get_node(),
"retries": action_config.get_retries(),
"retries_delay": action_config.get_retries_delay(),
"expected_runtime": action_config.get_expected_runtime(),
"trigger_downstreams": action_config.get_trigger_downstreams(),
"triggered_by": action_config.get_triggered_by(),
"on_upstream_rerun": action_config.get_on_upstream_rerun(),
"trigger_timeout": action_config.get_trigger_timeout(),
}
if executor in MESOS_EXECUTOR_NAMES:
result["executor"] = "mesos"
result["cpus"] = action_config.get_cpus()
result["mem"] = action_config.get_mem()
result["disk"] = action_config.get_disk()
result["env"] = action_config.get_env()
result["extra_volumes"] = format_volumes(action_config.get_extra_volumes())
result["docker_parameters"] = [
{"key": param["key"], "value": param["value"]}
for param in action_config.format_docker_parameters()
]
constraint_labels = ["attribute", "operator", "value"]
result["constraints"] = [
dict(zip(constraint_labels, constraint))
for constraint in action_config.get_calculated_constraints()
]
result["docker_image"] = action_config.get_docker_url()
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
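# Illustrative sketch (not part of the original module): the "drop None values"
# pattern used above, on a toy dict, so Tron falls back to its own defaults for
# any key the action config leaves unset.
#
#   >> raw = {"command": "echo hi", "retries": None, "node": "paasta"}
#   >> {key: val for key, val in raw.items() if val is not None}
#   {'command': 'echo hi', 'node': 'paasta'}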
def format_tron_job_dict(job_config):
"""Generate a dict of tronfig for a job, from the TronJobConfig.
:param job_config: TronJobConfig
"""
action_dict = {
action_config.get_action_name(): format_tron_action_dict(action_config)
for action_config in job_config.get_actions()
}
result = {
"node": job_config.get_node(),
"schedule": job_config.get_schedule(),
"actions": action_dict,
"monitoring": job_config.get_monitoring(),
"queueing": job_config.get_queueing(),
"run_limit": job_config.get_run_limit(),
"all_nodes": job_config.get_all_nodes(),
"enabled": job_config.get_enabled(),
"allow_overlap": job_config.get_allow_overlap(),
"max_runtime": job_config.get_max_runtime(),
"time_zone": job_config.get_time_zone(),
"expected_runtime": job_config.get_expected_runtime(),
}
cleanup_config = job_config.get_cleanup_action()
if cleanup_config:
cleanup_action = format_tron_action_dict(cleanup_config)
result["cleanup_action"] = cleanup_action
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
def load_tron_service_config_no_cache(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
"""Load all configured jobs for a service, and any additional config values."""
config = read_extra_service_information(
service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
)
jobs = filter_templates_from_config(config)
job_configs = [
TronJobConfig(
name=name,
service=service,
cluster=cluster,
config_dict=job,
load_deployments=load_deployments,
soa_dir=soa_dir,
for_validation=for_validation,
)
for name, job in jobs.items()
]
return job_configs
def create_complete_config(service, cluster, soa_dir=DEFAULT_SOA_DIR):
"""Generate a namespace configuration file for Tron, for a service."""
job_configs = load_tron_service_config(
service=service, cluster=cluster, load_deployments=True, soa_dir=soa_dir
)
preproccessed_config = {}
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(job_config)
for job_config in job_configs
}
return yaml.dump(preproccessed_config, Dumper=Dumper, default_flow_style=False)
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
"""Returns the Tron clusters a service is configured to deploy to."""
search_re = r"/tron-([0-9a-z-_]*)\.yaml$"
service_dir = os.path.join(soa_dir, service)
clusters = []
for filename in glob.glob(f"{service_dir}/*.yaml"):
cluster_re_match = re.search(search_re, filename)
if cluster_re_match is not None:
clusters.append(cluster_re_match.group(1))
return clusters
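# Illustrative sketch (not part of the original module): what the cluster regex in
# list_tron_clusters() extracts from a hypothetical soa-configs filename.
#
#   >> re.search(r"/tron-([0-9a-z-_]*)\.yaml$",
#   ..           "/nail/etc/services/myservice/tron-norcal-prod.yaml").group(1)
#   'norcal-prod'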
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
"""Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"""
try:
service, job, job_run, action, uuid = task_id.split(".")
except Exception as e:
log.warning(
f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
)
service, job, action = "unknown_service", "unknown_job", "unknown_action"
return service, f"{job}.{action}"
| 35.864333
| 125
| 0.652013
|
# Copyright 2015-2018 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import difflib
import glob
import hashlib
import json
import logging
import os
import pkgutil
import re
import subprocess
import traceback
from string import Formatter
from typing import List
from typing import Tuple
import yaml
from service_configuration_lib import read_extra_service_information
from service_configuration_lib import read_yaml_file
from service_configuration_lib.spark_config import generate_clusterman_metrics_entries
from service_configuration_lib.spark_config import get_aws_credentials
from service_configuration_lib.spark_config import get_resources_requested
from service_configuration_lib.spark_config import get_spark_conf
from service_configuration_lib.spark_config import K8S_AUTH_FOLDER
from service_configuration_lib.spark_config import stringify_spark_env
from paasta_tools.mesos_tools import mesos_services_running_here
try:
from yaml.cyaml import CSafeDumper as Dumper
except ImportError: # pragma: no cover (no libyaml-dev / pypy)
Dumper = yaml.SafeDumper # type: ignore
from paasta_tools.clusterman import get_clusterman_metrics
from paasta_tools.tron.client import TronClient
from paasta_tools.tron import tron_command_context
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import DockerParameter
from paasta_tools.utils import DockerVolume
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import time_cache
from paasta_tools.utils import filter_templates_from_config
from paasta_tools.spark_tools import get_webui_url
from paasta_tools.spark_tools import inject_spark_conf_str
from paasta_tools import monitoring_tools
from paasta_tools.monitoring_tools import list_teams
from typing import Optional
from typing import Dict
from typing import Any
log = logging.getLogger(__name__)
logging.getLogger("tron").setLevel(logging.WARNING)
MASTER_NAMESPACE = "MASTER"
SPACER = "."
VALID_MONITORING_KEYS = set(
json.loads(
pkgutil.get_data("paasta_tools.cli", "schemas/tron_schema.json").decode()
)["definitions"]["job"]["properties"]["monitoring"]["properties"].keys()
)
MESOS_EXECUTOR_NAMES = ("paasta", "spark")
DEFAULT_AWS_REGION = "us-west-2"
clusterman_metrics, _ = get_clusterman_metrics()
class TronNotConfigured(Exception):
pass
class InvalidTronConfig(Exception):
pass
class TronConfig(dict):
"""System-level configuration for Tron."""
def __init__(self, config):
super().__init__(config)
def get_cluster_name(self):
""":returns The name of the Tron cluster"""
try:
return self["cluster_name"]
except KeyError:
raise TronNotConfigured(
"Could not find name of Tron cluster in system Tron config"
)
def get_url(self):
""":returns The URL for the Tron master's API"""
try:
return self["url"]
except KeyError:
raise TronNotConfigured(
"Could not find URL of Tron master in system Tron config"
)
def get_tronfig_folder(cluster, soa_dir):
return os.path.join(soa_dir, "tron", cluster)
def load_tron_config():
return TronConfig(load_system_paasta_config().get_tron_config())
def get_tron_client():
return TronClient(load_tron_config().get_url())
def compose_instance(job, action):
return f"{job}{SPACER}{action}"
def decompose_instance(instance):
"""Get (job_name, action_name) from an instance."""
decomposed = instance.split(SPACER)
if len(decomposed) != 2:
raise InvalidInstanceConfig("Invalid instance name: %s" % instance)
return (decomposed[0], decomposed[1])
def decompose_executor_id(executor_id) -> Tuple[str, str, int, str]:
"""(service, job, run_number, action)"""
service, job, str_run_number, action, _ = executor_id.split(SPACER)
return (service, job, int(str_run_number), action)
class StringFormatter(Formatter):
def __init__(self, context=None):
Formatter.__init__(self)
self.context = context
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
return kwds[key]
except KeyError:
return self.context[key]
else:
return Formatter.get_value(key, args, kwds)
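# Illustrative sketch (not part of the original module): StringFormatter falls
# back to the supplied context when a key is not passed as a keyword argument.
# The field names below are hypothetical.
#
#   >> StringFormatter({"name": "batch_daily"}).format("job={name} run={run_id}", run_id=7)
#   'job=batch_daily run=7'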
def parse_time_variables(command: str, parse_time: datetime.datetime = None) -> str:
"""Parses an input string and uses the Tron-style dateparsing
to replace time variables. Currently supports only the date/time
variables listed in the tron documentation:
http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc
:param input_string: input string to be parsed
:param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
:returns: A string with the date and time variables replaced
"""
if parse_time is None:
parse_time = datetime.datetime.now()
# We build up a tron context object that has the right
# methods to parse tron-style time syntax
job_context = tron_command_context.JobRunContext(
tron_command_context.CommandContext()
)
# The tron context object needs the run_time attribute set so it knows
# how to interpret the date strings
job_context.job_run.run_time = parse_time
return StringFormatter(job_context).format(command)
def pick_spark_ui_port(service, instance):
# We don't know what ports will be available on the agent that the driver
# will be scheduled on, so we just try to make them unique per service / instance.
hash_key = f"{service} {instance}".encode()
hash_number = int(hashlib.sha1(hash_key).hexdigest(), 16)
preferred_port = 33000 + (hash_number % 25000)
return preferred_port
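# Illustrative sketch (not part of the original module): the port picked above is
# deterministic per (service, instance) and always falls in [33000, 57999].
# The service and instance names below are hypothetical.
#
#   >> port = pick_spark_ui_port("myservice", "myjob.myaction")
#   >> port == pick_spark_ui_port("myservice", "myjob.myaction")
#   True
#   >> 33000 <= port < 58000
#   True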
class TronActionConfig(InstanceConfig):
config_filename_prefix = "tron"
def __init__(
self,
service,
instance,
cluster,
config_dict,
branch_dict,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
super().__init__(
cluster=cluster,
instance=instance,
service=service,
config_dict=config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
self.job, self.action = decompose_instance(instance)
# Indicate whether this config object is created for validation
self.for_validation = for_validation
def get_spark_config_dict(self):
spark_config_dict = getattr(self, "_spark_config_dict", None)
        # Cache the created dict so that we don't have to rebuild it multiple
        # times and end up with inconsistent results.
if spark_config_dict is not None:
return spark_config_dict
if self.get_spark_cluster_manager() == "mesos":
mesos_leader = (
f"zk://{load_system_paasta_config().get_zk_hosts()}"
if not self.for_validation
else "N/A"
)
else:
mesos_leader = None
aws_creds = get_aws_credentials(
aws_credentials_yaml=self.config_dict.get("aws_credentials_yaml")
)
self._spark_config_dict = get_spark_conf(
cluster_manager=self.get_spark_cluster_manager(),
spark_app_base_name=f"tron_spark_{self.get_service()}_{self.get_instance()}",
user_spark_opts=self.config_dict.get("spark_args", {}),
paasta_cluster=self.get_spark_paasta_cluster(),
paasta_pool=self.get_spark_paasta_pool(),
paasta_service=self.get_service(),
paasta_instance=self.get_instance(),
docker_img=self.get_docker_url(),
aws_creds=aws_creds,
extra_volumes=self.get_volumes(load_system_paasta_config().get_volumes()),
            # Tron uses environment variables to load the required creds
with_secret=False,
mesos_leader=mesos_leader,
            # load_system_paasta_config already loads the default volumes
load_paasta_default_volumes=False,
)
return self._spark_config_dict
def get_job_name(self):
return self.job
def get_action_name(self):
return self.action
def get_deploy_group(self) -> Optional[str]:
return self.config_dict.get("deploy_group", None)
def get_docker_url(
self, system_paasta_config: Optional[SystemPaastaConfig] = None
) -> str:
# It's okay for tronfig to contain things that aren't deployed yet - it's normal for developers to
        # push tronfig well before the job is scheduled to run; either they'll deploy the service before
        # the run, or they'll get notified when the job fails.
#
# This logic ensures that we can still pass validation and run setup_tron_namespace even if
# there's nothing in deployments.json yet.
return (
""
if not self.get_docker_image()
else super().get_docker_url(system_paasta_config=system_paasta_config)
)
def get_cmd(self):
command = self.config_dict.get("command")
if self.get_executor() == "spark":
# Spark expects to be able to write to MESOS_SANDBOX if it is set
# but the default value (/mnt/mesos/sandbox) doesn't get mounted in
# our Docker containers, so we unset it here. (Un-setting is fine,
# since Spark will just write to /tmp instead).
command = "unset MESOS_DIRECTORY MESOS_SANDBOX; " + inject_spark_conf_str(
command, stringify_spark_env(self.get_spark_config_dict())
)
return command
def get_spark_paasta_cluster(self):
return self.config_dict.get("spark_paasta_cluster", self.get_cluster())
def get_spark_paasta_pool(self):
return self.config_dict.get("spark_paasta_pool", "batch")
def get_spark_cluster_manager(self):
return self.config_dict.get("spark_cluster_manager", "mesos")
def get_env(self):
env = super().get_env()
if self.get_executor() == "spark":
spark_config_dict = self.get_spark_config_dict()
env["EXECUTOR_CLUSTER"] = self.get_spark_paasta_cluster()
env["EXECUTOR_POOL"] = self.get_spark_paasta_pool()
env["SPARK_OPTS"] = stringify_spark_env(spark_config_dict)
# The actual mesos secret will be decrypted and injected on mesos master when assigning
# tasks.
env["SPARK_MESOS_SECRET"] = "SHARED_SECRET(SPARK_MESOS_SECRET)"
if clusterman_metrics:
env["CLUSTERMAN_RESOURCES"] = json.dumps(
generate_clusterman_metrics_entries(
clusterman_metrics,
get_resources_requested(spark_config_dict),
spark_config_dict["spark.app.name"],
get_webui_url(spark_config_dict["spark.ui.port"]),
)
)
else:
env["CLUSTERMAN_RESOURCES"] = "{}"
if "AWS_ACCESS_KEY_ID" not in env or "AWS_SECRET_ACCESS_KEY" not in env:
try:
access_key, secret_key, session_token = get_aws_credentials(
service=self.get_service(),
aws_credentials_yaml=self.config_dict.get(
"aws_credentials_yaml"
),
)
env["AWS_ACCESS_KEY_ID"] = access_key
env["AWS_SECRET_ACCESS_KEY"] = secret_key
except Exception:
log.warning(
f"Cannot set AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment "
f"variables for tron action {self.get_instance()} of service "
f"{self.get_service()} via credentail file. Traceback:\n"
f"{traceback.format_exc()}"
)
if "AWS_DEFAULT_REGION" not in env:
env["AWS_DEFAULT_REGION"] = DEFAULT_AWS_REGION
return env
def get_extra_volumes(self):
extra_volumes = super().get_extra_volumes()
if (
self.get_executor() == "spark"
and self.get_spark_cluster_manager() == "kubernetes"
):
extra_volumes.append(
DockerVolume(
{
"hostPath": "/etc/pki/spark",
"containerPath": K8S_AUTH_FOLDER,
"mode": "RO",
}
)
)
return extra_volumes
def get_cpu_burst_add(self) -> float:
""" For Tron jobs, we don't let them burst by default, because they
don't represent "real-time" workloads, and should not impact
neighbors """
return self.config_dict.get("cpu_burst_add", 0)
def get_executor(self):
return self.config_dict.get("executor", "paasta")
def get_healthcheck_mode(self, _) -> None:
return None
def get_node(self):
return self.config_dict.get("node")
def get_retries(self):
return self.config_dict.get("retries")
def get_retries_delay(self):
return self.config_dict.get("retries_delay")
def get_requires(self):
return self.config_dict.get("requires")
def get_expected_runtime(self):
return self.config_dict.get("expected_runtime")
def get_triggered_by(self):
return self.config_dict.get("triggered_by", None)
def get_trigger_downstreams(self):
return self.config_dict.get("trigger_downstreams", None)
def get_on_upstream_rerun(self):
return self.config_dict.get("on_upstream_rerun", None)
def get_trigger_timeout(self):
return self.config_dict.get("trigger_timeout", None)
def get_calculated_constraints(self):
"""Combine all configured Mesos constraints."""
constraints = self.get_constraints()
if constraints is not None:
return constraints
else:
constraints = self.get_extra_constraints()
constraints.extend(
self.get_deploy_constraints(
blacklist=self.get_deploy_blacklist(),
whitelist=self.get_deploy_whitelist(),
# Don't have configs for the paasta cluster
system_deploy_blacklist=[],
system_deploy_whitelist=None,
)
)
constraints.extend(self.get_pool_constraints())
return constraints
def get_nerve_namespace(self) -> None:
return None
def validate(self):
error_msgs = []
error_msgs.extend(super().validate())
# Tron is a little special, because it can *not* have a deploy group
# But only if an action is running via ssh and not via paasta
if (
self.get_deploy_group() is None
and self.get_executor() in MESOS_EXECUTOR_NAMES
):
error_msgs.append(
f"{self.get_job_name()}.{self.get_action_name()} must have a deploy_group set"
)
return error_msgs
def format_docker_parameters(
self,
with_labels: bool = True,
system_paasta_config: Optional[SystemPaastaConfig] = None,
) -> List[DockerParameter]:
"""Formats extra flags for running docker. Will be added in the format
`["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
Note: values must be strings"""
parameters = super().format_docker_parameters(
with_labels=with_labels, system_paasta_config=system_paasta_config
)
if self.get_executor() == "spark":
parameters.append({"key": "net", "value": "host"})
return parameters
class TronJobConfig:
"""Represents a job in Tron, consisting of action(s) and job-level configuration values."""
def __init__(
self,
name: str,
config_dict: Dict[str, Any],
cluster: str,
service: Optional[str] = None,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
for_validation: bool = False,
) -> None:
self.name = name
self.config_dict = config_dict
self.cluster = cluster
self.service = service
self.load_deployments = load_deployments
self.soa_dir = soa_dir
# Indicate whether this config object is created for validation
self.for_validation = for_validation
def get_name(self):
return self.name
def get_node(self):
return self.config_dict.get("node", "paasta")
def get_schedule(self):
return self.config_dict.get("schedule")
def get_monitoring(self):
srv_monitoring = dict(
monitoring_tools.read_monitoring_config(self.service, soa_dir=self.soa_dir)
)
tron_monitoring = self.config_dict.get("monitoring", {})
srv_monitoring.update(tron_monitoring)
# filter out non-tron monitoring keys
srv_monitoring = {
k: v for k, v in srv_monitoring.items() if k in VALID_MONITORING_KEYS
}
return srv_monitoring
def get_queueing(self):
return self.config_dict.get("queueing")
def get_run_limit(self):
return self.config_dict.get("run_limit")
def get_all_nodes(self):
return self.config_dict.get("all_nodes")
def get_enabled(self):
return self.config_dict.get("enabled")
def get_allow_overlap(self):
return self.config_dict.get("allow_overlap")
def get_max_runtime(self):
return self.config_dict.get("max_runtime")
def get_time_zone(self):
return self.config_dict.get("time_zone")
def get_service(self) -> Optional[str]:
return self.service or self.config_dict.get("service")
def get_deploy_group(self) -> Optional[str]:
return self.config_dict.get("deploy_group", None)
def get_cluster(self):
return self.cluster
def get_expected_runtime(self):
return self.config_dict.get("expected_runtime")
def _get_action_config(self, action_name, action_dict):
action_service = action_dict.setdefault("service", self.get_service())
action_deploy_group = action_dict.setdefault(
"deploy_group", self.get_deploy_group()
)
if action_service and action_deploy_group and self.load_deployments:
try:
deployments_json = load_v2_deployments_json(
service=action_service, soa_dir=self.soa_dir
)
branch_dict = {
"docker_image": deployments_json.get_docker_image_for_deploy_group(
action_deploy_group
),
"git_sha": deployments_json.get_git_sha_for_deploy_group(
action_deploy_group
),
# TODO: add Tron instances when generating deployments json
"desired_state": "start",
"force_bounce": None,
}
except NoDeploymentsAvailable:
log.warning(
f'Docker image unavailable for {action_service}.{self.get_name()}.{action_dict.get("name")}'
" is it deployed yet?"
)
branch_dict = None
else:
branch_dict = None
action_dict["monitoring"] = self.get_monitoring()
return TronActionConfig(
service=action_service,
instance=compose_instance(self.get_name(), action_name),
cluster=self.get_cluster(),
config_dict=action_dict,
branch_dict=branch_dict,
soa_dir=self.soa_dir,
for_validation=self.for_validation,
)
def get_actions(self):
actions = self.config_dict.get("actions")
return [
self._get_action_config(name, action_dict)
for name, action_dict in actions.items()
]
def get_cleanup_action(self):
action_dict = self.config_dict.get("cleanup_action")
if not action_dict:
return None
# TODO: we should keep this trickery outside paasta repo
return self._get_action_config("cleanup", action_dict)
def check_monitoring(self) -> Tuple[bool, str]:
monitoring = self.get_monitoring()
valid_teams = list_teams()
if monitoring is not None:
team_name = monitoring.get("team", None)
if team_name is None:
return False, "Team name is required for monitoring"
elif team_name not in valid_teams:
suggest_teams = difflib.get_close_matches(
word=team_name, possibilities=valid_teams
)
return (
False,
f"Invalid team name: {team_name}. Do you mean one of these: {suggest_teams}",
)
return True, ""
def check_actions(self) -> Tuple[bool, List[str]]:
actions = self.get_actions()
cleanup_action = self.get_cleanup_action()
if cleanup_action:
actions.append(cleanup_action)
checks_passed = True
msgs: List[str] = []
for action in actions:
action_msgs = action.validate()
if action_msgs:
checks_passed = False
msgs.extend(action_msgs)
return checks_passed, msgs
def validate(self) -> List[str]:
_, error_msgs = self.check_actions()
checks = ["check_monitoring"]
for check in checks:
check_passed, check_msg = getattr(self, check)()
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def __eq__(self, other):
if isinstance(other, type(self)):
return self.config_dict == other.config_dict
return False
def format_volumes(paasta_volume_list):
return [
{
"container_path": v["containerPath"],
"host_path": v["hostPath"],
"mode": v["mode"],
}
for v in paasta_volume_list
]
def format_master_config(master_config, default_volumes, dockercfg_location):
mesos_options = master_config.get("mesos_options", {})
mesos_options.update(
{
"default_volumes": format_volumes(default_volumes),
"dockercfg_location": dockercfg_location,
}
)
master_config["mesos_options"] = mesos_options
return master_config
def format_tron_action_dict(action_config):
"""Generate a dict of tronfig for an action, from the TronActionConfig.
:param job_config: TronActionConfig
"""
executor = action_config.get_executor()
result = {
"command": action_config.get_cmd(),
"executor": executor,
"requires": action_config.get_requires(),
"node": action_config.get_node(),
"retries": action_config.get_retries(),
"retries_delay": action_config.get_retries_delay(),
"expected_runtime": action_config.get_expected_runtime(),
"trigger_downstreams": action_config.get_trigger_downstreams(),
"triggered_by": action_config.get_triggered_by(),
"on_upstream_rerun": action_config.get_on_upstream_rerun(),
"trigger_timeout": action_config.get_trigger_timeout(),
}
if executor in MESOS_EXECUTOR_NAMES:
result["executor"] = "mesos"
result["cpus"] = action_config.get_cpus()
result["mem"] = action_config.get_mem()
result["disk"] = action_config.get_disk()
result["env"] = action_config.get_env()
result["extra_volumes"] = format_volumes(action_config.get_extra_volumes())
result["docker_parameters"] = [
{"key": param["key"], "value": param["value"]}
for param in action_config.format_docker_parameters()
]
constraint_labels = ["attribute", "operator", "value"]
result["constraints"] = [
dict(zip(constraint_labels, constraint))
for constraint in action_config.get_calculated_constraints()
]
result["docker_image"] = action_config.get_docker_url()
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
def format_tron_job_dict(job_config):
"""Generate a dict of tronfig for a job, from the TronJobConfig.
:param job_config: TronJobConfig
"""
action_dict = {
action_config.get_action_name(): format_tron_action_dict(action_config)
for action_config in job_config.get_actions()
}
result = {
"node": job_config.get_node(),
"schedule": job_config.get_schedule(),
"actions": action_dict,
"monitoring": job_config.get_monitoring(),
"queueing": job_config.get_queueing(),
"run_limit": job_config.get_run_limit(),
"all_nodes": job_config.get_all_nodes(),
"enabled": job_config.get_enabled(),
"allow_overlap": job_config.get_allow_overlap(),
"max_runtime": job_config.get_max_runtime(),
"time_zone": job_config.get_time_zone(),
"expected_runtime": job_config.get_expected_runtime(),
}
cleanup_config = job_config.get_cleanup_action()
if cleanup_config:
cleanup_action = format_tron_action_dict(cleanup_config)
result["cleanup_action"] = cleanup_action
# Only pass non-None values, so Tron will use defaults for others
return {key: val for key, val in result.items() if val is not None}
def load_tron_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> TronActionConfig:
jobs = load_tron_service_config(
service=service,
cluster=cluster,
load_deployments=load_deployments,
soa_dir=soa_dir,
)
requested_job, requested_action = instance.split(".")
for job in jobs:
if job.get_name() == requested_job:
for action in job.get_actions():
if action.get_action_name() == requested_action:
return action
raise NoConfigurationForServiceError(
f"No tron configuration found for {service} {instance}"
)
@time_cache(ttl=5)
def load_tron_service_config(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
return load_tron_service_config_no_cache(
service, cluster, load_deployments, soa_dir, for_validation,
)
def load_tron_service_config_no_cache(
service,
cluster,
load_deployments=True,
soa_dir=DEFAULT_SOA_DIR,
for_validation=False,
):
"""Load all configured jobs for a service, and any additional config values."""
config = read_extra_service_information(
service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
)
jobs = filter_templates_from_config(config)
job_configs = [
TronJobConfig(
name=name,
service=service,
cluster=cluster,
config_dict=job,
load_deployments=load_deployments,
soa_dir=soa_dir,
for_validation=for_validation,
)
for name, job in jobs.items()
]
return job_configs
def create_complete_master_config(cluster, soa_dir=DEFAULT_SOA_DIR):
system_paasta_config = load_system_paasta_config()
tronfig_folder = get_tronfig_folder(soa_dir=soa_dir, cluster=cluster)
    config = read_yaml_file(os.path.join(tronfig_folder, "MASTER.yaml"))
master_config = format_master_config(
config,
system_paasta_config.get_volumes(),
system_paasta_config.get_dockercfg_location(),
)
return yaml.dump(master_config, Dumper=Dumper, default_flow_style=False)
def create_complete_config(service, cluster, soa_dir=DEFAULT_SOA_DIR):
"""Generate a namespace configuration file for Tron, for a service."""
job_configs = load_tron_service_config(
service=service, cluster=cluster, load_deployments=True, soa_dir=soa_dir
)
preproccessed_config = {}
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(job_config)
for job_config in job_configs
}
return yaml.dump(preproccessed_config, Dumper=Dumper, default_flow_style=False)
def validate_complete_config(
service: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
job_configs = load_tron_service_config(
service=service,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
for_validation=True,
)
# PaaSTA-specific validation
for job_config in job_configs:
check_msgs = job_config.validate()
if check_msgs:
return check_msgs
master_config_path = os.path.join(
os.path.abspath(soa_dir), "tron", cluster, MASTER_NAMESPACE + ".yaml"
)
preproccessed_config = {}
# Use Tronfig on generated config from PaaSTA to validate the rest
preproccessed_config["jobs"] = {
job_config.get_name(): format_tron_job_dict(job_config)
for job_config in job_configs
}
complete_config = yaml.dump(preproccessed_config, Dumper=Dumper)
proc = subprocess.run(
["tronfig", "-", "-V", "-n", service, "-m", master_config_path],
input=complete_config,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
if proc.returncode != 0:
process_errors = proc.stderr.strip()
if process_errors: # Error running tronfig
print(proc.stderr)
return [proc.stdout.strip()]
return []
def get_tron_namespaces(cluster, soa_dir):
tron_config_file = f"tron-{cluster}.yaml"
config_dirs = [
_dir[0]
for _dir in os.walk(os.path.abspath(soa_dir))
if tron_config_file in _dir[2]
]
namespaces = [os.path.split(config_dir)[1] for config_dir in config_dirs]
return namespaces
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
"""Returns the Tron clusters a service is configured to deploy to."""
search_re = r"/tron-([0-9a-z-_]*)\.yaml$"
service_dir = os.path.join(soa_dir, service)
clusters = []
for filename in glob.glob(f"{service_dir}/*.yaml"):
cluster_re_match = re.search(search_re, filename)
if cluster_re_match is not None:
clusters.append(cluster_re_match.group(1))
return clusters
def get_tron_dashboard_for_cluster(cluster: str):
dashboards = load_system_paasta_config().get_dashboard_links()[cluster]
if "Tron" not in dashboards:
raise Exception(f"tron api endpoint is not defined for cluster {cluster}")
return dashboards["Tron"]
def tron_jobs_running_here() -> List[Tuple[str, str, int]]:
return mesos_services_running_here(
framework_filter=lambda fw: fw["name"].startswith("tron"),
parse_service_instance_from_executor_id=parse_service_instance_from_executor_id,
)
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
"""Parses tron mesos task ids, like schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785"""
try:
service, job, job_run, action, uuid = task_id.split(".")
except Exception as e:
log.warning(
f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
)
service, job, action = "unknown_service", "unknown_job", "unknown_action"
return service, f"{job}.{action}"
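# Illustrative sketch (not part of the original module): the docstring's sample
# task id parses into a (service, "job.action") pair.
#
#   >> parse_service_instance_from_executor_id(
#   ..     "schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785")
#   ('schematizer', 'traffic_generator.turnstyle')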
| 0
| 259
| 0
| 17,423
| 0
| 4,488
| 0
| 775
| 1,122
|
6f6564a4b79638714786a730792e5cd34d3f9e05
| 1,755
|
py
|
Python
|
invenio_records_presentation/workflows/presentation.py
|
CESNET/invenio-records-presentation
|
547a2652a97feb1c6cd50e1ea917c2b5decb9286
|
[
"MIT"
] | null | null | null |
invenio_records_presentation/workflows/presentation.py
|
CESNET/invenio-records-presentation
|
547a2652a97feb1c6cd50e1ea917c2b5decb9286
|
[
"MIT"
] | 4
|
2019-03-19T16:18:22.000Z
|
2021-06-28T12:33:14.000Z
|
invenio_records_presentation/workflows/presentation.py
|
CESNET/invenio-records-presentation
|
547a2652a97feb1c6cd50e1ea917c2b5decb9286
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# Invenio Records Presentation is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
""" Example Presentation workflow."""
from invenio_records_presentation.workflows import presentation_workflow_factory
example = presentation_workflow_factory(task_list=[
print_extra_data,
create_example_file,
print_data,
transform_example_file,
output_example_file,
])
| 27
| 89
| 0.688889
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CESNET.
#
# Invenio Records Presentation is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
""" Example Presentation workflow."""
from invenio_workflows import WorkflowEngine
from invenio_records_presentation.api import PresentationOutputFile
from invenio_records_presentation.workflows import presentation_workflow_factory
def print_extra_data(obj, eng: WorkflowEngine):
print(obj.extra_data)
return obj
def print_data(obj, eng: WorkflowEngine):
print(obj.data)
return obj
def create_example_file(obj, eng: WorkflowEngine):
# creates an example input file and passes a path to it
input = obj.scratch.create_file(task_name='example_input')
with open(input, 'w') as tf:
tf.write("example file\n")
obj.data = input
return obj
def transform_example_file(obj, eng: WorkflowEngine):
input_data = ''
try:
with open(obj.data, 'r') as input:
input_data = input.read()
except OSError:
eng.abort() # Cannot read input data, abort workflow execution
output = obj.scratch.create_file(task_name='example_output')
with open(output, 'w') as tf:
tf.write(input_data.title())
obj.data = output
return obj
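# Illustrative sketch (not part of the original file): the transform above simply
# title-cases whatever the input scratch file contains, e.g.
#
#   >> "example file\n".title()
#   'Example File\n'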
def output_example_file(obj, eng: WorkflowEngine):
obj.data = PresentationOutputFile(path=obj.data,
mimetype='text/plain',
filename='example.txt')
return obj
example = presentation_workflow_factory(task_list=[
print_extra_data,
create_example_file,
print_data,
transform_example_file,
output_example_file,
])
| 0
| 0
| 0
| 0
| 0
| 1,008
| 0
| 69
| 160
|
af18231ed684c46a269b36519eb707e9ab6b7d6a
| 34,191
|
py
|
Python
|
twit_analytics.py
|
nikb999/Twitter-analytics
|
35074503be495e62fad282b9c723756df87119a7
|
[
"MIT"
] | null | null | null |
twit_analytics.py
|
nikb999/Twitter-analytics
|
35074503be495e62fad282b9c723756df87119a7
|
[
"MIT"
] | null | null | null |
twit_analytics.py
|
nikb999/Twitter-analytics
|
35074503be495e62fad282b9c723756df87119a7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#add the path of the twitter egg
import sys
egg_path = '/home/users/web/........./cgi-bin/PyPkg/twitter-1.14.3-py2.7.egg'
sys.path.append(egg_path)
# Import the CGI, string, sys, and md5crypt modules
import json, urllib2, re, time, datetime, sys, cgi, os
import sqlite3
import MySQLdb as mdb
import string, random
from urlparse import urlparse
from tempfile import TemporaryFile
def lex_anal(incomingTweetList):
'''
routine to do lexical analysis
'''
#final_tweet_list --- date / sender full name / tweet
#read the tweets and create a list of sender-htag and sender-@
#incoming TweetList has two layer lists
sender_htag = []
sender_at = []
h_tags_all = []
at_items_all = []
ts_all = []
for lex2 in incomingTweetList:
for lex22 in lex2:
td = lex22[0] #this is the tweet date
try:
ts = text_sanitize(lex22[1]) #this is the tweet sender
except:
print 'something wrong with ',lex22[1]
ts = '---'
ts_all.append(ts)
h_tags = re.findall('[#]\w+',lex22[2]) #these are the h-tags
at_items = re.findall('[@]\w+',lex22[2]) #these are the other users
h_tags = [hti.lower() for hti in h_tags]
at_items = [ati.lower() for ati in at_items]
for h2 in h_tags:
sender_htag.append([td,ts.lower()+'-'+h2])
h_tags_all.append(h2)
for at2 in at_items:
sender_at.append([td,ts.lower()+'-'+at2])
at_items_all.append(at2)
#summarize the two new lists
#following lists don't have dates
sender_htag2 = [xx[1] for xx in sender_htag]
sender_at2 = [yy[1] for yy in sender_at]
#make a list of the tweet senders only
ts_all = list(set(ts_all))
#print ts_all
#get the top 10 htags
#py2.6 ht_col = collections.Counter(h_tags_all)
htag_data4heatmap = []
at_data4heatmap = []
#print '<ul>Top 10 Hashtags'
#py2.6 for h_item in ht_col.most_common(10):
for h_item in top_list(h_tags_all,10):
#print '<li>', h_item, '</li>'
        #count the number of times each hashtag was referenced by each tweet sender
try:
for tsitem in ts_all:
try:
itemtocount = str(tsitem+'-'+h_item[1])
htag_data4heatmap.append([tsitem,h_item[1], sender_htag2.count(itemtocount)])
except:
print 'Problem here: ',h_item,tsitem
except:
print 'Problem here',h_item
print '</ul>'
#get the top 10 user references
#py2.6 at_col = collections.Counter(at_items_all)
#print '<ul>Top 10 Users'
#py2.6 for a_item in at_col.most_common(10):
for a_item in top_list(at_items_all,10):
#print '<li>', a_item, '</li>'
        #count the number of times each top user was referenced by each tweet sender
try:
for tsitem in ts_all:
itemtocount = str(tsitem+'-'+a_item[1])
at_data4heatmap.append([tsitem,a_item[1], sender_at2.count(itemtocount)])
except:
print 'Problem here 2',a_item
print '</ul>'
#draw the table with the heatmap
tcols = len(ts_all) #number of tweet senders - rows
    trows = len(htag_data4heatmap) / tcols #number of hashtags - cols
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Popular Hashtags</h3>'
heatmap_table(trows,tcols,htag_data4heatmap)
tcols = len(ts_all) #number of tweet senders - rows
    trows = len(at_data4heatmap) / tcols #number of top referenced users - cols
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Referenced Users</h3>'
heatmap_table(trows,tcols,at_data4heatmap)
# Define main function.
main()
| 40.800716
| 197
| 0.534176
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#add the path of the twitter egg
import sys
egg_path = '/home/users/web/........./cgi-bin/PyPkg/twitter-1.14.3-py2.7.egg'
sys.path.append(egg_path)
# Import the CGI, string, sys, and md5crypt modules
import json, urllib2, re, time, datetime, sys, cgi, os
import sqlite3
import MySQLdb as mdb
import string, random
from urlparse import urlparse
from twitter import *
from tempfile import TemporaryFile
from collections import *
from py_site_header import *
def thisPYfile():
return 'twit_analytics.py'
def define_keys():
CONSUMER_KEY="......................"
CONSUMER_SECRET="...................."
ACCESS_TOKEN="..........................."
ACCESS_TOKEN_SECRET="...................................."
return CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
def start_database_to_store_tweets():
dbhost="......................" # Host name
dbuser="......." # Mysql username
dbpswd="......." # Mysql password
dbname = '........' # MySql db
try:
conn = mdb.connect(host=dbhost,user=dbuser,passwd=dbpswd,db=dbname)
c = conn.cursor()
return c, True, conn
except mdb.Error, e:
return e, False
def site_header(st=''):
site_start()
print '</div>'
site_title(st)
def site_start():
print '''
Content-type:text/html\r\n\r\n
<html>
<div class="wrap" id="wrap_id">
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Financial Models</title>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script type="text/javascript" src="../js/js_functions.js"></script>
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3.css">
<link rel="stylesheet" href="http://www.w3schools.com/lib/w3-theme-indigo.css">
<link href='http://code.ionicframework.com/ionicons/2.0.1/css/ionicons.min.css' rel='stylesheet' type='text/css'>
<link rel="stylesheet" href="http://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.4.0/css/font-awesome.min.css">
<style>
a:link { text-decoration: none; }
a:visited { text-decoration: none; }
a:hover { text-decoration: none; }
a:active { text-decoration: none; }
</style>
</head>
<body>
'''
def site_title(s_title):
print '''
<div id="site_title" class="w3-container w3-theme-d4 w3-center w3-padding-jumbo">
<p> </p>
<div class="w3-row w3-jumbo">
'''
print s_title
print '''
<br>
</div>
</div>
'''
def site_footer():
import datetime
curr_year = datetime.datetime.now().strftime("%Y")
print '<div class="w3-container w3-border-top" style="text-align:center">'
print '<p> © 2013-'+curr_year+' | '
print '<a>Contact Us</a> </p>'
print '<p><a href="./termsofuse.py">Terms of Use</a> |',
print '<a href="./home.py#aboutus">About Us</a> </p>'
print '</div>'
print '</form>'
print ' </body>'
print ' </div>' #for the div id = wrap
print ' </html>'
def html_start():
# Start the HTML Block
site_header('Twitter Analytics')
def html_end():
site_footer()
def top_list(in_l,topx):
#function to get the top xx items in a list
# Need this because v2.6 of python does not have Counter in collections
counter = {}
for i in in_l:
counter[i] = counter.get(i, 0) + 1
final_dict = sorted([ (freq,word) for word, freq in counter.items() ], reverse=True)[:topx]
return final_dict
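# Editor's note: on Python 2.7+ the same top-N result is available from the
# standard library; kept as a commented sketch so the 2.6-compatible helper
# above remains the single code path (ordering of ties may differ slightly):
#   from collections import Counter
#   def top_list_counter(in_l, topx):
#       return [(freq, word) for word, freq in Counter(in_l).most_common(topx)]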
def text_sanitize(in_text):
out_text = in_text.replace("'","")
out_text = out_text.replace("\""," ").replace("\\"," ").replace("="," ").replace("''",'\"').replace("' '",'\"')
return out_text
def generate_form():
html_start()
print '<div id="body_sty">'
print '<p>Explore the world of Twitter and discover information about twitter users, their friends and followers as well as lexical analysis of the tweets.</p>'
print '<TABLE style="display: block;" BORDER = 0>'
print "<FORM METHOD = post ACTION=\'"+thisPYfile()+"\'>"
print "<TR><TH align=\"left\">Screen Name:</TH><TD><INPUT type = text name=\"scn_name\"></TD><TR>"
print "</TABLE>"
print "<INPUT TYPE = hidden NAME = \"action\" VALUE = \"display\">"
print "<INPUT TYPE = submit VALUE = \"Enter\">"
print "</FORM>"
print '</div>'
html_end()
def user_public_info(find_id_for):
#html_start()
#this line gets the public info for the user
print '<h2>'+'\nUsers Public Info'+'</h2>'
do_rest_of_module = 0
try:
t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
response = t.users.lookup(screen_name=find_id_for)
do_rest_of_module = 1
except:
print '<p>', 'Error getting public data' ,'</p>'
if do_rest_of_module == 1:
print '<h3>'+'\nBasic Info for: ', find_id_for+'</h3>'
print '<p>', '\tKey Data' ,'</p>'
print '<ul>'
print '<li>ID:',response[0]['id'],'</li>'
print '<li>Screen Name:',response[0]['screen_name'],'</li>'
print '<li>Name:',response[0]['name'] ,'</li>'
print '<li>Location:',response[0]['location'] ,'</li>'
print '<li>Friends:',response[0]['friends_count'] ,'</li>'
print '<li>Followers:',response[0]['followers_count'] ,'</li>'
print '<li>Messages posted:',response[0]['statuses_count'] ,'</li>'
print '</ul>'
def get_last200_tweets(in_user):
#this method will get the last 200 tweets of the user
#rate limit is 180 requests per 15 min window
#print '<h2>'+'\nAnalysis of Past Tweets for',in_user,'</h2>'
do_rest_of_module = 0
try:
t = Twitter(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
response=t.statuses.user_timeline(screen_name=in_user,count=200)
#print '<p>', '\tResponses left:', response.headers['x-rate-limit-remaining'] ,'</p>'
#print '<p>Line 201. Response length: ',len(response),'</p>'
if len(response) > 0:
do_rest_of_module = 1
else:
print '<p>', 'No info found for: ',in_user ,'</p>'
except:
print '<p>', 'Error getting tweets info for: ',in_user ,'</p>'
if do_rest_of_module == 1:
base_twit_list = []
data_for_plots = []
x = response
#x = [element.lower() for element in response] #x is list - LOWER CASE
hashtag_list = [] #start an empty list of hashtags
at_list = [] #start an empty list of twitter IDs
re_twt_list = [] #start a list of retweets
#get the start and end dates
sdf = x[0]['created_at'] #get the full date of last tweet
start_date = datetime.date(int(sdf[26:30]), int(time.strptime(sdf[4:7],'%b').tm_mon), int(sdf[8:10]))
edf = x[len(x)-1]['created_at'] #get the full date of first tweet
end_date = datetime.date(int(edf[26:30]), int(time.strptime(edf[4:7],'%b').tm_mon), int(edf[8:10]))
#end_date = str(edf[8:10])+'-'+str(edf[4:7])+'-'+str(edf[26:30])
twit_day_range = (start_date-end_date).days
avg_twit_day = (1.0*len(x)/max(1,twit_day_range))
print >> t2, '<h4>'+'Tweet Stats for ', in_user+'</h4>'
#print x[0]
#print '\tStats for last',len(x), 'tweets by',in_user
fix_nm = x[0]['user']['screen_name']
try:
if str(x[0]['user']['name']).decode('ascii'): fix_nm = str(x[0]['user']['name'])
except:
#print 'something wrong with the name for ', x[0]['user']['name']
fix_nm = x[0]['user']['screen_name']
print >> t2, '<ul>'
print >> t2, '<li>Key Personal Data</li>'
print >> t2, '<ul>'
print >> t2, '<li>ID:',x[0]['user']['id'],'</li>'
print >> t2, '<li>Screen Name:',x[0]['user']['screen_name'],'</li>'
print >> t2, '<li>Name:',fix_nm,'</li>'
#print '<li>Location:',x[0]['user']['location'],'</li>'
print >> t2, '<li>Friends:',x[0]['user']['friends_count'] ,'</li>'
print >> t2, '<li>Followers:',x[0]['user']['followers_count'] ,'</li>'
print >> t2, '<li>Messages posted:',x[0]['user']['statuses_count'] ,'</li>'
foll_frnd_rat = 1.0*x[0]['user']['followers_count'] / max(1,x[0]['user']['friends_count'])
print >> t2, '<li>Follower to Friend Ratio:', '%.1f' %(foll_frnd_rat),'</li>'
print >> t2, '</ul>'
print >> t2, '</ul>'
print >> t2, '<ul>'
print >> t2, '<li>',len(x),'tweets in past',twit_day_range,'days',
print >> t2, '(',end_date,'to',start_date,')' ,'</li>'
print >> t2, '<li>', 'Avg of ','%.1f' %(avg_twit_day),'tweets per day' ,'</li>'
#add info to the data for charts list
data_for_plots.extend([x[0]['user']['screen_name']])
data_for_plots.extend([x[0]['user']['friends_count']])
data_for_plots.extend([x[0]['user']['followers_count']])
data_for_plots.extend([x[0]['user']['statuses_count']])
data_for_plots.extend([twit_day_range])
data_for_plots.extend([len(x)])
for item in x:
#the encode(..., 'ignore') calls convert the text and drop any characters they cannot handle
td = item['created_at']
twt_date = datetime.date(int(td[26:30]), int(time.strptime(td[4:7],'%b').tm_mon), int(td[8:10]))
fix_nm = item['user']['screen_name']
try:
if str(item['user']['name']).encode('utf8','ignore'): fix_nm = str(item['user']['name'])
except:
fix_nm = item['user']['screen_name']
try:
fix_text = text_sanitize(item['text'].encode('utf8','ignore'))
except:
#print 'something wrong with the text in tweet for: ',in_user
fix_text = 'Did not process'
#print fix_text,'\t',type(item['text']),'\t',len(item['text']),'\t',item['text'],
twt_list_data = [twt_date] + [fix_nm.lower()] + [fix_text]
try:
base_twit_list.append(twt_list_data)
except:
print '<p>Unknown Error:', type(twt_list_data), twt_list_data, '</p>'
textitem = fix_text
newhastags = re.findall('[#]\w+',textitem)
newatitems = re.findall('[@]\w+',textitem)
re_tweets = re.findall('RT',textitem)
#before adding to the final lists, convert the hashtags and atitems
#to lower case. This will avoid issues of double counting same names
newhastags = [hti.lower() for hti in newhastags]
newatitems = [ati.lower() for ati in newatitems]
#Now add to the list.
#Use EXTEND, which adds the elements themselves rather than nesting another list (as append would).
hashtag_list.extend(newhastags)
at_list.extend(newatitems)
re_twt_list.extend(re_tweets)
#now try to find some patterns in the last 200 tweets
#print 'use the collections library to find out the top 5'
#Version 2.6 of python does not support Counters within collections
#py2.6 hashcollect = collections.Counter(hashtag_list)
#py2.6 atcollect = collections.Counter(at_list)
totalretweets = len(re_twt_list)
retwpercent = (1.0 * totalretweets / max(1,len(x)) ) * 100
top10users = []
#print '\n.............................' ,'</p>'
print >> t2, '<li>', '\t',"%.2f%%" % retwpercent, 'are retweets (',totalretweets,'of a total of',len(x),'tweets)' ,'</li>'
print >> t2, '<ul>'
print >> t2, '<li>',(len(x)-totalretweets), 'tweets in ',twit_day_range,' days (without retweets)</li>'
print >> t2, '<li>','Avg of ','%.1f' %( 1.0*(len(x)-totalretweets)/max(twit_day_range,1) ),'tweets per day (without retweets)</li>'
print >> t2, '</ul></ul>'
data_for_plots.extend([totalretweets])
print >> t2, '<ul>'
print >> t2, '<li>', '\tHashtags referenced over past',len(x),'tweets = ',len(hashtag_list) ,'</li>'
print >> t2, '<li>', '\t10 Most referenced hashtags' ,'</li>'
print >> t2, '<ul>'
#py2.6 for h_item in hashcollect.most_common(10): #can't use in python 2.6
for h_item in top_list(hashtag_list,10):
print >> t2, '<li>',text_sanitize(h_item[1]),'|',h_item[0] ,'</li>'
print >> t2, '</ul></ul>'
print >> t2, '<ul>'
print >> t2, '<li>', '\tTwitter IDs referenced over past',len(x),'tweets = ',len(at_list) ,'</li>'
print >> t2, '<li>', '\t10 Most referenced Tweeter IDs' ,'</li>'
print >> t2, '<ul>'
#py2.6 for at_item in atcollect.most_common(10):
for at_item in top_list(at_list,10):
print >> t2, '<li>', '\t\t',text_sanitize(at_item[1]),'|',at_item[0],'</li>'
#add the list of users to the top10user list
top10users.append(at_item[1].replace('@',''))
print >> t2, '</ul></ul>'
#print '<p>Twit list:',type(base_twit_list),'\t',len(base_twit_list),'</p>'
return top10users, base_twit_list, data_for_plots
def display_data(scn_name):
html_start()
print '<div id="body_sty">'
print '<h4>Data shown for '+scn_name.upper()+' and 10 other users most referenced in '+scn_name.upper()+'\'s tweets.</h4><hr>'
user_to_check = scn_name
if user_to_check[0] == '@':
user_raw = user_to_check
user_to_check = user_raw.replace('@','')
# the following lines get the user info
# -- this is response limited to 180
#user_public_info(user_to_check)
max_items_to_show = 200
max_tweets_to_get = 200
#if temp file exists, close it
global t2
try:
t2.close()
except:
print ''
#open the temp file
t2=TemporaryFile()
print >> t2, '''
<a href="#" onclick="show_hideStuff('detailed_data'); return false;">
<br><br><hr><br>
<h3>Detailed Data (click to see or hide)</h3></a><br>
<div id="detailed_data" style="display:none">
'''
# last xx tweets is response limited to 180
res_last200_tweets = get_last200_tweets(user_to_check.lower())
#print '<p>', type(res_last200_tweets), len(res_last200_tweets), '</p>'
final_tweet_list = []
final_data_for_plots = []
do_rest_of_display_data = 0
try:
user_reference = res_last200_tweets[0]
tweet_last200_tweets = res_last200_tweets[1]
final_tweet_list.append(tweet_last200_tweets)
final_data_for_plots.append(res_last200_tweets[2])
do_rest_of_display_data = 1
except:
print '<p>Something wrong to get the list of twitter IDs</p>'
if (do_rest_of_display_data == 1):
print >> t2, '<br>'
try:
if len(user_reference) > 0:
for newuser in user_reference:
if newuser != user_to_check:
res_last200_tweets = get_last200_tweets(newuser.lower())
tweets_from_res_last200 = res_last200_tweets[1]
final_tweet_list.append(tweets_from_res_last200)
final_data_for_plots.append(res_last200_tweets[2])
else:
print >>t2, '<p>', 'Did not find any instance of other users referenced in your tweets.' ,'</p>'
except:
print >>t2, '<p>', 'No info found.' ,'</p>'
#Add the data to the temp file also
print >> t2, '<br><br><hr><h4>List of Tweets Analyzed</h4>'
print >> t2, '<table id="table1" class="pure-table" width=100% style="display: block;">'
print >> t2, '<thead><tr bgcolor=#def><td>Date</td><td>Sender</td><td>Text</td></tr></thead>'
row_even = True
for i1 in final_tweet_list:
for i2 in i1:
#database fields: current date, username, screen name, twt_date, twt_writer, twt_text
twts = [datetime.date.today(),scn_name,user_to_check,i2[0],text_sanitize(i2[1]),text_sanitize(i2[2])]
try:
if row_even == True:
print >> t2, '<tr><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>'
row_even = False
else:
print >> t2, '<tr class="pure-table-odd"><td><sm>', twts[3] ,'</sm></td><td><sm>', str(twts[4]),'</sm></td><td><sm>', str(twts[5]),'</sm></td></tr>'
row_even = True
except:
print '',
print >> t2, '</table>'
#print out the chart data
#data fields: screen_name, friends, followers, msgs, daterange, tweets, retweets
#print json.dumps(final_data_for_plots,indent=2)
#try doing a chart
#draw a chart showing friends and followers
print '<h3>Friends and Followers</h3>'
x_fdfp = []
y1_fdfp = []
y2_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
x_fdfp.append( 'Screen Name' )
y1_fdfp.append( 'Friends' )
y2_fdfp.append( 'Followers' )
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y1_fdfp.append( final_data_for_plots[xy1][1] )
y2_fdfp.append( final_data_for_plots[xy1][2] )
two_bar_chart_data("Friends and Followers", x_fdfp, y1_fdfp, y2_fdfp)
print '<h3>Followers to Friends Ratio</h3>'
#Draw a bar chart to show followers to friends ratio
x_fdfp = []
y_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] / max(final_data_for_plots[xy1][1],1),1) )
#print '<p>',x_fdfp, y_fdfp, '</p>'
bar_chart_data("Followers to Friends Ratio", x_fdfp, y_fdfp)
print '<h3>Tweets sent per day</h3>'
x_fdfp = []
y1_fdfp = []
y2_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
x_fdfp.append( 'Screen Name' )
y1_fdfp.append( 'Tweets per day - with retweets' )
y2_fdfp.append( 'Tweets per day - without retweets' )
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y1_fdfp.append( final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) )
y2_fdfp.append( (final_data_for_plots[xy1][5]-final_data_for_plots[xy1][6]) / max(final_data_for_plots[xy1][4],1) )
two_bar_chart_data("Tweets sent per day", x_fdfp, y1_fdfp, y2_fdfp)
print '<h3>Tweet range (tweets seen per day)</h3>'
x_fdfp = []
y_fdfp = []
#print '<p>Before adding data:',x_fdfp, y_fdfp, '</p>'
for xy1 in range(len(final_data_for_plots)):
x_fdfp.append( final_data_for_plots[xy1][0] )
y_fdfp.append( round( 1.0 * final_data_for_plots[xy1][2] * final_data_for_plots[xy1][5] / max(final_data_for_plots[xy1][4],1) ) )
#print '<p>',x_fdfp, y_fdfp, '</p>'
bar_chart_data("Tweet Range", x_fdfp, y_fdfp)
lex_anal(final_tweet_list)
#print out the detailed data
# go to the first record of the temp file first
print >> t2, ' </div> '
t2.seek(0)
print t2.read()
t2.close()
#if this works - can delete below this.
else:
print '<p>Not able to process this user. Please try another.</p>'
print '</div>' #close the body_sty div
html_end()
def lex_anal(incomingTweetList):
'''
routine to do lexical analysis
'''
#final_tweet_list --- date / sender full name / tweet
#read the tweets and create a list of sender-htag and sender-@
#incoming TweetList has two layer lists
sender_htag = []
sender_at = []
h_tags_all = []
at_items_all = []
ts_all = []
for lex2 in incomingTweetList:
for lex22 in lex2:
td = lex22[0] #this is the tweet date
try:
ts = text_sanitize(lex22[1]) #this is the tweet sender
except:
print 'something wrong with ',lex22[1]
ts = '---'
ts_all.append(ts)
h_tags = re.findall('[#]\w+',lex22[2]) #these are the h-tags
at_items = re.findall('[@]\w+',lex22[2]) #these are the other users
h_tags = [hti.lower() for hti in h_tags]
at_items = [ati.lower() for ati in at_items]
for h2 in h_tags:
sender_htag.append([td,ts.lower()+'-'+h2])
h_tags_all.append(h2)
for at2 in at_items:
sender_at.append([td,ts.lower()+'-'+at2])
at_items_all.append(at2)
#summarize the two new lists
#following lists don't have dates
sender_htag2 = [xx[1] for xx in sender_htag]
sender_at2 = [yy[1] for yy in sender_at]
#make a list of the tweet senders only
ts_all = list(set(ts_all))
#print ts_all
#get the top 10 htags
#py2.6 ht_col = collections.Counter(h_tags_all)
htag_data4heatmap = []
at_data4heatmap = []
#print '<ul>Top 10 Hashtags'
#py2.6 for h_item in ht_col.most_common(10):
for h_item in top_list(h_tags_all,10):
#print '<li>', h_item, '</li>'
#count the number of times each of the hashtags was referenced by each tweet sender
try:
for tsitem in ts_all:
try:
itemtocount = str(tsitem+'-'+h_item[1])
htag_data4heatmap.append([tsitem,h_item[1], sender_htag2.count(itemtocount)])
except:
print 'Problem here: ',h_item,tsitem
except:
print 'Problem here',h_item
print '</ul>'
#get the top 10 user references
#py2.6 at_col = collections.Counter(at_items_all)
#print '<ul>Top 10 Users'
#py2.6 for a_item in at_col.most_common(10):
for a_item in top_list(at_items_all,10):
#print '<li>', a_item, '</li>'
#count the number of times each of the hashtags was referenced by each tweet sender
try:
for tsitem in ts_all:
itemtocount = str(tsitem+'-'+a_item[1])
at_data4heatmap.append([tsitem,a_item[1], sender_at2.count(itemtocount)])
except:
print 'Problem here 2',a_item
print '</ul>'
#draw the table with the heatmap
tcols = len(ts_all) #number of tweet senders - cols
trows = len(htag_data4heatmap) / tcols #number of hashtags - rows
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Popular Hashtags</h3>'
heatmap_table(trows,tcols,htag_data4heatmap)
tcols = len(ts_all) #number of tweet senders - cols
trows = len(at_data4heatmap) / tcols #number of referenced users - rows
#print trows, tcols
if trows>0:
print '<br><br>'
print '<h3>Most Referenced Users</h3>'
heatmap_table(trows,tcols,at_data4heatmap)
def heatmap_table(trows,tcols,hm):
#calculate the max and min of the references
#and create a normalized color scale
mx = max(i[2] for i in hm)
mn = min(i[2] for i in hm)
itv = mx - mn
#COLOR palette from http://colorbrewer2.org/
for arow in hm:
rval = 1.0*arow[2]/itv
if rval<0.1:
arow[2]='#FFF5F0'
elif rval>=0.1 and rval<0.25:
arow[2]='#FEE0D2'
elif rval>=0.25 and rval<0.4:
arow[2]='#FCBBA1'
elif rval>=0.4 and rval<0.5:
arow[2]='#FC9272'
elif rval>=0.5 and rval<0.6:
arow[2]='#FB6A4A'
elif rval>=0.6 and rval<0.7:
arow[2]='#EF3B2C'
elif rval>=0.7 and rval<0.8:
arow[2]='#CB181D'
elif rval>=0.8 and rval<0.9:
arow[2]='#A50F15'
elif rval>=0.9:
arow[2]='#67000D'
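# Editor's note: the if/elif ladder above could be collapsed with bisect over
# the same ColorBrewer palette; a sketch only, assuming the identical 9 bins:
#   import bisect
#   PALETTE = ['#FFF5F0', '#FEE0D2', '#FCBBA1', '#FC9272', '#FB6A4A',
#              '#EF3B2C', '#CB181D', '#A50F15', '#67000D']
#   BOUNDS = [0.1, 0.25, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
#   arow[2] = PALETTE[bisect.bisect_right(BOUNDS, rval)]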
print '<table width=100% style="display: block;"> '
for i in range(trows+1):
print '<tr>',
for j in range(tcols+1):
if (i==0 and j==0):
print '<td width="15%">','','</td>',
elif i==0 and j>0 and j<(tcols):
print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td>',
elif i==0 and j==(tcols):
print '<td width="8.5%"><sm>',hm[j-1][0][:10],'</sm></td></tr>'
elif i>0 and j==0:
print '<td><sm>',hm[(i-1)*tcols+j+1-1][1],'</sm></td>',
elif i>0 and j>0 and j<tcols:
print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td>',
elif i>0 and j==tcols:
print '<td bgcolor=',hm[(i-1)*tcols+j-1][2],'></td></tr>'
print '</table> '
def print_detailed_tweets(in_usertocheck):
html_start()
check_another_user_button()
#print '<h3>Listing of tweets analyzed:</h3>'
sd2st = start_database_to_store_tweets()
if sd2st[1] == True:
c2 = sd2st[0]
conn2 = sd2st[2]
#read all the tweets for the username and screen name
read_text = "SELECT * FROM tweetlist WHERE (username =\'"+in_usertocheck+"\')"
#print '<p>Select tweet command:',read_text,'</p>'
try:
c2.execute(read_text)
for crow in c2:
print crow[1]
conn2.close()
#print '<h2>Finished with the tweet list</h2>'
except conn2.Error, e:
print "E Error %d: %s" % (e.args[0], e.args[1])
else:
print "F Error %d: %s" % (sd2st[0].args[0],sd2st[0].args[1])
html_end()
def bar_chart_data(cht_title,xdata,ydata):
#this routine will draw a bar chart
#print '<p>DO NOT PRINT anything inside chart modules except needed items</p>'
print '<!--Load the AJAX API-->'
print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>'
print '<script type=\"text/javascript\">'
# Load the Visualization API and the piechart package.
print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); '
# Set a callback to run when the Google Visualization API is loaded.
print ' google.setOnLoadCallback(drawChart);'
# Callback that creates and populates a data table,
# instantiates the pie chart, passes in the data and
# draws it.
print ' function drawChart() { '
# Create the data table.
print ' var data = new google.visualization.arrayToDataTable([ '
print ' [ \'Screen Name\', \' ' , cht_title, ' \', {role:\'style\'} ], '
for cdi in range(len(xdata)):
if cdi == 0:
print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'orange\' ], "
else:
print " [ \'", xdata[cdi], "\',", ydata[cdi], ", \'blue\' ], "
print ' ]); '
#Set chart options
print " var options = {\'title\':\'",cht_title,"\', "
print ' \'width\':600, '
print ' \'height\':400, '
print ' \'hAxis\' : {\'logScale\' : true} , '
print ' legend :\'none\' , \'backgroundColor\': { fill: \"none\" } '
print ' }; '
# chart_bottom():
# Instantiate and draw our chart, passing in some options.
print ' var chart = new google.visualization.BarChart(document.getElementById(\"',cht_title+'DIV','\")); '
print ' function selectHandler() { '
print ' var selectedItem = chart.getSelection()[0]; '
print ' if (selectedItem) { '
print ' var topping = data.getValue(selectedItem.row, 0); '
print ' alert(\'The user selected \' + topping); '
print ' } '
print ' } '
print ' google.visualization.events.addListener(chart, \'select\', selectHandler); '
print ' chart.draw(data, options); '
print ' } '
print '</script> '
print '<!--Div that will hold the pie chart--> '
print '<div id=\"',cht_title+'DIV','\" style=\"width:600; height:400\"></div> '
def two_bar_chart_data(cht_title,xdata,ydata1,ydata2):
#this routine will draw a bar chart with two bars
#print '<p>DO NOT PRINT anything inside chart modules except needed items</p>'
print '<!--Load the AJAX API-->'
print '<script type=\"text/javascript\" src=\"https://www.google.com/jsapi\"></script>'
print '<script type=\"text/javascript\">'
# Load the Visualization API and the piechart package.
print ' google.load(\'visualization\', \'1.0\', {\'packages\':[\'corechart\']}); '
# Set a callback to run when the Google Visualization API is loaded.
print ' google.setOnLoadCallback(drawChart);'
print ' function drawChart() { '
print ' var data = new google.visualization.arrayToDataTable([ '
print " [ \'Screen Name\', \' ",ydata1[0], "\' ,{role:\'style\'}, \'" ,ydata2[0], "\' , {role:\'style\'} ], "
for cdi in range(len(xdata)):
if cdi>0:
print " [ \'", xdata[cdi], "\',", ydata1[cdi],",\'blue\',", ydata2[cdi], ", \'red\' ], "
print ' ]); '
#Set chart options
print " var options = {\'title\':\'",cht_title,"\', "
print ' \'width\':600, '
print ' \'height\':400, '
print ' \'hAxis\' : {\'logScale\' : false} , '
print ' legend :\'top\' , \'backgroundColor\': { fill: \"none\" } '
print ' }; '
# chart_bottom():
# Instantiate and draw our chart, passing in some options.
print ' var chart = new google.visualization.BarChart(document.getElementById(\"',cht_title+'DIV','\")); '
print ' function selectHandler() { '
print ' var selectedItem = chart.getSelection()[0]; '
print ' if (selectedItem) { '
print ' var topping = data.getValue(selectedItem.row, 0); '
print ' alert(\'The user selected \' + topping); '
print ' } '
print ' } '
print ' google.visualization.events.addListener(chart, \'select\', selectHandler); '
print ' chart.draw(data, options); '
print ' } '
print '</script> '
print '<!--Div that will hold the pie chart--> '
print '<div id=\"',cht_title+'DIV','\" style=\"width:600; height:400\"></div> '
def test3():
#Test some random twitter functions on stream data
html_start()
testname = "concession,privatization,public private"
#testname = "mining,mines,metal,oil,gas,petroleum"
try:
ts = TwitterStream(auth=OAuth(define_keys()[2],define_keys()[3],define_keys()[0],define_keys()[1]))
#response = ts.statuses.sample()
response = ts.statuses.filter(track=testname)
showcount = 0
maxshow = 50
for tweet in response:
showcount += 1
if showcount>= maxshow: break
# You must test that your tweet has text. It might be a delete
# or data message.
if tweet is None:
print_para("-- None --")
elif tweet.get('text'):
print_para(tweet['user']['name']+'.....'+str(twit_date(tweet['created_at']))+'---'+tweet['text'])
else:
print_para(str(showcount)+'...')
#print_para(json.dumps(tweet,indent=2))
except TwitterHTTPError, e:
print '<p>Error getting tweets info for:',e['details'],'</p>'
html_end()
def print_para(instr):
print '<p>',instr,'</p>'
def twit_date(in_created_at):
out_date = datetime.date(int(in_created_at[26:30]), int(time.strptime(in_created_at[4:7],'%b').tm_mon), int(in_created_at[8:10]))
return out_date
# Define main function.
def main():
form = cgi.FieldStorage()
if (form.has_key("action") and form.has_key("scn_name")):
if (form["action"].value == "display"):
display_data(text_sanitize(form["scn_name"].value))
else:
generate_form()
main()
| 0
| 0
| 0
| 0
| 0
| 29,208
| 0
| 11
| 682
|
47aeba5f5a974bde56729cafe676435b3057e324
| 3,765
|
py
|
Python
|
sonde/qaqc_viewer.py
|
wilsaj/pint
|
a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4
|
[
"BSD-3-Clause"
] | 1
|
2017-12-06T04:28:59.000Z
|
2017-12-06T04:28:59.000Z
|
sonde/qaqc_viewer.py
|
wilsaj/pint
|
a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4
|
[
"BSD-3-Clause"
] | null | null | null |
sonde/qaqc_viewer.py
|
wilsaj/pint
|
a2b2a6ea9ff480a168358af642cf36c7f3c5d0e4
|
[
"BSD-3-Clause"
] | null | null | null |
"""
QAQC Viewer based on Chaco & Traits
"""
#from enthought.chaco.example_support import COLOR_PALETTE
#from enthought.enable.example_support import DemoFrame, demo_main
# Enthought library imports
# Chaco imports
#==============================================================================
# Attributes to use for the plot view.
#size=(800,600)
#title="Salinity plot example"
if __name__ == "__main__":
viewer = BaseViewer()
viewer.configure_traits()
| 41.833333
| 110
| 0.601594
|
"""
QAQC Viewer based on Chaco & Traits
"""
#from enthought.chaco.example_support import COLOR_PALETTE
#from enthought.enable.example_support import DemoFrame, demo_main
# Enthought library imports
from enthought.enable.api import Window, Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, Group, View
# Chaco imports
from enthought.chaco.api import Plot, ArrayDataSource, ArrayPlotData, \
BarPlot, DataRange1D, LabelAxis, LinearMapper, VPlotContainer, \
PlotAxis, PlotGrid, LinePlot, add_default_grids, PlotLabel
from enthought.chaco.tools.api import PanTool, ZoomTool
from enthought.chaco.scales.api import CalendarScaleSystem
from enthought.chaco.scales_tick_generator import ScalesTickGenerator
from sonde import Sonde
import time
import numpy as np
class BaseViewer(HasTraits):
main_tab = Instance(Component)
traits_view = View(Item('main_tab', editor=ComponentEditor),
width=500, height=500, resizable=True, title="Salinity Plot")
def __init__(self, **kwargs):
HasTraits.__init__(self, **kwargs)
self.init_data()
def init_data(self):
file_name = '/home/dpothina/work/apps/pysonde/tests/ysi_test_files/BAYT_20070323_CDT_YS1772AA_000.dat'
sonde = Sonde(file_name)
sal_ds = np.array([1, 2, 3, 4, 5, 6, 7, 8]) # sonde.data['seawater_salinity']
time_ds = sal_ds**2 # [time.mktime(date.utctimetuple()) for date in sonde.dates]
#time_ds = ArrayDataSource(dt)
#sal_ds = ArrayDataSource(salinity, sort_order="none")
self.plot_data = ArrayPlotData(sal_ds=sal_ds,
time_ds=time_ds)
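# Editor's note: with a real sonde file the series would presumably come from
# the commented-out conversion above rather than the placeholder arrays; a
# sketch only, not verified against this data set:
#   sal_ds = np.asarray(sonde.data['seawater_salinity'])
#   time_ds = np.array([time.mktime(d.utctimetuple()) for d in sonde.dates])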
def _main_tab_default(self):
self.sal_plot = Plot(self.plot_data)
self.sal_plot.plot(('time_ds', 'sal_ds'), type='line')
#sal_plot.overlays.append(PlotAxis(sal_plot, orientation='left'))
#bottom_axis = PlotAxis(sal_plot, orientation="bottom",# mapper=xmapper,
# tick_generator=ScalesTickGenerator(scale=CalendarScaleSystem()))
#sal_plot.overlays.append(bottom_axis)
#hgrid, vgrid = add_default_grids(sal_plot)
#vgrid.tick_generator = bottom_axis.tick_generator
#sal_plot.tools.append(PanTool(sal_plot, constrain=True,
# constrain_direction="x"))
#sal_plot.overlays.append(ZoomTool(sal_plot, drag_button="right",
# always_on=True,
# tool_mode="range",
# axis="index",
# max_zoom_out_factor=10.0,
# ))
container = VPlotContainer(bgcolor="lightblue",
spacing=40,
padding=50,
fill_padding=False)
container.add(self.sal_plot)
#container.add(price_plot)
#container.overlays.append(PlotLabel("Salinity Plot with Date Axis",
# component=container,
# #font="Times New Roman 24"))
# font="Arial 24"))
return container
#def default_traits_view(self):
# return View(Group(Item('main_tab', editor=ComponentEditor)),
# width=500, height=500, resizable=True, title="Salinity Plot")
#==============================================================================
# Attributes to use for the plot view.
#size=(800,600)
#title="Salinity plot example"
if __name__ == "__main__":
viewer = BaseViewer()
viewer.configure_traits()
| 0
| 0
| 0
| 2,650
| 0
| 0
| 0
| 401
| 244
|
120fa0d15479ccd5b4653c3adf9354e51e55b55c
| 573
|
py
|
Python
|
ComicPub/comics/admin.py
|
Xonshiz/ComicPub
|
d332ee1b62d6c28347954280696c86898de6d125
|
[
"MIT"
] | 8
|
2017-09-02T07:04:59.000Z
|
2020-12-17T17:30:34.000Z
|
ComicPub/comics/admin.py
|
Xonshiz/ComicPub
|
d332ee1b62d6c28347954280696c86898de6d125
|
[
"MIT"
] | 1
|
2017-10-24T12:49:57.000Z
|
2017-10-24T15:04:44.000Z
|
ComicPub/comics/admin.py
|
Xonshiz/ComicPub
|
d332ee1b62d6c28347954280696c86898de6d125
|
[
"MIT"
] | 4
|
2017-10-24T14:13:13.000Z
|
2021-12-15T17:09:23.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from comics.models import Comic, ComicChapter
# class PageFileInline(admin.TabularInline):
# model = ComicChapter
#
#
# class PageAdmin(admin.ModelAdmin):
# inlines = [PageFileInline, ]
# class ChapterInline(admin.TabularInline):
# model = ComicChapterFiles
#
# class ComicAdmin(admin.ModelAdmin):
# inlines = [
# ChapterInline,
# ]
# admin.site.register(ComicChapter, ComicAdmin)
admin.site.register(Comic)
admin.site.register(ComicChapter)
| 21.222222
| 47
| 0.724258
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from comics.models import Comic, ComicChapter
# class PageFileInline(admin.TabularInline):
# model = ComicChapter
#
#
# class PageAdmin(admin.ModelAdmin):
# inlines = [PageFileInline, ]
# class ChapterInline(admin.TabularInline):
# model = ComicChapterFiles
#
# class ComicAdmin(admin.ModelAdmin):
# inlines = [
# ChapterInline,
# ]
# admin.site.register(ComicChapter, ComicAdmin)
admin.site.register(Comic)
admin.site.register(ComicChapter)
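# Editor's note: a minimal sketch of the inline registration hinted at by the
# commented-out classes above; names are assumptions, not the project's actual
# configuration:
#   class ComicChapterInline(admin.TabularInline):
#       model = ComicChapter
#   class ComicAdmin(admin.ModelAdmin):
#       inlines = [ComicChapterInline]
#   admin.site.register(Comic, ComicAdmin)  # would replace the plain register above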
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
d7e5e4980b5718dcaa9192759e6b4c3e5d658b97
| 2,457
|
py
|
Python
|
chpt6/Generate_random_characters.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | null | null | null |
chpt6/Generate_random_characters.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-21T09:39:00.000Z
|
2018-05-27T15:59:15.000Z
|
chpt6/Generate_random_characters.py
|
GDG-Buea/learn-python
|
9dfe8caa4b57489cf4249bf7e64856062a0b93c2
|
[
"Apache-2.0"
] | 2
|
2018-05-19T14:59:56.000Z
|
2018-05-19T15:25:48.000Z
|
# This program displays 100 lowercase letters, fifteen per line
main()
print()
# Draw a line from (x1, y1) to (x2, y2)
# def drawLine(x1, y1, x2, y2):
# turtle.penup()
# turtle.goto(x1, y1)
# turtle.pendown()
# turtle.goto(x2, y2)
# def writeText(s, x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.write(s) # Write a string
# # Draw a point at the specified location (x, y)
# def drawPoint(x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.begin_fill() # Begin to fill color in a shape
# turtle.circle(3)
# turtle.end_fill() # Fill the shape
# # Draw a circle centered at (x, y) with the specified radius
# def drawCircle(x = 0, y = 0, radius = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y - radius)
# turtle.pendown() # Pull the pen down
# turtle.circle(radius)
# # Draw a rectangle at (x, y) with the specified width and height
# def drawRectangle(x = 0, y = 0, width = 10, height = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x + width / 2, y + height / 2)
# turtle.pendown() # Pull the pen down
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# Generate a random uppercase letter
# def getRandomUpperCaseLetter() :
# return getRandomCharacter('A', 'Z')
# # Generate a random digit character
# def getRandomDigitCharacter() :
# return getRandomCharacter('0', '9')
# # Generate a random character
# def getRandomASCIICharacter() :
# return chr(randint(0, 127))
#
# # Generate a random character between ch1 and ch2
# def getRandomCharacter(ch1, ch2) :
# return chr(randint(ord(ch1), ord(ch2)))
#
| 23.179245
| 66
| 0.659341
|
# This program displays 100 lowercase letters, fifteen per line
import turtle
from random import randint
def get_random_lower_case_letter():
return get_random_character('a', 'z')
def get_random_character(ch1, ch2):
return chr(randint(ord(ch1), ord(ch2)))
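# Editor's note: quick illustrative check of the helper above (hypothetical
# interactive session, not part of the exercise):
#   >>> get_random_character('a', 'z') in 'abcdefghijklmnopqrstuvwxyz'
#   True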
def write_text(s, x, y):
turtle.penup()
turtle.goto(x, y)
turtle.pendown()
turtle.write(s)
turtle.goto(x, y)
turtle.done()
def main():
count = 0
number_of_characters = 100
characters_per_line = 15
print("\n")
for i in range(number_of_characters):
print("\t", get_random_lower_case_letter(), end=' ')
count += 1
if count % characters_per_line == 0:
print()
main()
print()
# Draw a line from (x1, y1) to (x2, y2)
# def drawLine(x1, y1, x2, y2):
# turtle.penup()
# turtle.goto(x1, y1)
# turtle.pendown()
# turtle.goto(x2, y2)
# def writeText(s, x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.write(s) # Write a string
# # Draw a point at the specified location (x, y)
# def drawPoint(x, y):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y)
# turtle.pendown() # Pull the pen down
# turtle.begin_fill() # Begin to fill color in a shape
# turtle.circle(3)
# turtle.end_fill() # Fill the shape
# # Draw a circle centered at (x, y) with the specified radius
# def drawCircle(x = 0, y = 0, radius = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x, y - radius)
# turtle.pendown() # Pull the pen down
# turtle.circle(radius)
# # Draw a rectangle at (x, y) with the specified width and height
# def drawRectangle(x = 0, y = 0, width = 10, height = 10):
# turtle.penup() # Pull the pen up
# turtle.goto(x + width / 2, y + height / 2)
# turtle.pendown() # Pull the pen down
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# turtle.right(90)
# turtle.forward(height)
# turtle.right(90)
# turtle.forward(width)
# Generate a random uppercase letter
# def getRandomUpperCaseLetter() :
# return getRandomCharacter('A', 'Z')
# # Generate a random digit character
# def getRandomDigitCharacter() :
# return getRandomCharacter('0', '9')
# # Generate a random character
# def getRandomASCIICharacter() :
# return chr(randint(0, 127))
#
# # Generate a random character between ch1 and ch2
# def getRandomCharacter(ch1, ch2) :
# return chr(randint(ord(ch1), ord(ch2)))
#
| 0
| 0
| 0
| 0
| 0
| 533
| 0
| -3
| 137
|
b4b58aa4d7d83f1298f775781fc1a78f79bf902f
| 531
|
py
|
Python
|
miniProject/miniApp/urls.py
|
cs-fullstack-2019-spring/django-mini-project5-gkg901
|
35af15000480a104f46adb62ba9ceebd4d0ad7a1
|
[
"Apache-2.0"
] | null | null | null |
miniProject/miniApp/urls.py
|
cs-fullstack-2019-spring/django-mini-project5-gkg901
|
35af15000480a104f46adb62ba9ceebd4d0ad7a1
|
[
"Apache-2.0"
] | null | null | null |
miniProject/miniApp/urls.py
|
cs-fullstack-2019-spring/django-mini-project5-gkg901
|
35af15000480a104f46adb62ba9ceebd4d0ad7a1
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('allrecipes/', views.allrecipes, name='allrecipes'),
path('newrecipe/', views.newrecipe, name='newrecipe'),
path('profile/', views.profile, name='profile'),
path('newuser/', views.newuser, name='newuser'),
path('details/<int:ID>', views.details, name='details'),
path('edituser/<int:ID>', views.edituser, name='edituser'),
path('editrecipe/<int:ID>', views.editrecipe, name='editrecipe'),
]
| 37.928571
| 69
| 0.664783
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('allrecipes/', views.allrecipes, name='allrecipes'),
path('newrecipe/', views.newrecipe, name='newrecipe'),
path('profile/', views.profile, name='profile'),
path('newuser/', views.newuser, name='newuser'),
path('details/<int:ID>', views.details, name='details'),
path('edituser/<int:ID>', views.edituser, name='edituser'),
path('editrecipe/<int:ID>', views.editrecipe, name='editrecipe'),
]
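# Editor's note: each route above expects a matching view in views.py; the
# <int:ID> converter passes the captured value as a keyword argument, so the
# detail view implies a signature roughly like the following (hypothetical
# sketch, not the project's code):
#   def details(request, ID):
#       ...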
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
e63a707a6d1aecf82dd0e657d12e6dcba8e4283c
| 3,996
|
py
|
Python
|
hash_code.py
|
Arpan-206/EncryptoCLI
|
26a7718ef387d46bfcf2d167e17a494de0165858
|
[
"MIT"
] | 2
|
2021-10-20T13:38:45.000Z
|
2022-01-11T12:36:49.000Z
|
hash_code.py
|
Arpan-206/EncryptoCLI
|
26a7718ef387d46bfcf2d167e17a494de0165858
|
[
"MIT"
] | null | null | null |
hash_code.py
|
Arpan-206/EncryptoCLI
|
26a7718ef387d46bfcf2d167e17a494de0165858
|
[
"MIT"
] | null | null | null |
# Importing the hashing library
# Importing the visual libraries
# Defining the hash function.
| 27.75
| 129
| 0.508008
|
# Importing the hashing library
import hashlib
# Importing the visual libraries
from PyInquirer import Separator, prompt
from termcolor import colored
# Defining the hash function.
def hash_func():
# Asking the user for further data regarding algorithms
hash_info = prompt([
{
'type': 'list',
'qmark': '>',
'name': 'algorithm',
'message': 'Which algorithm do you want to use?',
'choices': [
Separator(),
{
'name': 'MD5',
},
{
'name': 'SHA256',
},
{
'name': 'SHA512',
},
{
'name': 'BLAKE2',
},
{
'name': 'BLAKE2b',
},
],
},
{
'type': 'list',
'qmark': '>',
'name': 'type_of_data',
'message': 'What do you want to hash?',
'choices': [
Separator(),
{
'name': 'Text',
},
{
'name': 'File',
},
],
},
])
# Storing the data into separate variables
algorithm = hash_info['algorithm']
type_of_data = hash_info['type_of_data']
# Determining the type of data to hash and calling the appropriate functions
if type_of_data == 'File':
handle_file_hashing(algorithm)
else:
handle_text_hashing(algorithm)
def handle_text_hashing(algorithm):
# Asking the user for the data
data_info = prompt([
{
'type': 'input',
'qmark': '>',
'name': 'hash_data',
'message': 'Enter data to hash.',
},
])
# Defining the hash_out variable according to the algorithm selected by user
if algorithm == 'MD5':
hash_out = hashlib.md5()
elif algorithm == 'SHA256':
hash_out = hashlib.sha256()
elif algorithm == 'SHA512':
hash_out = hashlib.sha512()
elif algorithm == 'BLAKE2':
hash_out = hashlib.blake2s()
else:
hash_out = hashlib.blake2b()
# Populating it with the data after converting it to binary
hash_out.update(data_info['hash_data'].encode())
# Calculating the actual hash
hash_out = hash_out.hexdigest()
# Printing out the hash
print(colored('Your hash is: ', 'white') + colored(hash_out, 'green'))
return None
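# Editor's note: the if/elif chain above could also be driven by hashlib.new(),
# provided the menu labels are first mapped to hashlib's constructor names
# (e.g. 'BLAKE2' -> 'blake2s'); a sketch under that assumption:
#   name_map = {'MD5': 'md5', 'SHA256': 'sha256', 'SHA512': 'sha512',
#               'BLAKE2': 'blake2s', 'BLAKE2b': 'blake2b'}
#   hash_out = hashlib.new(name_map[algorithm])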
def handle_file_hashing(algorithm):
# Asking the user for the path to the file
file_info = prompt([
{
'type': 'input',
'qmark': '>',
'name': 'file_name',
'message': 'Enter the path to the file.',
},
])
try:
# Again, Defining the hash_out variable according to the algorithm selected by user
if algorithm == 'MD5':
hash_out = hashlib.md5()
elif algorithm == 'SHA256':
hash_out = hashlib.sha256()
elif algorithm == 'SHA512':
hash_out = hashlib.sha512()
elif algorithm == 'BLAKE2':
hash_out = hashlib.blake2s()
else:
hash_out = hashlib.blake2b()
# Populating it with the data after converting it to binary, but this time in chunks so as not to put too much strain on memory
with open(file_info['file_name'], 'rb') as file_path:
chunk = 0
while chunk != b'':
chunk = file_path.read(1024)
hash_out.update(chunk)
# Calculating the actual hash
hash_out = hash_out.hexdigest()
# Printing out the hash
print(colored('Your hash is: ', 'white') + colored(hash_out, 'green'))
except Exception as e:
print(colored(
'Can\'t find the file. Please check the name and make sure the extension is also present.', 'red'))
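# Editor's note: the chunked read above could also be written with iter() and a
# b'' sentinel, which stops cleanly at end of file; a sketch, not the project's
# code:
#   with open(file_info['file_name'], 'rb') as fh:
#       for chunk in iter(lambda: fh.read(1024), b''):
#           hash_out.update(chunk)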
| 0
| 0
| 0
| 0
| 0
| 3,741
| 0
| 20
| 135
|
e155cdbdf8a6a6a7a4d4cc1a43c09c3a16b32d5c
| 3,800
|
py
|
Python
|
examples/plugins/single_project/sample_project/data/plugin/ui_service.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/plugins/single_project/sample_project/data/plugin/ui_service.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | 1
|
2017-05-22T21:15:22.000Z
|
2017-05-22T21:15:22.000Z
|
examples/plugins/single_project/sample_project/data/plugin/ui_service.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
|
cf79e5b2a0c3b46898a60b5fe5a2fb580604808b
|
[
"BSD-3-Clause"
] | 1
|
2019-10-01T07:03:58.000Z
|
2019-10-01T07:03:58.000Z
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2007 by Enthought, Inc.
# All rights reserved.
#
#-----------------------------------------------------------------------------
"""
The UI service for the Data plugin.
"""
# Standard library imports.
import logging
# Enthought library imports.
# Data library imports.
# Local imports.
# Setup a logger for this module
logger = logging.getLogger(__name__)
#### EOF #####################################################################
| 29.007634
| 78
| 0.460789
|
#-----------------------------------------------------------------------------
#
# Copyright (c) 2007 by Enthought, Inc.
# All rights reserved.
#
#-----------------------------------------------------------------------------
"""
The UI service for the Data plugin.
"""
# Standard library imports.
import logging
# Enthought library imports.
from envisage.api import ApplicationObject, UOL
from pyface.api import confirm, error, FileDialog, information, YES
# Data library imports.
# Local imports.
from services import IDATA_MODEL
# Setup a logger for this module
logger = logging.getLogger(__name__)
class UiService(ApplicationObject):
"""
The UI service for the Data plugin.
"""
##########################################################################
# Attributes
##########################################################################
#### public 'UiService' interface ########################################
# A reference to the Data plugin's model service.
model_service = UOL
##########################################################################
# 'Object' interface
##########################################################################
#### operator methods ####################################################
def __init__(self, **kws):
"""
Constructor.
Extended to ensure our UOL properties are set.
"""
super(UiService, self).__init__(**kws)
# Ensure we have a default model-service if one wasn't specified.
if self.model_service is None:
self.model_service = 'service://%s' % IDATA_MODEL
return
##########################################################################
# 'UIService' interface
##########################################################################
#### public methods ######################################################
#TODO cgalvan: to be implemented
# def delete_data(self, context, data_name, parent_window):
# """
# Delete a Data.
#
# """
#
# # Open confirmation-dialog to confirm deletion
# message = 'Are you sure you want to delete %s?' % data_name
# if confirm(parent_window, message) == YES:
# self.model_service.delete_context_item(context, data_name)
#
# return
def edit_data(self, window, data):
"""
Edit the data parameters of the specified data.
"""
data_parameters = data.data_parameters
edit_ui = data_parameters.edit_traits(
view='data_view',
kind='livemodal',
# handler=handler,
parent=window)
return edit_ui.result
def display_message(self, msg, title=None, is_error=False):
"""
Display the specified message to the user.
"""
# Ensure we record any reasons this method doesn't work. Especially
# since it's critical in displaying errors to users!
try:
# Attempt to identify the current application window.
parent_window = None
workbench = self.application.get_service('envisage.'
'workbench.IWorkbench')
if workbench is not None:
parent_window = workbench.active_window.control
# Display the requested message
if is_error:
error(parent_window, msg, title=title)
else:
information(parent_window, msg, title=title)
except:
logger.exception('Unable to display pop-up message')
return
#### EOF #####################################################################
| 0
| 0
| 0
| 3,086
| 0
| 0
| 0
| 83
| 89
|
20dc02eb654f867beadeef8c295396bcf7913d05
| 8,460
|
py
|
Python
|
metecho/tests/consumers.py
|
almostolmos/Metecho
|
7f58eca163faafea1ce07ffb6f4de2449fa0b8df
|
[
"BSD-3-Clause"
] | 21
|
2020-04-02T21:39:58.000Z
|
2022-01-31T19:43:47.000Z
|
metecho/tests/consumers.py
|
almostolmos/Metecho
|
7f58eca163faafea1ce07ffb6f4de2449fa0b8df
|
[
"BSD-3-Clause"
] | 1,613
|
2020-03-26T16:39:57.000Z
|
2022-03-07T14:54:16.000Z
|
metecho/tests/consumers.py
|
almostolmos/Metecho
|
7f58eca163faafea1ce07ffb6f4de2449fa0b8df
|
[
"BSD-3-Clause"
] | 21
|
2020-07-21T11:58:47.000Z
|
2021-11-25T00:48:21.000Z
|
import pytest
pytestmark = pytest.mark.asyncio
# These tests need to go last, after any tests that start up a Communicator:
| 33.307087
| 88
| 0.711348
|
import pytest
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from ..api.model_mixins import Request
from ..api.push import push_message_about_instance, report_error
from ..api.serializers import (
EpicSerializer,
ProjectSerializer,
ScratchOrgSerializer,
TaskSerializer,
)
from ..consumers import PushNotificationConsumer
from ..routing import websockets
pytestmark = pytest.mark.asyncio
@database_sync_to_async
def serialize_model(serializer_model, instance, user):
serializer = serializer_model(instance, context={"request": Request(user)})
return serializer.data
@pytest.mark.django_db
async def test_push_notification_consumer__project(user_factory, project_factory):
user = await database_sync_to_async(user_factory)()
project = await database_sync_to_async(project_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "project", "id": str(project.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
project, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(ProjectSerializer, project, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__scratch_org__list(
user_factory, scratch_org_factory
):
user = await database_sync_to_async(user_factory)()
scratch_org = await database_sync_to_async(scratch_org_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "scratch_org", "id": "list", "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
scratch_org,
{"type": "SCRATCH_ORG_RECREATE", "payload": {"originating_user_id": "abc"}},
for_list=True,
)
response = await communicator.receive_json_from()
model = await serialize_model(ScratchOrgSerializer, scratch_org, user)
assert response == {
"type": "SCRATCH_ORG_RECREATE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__epic(user_factory, epic_factory):
user = await database_sync_to_async(user_factory)()
epic = await database_sync_to_async(epic_factory)(project__repo_id=1234)
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "epic", "id": str(epic.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
epic, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(EpicSerializer, epic, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__task(user_factory, task_factory):
user = await database_sync_to_async(user_factory)()
task = await database_sync_to_async(task_factory)(epic__project__repo_id=4321)
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "task", "id": str(task.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
task, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(TaskSerializer, task, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__scratch_org(
user_factory, scratch_org_factory
):
user = await database_sync_to_async(user_factory)()
scratch_org = await database_sync_to_async(scratch_org_factory)(
task__epic__project__repo_id=2468
)
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "scratch_org", "id": str(scratch_org.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await push_message_about_instance(
scratch_org, {"type": "TEST_MESSAGE", "payload": {"originating_user_id": "abc"}}
)
response = await communicator.receive_json_from()
model = await serialize_model(ScratchOrgSerializer, scratch_org, user)
assert response == {
"type": "TEST_MESSAGE",
"payload": {"originating_user_id": "abc", "model": model},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__report_error(user_factory):
user = await database_sync_to_async(user_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "user", "id": str(user.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await report_error(user)
response = await communicator.receive_json_from()
assert response == {
"type": "BACKEND_ERROR",
"payload": {"message": "There was an error"},
}
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__unsubscribe(user_factory):
user = await database_sync_to_async(user_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to(
{"model": "user", "id": str(user.id), "action": "SUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await communicator.send_json_to(
{"model": "user", "id": str(user.id), "action": "UNSUBSCRIBE"}
)
response = await communicator.receive_json_from()
assert "ok" in response
await communicator.disconnect()
@pytest.mark.django_db
async def test_push_notification_consumer__invalid_subscription(user_factory):
user = await database_sync_to_async(user_factory)()
communicator = WebsocketCommunicator(websockets, "/ws/notifications/")
communicator.scope["user"] = user
connected, _ = await communicator.connect()
assert connected
await communicator.send_json_to({"model": "foobar", "id": "buzbaz"})
response = await communicator.receive_json_from()
assert "error" in response
await communicator.disconnect()
# These tests need to go last, after any tests that start up a Communicator:
@pytest.mark.django_db
async def test_push_notification_consumer__missing_instance():
content = {
"model_name": "scratchorg",
"id": "bet this is an invalid ID",
"payload": {},
}
consumer = PushNotificationConsumer()
new_content = await consumer.hydrate_message(content)
assert new_content == {"payload": {}}
| 0
| 7,687
| 0
| 0
| 0
| 0
| 0
| 253
| 384
|
f3976e2ec215dc1bd2bd45dd144b13e71688e6f1
| 6,227
|
py
|
Python
|
cajitos_site/users/routes.py
|
OlgaKuratkina/cajitos
|
0bc13f71281a1a67c8bcd1a3ae343ad0b14d9bad
|
[
"MIT"
] | null | null | null |
cajitos_site/users/routes.py
|
OlgaKuratkina/cajitos
|
0bc13f71281a1a67c8bcd1a3ae343ad0b14d9bad
|
[
"MIT"
] | 7
|
2020-05-08T19:51:22.000Z
|
2022-03-11T23:37:57.000Z
|
cajitos_site/users/routes.py
|
OlgaKuratkina/cajitos
|
0bc13f71281a1a67c8bcd1a3ae343ad0b14d9bad
|
[
"MIT"
] | null | null | null |
# Disabled temporarily or forever
# @users.route("/register", methods=['GET', 'POST'])
| 40.967105
| 120
| 0.696965
|
import markdown
from flask import redirect, url_for, flash, render_template, session, request, current_app, abort
from flask_login import current_user, login_user, logout_user, login_required
from cajitos_site import bcrypt
from cajitos_site.users import users
from cajitos_site.users.forms import RegistrationForm, LoginForm, UpdateAccountForm, RequestResetForm, ResetPasswordForm
from cajitos_site.models import User, load_user
from cajitos_site.utils.email import send_service_email
from cajitos_site.utils.utils import (
get_redirect_target, save_picture
)
from cajitos_site.utils.auth_utils import generate_google_auth_request, get_google_user_info
# Disabled temporarily or forever
# @users.route("/register", methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('blog.posts'))
form = RegistrationForm()
if form.validate_on_submit():
user = User.create(username=form.username.data, email=form.email.data)
flash(f'Account created for {form.username.data}!', 'success')
flash(f'Check your email to confirm your new account', 'success')
token = user.get_validation_token()
reset_link = f"{url_for('users.validate_token', token=token, _external=True)}"
send_service_email(user, reset_link)
return redirect(url_for('blog.posts'))
return render_template('user/register.html', title='Register', form=form)
@users.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.select().where(User.email == form.email.data).first()
if user and user.status != 'Confirmed':
flash('You need to confirm your account to proceed!', 'info')
elif user and bcrypt.check_password_hash(user.password, form.password.data):
flash('You have been logged in!', 'success')
login_user(user, remember=form.remember.data)
next_page = get_redirect_target()
return redirect(next_page) if next_page else redirect(url_for('blog.posts'))
else:
flash('Login Unsuccessful. Please check email and password', 'danger')
return render_template('user/login.html', title='Login', form=form)
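# Google OAuth sign-in: google_login() redirects the browser to Google, and
# callback() consumes the verified profile information that Google returns.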
@users.route('/google_login')
def google_login():
request_uri = generate_google_auth_request()
return redirect(request_uri)
@users.route('/google_login/callback')
def callback():
userinfo_response = get_google_user_info(request)
if userinfo_response.get('email_verified'):
google_id = userinfo_response['sub']
email = userinfo_response['email']
profile_picture = userinfo_response['picture']
username = userinfo_response['given_name']
else:
return 'User email not available or not verified by Google.', 400
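    # Link an existing account to the Google identity, or create a new one.
    # Either way the account is marked Confirmed, since Google has already
    # verified the email address.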
user = User.get_user_by_email(email)
if not user:
user = User.create(
google_id=google_id, username=username, email=email, password='', profile_picture=profile_picture,
status='Confirmed'
)
else:
user.google_id = google_id
user.username = username
if profile_picture:
user.profile_picture = profile_picture
user.status = 'Confirmed'
user.save()
login_user(user)
return redirect(url_for('blog.posts'))
@users.route('/logout')
def logout():
logout_user()
return redirect(url_for('blog.posts'))
@users.route('/account/<int:user_id>')
def account(user_id):
user = load_user(user_id)
return render_template('user/account.html', title='Account', user=user)
@users.route('/account/<int:user_id>/update', methods=['GET', 'POST'])
@login_required
def account_update(user_id):
form = UpdateAccountForm()
if request.method == 'GET':
form.username.data = current_user.username
form.email.data = current_user.email
form.about_me.data = current_user.about_me
if form.validate_on_submit() and current_user.id == user_id:
if form.picture.data:
picture_file = save_picture(form.picture.data)
current_user.profile_picture = picture_file
current_user.username = form.username.data
current_user.email = form.email.data
current_user.about_me = markdown.markdown(form.about_me.data)
current_user.save()
flash('Your account has been updated!', 'success')
return redirect(url_for('users.account', user_id=user_id))
elif current_user.id != user_id:
abort(403)
return render_template('create_entry.html', title='Account', form=form)
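# Password reset flow: reset_request() emails the user a tokenised link, and
# validate_token() checks that token before letting the user pick a new password.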
@users.route("/reset_password", methods=['GET', 'POST'])
def reset_request():
if current_user.is_authenticated:
return redirect(url_for('blog.posts'))
form = RequestResetForm()
if form.validate_on_submit():
        user = User.select().where(User.email == form.email.data).first()
        if user:
            token = user.get_validation_token()
            reset_link = f"{url_for('users.validate_token', token=token, _external=True)}"
            send_service_email(user, reset_link, confirm_account=False)
        # Show the same message either way, so the form does not reveal which
        # email addresses have accounts.
        flash('An email has been sent with instructions to complete the operation.', 'info')
return redirect(url_for('users.login'))
return render_template('user/reset_request.html', title='Reset Password', form=form)
@users.route("/reset_password/<token>", methods=['GET', 'POST'])
def validate_token(token):
if current_user.is_authenticated:
return redirect(url_for('blog.posts'))
user = User.verify_token(token)
if user is None:
flash('That is an invalid or expired token', 'warning')
return redirect(url_for('users.reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
        # Mark the account Confirmed here, instead of the default user.is_active implementation
user.status = 'Confirmed'
user.save()
flash('Your password has been updated! You are now able to log in', 'success')
return redirect(url_for('users.login'))
return render_template('user/validate_token.html', title='Reset Password', form=form)
| 0
| 4,597
| 0
| 0
| 0
| 668
| 0
| 438
| 427
|